applied-ai-018 committed
Commit 9e65f67 · verified · 1 parent: b85cd85

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the complete set.

Files changed (50)
  1. ckpts/universal/global_step40/zero/18.input_layernorm.weight/fp32.pt +3 -0
  2. ckpts/universal/global_step40/zero/20.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt +3 -0
  3. ckpts/universal/global_step40/zero/20.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt +3 -0
  4. ckpts/universal/global_step40/zero/20.mlp.dense_h_to_4h_swiglu.weight/fp32.pt +3 -0
  5. ckpts/universal/global_step40/zero/22.attention.dense.weight/exp_avg.pt +3 -0
  6. ckpts/universal/global_step40/zero/22.attention.dense.weight/exp_avg_sq.pt +3 -0
  7. ckpts/universal/global_step40/zero/22.attention.dense.weight/fp32.pt +3 -0
  8. ckpts/universal/global_step40/zero/3.input_layernorm.weight/exp_avg.pt +3 -0
  9. ckpts/universal/global_step40/zero/3.input_layernorm.weight/exp_avg_sq.pt +3 -0
  10. ckpts/universal/global_step40/zero/3.input_layernorm.weight/fp32.pt +3 -0
  11. venv/lib/python3.10/site-packages/jinja2/__pycache__/nativetypes.cpython-310.pyc +0 -0
  12. venv/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/INSTALLER +1 -0
  13. venv/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/License.txt +1568 -0
  14. venv/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/METADATA +38 -0
  15. venv/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/RECORD +22 -0
  16. venv/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/WHEEL +5 -0
  17. venv/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/top_level.txt +1 -0
  18. venv/lib/python3.10/site-packages/sklearn/__init__.py +157 -0
  19. venv/lib/python3.10/site-packages/sklearn/_config.py +373 -0
  20. venv/lib/python3.10/site-packages/sklearn/_distributor_init.py +10 -0
  21. venv/lib/python3.10/site-packages/sklearn/_isotonic.cpython-310-x86_64-linux-gnu.so +0 -0
  22. venv/lib/python3.10/site-packages/sklearn/_min_dependencies.py +65 -0
  23. venv/lib/python3.10/site-packages/sklearn/base.py +1478 -0
  24. venv/lib/python3.10/site-packages/sklearn/calibration.py +1410 -0
  25. venv/lib/python3.10/site-packages/sklearn/conftest.py +309 -0
  26. venv/lib/python3.10/site-packages/sklearn/discriminant_analysis.py +1047 -0
  27. venv/lib/python3.10/site-packages/sklearn/dummy.py +682 -0
  28. venv/lib/python3.10/site-packages/sklearn/exceptions.py +191 -0
  29. venv/lib/python3.10/site-packages/sklearn/impute/__init__.py +24 -0
  30. venv/lib/python3.10/site-packages/sklearn/impute/__pycache__/__init__.cpython-310.pyc +0 -0
  31. venv/lib/python3.10/site-packages/sklearn/impute/__pycache__/_base.cpython-310.pyc +0 -0
  32. venv/lib/python3.10/site-packages/sklearn/impute/__pycache__/_iterative.cpython-310.pyc +0 -0
  33. venv/lib/python3.10/site-packages/sklearn/impute/__pycache__/_knn.cpython-310.pyc +0 -0
  34. venv/lib/python3.10/site-packages/sklearn/impute/_base.py +1075 -0
  35. venv/lib/python3.10/site-packages/sklearn/impute/_iterative.py +906 -0
  36. venv/lib/python3.10/site-packages/sklearn/impute/_knn.py +401 -0
  37. venv/lib/python3.10/site-packages/sklearn/impute/tests/__init__.py +0 -0
  38. venv/lib/python3.10/site-packages/sklearn/impute/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  39. venv/lib/python3.10/site-packages/sklearn/impute/tests/__pycache__/test_base.cpython-310.pyc +0 -0
  40. venv/lib/python3.10/site-packages/sklearn/impute/tests/__pycache__/test_common.cpython-310.pyc +0 -0
  41. venv/lib/python3.10/site-packages/sklearn/impute/tests/__pycache__/test_impute.cpython-310.pyc +0 -0
  42. venv/lib/python3.10/site-packages/sklearn/impute/tests/__pycache__/test_knn.cpython-310.pyc +0 -0
  43. venv/lib/python3.10/site-packages/sklearn/impute/tests/test_base.py +107 -0
  44. venv/lib/python3.10/site-packages/sklearn/impute/tests/test_common.py +220 -0
  45. venv/lib/python3.10/site-packages/sklearn/impute/tests/test_impute.py +1754 -0
  46. venv/lib/python3.10/site-packages/sklearn/impute/tests/test_knn.py +547 -0
  47. venv/lib/python3.10/site-packages/sklearn/isotonic.py +498 -0
  48. venv/lib/python3.10/site-packages/sklearn/kernel_approximation.py +1137 -0
  49. venv/lib/python3.10/site-packages/sklearn/kernel_ridge.py +237 -0
  50. venv/lib/python3.10/site-packages/sklearn/linear_model/__init__.py +100 -0
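The ckpts/ entries look like per-parameter shards of a ZeRO "universal" optimizer checkpoint: each parameter directory carries an fp32 master weight plus the Adam first and second moments (exp_avg, exp_avg_sq) as separate .pt files. A minimal sketch of inspecting one shard, assuming PyTorch is installed, the LFS blobs have been pulled, and that each .pt file deserializes to a plain tensor (the exact payload layout is checkpoint-specific, so this is illustrative only):

    import torch

    # One of the parameter directories added in this commit.
    base = "ckpts/universal/global_step40/zero/3.input_layernorm.weight"

    # Load the master weight and the two Adam moment estimates on CPU.
    weight = torch.load(f"{base}/fp32.pt", map_location="cpu")
    exp_avg = torch.load(f"{base}/exp_avg.pt", map_location="cpu")        # first moment (m)
    exp_avg_sq = torch.load(f"{base}/exp_avg_sq.pt", map_location="cpu")  # second moment (v)

    # If the files hold bare tensors, the three shapes should agree.
    print(type(weight), type(exp_avg), type(exp_avg_sq))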
ckpts/universal/global_step40/zero/18.input_layernorm.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d1177400b840e2c191169583a76002761f96bb99b5f67425ebd52751265e0ecb
+ size 9293
ckpts/universal/global_step40/zero/20.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e6d96fb68895c0c01c25aac7ead04602097d2ff1ba9dc549846b0f78fd88af51
+ size 33555612
ckpts/universal/global_step40/zero/20.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dc850f493ae005bbed8f11e1d964bb005efc7b87d26974f549c3f57b15378a36
+ size 33555627
ckpts/universal/global_step40/zero/20.mlp.dense_h_to_4h_swiglu.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8f676a0ca800fa917113e3ac7a7d7b50bd5040655ea94cc4e2aa7d46c0563546
+ size 33555533
ckpts/universal/global_step40/zero/22.attention.dense.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:25067ad5627fbdd70ebfa32bacdee9f01e041d8465102b238a27530d597394bf
+ size 16778396
ckpts/universal/global_step40/zero/22.attention.dense.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fa94a7a348b85459f0c171b4b25a864011136bcf7ced192e4ea5633685bb68fa
+ size 16778411
ckpts/universal/global_step40/zero/22.attention.dense.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e7df42ccee30f27759c5675321c515c010dd0154eba474293c8b8a7b73e06db
+ size 16778317
ckpts/universal/global_step40/zero/3.input_layernorm.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fc6f9da600277be3830c3a81a820de263e14381d13d3a4b8f7d0f52e2b72e156
+ size 9372
ckpts/universal/global_step40/zero/3.input_layernorm.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6ee3c5b3883fd1ad2a34badf88af87e7202b880f4142975263caa831546bd39b
+ size 9387
ckpts/universal/global_step40/zero/3.input_layernorm.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8f69ad1f4a732e803db53b885ce479ec9dbc0a2910ad826fb159401fb2f0255b
+ size 9293
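Each diff above adds a Git LFS pointer file rather than the tensor data itself: a three-line stub recording the spec version, the sha256 object id, and the blob size in bytes. A minimal sketch, standard library only, of parsing such a pointer and checking a separately downloaded blob against it (the path is one of the files added above):

    import hashlib
    from pathlib import Path

    def parse_lfs_pointer(path):
        """Split each 'key value' line of an LFS pointer file into a dict."""
        fields = {}
        for line in Path(path).read_text().splitlines():
            key, _, value = line.partition(" ")
            fields[key] = value
        return fields

    def blob_matches(pointer, blob_path):
        """Verify a downloaded blob's size and sha256 digest against the pointer."""
        data = Path(blob_path).read_bytes()
        algo, _, expected = pointer["oid"].partition(":")  # e.g. 'sha256:d117...'
        return (algo == "sha256"
                and len(data) == int(pointer["size"])
                and hashlib.sha256(data).hexdigest() == expected)

    ptr = parse_lfs_pointer(
        "ckpts/universal/global_step40/zero/18.input_layernorm.weight/fp32.pt")
    print(ptr["oid"], ptr["size"])  # sha256:d117... 9293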
venv/lib/python3.10/site-packages/jinja2/__pycache__/nativetypes.cpython-310.pyc ADDED
Binary file (5.01 kB).
venv/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
+ pip
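The dist-info files added for nvidia_cusolver_cu12 (INSTALLER, METADATA, RECORD, WHEEL, top_level.txt) are the standard wheel metadata that Python's packaging tooling consults. A minimal sketch, assuming this venv is the active interpreter, of reading that metadata back through the standard library instead of by path:

    from importlib import metadata

    dist = metadata.distribution("nvidia-cusolver-cu12")
    print(dist.version)                 # parsed from METADATA
    print(dist.read_text("INSTALLER"))  # "pip", the one-line file in this diff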
venv/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/License.txt ADDED
@@ -0,0 +1,1568 @@
+ End User License Agreement
+ --------------------------
+
+
+ Preface
+ -------
+
+ The Software License Agreement in Chapter 1 and the Supplement
+ in Chapter 2 contain license terms and conditions that govern
+ the use of NVIDIA software. By accepting this agreement, you
+ agree to comply with all the terms and conditions applicable
+ to the product(s) included herein.
+
+
+ NVIDIA Driver
+
+
+ Description
+
+ This package contains the operating system driver and
+ fundamental system software components for NVIDIA GPUs.
+
+
+ NVIDIA CUDA Toolkit
+
+
+ Description
+
+ The NVIDIA CUDA Toolkit provides command-line and graphical
+ tools for building, debugging and optimizing the performance
+ of applications accelerated by NVIDIA GPUs, runtime and math
+ libraries, and documentation including programming guides,
+ user manuals, and API references.
+
+
+ Default Install Location of CUDA Toolkit
+
+ Windows platform:
+
+ %ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v#.#
+
+ Linux platform:
+
+ /usr/local/cuda-#.#
+
+ Mac platform:
+
+ /Developer/NVIDIA/CUDA-#.#
+
+
+ NVIDIA CUDA Samples
+
+
+ Description
+
+ This package includes over 100+ CUDA examples that demonstrate
+ various CUDA programming principles, and efficient CUDA
+ implementation of algorithms in specific application domains.
+
+
+ Default Install Location of CUDA Samples
+
+ Windows platform:
+
+ %ProgramData%\NVIDIA Corporation\CUDA Samples\v#.#
+
+ Linux platform:
+
+ /usr/local/cuda-#.#/samples
+
+ and
+
+ $HOME/NVIDIA_CUDA-#.#_Samples
+
+ Mac platform:
+
+ /Developer/NVIDIA/CUDA-#.#/samples
+
+
+ NVIDIA Nsight Visual Studio Edition (Windows only)
+
+
+ Description
+
+ NVIDIA Nsight Development Platform, Visual Studio Edition is a
+ development environment integrated into Microsoft Visual
+ Studio that provides tools for debugging, profiling, analyzing
+ and optimizing your GPU computing and graphics applications.
+
+
+ Default Install Location of Nsight Visual Studio Edition
+
+ Windows platform:
+
+ %ProgramFiles(x86)%\NVIDIA Corporation\Nsight Visual Studio Edition #.#
+
+
+ 1. License Agreement for NVIDIA Software Development Kits
+ ---------------------------------------------------------
+
+
+ Release Date: July 26, 2018
+ ---------------------------
+
+
+ Important Notice - Read before downloading, installing,
+ copying or using the licensed software:
+ -------------------------------------------------------
+
+ This license agreement, including exhibits attached
+ (“Agreement”) is a legal agreement between you and NVIDIA
+ Corporation ("NVIDIA") and governs your use of a NVIDIA
+ software development kit (“SDK”).
+
+ Each SDK has its own set of software and materials, but here
+ is a description of the types of items that may be included in
+ a SDK: source code, header files, APIs, data sets and assets
+ (examples include images, textures, models, scenes, videos,
+ native API input/output files), binary software, sample code,
+ libraries, utility programs, programming code and
+ documentation.
+
+ This Agreement can be accepted only by an adult of legal age
+ of majority in the country in which the SDK is used.
+
+ If you are entering into this Agreement on behalf of a company
+ or other legal entity, you represent that you have the legal
+ authority to bind the entity to this Agreement, in which case
+ “you” will mean the entity you represent.
+
+ If you don’t have the required age or authority to accept
+ this Agreement, or if you don’t accept all the terms and
+ conditions of this Agreement, do not download, install or use
+ the SDK.
+
+ You agree to use the SDK only for purposes that are permitted
+ by (a) this Agreement, and (b) any applicable law, regulation
+ or generally accepted practices or guidelines in the relevant
+ jurisdictions.
+
+
+ 1.1. License
+
+
+ 1.1.1. License Grant
+
+ Subject to the terms of this Agreement, NVIDIA hereby grants
+ you a non-exclusive, non-transferable license, without the
+ right to sublicense (except as expressly provided in this
+ Agreement) to:
+
+ 1. Install and use the SDK,
+
+ 2. Modify and create derivative works of sample source code
+ delivered in the SDK, and
+
+ 3. Distribute those portions of the SDK that are identified
+ in this Agreement as distributable, as incorporated in
+ object code format into a software application that meets
+ the distribution requirements indicated in this Agreement.
+
+
+ 1.1.2. Distribution Requirements
+
+ These are the distribution requirements for you to exercise
+ the distribution grant:
+
+ 1. Your application must have material additional
+ functionality, beyond the included portions of the SDK.
+
+ 2. The distributable portions of the SDK shall only be
+ accessed by your application.
+
+ 3. The following notice shall be included in modifications
+ and derivative works of sample source code distributed:
+ “This software contains source code provided by NVIDIA
+ Corporation.”
+
+ 4. Unless a developer tool is identified in this Agreement
+ as distributable, it is delivered for your internal use
+ only.
+
+ 5. The terms under which you distribute your application
+ must be consistent with the terms of this Agreement,
+ including (without limitation) terms relating to the
+ license grant and license restrictions and protection of
+ NVIDIA’s intellectual property rights. Additionally, you
+ agree that you will protect the privacy, security and
+ legal rights of your application users.
+
+ 6. You agree to notify NVIDIA in writing of any known or
+ suspected distribution or use of the SDK not in compliance
+ with the requirements of this Agreement, and to enforce
+ the terms of your agreements with respect to distributed
+ SDK.
+
+
+ 1.1.3. Authorized Users
+
+ You may allow employees and contractors of your entity or of
+ your subsidiary(ies) to access and use the SDK from your
+ secure network to perform work on your behalf.
+
+ If you are an academic institution you may allow users
+ enrolled or employed by the academic institution to access and
+ use the SDK from your secure network.
+
+ You are responsible for the compliance with the terms of this
+ Agreement by your authorized users. If you become aware that
+ your authorized users didn’t follow the terms of this
+ Agreement, you agree to take reasonable steps to resolve the
+ non-compliance and prevent new occurrences.
+
+
+ 1.1.4. Pre-Release SDK
+
+ The SDK versions identified as alpha, beta, preview or
+ otherwise as pre-release, may not be fully functional, may
+ contain errors or design flaws, and may have reduced or
+ different security, privacy, accessibility, availability, and
+ reliability standards relative to commercial versions of
+ NVIDIA software and materials. Use of a pre-release SDK may
+ result in unexpected results, loss of data, project delays or
+ other unpredictable damage or loss.
+
+ You may use a pre-release SDK at your own risk, understanding
+ that pre-release SDKs are not intended for use in production
+ or business-critical systems.
+
+ NVIDIA may choose not to make available a commercial version
+ of any pre-release SDK. NVIDIA may also choose to abandon
+ development and terminate the availability of a pre-release
+ SDK at any time without liability.
+
+
+ 1.1.5. Updates
+
+ NVIDIA may, at its option, make available patches, workarounds
+ or other updates to this SDK. Unless the updates are provided
+ with their separate governing terms, they are deemed part of
+ the SDK licensed to you as provided in this Agreement. You
+ agree that the form and content of the SDK that NVIDIA
+ provides may change without prior notice to you. While NVIDIA
+ generally maintains compatibility between versions, NVIDIA may
+ in some cases make changes that introduce incompatibilities in
+ future versions of the SDK.
+
+
+ 1.1.6. Third Party Licenses
+
+ The SDK may come bundled with, or otherwise include or be
+ distributed with, third party software licensed by a NVIDIA
+ supplier and/or open source software provided under an open
+ source license. Use of third party software is subject to the
+ third-party license terms, or in the absence of third party
+ terms, the terms of this Agreement. Copyright to third party
+ software is held by the copyright holders indicated in the
+ third-party software or license.
+
+
+ 1.1.7. Reservation of Rights
+
+ NVIDIA reserves all rights, title, and interest in and to the
+ SDK, not expressly granted to you under this Agreement.
+
+
+ 1.2. Limitations
+
+ The following license limitations apply to your use of the
+ SDK:
+
+ 1. You may not reverse engineer, decompile or disassemble,
+ or remove copyright or other proprietary notices from any
+ portion of the SDK or copies of the SDK.
+
+ 2. Except as expressly provided in this Agreement, you may
+ not copy, sell, rent, sublicense, transfer, distribute,
+ modify, or create derivative works of any portion of the
+ SDK. For clarity, you may not distribute or sublicense the
+ SDK as a stand-alone product.
+
+ 3. Unless you have an agreement with NVIDIA for this
+ purpose, you may not indicate that an application created
+ with the SDK is sponsored or endorsed by NVIDIA.
+
+ 4. You may not bypass, disable, or circumvent any
+ encryption, security, digital rights management or
+ authentication mechanism in the SDK.
+
+ 5. You may not use the SDK in any manner that would cause it
+ to become subject to an open source software license. As
+ examples, licenses that require as a condition of use,
+ modification, and/or distribution that the SDK be:
+
+ a. Disclosed or distributed in source code form;
+
+ b. Licensed for the purpose of making derivative works;
+ or
+
+ c. Redistributable at no charge.
+
+ 6. Unless you have an agreement with NVIDIA for this
+ purpose, you may not use the SDK with any system or
+ application where the use or failure of the system or
+ application can reasonably be expected to threaten or
+ result in personal injury, death, or catastrophic loss.
+ Examples include use in avionics, navigation, military,
+ medical, life support or other life critical applications.
+ NVIDIA does not design, test or manufacture the SDK for
+ these critical uses and NVIDIA shall not be liable to you
+ or any third party, in whole or in part, for any claims or
+ damages arising from such uses.
+
+ 7. You agree to defend, indemnify and hold harmless NVIDIA
+ and its affiliates, and their respective employees,
+ contractors, agents, officers and directors, from and
+ against any and all claims, damages, obligations, losses,
+ liabilities, costs or debt, fines, restitutions and
+ expenses (including but not limited to attorney’s fees
+ and costs incident to establishing the right of
+ indemnification) arising out of or related to your use of
+ the SDK outside of the scope of this Agreement, or not in
+ compliance with its terms.
+
+
+ 1.3. Ownership
+
+ 1. NVIDIA or its licensors hold all rights, title and
+ interest in and to the SDK and its modifications and
+ derivative works, including their respective intellectual
+ property rights, subject to your rights described in this
+ section. This SDK may include software and materials from
+ NVIDIA’s licensors, and these licensors are intended
+ third party beneficiaries that may enforce this Agreement
+ with respect to their intellectual property rights.
+
+ 2. You hold all rights, title and interest in and to your
+ applications and your derivative works of the sample
+ source code delivered in the SDK, including their
+ respective intellectual property rights, subject to
+ NVIDIA’s rights described in this section.
+
+ 3. You may, but don’t have to, provide to NVIDIA
+ suggestions, feature requests or other feedback regarding
+ the SDK, including possible enhancements or modifications
+ to the SDK. For any feedback that you voluntarily provide,
+ you hereby grant NVIDIA and its affiliates a perpetual,
+ non-exclusive, worldwide, irrevocable license to use,
+ reproduce, modify, license, sublicense (through multiple
+ tiers of sublicensees), and distribute (through multiple
+ tiers of distributors) it without the payment of any
+ royalties or fees to you. NVIDIA will use feedback at its
+ choice. NVIDIA is constantly looking for ways to improve
+ its products, so you may send feedback to NVIDIA through
+ the developer portal at https://developer.nvidia.com.
+
+
+ 1.4. No Warranties
+
+ THE SDK IS PROVIDED BY NVIDIA “AS IS” AND “WITH ALL
+ FAULTS.” TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND
+ ITS AFFILIATES EXPRESSLY DISCLAIM ALL WARRANTIES OF ANY KIND
+ OR NATURE, WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING,
+ BUT NOT LIMITED TO, ANY WARRANTIES OF MERCHANTABILITY, FITNESS
+ FOR A PARTICULAR PURPOSE, TITLE, NON-INFRINGEMENT, OR THE
+ ABSENCE OF ANY DEFECTS THEREIN, WHETHER LATENT OR PATENT. NO
+ WARRANTY IS MADE ON THE BASIS OF TRADE USAGE, COURSE OF
+ DEALING OR COURSE OF TRADE.
+
+
+ 1.5. Limitation of Liability
+
+ TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND ITS
+ AFFILIATES SHALL NOT BE LIABLE FOR ANY SPECIAL, INCIDENTAL,
+ PUNITIVE OR CONSEQUENTIAL DAMAGES, OR ANY LOST PROFITS, LOSS
+ OF USE, LOSS OF DATA OR LOSS OF GOODWILL, OR THE COSTS OF
+ PROCURING SUBSTITUTE PRODUCTS, ARISING OUT OF OR IN CONNECTION
+ WITH THIS AGREEMENT OR THE USE OR PERFORMANCE OF THE SDK,
+ WHETHER SUCH LIABILITY ARISES FROM ANY CLAIM BASED UPON BREACH
+ OF CONTRACT, BREACH OF WARRANTY, TORT (INCLUDING NEGLIGENCE),
+ PRODUCT LIABILITY OR ANY OTHER CAUSE OF ACTION OR THEORY OF
+ LIABILITY. IN NO EVENT WILL NVIDIA’S AND ITS AFFILIATES
+ TOTAL CUMULATIVE LIABILITY UNDER OR ARISING OUT OF THIS
+ AGREEMENT EXCEED US$10.00. THE NATURE OF THE LIABILITY OR THE
+ NUMBER OF CLAIMS OR SUITS SHALL NOT ENLARGE OR EXTEND THIS
+ LIMIT.
+
+ These exclusions and limitations of liability shall apply
+ regardless if NVIDIA or its affiliates have been advised of
+ the possibility of such damages, and regardless of whether a
+ remedy fails its essential purpose. These exclusions and
+ limitations of liability form an essential basis of the
+ bargain between the parties, and, absent any of these
+ exclusions or limitations of liability, the provisions of this
+ Agreement, including, without limitation, the economic terms,
+ would be substantially different.
+
+
+ 1.6. Termination
+
+ 1. This Agreement will continue to apply until terminated by
+ either you or NVIDIA as described below.
+
+ 2. If you want to terminate this Agreement, you may do so by
+ stopping to use the SDK.
+
+ 3. NVIDIA may, at any time, terminate this Agreement if:
+
+ a. (i) you fail to comply with any term of this
+ Agreement and the non-compliance is not fixed within
+ thirty (30) days following notice from NVIDIA (or
+ immediately if you violate NVIDIA’s intellectual
+ property rights);
+
+ b. (ii) you commence or participate in any legal
+ proceeding against NVIDIA with respect to the SDK; or
+
+ c. (iii) NVIDIA decides to no longer provide the SDK in
+ a country or, in NVIDIA’s sole discretion, the
+ continued use of it is no longer commercially viable.
+
+ 4. Upon any termination of this Agreement, you agree to
+ promptly discontinue use of the SDK and destroy all copies
+ in your possession or control. Your prior distributions in
+ accordance with this Agreement are not affected by the
+ termination of this Agreement. Upon written request, you
+ will certify in writing that you have complied with your
+ commitments under this section. Upon any termination of
+ this Agreement all provisions survive except for the
+ license grant provisions.
+
+
+ 1.7. General
+
+ If you wish to assign this Agreement or your rights and
+ obligations, including by merger, consolidation, dissolution
+ or operation of law, contact NVIDIA to ask for permission. Any
+ attempted assignment not approved by NVIDIA in writing shall
+ be void and of no effect. NVIDIA may assign, delegate or
+ transfer this Agreement and its rights and obligations, and if
+ to a non-affiliate you will be notified.
+
+ You agree to cooperate with NVIDIA and provide reasonably
+ requested information to verify your compliance with this
+ Agreement.
+
+ This Agreement will be governed in all respects by the laws of
+ the United States and of the State of Delaware as those laws
+ are applied to contracts entered into and performed entirely
+ within Delaware by Delaware residents, without regard to the
+ conflicts of laws principles. The United Nations Convention on
+ Contracts for the International Sale of Goods is specifically
+ disclaimed. You agree to all terms of this Agreement in the
+ English language.
+
+ The state or federal courts residing in Santa Clara County,
+ California shall have exclusive jurisdiction over any dispute
+ or claim arising out of this Agreement. Notwithstanding this,
+ you agree that NVIDIA shall still be allowed to apply for
+ injunctive remedies or an equivalent type of urgent legal
+ relief in any jurisdiction.
+
+ If any court of competent jurisdiction determines that any
+ provision of this Agreement is illegal, invalid or
+ unenforceable, such provision will be construed as limited to
+ the extent necessary to be consistent with and fully
+ enforceable under the law and the remaining provisions will
+ remain in full force and effect. Unless otherwise specified,
+ remedies are cumulative.
+
+ Each party acknowledges and agrees that the other is an
+ independent contractor in the performance of this Agreement.
+
+ The SDK has been developed entirely at private expense and is
+ “commercial items” consisting of “commercial computer
+ software” and “commercial computer software
+ documentation” provided with RESTRICTED RIGHTS. Use,
+ duplication or disclosure by the U.S. Government or a U.S.
+ Government subcontractor is subject to the restrictions in
+ this Agreement pursuant to DFARS 227.7202-3(a) or as set forth
+ in subparagraphs (c)(1) and (2) of the Commercial Computer
+ Software - Restricted Rights clause at FAR 52.227-19, as
+ applicable. Contractor/manufacturer is NVIDIA, 2788 San Tomas
+ Expressway, Santa Clara, CA 95051.
+
+ The SDK is subject to United States export laws and
+ regulations. You agree that you will not ship, transfer or
+ export the SDK into any country, or use the SDK in any manner,
+ prohibited by the United States Bureau of Industry and
+ Security or economic sanctions regulations administered by the
+ U.S. Department of Treasury’s Office of Foreign Assets
+ Control (OFAC), or any applicable export laws, restrictions or
+ regulations. These laws include restrictions on destinations,
+ end users and end use. By accepting this Agreement, you
+ confirm that you are not a resident or citizen of any country
+ currently embargoed by the U.S. and that you are not otherwise
+ prohibited from receiving the SDK.
+
+ Any notice delivered by NVIDIA to you under this Agreement
+ will be delivered via mail, email or fax. You agree that any
+ notices that NVIDIA sends you electronically will satisfy any
+ legal communication requirements. Please direct your legal
+ notices or other correspondence to NVIDIA Corporation, 2788
+ San Tomas Expressway, Santa Clara, California 95051, United
+ States of America, Attention: Legal Department.
+
+ This Agreement and any exhibits incorporated into this
+ Agreement constitute the entire agreement of the parties with
+ respect to the subject matter of this Agreement and supersede
+ all prior negotiations or documentation exchanged between the
+ parties relating to this SDK license. Any additional and/or
+ conflicting terms on documents issued by you are null, void,
+ and invalid. Any amendment or waiver under this Agreement
+ shall be in writing and signed by representatives of both
+ parties.
+
+
+ 2. CUDA Toolkit Supplement to Software License Agreement for
+ NVIDIA Software Development Kits
+ ------------------------------------------------------------
+
+
+ Release date: August 16, 2018
+ -----------------------------
+
+ The terms in this supplement govern your use of the NVIDIA
+ CUDA Toolkit SDK under the terms of your license agreement
+ (“Agreement”) as modified by this supplement. Capitalized
+ terms used but not defined below have the meaning assigned to
+ them in the Agreement.
+
+ This supplement is an exhibit to the Agreement and is
+ incorporated as an integral part of the Agreement. In the
+ event of conflict between the terms in this supplement and the
+ terms in the Agreement, the terms in this supplement govern.
+
+
+ 2.1. License Scope
+
+ The SDK is licensed for you to develop applications only for
+ use in systems with NVIDIA GPUs.
+
+
+ 2.2. Distribution
+
+ The portions of the SDK that are distributable under the
+ Agreement are listed in Attachment A.
+
+
+ 2.3. Operating Systems
+
+ Those portions of the SDK designed exclusively for use on the
+ Linux or FreeBSD operating systems, or other operating systems
+ derived from the source code to these operating systems, may
+ be copied and redistributed for use in accordance with this
+ Agreement, provided that the object code files are not
+ modified in any way (except for unzipping of compressed
+ files).
+
+
+ 2.4. Audio and Video Encoders and Decoders
+
+ You acknowledge and agree that it is your sole responsibility
+ to obtain any additional third-party licenses required to
+ make, have made, use, have used, sell, import, and offer for
+ sale your products or services that include or incorporate any
+ third-party software and content relating to audio and/or
+ video encoders and decoders from, including but not limited
+ to, Microsoft, Thomson, Fraunhofer IIS, Sisvel S.p.A.,
+ MPEG-LA, and Coding Technologies. NVIDIA does not grant to you
+ under this Agreement any necessary patent or other rights with
+ respect to any audio and/or video encoders and decoders.
+
+
+ 2.5. Licensing
+
+ If the distribution terms in this Agreement are not suitable
+ for your organization, or for any questions regarding this
+ Agreement, please contact NVIDIA at
+
+
+ 2.6. Attachment A
+
+ The following portions of the SDK are distributable under the
+ Agreement:
+
+ Component
+
+ CUDA Runtime
+
+ Windows
+
+ cudart.dll, cudart_static.lib, cudadevrt.lib
+
+ Mac OSX
+
+ libcudart.dylib, libcudart_static.a, libcudadevrt.a
+
+ Linux
+
+ libcudart.so, libcudart_static.a, libcudadevrt.a
+
+ Android
+
+ libcudart.so, libcudart_static.a, libcudadevrt.a
+
+ Component
+
+ CUDA FFT Library
+
+ Windows
+
+ cufft.dll, cufftw.dll, cufft.lib, cufftw.lib
+
+ Mac OSX
+
+ libcufft.dylib, libcufft_static.a, libcufftw.dylib,
+ libcufftw_static.a
+
+ Linux
+
+ libcufft.so, libcufft_static.a, libcufftw.so,
+ libcufftw_static.a
+
+ Android
+
+ libcufft.so, libcufft_static.a, libcufftw.so,
+ libcufftw_static.a
+
+ Component
+
+ CUDA BLAS Library
+
+ Windows
+
+ cublas.dll, cublasLt.dll
+
+ Mac OSX
+
+ libcublas.dylib, libcublasLt.dylib, libcublas_static.a,
+ libcublasLt_static.a
+
+ Linux
+
+ libcublas.so, libcublasLt.so, libcublas_static.a,
+ libcublasLt_static.a
+
+ Android
+
+ libcublas.so, libcublasLt.so, libcublas_static.a,
+ libcublasLt_static.a
+
+ Component
+
+ NVIDIA "Drop-in" BLAS Library
+
+ Windows
+
+ nvblas.dll
+
+ Mac OSX
+
+ libnvblas.dylib
+
+ Linux
+
+ libnvblas.so
+
+ Component
+
+ CUDA Sparse Matrix Library
+
+ Windows
+
+ cusparse.dll, cusparse.lib
+
+ Mac OSX
+
+ libcusparse.dylib, libcusparse_static.a
+
+ Linux
+
+ libcusparse.so, libcusparse_static.a
+
+ Android
+
+ libcusparse.so, libcusparse_static.a
+
+ Component
+
+ CUDA Linear Solver Library
+
+ Windows
+
+ cusolver.dll, cusolver.lib
+
+ Mac OSX
+
+ libcusolver.dylib, libcusolver_static.a
+
+ Linux
+
+ libcusolver.so, libcusolver_static.a
+
+ Android
+
+ libcusolver.so, libcusolver_static.a
+
+ Component
+
+ CUDA Random Number Generation Library
+
+ Windows
+
+ curand.dll, curand.lib
+
+ Mac OSX
+
+ libcurand.dylib, libcurand_static.a
+
+ Linux
+
+ libcurand.so, libcurand_static.a
+
+ Android
+
+ libcurand.so, libcurand_static.a
+
+ Component
+
+ CUDA Accelerated Graph Library
+
+ Component
+
+ NVIDIA Performance Primitives Library
+
+ Windows
+
+ nppc.dll, nppc.lib, nppial.dll, nppial.lib, nppicc.dll,
+ nppicc.lib, nppicom.dll, nppicom.lib, nppidei.dll,
+ nppidei.lib, nppif.dll, nppif.lib, nppig.dll, nppig.lib,
+ nppim.dll, nppim.lib, nppist.dll, nppist.lib, nppisu.dll,
+ nppisu.lib, nppitc.dll, nppitc.lib, npps.dll, npps.lib
+
+ Mac OSX
+
+ libnppc.dylib, libnppc_static.a, libnppial.dylib,
+ libnppial_static.a, libnppicc.dylib, libnppicc_static.a,
+ libnppicom.dylib, libnppicom_static.a, libnppidei.dylib,
+ libnppidei_static.a, libnppif.dylib, libnppif_static.a,
+ libnppig.dylib, libnppig_static.a, libnppim.dylib,
+ libnppisu_static.a, libnppitc.dylib, libnppitc_static.a,
+ libnpps.dylib, libnpps_static.a
+
+ Linux
+
+ libnppc.so, libnppc_static.a, libnppial.so,
+ libnppial_static.a, libnppicc.so, libnppicc_static.a,
+ libnppicom.so, libnppicom_static.a, libnppidei.so,
+ libnppidei_static.a, libnppif.so, libnppif_static.a,
+ libnppig.so, libnppig_static.a, libnppim.so,
+ libnppim_static.a, libnppist.so, libnppist_static.a,
+ libnppisu.so, libnppisu_static.a, libnppitc.so,
+ libnppitc_static.a, libnpps.so, libnpps_static.a
+
+ Android
+
+ libnppc.so, libnppc_static.a, libnppial.so,
+ libnppial_static.a, libnppicc.so, libnppicc_static.a,
+ libnppicom.so, libnppicom_static.a, libnppidei.so,
+ libnppidei_static.a, libnppif.so, libnppif_static.a,
+ libnppig.so, libnppig_static.a, libnppim.so,
+ libnppim_static.a, libnppist.so, libnppist_static.a,
+ libnppisu.so, libnppisu_static.a, libnppitc.so,
+ libnppitc_static.a, libnpps.so, libnpps_static.a
+
+ Component
+
+ NVIDIA JPEG Library
+
+ Linux
+
+ libnvjpeg.so, libnvjpeg_static.a
+
+ Component
+
+ Internal common library required for statically linking to
+ cuBLAS, cuSPARSE, cuFFT, cuRAND, nvJPEG and NPP
+
+ Mac OSX
+
+ libculibos.a
+
+ Linux
+
+ libculibos.a
+
+ Component
+
+ NVIDIA Runtime Compilation Library and Header
+
+ All
+
+ nvrtc.h
+
+ Windows
+
+ nvrtc.dll, nvrtc-builtins.dll
+
+ Mac OSX
+
+ libnvrtc.dylib, libnvrtc-builtins.dylib
+
+ Linux
+
+ libnvrtc.so, libnvrtc-builtins.so
+
+ Component
+
+ NVIDIA Optimizing Compiler Library
+
+ Windows
+
+ nvvm.dll
+
+ Mac OSX
+
+ libnvvm.dylib
+
+ Linux
+
+ libnvvm.so
+
+ Component
+
+ NVIDIA Common Device Math Functions Library
+
+ Windows
+
+ libdevice.10.bc
+
+ Mac OSX
+
+ libdevice.10.bc
+
+ Linux
+
+ libdevice.10.bc
+
+ Component
+
+ CUDA Occupancy Calculation Header Library
+
+ All
+
+ cuda_occupancy.h
+
+ Component
+
+ CUDA Half Precision Headers
+
+ All
+
+ cuda_fp16.h, cuda_fp16.hpp
+
+ Component
+
+ CUDA Profiling Tools Interface (CUPTI) Library
+
+ Windows
+
+ cupti.dll
+
+ Mac OSX
+
+ libcupti.dylib
+
+ Linux
+
+ libcupti.so
+
+ Component
+
+ NVIDIA Tools Extension Library
+
+ Windows
+
+ nvToolsExt.dll, nvToolsExt.lib
+
+ Mac OSX
+
+ libnvToolsExt.dylib
+
+ Linux
+
+ libnvToolsExt.so
+
+ Component
+
+ NVIDIA CUDA Driver Libraries
+
+ Linux
+
+ libcuda.so, libnvidia-fatbinaryloader.so,
+ libnvidia-ptxjitcompiler.so
+
+ The NVIDIA CUDA Driver Libraries are only distributable in
+ applications that meet this criteria:
+
+ 1. The application was developed starting from a NVIDIA CUDA
+ container obtained from Docker Hub or the NVIDIA GPU
+ Cloud, and
+
+ 2. The resulting application is packaged as a Docker
+ container and distributed to users on Docker Hub or the
+ NVIDIA GPU Cloud only.
+
+
+ 2.7. Attachment B
+
+
+ Additional Licensing Obligations
+
+ The following third party components included in the SOFTWARE
+ are licensed to Licensee pursuant to the following terms and
+ conditions:
+
+ 1. Licensee's use of the GDB third party component is
+ subject to the terms and conditions of GNU GPL v3:
+
+ This product includes copyrighted third-party software licensed
+ under the terms of the GNU General Public License v3 ("GPL v3").
+ All third-party software packages are copyright by their respective
+ authors. GPL v3 terms and conditions are hereby incorporated into
+ the Agreement by this reference: http://www.gnu.org/licenses/gpl.txt
+
+ Consistent with these licensing requirements, the software
+ listed below is provided under the terms of the specified
+ open source software licenses. To obtain source code for
+ software provided under licenses that require
+ redistribution of source code, including the GNU General
+ Public License (GPL) and GNU Lesser General Public License
+ (LGPL), contact [email protected]. This offer is
+ valid for a period of three (3) years from the date of the
+ distribution of this product by NVIDIA CORPORATION.
+
+ Component License
+ CUDA-GDB GPL v3
+
+ 2. Licensee represents and warrants that any and all third
+ party licensing and/or royalty payment obligations in
+ connection with Licensee's use of the H.264 video codecs
+ are solely the responsibility of Licensee.
+
+ 3. Licensee's use of the Thrust library is subject to the
+ terms and conditions of the Apache License Version 2.0.
+ All third-party software packages are copyright by their
+ respective authors. Apache License Version 2.0 terms and
+ conditions are hereby incorporated into the Agreement by
+ this reference.
+ http://www.apache.org/licenses/LICENSE-2.0.html
+
+ In addition, Licensee acknowledges the following notice:
+ Thrust includes source code from the Boost Iterator,
+ Tuple, System, and Random Number libraries.
+
+ Boost Software License - Version 1.0 - August 17th, 2003
+ . . . .
+
+ Permission is hereby granted, free of charge, to any person or
+ organization obtaining a copy of the software and accompanying
+ documentation covered by this license (the "Software") to use,
+ reproduce, display, distribute, execute, and transmit the Software,
+ and to prepare derivative works of the Software, and to permit
+ third-parties to whom the Software is furnished to do so, all
+ subject to the following:
+
+ The copyright notices in the Software and this entire statement,
+ including the above license grant, this restriction and the following
+ disclaimer, must be included in all copies of the Software, in whole
+ or in part, and all derivative works of the Software, unless such
+ copies or derivative works are solely in the form of machine-executable
+ object code generated by a source language processor.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND
+ NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR
+ ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR
+ OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ OTHER DEALINGS IN THE SOFTWARE.
+
+ 4. Licensee's use of the LLVM third party component is
+ subject to the following terms and conditions:
+
+ ======================================================
+ LLVM Release License
+ ======================================================
+ University of Illinois/NCSA
+ Open Source License
+
+ Copyright (c) 2003-2010 University of Illinois at Urbana-Champaign.
+ All rights reserved.
+
+ Developed by:
+
+ LLVM Team
+
+ University of Illinois at Urbana-Champaign
+
+ http://llvm.org
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to
+ deal with the Software without restriction, including without limitation the
+ rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ sell copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimers.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimers in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the names of the LLVM Team, University of Illinois at Urbana-
+ Champaign, nor the names of its contributors may be used to endorse or
+ promote products derived from this Software without specific prior
+ written permission.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS WITH THE SOFTWARE.
+
+ 5. Licensee's use (e.g. nvprof) of the PCRE third party
+ component is subject to the following terms and
+ conditions:
+
+ ------------
+ PCRE LICENCE
+ ------------
+ PCRE is a library of functions to support regular expressions whose syntax
+ and semantics are as close as possible to those of the Perl 5 language.
+ Release 8 of PCRE is distributed under the terms of the "BSD" licence, as
+ specified below. The documentation for PCRE, supplied in the "doc"
+ directory, is distributed under the same terms as the software itself. The
+ basic library functions are written in C and are freestanding. Also
+ included in the distribution is a set of C++ wrapper functions, and a just-
+ in-time compiler that can be used to optimize pattern matching. These are
+ both optional features that can be omitted when the library is built.
+
+ THE BASIC LIBRARY FUNCTIONS
+ ---------------------------
+ Written by: Philip Hazel
+ Email local part: ph10
+ Email domain: cam.ac.uk
+ University of Cambridge Computing Service,
+ Cambridge, England.
+ Copyright (c) 1997-2012 University of Cambridge
+ All rights reserved.
+
+ PCRE JUST-IN-TIME COMPILATION SUPPORT
+ -------------------------------------
+ Written by: Zoltan Herczeg
+ Email local part: hzmester
+ Email domain: freemail.hu
+ Copyright (c) 2010-2012 Zoltan Herczeg
+ All rights reserved.
+
+ STACK-LESS JUST-IN-TIME COMPILER
+ --------------------------------
+ Written by: Zoltan Herczeg
+ Email local part: hzmester
+ Email domain: freemail.hu
+ Copyright (c) 2009-2012 Zoltan Herczeg
+ All rights reserved.
+
+ THE C++ WRAPPER FUNCTIONS
+ -------------------------
+ Contributed by: Google Inc.
+ Copyright (c) 2007-2012, Google Inc.
+ All rights reserved.
+
+ THE "BSD" LICENCE
+ -----------------
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of the University of Cambridge nor the name of Google
+ Inc. nor the names of their contributors may be used to endorse or
+ promote products derived from this software without specific prior
+ written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+ 6. Some of the cuBLAS library routines were written by or
+ derived from code written by Vasily Volkov and are subject
+ to the Modified Berkeley Software Distribution License as
+ follows:
+
+ Copyright (c) 2007-2009, Regents of the University of California
+
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+ * Neither the name of the University of California, Berkeley nor
+ the names of its contributors may be used to endorse or promote
+ products derived from this software without specific prior
+ written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+ 7. Some of the cuBLAS library routines were written by or
+ derived from code written by Davide Barbieri and are
+ subject to the Modified Berkeley Software Distribution
+ License as follows:
+
+ Copyright (c) 2008-2009 Davide Barbieri @ University of Rome Tor Vergata.
+
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+ * The name of the author may not be used to endorse or promote
+ products derived from this software without specific prior
+ written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+ 8. Some of the cuBLAS library routines were derived from
+ code developed by the University of Tennessee and are
+ subject to the Modified Berkeley Software Distribution
+ License as follows:
+
+ Copyright (c) 2010 The University of Tennessee.
+
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer listed in this license in the documentation and/or
+ other materials provided with the distribution.
+ * Neither the name of the copyright holders nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ 9. Some of the cuBLAS library routines were written by or
+ derived from code written by Jonathan Hogg and are subject
+ to the Modified Berkeley Software Distribution License as
+ follows:
+
+ Copyright (c) 2012, The Science and Technology Facilities Council (STFC).
+
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+ * Neither the name of the STFC nor the names of its contributors
+ may be used to endorse or promote products derived from this
+ software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE STFC BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ 10. Some of the cuBLAS library routines were written by or
+ derived from code written by Ahmad M. Abdelfattah, David
+ Keyes, and Hatem Ltaief, and are subject to the Apache
+ License, Version 2.0, as follows:
+
+ -- (C) Copyright 2013 King Abdullah University of Science and Technology
+ Authors:
+ Ahmad Abdelfattah ([email protected])
+ David Keyes ([email protected])
+ Hatem Ltaief ([email protected])
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of the King Abdullah University of Science and
+ Technology nor the names of its contributors may be used to endorse
+ or promote products derived from this software without specific prior
+ written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1291
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
1292
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE
1293
+
1294
+ 11. Some of the cuSPARSE library routines were written by or
1295
+ derived from code written by Li-Wen Chang and are subject
1296
+ to the NCSA Open Source License as follows:
1297
+
1298
+ Copyright (c) 2012, University of Illinois.
1299
+
1300
+ All rights reserved.
1301
+
1302
+ Developed by: IMPACT Group, University of Illinois, http://impact.crhc.illinois.edu
1303
+
1304
+ Permission is hereby granted, free of charge, to any person obtaining
1305
+ a copy of this software and associated documentation files (the
1306
+ "Software"), to deal with the Software without restriction, including
1307
+ without limitation the rights to use, copy, modify, merge, publish,
1308
+ distribute, sublicense, and/or sell copies of the Software, and to
1309
+ permit persons to whom the Software is furnished to do so, subject to
1310
+ the following conditions:
1311
+ * Redistributions of source code must retain the above copyright
1312
+ notice, this list of conditions and the following disclaimer.
1313
+ * Redistributions in binary form must reproduce the above
1314
+ copyright notice, this list of conditions and the following
1315
+ disclaimers in the documentation and/or other materials provided
1316
+ with the distribution.
1317
+ * Neither the names of IMPACT Group, University of Illinois, nor
1318
+ the names of its contributors may be used to endorse or promote
1319
+ products derived from this Software without specific prior
1320
+ written permission.
1321
+
1322
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
1323
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
1324
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
1325
+ NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT
1326
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
1327
+ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
1328
+ IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
1329
+ SOFTWARE.
1330
+
1331
+ 12. Some of the cuRAND library routines were written by or
1332
+ derived from code written by Mutsuo Saito and Makoto
1333
+ Matsumoto and are subject to the following license:
1334
+
1335
+ Copyright (c) 2009, 2010 Mutsuo Saito, Makoto Matsumoto and Hiroshima
1336
+ University. All rights reserved.
1337
+
1338
+ Copyright (c) 2011 Mutsuo Saito, Makoto Matsumoto, Hiroshima
1339
+ University and University of Tokyo. All rights reserved.
1340
+
1341
+ Redistribution and use in source and binary forms, with or without
1342
+ modification, are permitted provided that the following conditions are
1343
+ met:
1344
+ * Redistributions of source code must retain the above copyright
1345
+ notice, this list of conditions and the following disclaimer.
1346
+ * Redistributions in binary form must reproduce the above
1347
+ copyright notice, this list of conditions and the following
1348
+ disclaimer in the documentation and/or other materials provided
1349
+ with the distribution.
1350
+ * Neither the name of the Hiroshima University nor the names of
1351
+ its contributors may be used to endorse or promote products
1352
+ derived from this software without specific prior written
1353
+ permission.
1354
+
1355
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
1356
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
1357
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
1358
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
1359
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
1360
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
1361
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
1362
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
1363
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1364
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
1365
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1366
+
1367
+ 13. Some of the cuRAND library routines were derived from
1368
+ code developed by D. E. Shaw Research and are subject to
1369
+ the following license:
1370
+
1371
+ Copyright 2010-2011, D. E. Shaw Research.
1372
+
1373
+ All rights reserved.
1374
+
1375
+ Redistribution and use in source and binary forms, with or without
1376
+ modification, are permitted provided that the following conditions are
1377
+ met:
1378
+ * Redistributions of source code must retain the above copyright
1379
+ notice, this list of conditions, and the following disclaimer.
1380
+ * Redistributions in binary form must reproduce the above
1381
+ copyright notice, this list of conditions, and the following
1382
+ disclaimer in the documentation and/or other materials provided
1383
+ with the distribution.
1384
+ * Neither the name of D. E. Shaw Research nor the names of its
1385
+ contributors may be used to endorse or promote products derived
1386
+ from this software without specific prior written permission.
1387
+
1388
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
1389
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
1390
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
1391
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
1392
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
1393
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
1394
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
1395
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
1396
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1397
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
1398
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1399
+
1400
+ 14. Some of the Math library routines were written by or
1401
+ derived from code developed by Norbert Juffa and are
1402
+ subject to the following license:
1403
+
1404
+ Copyright (c) 2015-2017, Norbert Juffa
1405
+ All rights reserved.
1406
+
1407
+ Redistribution and use in source and binary forms, with or without
1408
+ modification, are permitted provided that the following conditions
1409
+ are met:
1410
+
1411
+ 1. Redistributions of source code must retain the above copyright
1412
+ notice, this list of conditions and the following disclaimer.
1413
+
1414
+ 2. Redistributions in binary form must reproduce the above copyright
1415
+ notice, this list of conditions and the following disclaimer in the
1416
+ documentation and/or other materials provided with the distribution.
1417
+
1418
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
1419
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
1420
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
1421
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
1422
+ HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
1423
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
1424
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
1425
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
1426
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1427
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
1428
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1429
+
1430
+ 15. Licensee's use of the lz4 third party component is
1431
+ subject to the following terms and conditions:
1432
+
1433
+ Copyright (C) 2011-2013, Yann Collet.
1434
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
1435
+
1436
+ Redistribution and use in source and binary forms, with or without
1437
+ modification, are permitted provided that the following conditions are
1438
+ met:
1439
+
1440
+ * Redistributions of source code must retain the above copyright
1441
+ notice, this list of conditions and the following disclaimer.
1442
+ * Redistributions in binary form must reproduce the above
1443
+ copyright notice, this list of conditions and the following disclaimer
1444
+ in the documentation and/or other materials provided with the
1445
+ distribution.
1446
+
1447
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
1448
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
1449
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
1450
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
1451
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
1452
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
1453
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
1454
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
1455
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1456
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
1457
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1458
+
1459
+ 16. The NPP library uses code from the Boost Math Toolkit,
1460
+ and is subject to the following license:
1461
+
1462
+ Boost Software License - Version 1.0 - August 17th, 2003
1463
+ . . . .
1464
+
1465
+ Permission is hereby granted, free of charge, to any person or
1466
+ organization obtaining a copy of the software and accompanying
1467
+ documentation covered by this license (the "Software") to use,
1468
+ reproduce, display, distribute, execute, and transmit the Software,
1469
+ and to prepare derivative works of the Software, and to permit
1470
+ third-parties to whom the Software is furnished to do so, all
1471
+ subject to the following:
1472
+
1473
+ The copyright notices in the Software and this entire statement,
1474
+ including the above license grant, this restriction and the following
1475
+ disclaimer, must be included in all copies of the Software, in whole
1476
+ or in part, and all derivative works of the Software, unless such
1477
+ copies or derivative works are solely in the form of machine-executable
1478
+ object code generated by a source language processor.
1479
+
1480
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
1481
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
1482
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND
1483
+ NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR
1484
+ ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR
1485
+ OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING
1486
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
1487
+ OTHER DEALINGS IN THE SOFTWARE.
1488
+
1489
+ 17. Portions of the Nsight Eclipse Edition is subject to the
1490
+ following license:
1491
+
1492
+ The Eclipse Foundation makes available all content in this plug-in
1493
+ ("Content"). Unless otherwise indicated below, the Content is provided
1494
+ to you under the terms and conditions of the Eclipse Public License
1495
+ Version 1.0 ("EPL"). A copy of the EPL is available at http://
1496
+ www.eclipse.org/legal/epl-v10.html. For purposes of the EPL, "Program"
1497
+ will mean the Content.
1498
+
1499
+ If you did not receive this Content directly from the Eclipse
1500
+ Foundation, the Content is being redistributed by another party
1501
+ ("Redistributor") and different terms and conditions may apply to your
1502
+ use of any object code in the Content. Check the Redistributor's
1503
+ license that was provided with the Content. If no such license exists,
1504
+ contact the Redistributor. Unless otherwise indicated below, the terms
1505
+ and conditions of the EPL still apply to any source code in the
1506
+ Content and such source code may be obtained at http://www.eclipse.org.
1507
+
1508
+ 18. Some of the cuBLAS library routines uses code from
1509
+ OpenAI, which is subject to the following license:
1510
+
1511
+ License URL
1512
+ https://github.com/openai/openai-gemm/blob/master/LICENSE
1513
+
1514
+ License Text
1515
+ The MIT License
1516
+
1517
+ Copyright (c) 2016 OpenAI (http://openai.com), 2016 Google Inc.
1518
+
1519
+ Permission is hereby granted, free of charge, to any person obtaining a copy
1520
+ of this software and associated documentation files (the "Software"), to deal
1521
+ in the Software without restriction, including without limitation the rights
1522
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
1523
+ copies of the Software, and to permit persons to whom the Software is
1524
+ furnished to do so, subject to the following conditions:
1525
+
1526
+ The above copyright notice and this permission notice shall be included in
1527
+ all copies or substantial portions of the Software.
1528
+
1529
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
1530
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
1531
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
1532
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
1533
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
1534
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
1535
+ THE SOFTWARE.
1536
+
1537
+ 19. Licensee's use of the Visual Studio Setup Configuration
1538
+ Samples is subject to the following license:
1539
+
1540
+ The MIT License (MIT)
1541
+ Copyright (C) Microsoft Corporation. All rights reserved.
1542
+
1543
+ Permission is hereby granted, free of charge, to any person
1544
+ obtaining a copy of this software and associated documentation
1545
+ files (the "Software"), to deal in the Software without restriction,
1546
+ including without limitation the rights to use, copy, modify, merge,
1547
+ publish, distribute, sublicense, and/or sell copies of the Software,
1548
+ and to permit persons to whom the Software is furnished to do so,
1549
+ subject to the following conditions:
1550
+
1551
+ The above copyright notice and this permission notice shall be included
1552
+ in all copies or substantial portions of the Software.
1553
+
1554
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
1555
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
1556
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
1557
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
1558
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
1559
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
1560
+
1561
+ 20. Licensee's use of linmath.h header for CPU functions for
1562
+ GL vector/matrix operations from lunarG is subject to the
1563
+ Apache License Version 2.0.
1564
+
1565
+ 21. The DX12-CUDA sample uses the d3dx12.h header, which is
1566
+ subject to the MIT license .
1567
+
1568
+ -----------------
venv/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/METADATA ADDED
@@ -0,0 +1,38 @@
+ Metadata-Version: 2.1
+ Name: nvidia-cusolver-cu12
+ Version: 11.4.5.107
+ Summary: CUDA solver native runtime libraries
+ Home-page: https://developer.nvidia.com/cuda-zone
+ Author: Nvidia CUDA Installer Team
+ Author-email: [email protected]
+ License: NVIDIA Proprietary Software
+ Keywords: cuda,nvidia,runtime,machine learning,deep learning
+ Classifier: Development Status :: 4 - Beta
+ Classifier: Intended Audience :: Developers
+ Classifier: Intended Audience :: Education
+ Classifier: Intended Audience :: Science/Research
+ Classifier: License :: Other/Proprietary License
+ Classifier: Natural Language :: English
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.5
+ Classifier: Programming Language :: Python :: 3.6
+ Classifier: Programming Language :: Python :: 3.7
+ Classifier: Programming Language :: Python :: 3.8
+ Classifier: Programming Language :: Python :: 3.9
+ Classifier: Programming Language :: Python :: 3.10
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3 :: Only
+ Classifier: Topic :: Scientific/Engineering
+ Classifier: Topic :: Scientific/Engineering :: Mathematics
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
+ Classifier: Topic :: Software Development
+ Classifier: Topic :: Software Development :: Libraries
+ Classifier: Operating System :: Microsoft :: Windows
+ Classifier: Operating System :: POSIX :: Linux
+ Requires-Python: >=3
+ License-File: License.txt
+ Requires-Dist: nvidia-cublas-cu12
+ Requires-Dist: nvidia-nvjitlink-cu12
+ Requires-Dist: nvidia-cusparse-cu12
+
+ CUDA solver native runtime libraries
venv/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/RECORD ADDED
@@ -0,0 +1,22 @@
+ nvidia/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ nvidia/__pycache__/__init__.cpython-310.pyc,,
+ nvidia/cusolver/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ nvidia/cusolver/__pycache__/__init__.cpython-310.pyc,,
+ nvidia/cusolver/include/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ nvidia/cusolver/include/__pycache__/__init__.cpython-310.pyc,,
+ nvidia/cusolver/include/cusolverDn.h,sha256=8KUcqUxWPr8jpz3ZVpTB6I3IXMme1ok7E7vi9XXKRzk,147406
+ nvidia/cusolver/include/cusolverMg.h,sha256=N8989nnS2BleeMyuftbQgBDJ4sMAkLPSnmy_S_7fxng,11549
+ nvidia/cusolver/include/cusolverRf.h,sha256=7BZfWeuMJ8w1Pz4iZeGmwvDZbDNNq0ivG5MHtiATtls,14292
+ nvidia/cusolver/include/cusolverSp.h,sha256=8fev0XawDBd0xrOxUlQ3WhclKlUuVAT64zKxwnP8iT0,32561
+ nvidia/cusolver/include/cusolverSp_LOWLEVEL_PREVIEW.h,sha256=rTuS0rxwGV3bAz50ua59WVPQ9SvlijORj732oPejoCk,37495
+ nvidia/cusolver/include/cusolver_common.h,sha256=8SMCLEPkMN9Ni_KANkvPSHCieV1jrTARuS-Mhmuq5H8,8826
+ nvidia/cusolver/lib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ nvidia/cusolver/lib/__pycache__/__init__.cpython-310.pyc,,
+ nvidia/cusolver/lib/libcusolver.so.11,sha256=ECh6vHzpxfx-fBY3YVZrWZ6uGzYsR-EACRHRmEQ9bVI,114481816
+ nvidia/cusolver/lib/libcusolverMg.so.11,sha256=0f3uK8NQhMAFtQ5r76UCApP7coB7wWG2pQOMh1RMmwY,79763496
+ nvidia_cusolver_cu12-11.4.5.107.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+ nvidia_cusolver_cu12-11.4.5.107.dist-info/License.txt,sha256=rW9YU_ugyg0VnQ9Y1JrkmDDC-Mk_epJki5zpCttMbM0,59262
+ nvidia_cusolver_cu12-11.4.5.107.dist-info/METADATA,sha256=b8Zxnx3ZVIwttTKBnzgVXjXu8-_pRL6wBkYMTV7i6gA,1626
+ nvidia_cusolver_cu12-11.4.5.107.dist-info/RECORD,,
+ nvidia_cusolver_cu12-11.4.5.107.dist-info/WHEEL,sha256=-kQi_VMfvRQozZJT7HUPMfY-5vLo0LVTmAylNJ3Ft98,106
+ nvidia_cusolver_cu12-11.4.5.107.dist-info/top_level.txt,sha256=fTkAtiFuL16nUrB9ytDDtpytz2t0B4NvYTnRzwAhO14,7
venv/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/WHEEL ADDED
@@ -0,0 +1,5 @@
+ Wheel-Version: 1.0
+ Generator: bdist_wheel (0.37.1)
+ Root-Is-Purelib: true
+ Tag: py3-none-manylinux1_x86_64
+
venv/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
+ nvidia
venv/lib/python3.10/site-packages/sklearn/__init__.py ADDED
@@ -0,0 +1,157 @@
+ """
+ The :mod:`sklearn` module includes functions to configure global settings and
+ get information about the working environment.
+ """
+
+ # Machine learning module for Python
+ # ==================================
+ #
+ # sklearn is a Python module integrating classical machine
+ # learning algorithms in the tightly-knit world of scientific Python
+ # packages (numpy, scipy, matplotlib).
+ #
+ # It aims to provide simple and efficient solutions to learning problems
+ # that are accessible to everybody and reusable in various contexts:
+ # machine-learning as a versatile tool for science and engineering.
+ #
+ # See https://scikit-learn.org for complete documentation.
+
+ import logging
+ import os
+ import random
+ import sys
+
+ from ._config import config_context, get_config, set_config
+
+ logger = logging.getLogger(__name__)
+
+
+ # PEP0440 compatible formatted version, see:
+ # https://www.python.org/dev/peps/pep-0440/
+ #
+ # Generic release markers:
+ #   X.Y.0   # For first release after an increment in Y
+ #   X.Y.Z   # For bugfix releases
+ #
+ # Admissible pre-release markers:
+ #   X.Y.ZaN   # Alpha release
+ #   X.Y.ZbN   # Beta release
+ #   X.Y.ZrcN  # Release Candidate
+ #   X.Y.Z     # Final release
+ #
+ # Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
+ # 'X.Y.dev0' is the canonical version of 'X.Y.dev'
+ #
+ __version__ = "1.4.2"
+
+
+ # On OSX, we can get a runtime error due to multiple OpenMP libraries loaded
+ # simultaneously. This can happen for instance when calling BLAS inside a
+ # prange. Setting the following environment variable allows multiple OpenMP
+ # libraries to be loaded. It should not degrade performances since we manually
+ # take care of potential over-subscription performance issues, in sections of
+ # the code where nested OpenMP loops can happen, by dynamically reconfiguring
+ # the inner OpenMP runtime to temporarily disable it while under the scope of
+ # the outer OpenMP parallel section.
+ os.environ.setdefault("KMP_DUPLICATE_LIB_OK", "True")
+
+ # Workaround issue discovered in intel-openmp 2019.5:
+ # https://github.com/ContinuumIO/anaconda-issues/issues/11294
+ os.environ.setdefault("KMP_INIT_AT_FORK", "FALSE")
+
+ try:
+     # This variable is injected in the __builtins__ by the build
+     # process. It is used to enable importing subpackages of sklearn when
+     # the binaries are not built
+     # mypy error: Cannot determine type of '__SKLEARN_SETUP__'
+     __SKLEARN_SETUP__  # type: ignore
+ except NameError:
+     __SKLEARN_SETUP__ = False
+
+ if __SKLEARN_SETUP__:
+     sys.stderr.write("Partial import of sklearn during the build process.\n")
+     # We are not importing the rest of scikit-learn during the build
+     # process, as it may not be compiled yet
+ else:
+     # `_distributor_init` allows distributors to run custom init code.
+     # For instance, for the Windows wheel, this is used to pre-load the
+     # vcomp shared library runtime for OpenMP embedded in the sklearn/.libs
+     # sub-folder.
+     # It is necessary to do this prior to importing show_versions as the
+     # latter is linked to the OpenMP runtime to make it possible to introspect
+     # it and importing it first would fail if the OpenMP dll cannot be found.
+     from . import (
+         __check_build,  # noqa: F401
+         _distributor_init,  # noqa: F401
+     )
+     from .base import clone
+     from .utils._show_versions import show_versions
+
+     __all__ = [
+         "calibration",
+         "cluster",
+         "covariance",
+         "cross_decomposition",
+         "datasets",
+         "decomposition",
+         "dummy",
+         "ensemble",
+         "exceptions",
+         "experimental",
+         "externals",
+         "feature_extraction",
+         "feature_selection",
+         "gaussian_process",
+         "inspection",
+         "isotonic",
+         "kernel_approximation",
+         "kernel_ridge",
+         "linear_model",
+         "manifold",
+         "metrics",
+         "mixture",
+         "model_selection",
+         "multiclass",
+         "multioutput",
+         "naive_bayes",
+         "neighbors",
+         "neural_network",
+         "pipeline",
+         "preprocessing",
+         "random_projection",
+         "semi_supervised",
+         "svm",
+         "tree",
+         "discriminant_analysis",
+         "impute",
+         "compose",
+         # Non-modules:
+         "clone",
+         "get_config",
+         "set_config",
+         "config_context",
+         "show_versions",
+     ]
+
+     _BUILT_WITH_MESON = False
+     try:
+         import sklearn._built_with_meson  # noqa: F401
+
+         _BUILT_WITH_MESON = True
+     except ModuleNotFoundError:
+         pass
+
+
+ def setup_module(module):
+     """Fixture for the tests to assure globally controllable seeding of RNGs"""
+
+     import numpy as np
+
+     # Check if a random seed exists in the environment, if not create one.
+     _random_seed = os.environ.get("SKLEARN_SEED", None)
+     if _random_seed is None:
+         _random_seed = np.random.uniform() * np.iinfo(np.int32).max
+     _random_seed = int(_random_seed)
+     print("I: Seeding RNGs with %r" % _random_seed)
+     np.random.seed(_random_seed)
+     random.seed(_random_seed)
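
A minimal sketch of how the `setup_module` fixture above can be made deterministic from the environment; the seed value is arbitrary:

    # Pin the seed read by setup_module() before it falls back to a random one.
    import os
    os.environ["SKLEARN_SEED"] = "42"  # setup_module() seeds numpy and random with this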
venv/lib/python3.10/site-packages/sklearn/_config.py ADDED
@@ -0,0 +1,373 @@
+ """Global configuration state and functions for management
+ """
+ import os
+ import threading
+ from contextlib import contextmanager as contextmanager
+
+ _global_config = {
+     "assume_finite": bool(os.environ.get("SKLEARN_ASSUME_FINITE", False)),
+     "working_memory": int(os.environ.get("SKLEARN_WORKING_MEMORY", 1024)),
+     "print_changed_only": True,
+     "display": "diagram",
+     "pairwise_dist_chunk_size": int(
+         os.environ.get("SKLEARN_PAIRWISE_DIST_CHUNK_SIZE", 256)
+     ),
+     "enable_cython_pairwise_dist": True,
+     "array_api_dispatch": False,
+     "transform_output": "default",
+     "enable_metadata_routing": False,
+     "skip_parameter_validation": False,
+ }
+ _threadlocal = threading.local()
+
+
+ def _get_threadlocal_config():
+     """Get a threadlocal **mutable** configuration. If the configuration
+     does not exist, copy the default global configuration."""
+     if not hasattr(_threadlocal, "global_config"):
+         _threadlocal.global_config = _global_config.copy()
+     return _threadlocal.global_config
+
+
+ def get_config():
+     """Retrieve current values for configuration set by :func:`set_config`.
+
+     Returns
+     -------
+     config : dict
+         Keys are parameter names that can be passed to :func:`set_config`.
+
+     See Also
+     --------
+     config_context : Context manager for global scikit-learn configuration.
+     set_config : Set global scikit-learn configuration.
+
+     Examples
+     --------
+     >>> import sklearn
+     >>> config = sklearn.get_config()
+     >>> config.keys()
+     dict_keys([...])
+     """
+     # Return a copy of the threadlocal configuration so that users will
+     # not be able to modify the configuration with the returned dict.
+     return _get_threadlocal_config().copy()
+
+
+ def set_config(
+     assume_finite=None,
+     working_memory=None,
+     print_changed_only=None,
+     display=None,
+     pairwise_dist_chunk_size=None,
+     enable_cython_pairwise_dist=None,
+     array_api_dispatch=None,
+     transform_output=None,
+     enable_metadata_routing=None,
+     skip_parameter_validation=None,
+ ):
+     """Set global scikit-learn configuration.
+
+     .. versionadded:: 0.19
+
+     Parameters
+     ----------
+     assume_finite : bool, default=None
+         If True, validation for finiteness will be skipped,
+         saving time, but leading to potential crashes. If
+         False, validation for finiteness will be performed,
+         avoiding error. Global default: False.
+
+         .. versionadded:: 0.19
+
+     working_memory : int, default=None
+         If set, scikit-learn will attempt to limit the size of temporary arrays
+         to this number of MiB (per job when parallelised), often saving both
+         computation time and memory on expensive operations that can be
+         performed in chunks. Global default: 1024.
+
+         .. versionadded:: 0.20
+
+     print_changed_only : bool, default=None
+         If True, only the parameters that were set to non-default
+         values will be printed when printing an estimator. For example,
+         ``print(SVC())`` while True will only print 'SVC()' while the default
+         behaviour would be to print 'SVC(C=1.0, cache_size=200, ...)' with
+         all the non-changed parameters.
+
+         .. versionadded:: 0.21
+
+     display : {'text', 'diagram'}, default=None
+         If 'diagram', estimators will be displayed as a diagram in a Jupyter
+         lab or notebook context. If 'text', estimators will be displayed as
+         text. Default is 'diagram'.
+
+         .. versionadded:: 0.23
+
+     pairwise_dist_chunk_size : int, default=None
+         The number of row vectors per chunk for the accelerated pairwise-
+         distances reduction backend. Default is 256 (suitable for most of
+         modern laptops' caches and architectures).
+
+         Intended for easier benchmarking and testing of scikit-learn internals.
+         End users are not expected to benefit from customizing this configuration
+         setting.
+
+         .. versionadded:: 1.1
+
+     enable_cython_pairwise_dist : bool, default=None
+         Use the accelerated pairwise-distances reduction backend when
+         possible. Global default: True.
+
+         Intended for easier benchmarking and testing of scikit-learn internals.
+         End users are not expected to benefit from customizing this configuration
+         setting.
+
+         .. versionadded:: 1.1
+
+     array_api_dispatch : bool, default=None
+         Use Array API dispatching when inputs follow the Array API standard.
+         Default is False.
+
+         See the :ref:`User Guide <array_api>` for more details.
+
+         .. versionadded:: 1.2
+
+     transform_output : str, default=None
+         Configure output of `transform` and `fit_transform`.
+
+         See :ref:`sphx_glr_auto_examples_miscellaneous_plot_set_output.py`
+         for an example on how to use the API.
+
+         - `"default"`: Default output format of a transformer
+         - `"pandas"`: DataFrame output
+         - `"polars"`: Polars output
+         - `None`: Transform configuration is unchanged
+
+         .. versionadded:: 1.2
+         .. versionadded:: 1.4
+             `"polars"` option was added.
+
+     enable_metadata_routing : bool, default=None
+         Enable metadata routing. By default this feature is disabled.
+
+         Refer to :ref:`metadata routing user guide <metadata_routing>` for more
+         details.
+
+         - `True`: Metadata routing is enabled
+         - `False`: Metadata routing is disabled, use the old syntax.
+         - `None`: Configuration is unchanged
+
+         .. versionadded:: 1.3
+
+     skip_parameter_validation : bool, default=None
+         If `True`, disable the validation of the hyper-parameters' types and values in
+         the fit method of estimators and for arguments passed to public helper
+         functions. It can save time in some situations but can lead to low level
+         crashes and exceptions with confusing error messages.
+
+         Note that for data parameters, such as `X` and `y`, only type validation is
+         skipped but validation with `check_array` will continue to run.
+
+         .. versionadded:: 1.3
+
+     See Also
+     --------
+     config_context : Context manager for global scikit-learn configuration.
+     get_config : Retrieve current values of the global configuration.
+
+     Examples
+     --------
+     >>> from sklearn import set_config
+     >>> set_config(display='diagram')  # doctest: +SKIP
+     """
+     local_config = _get_threadlocal_config()
+
+     if assume_finite is not None:
+         local_config["assume_finite"] = assume_finite
+     if working_memory is not None:
+         local_config["working_memory"] = working_memory
+     if print_changed_only is not None:
+         local_config["print_changed_only"] = print_changed_only
+     if display is not None:
+         local_config["display"] = display
+     if pairwise_dist_chunk_size is not None:
+         local_config["pairwise_dist_chunk_size"] = pairwise_dist_chunk_size
+     if enable_cython_pairwise_dist is not None:
+         local_config["enable_cython_pairwise_dist"] = enable_cython_pairwise_dist
+     if array_api_dispatch is not None:
+         from .utils._array_api import _check_array_api_dispatch
+
+         _check_array_api_dispatch(array_api_dispatch)
+         local_config["array_api_dispatch"] = array_api_dispatch
+     if transform_output is not None:
+         local_config["transform_output"] = transform_output
+     if enable_metadata_routing is not None:
+         local_config["enable_metadata_routing"] = enable_metadata_routing
+     if skip_parameter_validation is not None:
+         local_config["skip_parameter_validation"] = skip_parameter_validation
+
+
+ @contextmanager
+ def config_context(
+     *,
+     assume_finite=None,
+     working_memory=None,
+     print_changed_only=None,
+     display=None,
+     pairwise_dist_chunk_size=None,
+     enable_cython_pairwise_dist=None,
+     array_api_dispatch=None,
+     transform_output=None,
+     enable_metadata_routing=None,
+     skip_parameter_validation=None,
+ ):
+     """Context manager for global scikit-learn configuration.
+
+     Parameters
+     ----------
+     assume_finite : bool, default=None
+         If True, validation for finiteness will be skipped,
+         saving time, but leading to potential crashes. If
+         False, validation for finiteness will be performed,
+         avoiding error. If None, the existing value won't change.
+         The default value is False.
+
+     working_memory : int, default=None
+         If set, scikit-learn will attempt to limit the size of temporary arrays
+         to this number of MiB (per job when parallelised), often saving both
+         computation time and memory on expensive operations that can be
+         performed in chunks. If None, the existing value won't change.
+         The default value is 1024.
+
+     print_changed_only : bool, default=None
+         If True, only the parameters that were set to non-default
+         values will be printed when printing an estimator. For example,
+         ``print(SVC())`` while True will only print 'SVC()', but would print
+         'SVC(C=1.0, cache_size=200, ...)' with all the non-changed parameters
+         when False. If None, the existing value won't change.
+         The default value is True.
+
+         .. versionchanged:: 0.23
+             Default changed from False to True.
+
+     display : {'text', 'diagram'}, default=None
+         If 'diagram', estimators will be displayed as a diagram in a Jupyter
+         lab or notebook context. If 'text', estimators will be displayed as
+         text. If None, the existing value won't change.
+         The default value is 'diagram'.
+
+         .. versionadded:: 0.23
+
+     pairwise_dist_chunk_size : int, default=None
+         The number of row vectors per chunk for the accelerated pairwise-
+         distances reduction backend. Default is 256 (suitable for most of
+         modern laptops' caches and architectures).
+
+         Intended for easier benchmarking and testing of scikit-learn internals.
+         End users are not expected to benefit from customizing this configuration
+         setting.
+
+         .. versionadded:: 1.1
+
+     enable_cython_pairwise_dist : bool, default=None
+         Use the accelerated pairwise-distances reduction backend when
+         possible. Global default: True.
+
+         Intended for easier benchmarking and testing of scikit-learn internals.
+         End users are not expected to benefit from customizing this configuration
+         setting.
+
+         .. versionadded:: 1.1
+
+     array_api_dispatch : bool, default=None
+         Use Array API dispatching when inputs follow the Array API standard.
+         Default is False.
+
+         See the :ref:`User Guide <array_api>` for more details.
+
+         .. versionadded:: 1.2
+
+     transform_output : str, default=None
+         Configure output of `transform` and `fit_transform`.
+
+         See :ref:`sphx_glr_auto_examples_miscellaneous_plot_set_output.py`
+         for an example on how to use the API.
+
+         - `"default"`: Default output format of a transformer
+         - `"pandas"`: DataFrame output
+         - `"polars"`: Polars output
+         - `None`: Transform configuration is unchanged
+
+         .. versionadded:: 1.2
+         .. versionadded:: 1.4
+             `"polars"` option was added.
+
+     enable_metadata_routing : bool, default=None
+         Enable metadata routing. By default this feature is disabled.
+
+         Refer to :ref:`metadata routing user guide <metadata_routing>` for more
+         details.
+
+         - `True`: Metadata routing is enabled
+         - `False`: Metadata routing is disabled, use the old syntax.
+         - `None`: Configuration is unchanged
+
+         .. versionadded:: 1.3
+
+     skip_parameter_validation : bool, default=None
+         If `True`, disable the validation of the hyper-parameters' types and values in
+         the fit method of estimators and for arguments passed to public helper
+         functions. It can save time in some situations but can lead to low level
+         crashes and exceptions with confusing error messages.
+
+         Note that for data parameters, such as `X` and `y`, only type validation is
+         skipped but validation with `check_array` will continue to run.
+
+         .. versionadded:: 1.3
+
+     Yields
+     ------
+     None.
+
+     See Also
+     --------
+     set_config : Set global scikit-learn configuration.
+     get_config : Retrieve current values of the global configuration.
+
+     Notes
+     -----
+     All settings, not just those presently modified, will be returned to
+     their previous values when the context manager is exited.
+
+     Examples
+     --------
+     >>> import sklearn
+     >>> from sklearn.utils.validation import assert_all_finite
+     >>> with sklearn.config_context(assume_finite=True):
+     ...     assert_all_finite([float('nan')])
+     >>> with sklearn.config_context(assume_finite=True):
+     ...     with sklearn.config_context(assume_finite=False):
+     ...         assert_all_finite([float('nan')])
+     Traceback (most recent call last):
+     ...
+     ValueError: Input contains NaN...
+     """
+     old_config = get_config()
+     set_config(
+         assume_finite=assume_finite,
+         working_memory=working_memory,
+         print_changed_only=print_changed_only,
+         display=display,
+         pairwise_dist_chunk_size=pairwise_dist_chunk_size,
+         enable_cython_pairwise_dist=enable_cython_pairwise_dist,
+         array_api_dispatch=array_api_dispatch,
+         transform_output=transform_output,
+         enable_metadata_routing=enable_metadata_routing,
+         skip_parameter_validation=skip_parameter_validation,
+     )
+
+     try:
+         yield
+     finally:
+         set_config(**old_config)
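
A sketch of the thread-local behaviour implemented above: each thread copies `_global_config` on first access, so `set_config` in one thread does not leak into another (illustrative only):

    import threading
    import sklearn

    sklearn.set_config(assume_finite=True)  # mutates this thread's copy only

    seen = []
    t = threading.Thread(target=lambda: seen.append(sklearn.get_config()["assume_finite"]))
    t.start()
    t.join()
    print(sklearn.get_config()["assume_finite"], seen[0])  # True False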
venv/lib/python3.10/site-packages/sklearn/_distributor_init.py ADDED
@@ -0,0 +1,10 @@
+ """ Distributor init file
+
+ Distributors: you can add custom code here to support particular distributions
+ of scikit-learn.
+
+ For example, this is a good place to put any checks for hardware requirements.
+
+ The scikit-learn standard source distribution will not put code in this file,
+ so you can safely replace this file with your own version.
+ """
venv/lib/python3.10/site-packages/sklearn/_isotonic.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (307 kB).
venv/lib/python3.10/site-packages/sklearn/_min_dependencies.py ADDED
@@ -0,0 +1,65 @@
+ """All minimum dependencies for scikit-learn."""
+ import argparse
+ from collections import defaultdict
+
+ # scipy and cython should be in sync with pyproject.toml
+ NUMPY_MIN_VERSION = "1.19.5"
+ SCIPY_MIN_VERSION = "1.6.0"
+ JOBLIB_MIN_VERSION = "1.2.0"
+ THREADPOOLCTL_MIN_VERSION = "2.0.0"
+ PYTEST_MIN_VERSION = "7.1.2"
+ CYTHON_MIN_VERSION = "3.0.8"
+
+
+ # 'build' and 'install' is included to have structured metadata for CI.
+ # It will NOT be included in setup's extras_require
+ # The values are (version_spec, comma separated tags)
+ dependent_packages = {
+     "numpy": (NUMPY_MIN_VERSION, "build, install"),
+     "scipy": (SCIPY_MIN_VERSION, "build, install"),
+     "joblib": (JOBLIB_MIN_VERSION, "install"),
+     "threadpoolctl": (THREADPOOLCTL_MIN_VERSION, "install"),
+     "cython": (CYTHON_MIN_VERSION, "build"),
+     "matplotlib": ("3.3.4", "benchmark, docs, examples, tests"),
+     "scikit-image": ("0.17.2", "docs, examples, tests"),
+     "pandas": ("1.1.5", "benchmark, docs, examples, tests"),
+     "seaborn": ("0.9.0", "docs, examples"),
+     "memory_profiler": ("0.57.0", "benchmark, docs"),
+     "pytest": (PYTEST_MIN_VERSION, "tests"),
+     "pytest-cov": ("2.9.0", "tests"),
+     "ruff": ("0.0.272", "tests"),
+     "black": ("23.3.0", "tests"),
+     "mypy": ("1.3", "tests"),
+     "pyamg": ("4.0.0", "tests"),
+     "polars": ("0.19.12", "tests"),
+     "pyarrow": ("12.0.0", "tests"),
+     "sphinx": ("6.0.0", "docs"),
+     "sphinx-copybutton": ("0.5.2", "docs"),
+     "sphinx-gallery": ("0.15.0", "docs"),
+     "numpydoc": ("1.2.0", "docs, tests"),
+     "Pillow": ("7.1.2", "docs"),
+     "pooch": ("1.6.0", "docs, examples, tests"),
+     "sphinx-prompt": ("1.3.0", "docs"),
+     "sphinxext-opengraph": ("0.4.2", "docs"),
+     "plotly": ("5.14.0", "docs, examples"),
+     # XXX: Pin conda-lock to the latest released version (needs manual update
+     # from time to time)
+     "conda-lock": ("2.4.2", "maintenance"),
+ }
+
+
+ # create inverse mapping for setuptools
+ tag_to_packages: dict = defaultdict(list)
+ for package, (min_version, extras) in dependent_packages.items():
+     for extra in extras.split(", "):
+         tag_to_packages[extra].append("{}>={}".format(package, min_version))
+
+
+ # Used by CI to get the min dependencies
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser(description="Get min dependencies for a package")
+
+     parser.add_argument("package", choices=dependent_packages)
+     args = parser.parse_args()
+     min_version = dependent_packages[args.package][0]
+     print(min_version)
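
The `__main__` block above is meant to be driven by CI; assuming it is invoked from a scikit-learn checkout, usage looks like (outputs follow from the table above):

    $ python sklearn/_min_dependencies.py numpy
    1.19.5
    $ python sklearn/_min_dependencies.py cython
    3.0.8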
venv/lib/python3.10/site-packages/sklearn/base.py ADDED
@@ -0,0 +1,1478 @@
+ """Base classes for all estimators."""
+
+ # Author: Gael Varoquaux <[email protected]>
+ # License: BSD 3 clause
+
+ import copy
+ import functools
+ import inspect
+ import platform
+ import re
+ import warnings
+ from collections import defaultdict
+
+ import numpy as np
+
+ from . import __version__
+ from ._config import config_context, get_config
+ from .exceptions import InconsistentVersionWarning
+ from .utils import _IS_32BIT
+ from .utils._estimator_html_repr import _HTMLDocumentationLinkMixin, estimator_html_repr
+ from .utils._metadata_requests import _MetadataRequester, _routing_enabled
+ from .utils._param_validation import validate_parameter_constraints
+ from .utils._set_output import _SetOutputMixin
+ from .utils._tags import (
+     _DEFAULT_TAGS,
+ )
+ from .utils.validation import (
+     _check_feature_names_in,
+     _check_y,
+     _generate_get_feature_names_out,
+     _get_feature_names,
+     _is_fitted,
+     _num_features,
+     check_array,
+     check_is_fitted,
+     check_X_y,
+ )
+
+
+ def clone(estimator, *, safe=True):
+     """Construct a new unfitted estimator with the same parameters.
+
+     Clone does a deep copy of the model in an estimator
+     without actually copying attached data. It returns a new estimator
+     with the same parameters that has not been fitted on any data.
+
+     .. versionchanged:: 1.3
+         Delegates to `estimator.__sklearn_clone__` if the method exists.
+
+     Parameters
+     ----------
+     estimator : {list, tuple, set} of estimator instance or a single \
+             estimator instance
+         The estimator or group of estimators to be cloned.
+     safe : bool, default=True
+         If safe is False, clone will fall back to a deep copy on objects
+         that are not estimators. Ignored if `estimator.__sklearn_clone__`
+         exists.
+
+     Returns
+     -------
+     estimator : object
+         The deep copy of the input, an estimator if input is an estimator.
+
+     Notes
+     -----
+     If the estimator's `random_state` parameter is an integer (or if the
+     estimator doesn't have a `random_state` parameter), an *exact clone* is
+     returned: the clone and the original estimator will give the exact same
+     results. Otherwise, a *statistical clone* is returned: the clone might
+     return different results from the original estimator. More details can be
+     found in :ref:`randomness`.
+
+     Examples
+     --------
+     >>> from sklearn.base import clone
+     >>> from sklearn.linear_model import LogisticRegression
+     >>> X = [[-1, 0], [0, 1], [0, -1], [1, 0]]
+     >>> y = [0, 0, 1, 1]
+     >>> classifier = LogisticRegression().fit(X, y)
+     >>> cloned_classifier = clone(classifier)
+     >>> hasattr(classifier, "classes_")
+     True
+     >>> hasattr(cloned_classifier, "classes_")
+     False
+     >>> classifier is cloned_classifier
+     False
+     """
+     if hasattr(estimator, "__sklearn_clone__") and not inspect.isclass(estimator):
+         return estimator.__sklearn_clone__()
+     return _clone_parametrized(estimator, safe=safe)
+
+
+ def _clone_parametrized(estimator, *, safe=True):
+     """Default implementation of clone. See :func:`sklearn.base.clone` for details."""
+
+     estimator_type = type(estimator)
+     if estimator_type is dict:
+         return {k: clone(v, safe=safe) for k, v in estimator.items()}
+     elif estimator_type in (list, tuple, set, frozenset):
+         return estimator_type([clone(e, safe=safe) for e in estimator])
+     elif not hasattr(estimator, "get_params") or isinstance(estimator, type):
+         if not safe:
+             return copy.deepcopy(estimator)
+         else:
+             if isinstance(estimator, type):
+                 raise TypeError(
+                     "Cannot clone object. "
+                     + "You should provide an instance of "
+                     + "scikit-learn estimator instead of a class."
+                 )
+             else:
+                 raise TypeError(
+                     "Cannot clone object '%s' (type %s): "
+                     "it does not seem to be a scikit-learn "
+                     "estimator as it does not implement a "
+                     "'get_params' method." % (repr(estimator), type(estimator))
+                 )
+
+     klass = estimator.__class__
+     new_object_params = estimator.get_params(deep=False)
+     for name, param in new_object_params.items():
+         new_object_params[name] = clone(param, safe=False)
+
+     new_object = klass(**new_object_params)
+     try:
+         new_object._metadata_request = copy.deepcopy(estimator._metadata_request)
+     except AttributeError:
+         pass
+
+     params_set = new_object.get_params(deep=False)
+
+     # quick sanity check of the parameters of the clone
+     for name in new_object_params:
+         param1 = new_object_params[name]
+         param2 = params_set[name]
+         if param1 is not param2:
+             raise RuntimeError(
+                 "Cannot clone object %s, as the constructor "
+                 "either does not set or modifies parameter %s" % (estimator, name)
+             )
+
+     # _sklearn_output_config is used by `set_output` to configure the output
+     # container of an estimator.
+     if hasattr(estimator, "_sklearn_output_config"):
+         new_object._sklearn_output_config = copy.deepcopy(
+             estimator._sklearn_output_config
+         )
+     return new_object
+
+
+ class BaseEstimator(_HTMLDocumentationLinkMixin, _MetadataRequester):
153
+ """Base class for all estimators in scikit-learn.
154
+
155
+ Inheriting from this class provides default implementations of:
156
+
157
+ - setting and getting parameters used by `GridSearchCV` and friends;
158
+ - textual and HTML representation displayed in terminals and IDEs;
159
+ - estimator serialization;
160
+ - parameters validation;
161
+ - data validation;
162
+ - feature names validation.
163
+
164
+ Read more in the :ref:`User Guide <rolling_your_own_estimator>`.
165
+
166
+
167
+ Notes
168
+ -----
169
+ All estimators should specify all the parameters that can be set
170
+ at the class level in their ``__init__`` as explicit keyword
171
+ arguments (no ``*args`` or ``**kwargs``).
172
+
173
+ Examples
174
+ --------
175
+ >>> import numpy as np
176
+ >>> from sklearn.base import BaseEstimator
177
+ >>> class MyEstimator(BaseEstimator):
178
+ ... def __init__(self, *, param=1):
179
+ ... self.param = param
180
+ ... def fit(self, X, y=None):
181
+ ... self.is_fitted_ = True
182
+ ... return self
183
+ ... def predict(self, X):
184
+ ... return np.full(shape=X.shape[0], fill_value=self.param)
185
+ >>> estimator = MyEstimator(param=2)
186
+ >>> estimator.get_params()
187
+ {'param': 2}
188
+ >>> X = np.array([[1, 2], [2, 3], [3, 4]])
189
+ >>> y = np.array([1, 0, 1])
190
+ >>> estimator.fit(X, y).predict(X)
191
+ array([2, 2, 2])
192
+ >>> estimator.set_params(param=3).fit(X, y).predict(X)
193
+ array([3, 3, 3])
194
+ """
195
+
196
+ @classmethod
197
+ def _get_param_names(cls):
198
+ """Get parameter names for the estimator"""
199
+ # fetch the constructor or the original constructor before
200
+ # deprecation wrapping if any
201
+ init = getattr(cls.__init__, "deprecated_original", cls.__init__)
202
+ if init is object.__init__:
203
+ # No explicit constructor to introspect
204
+ return []
205
+
206
+ # introspect the constructor arguments to find the model parameters
207
+ # to represent
208
+ init_signature = inspect.signature(init)
209
+ # Consider the constructor parameters excluding 'self'
210
+ parameters = [
211
+ p
212
+ for p in init_signature.parameters.values()
213
+ if p.name != "self" and p.kind != p.VAR_KEYWORD
214
+ ]
215
+ for p in parameters:
216
+ if p.kind == p.VAR_POSITIONAL:
217
+ raise RuntimeError(
218
+ "scikit-learn estimators should always "
219
+ "specify their parameters in the signature"
220
+ " of their __init__ (no varargs)."
221
+ " %s with constructor %s doesn't "
222
+ " follow this convention." % (cls, init_signature)
223
+ )
224
+ # Extract and sort argument names excluding 'self'
225
+ return sorted([p.name for p in parameters])
226
+
227
+ def get_params(self, deep=True):
228
+ """
229
+ Get parameters for this estimator.
230
+
231
+ Parameters
232
+ ----------
233
+ deep : bool, default=True
234
+ If True, will return the parameters for this estimator and
235
+ contained subobjects that are estimators.
236
+
237
+ Returns
238
+ -------
239
+ params : dict
240
+ Parameter names mapped to their values.
241
+ """
242
+ out = dict()
243
+ for key in self._get_param_names():
244
+ value = getattr(self, key)
245
+ if deep and hasattr(value, "get_params") and not isinstance(value, type):
246
+ deep_items = value.get_params().items()
247
+ out.update((key + "__" + k, val) for k, val in deep_items)
248
+ out[key] = value
249
+ return out
250
+
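+ # Hedged example (editor's addition): with `deep=True`, parameters of nested
+ # estimators are also returned under `<component>__<parameter>` keys, e.g.:
+ #
+ #   >>> from sklearn.pipeline import Pipeline
+ #   >>> from sklearn.preprocessing import StandardScaler
+ #   >>> pipe = Pipeline([("scaler", StandardScaler())])
+ #   >>> "scaler__with_mean" in pipe.get_params(deep=True)
+ #   True
+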
251
+ def set_params(self, **params):
252
+ """Set the parameters of this estimator.
253
+
254
+ The method works on simple estimators as well as on nested objects
255
+ (such as :class:`~sklearn.pipeline.Pipeline`). The latter have
256
+ parameters of the form ``<component>__<parameter>`` so that it's
257
+ possible to update each component of a nested object.
258
+
259
+ Parameters
260
+ ----------
261
+ **params : dict
262
+ Estimator parameters.
263
+
264
+ Returns
265
+ -------
266
+ self : estimator instance
267
+ Estimator instance.
268
+ """
269
+ if not params:
270
+ # Simple optimization to gain speed (inspect is slow)
271
+ return self
272
+ valid_params = self.get_params(deep=True)
273
+
274
+ nested_params = defaultdict(dict) # grouped by prefix
275
+ for key, value in params.items():
276
+ key, delim, sub_key = key.partition("__")
277
+ if key not in valid_params:
278
+ local_valid_params = self._get_param_names()
279
+ raise ValueError(
280
+ f"Invalid parameter {key!r} for estimator {self}. "
281
+ f"Valid parameters are: {local_valid_params!r}."
282
+ )
283
+
284
+ if delim:
285
+ nested_params[key][sub_key] = value
286
+ else:
287
+ setattr(self, key, value)
288
+ valid_params[key] = value
289
+
290
+ for key, sub_params in nested_params.items():
291
+ valid_params[key].set_params(**sub_params)
292
+
293
+ return self
294
+
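+ # Hedged example (editor's addition): the same `<component>__<parameter>`
+ # syntax updates parameters of nested estimators in place, e.g.:
+ #
+ #   >>> from sklearn.pipeline import Pipeline
+ #   >>> from sklearn.preprocessing import StandardScaler
+ #   >>> pipe = Pipeline([("scaler", StandardScaler())])
+ #   >>> _ = pipe.set_params(scaler__with_mean=False)
+ #   >>> pipe.get_params()["scaler__with_mean"]
+ #   False
+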
295
+ def __sklearn_clone__(self):
296
+ return _clone_parametrized(self)
297
+
298
+ def __repr__(self, N_CHAR_MAX=700):
299
+ # N_CHAR_MAX is the (approximate) maximum number of non-blank
300
+ # characters to render. We pass it as an optional parameter to ease
301
+ # the tests.
302
+
303
+ from .utils._pprint import _EstimatorPrettyPrinter
304
+
305
+ N_MAX_ELEMENTS_TO_SHOW = 30 # number of elements to show in sequences
306
+
307
+ # use ellipsis for sequences with a lot of elements
308
+ pp = _EstimatorPrettyPrinter(
309
+ compact=True,
310
+ indent=1,
311
+ indent_at_name=True,
312
+ n_max_elements_to_show=N_MAX_ELEMENTS_TO_SHOW,
313
+ )
314
+
315
+ repr_ = pp.pformat(self)
316
+
317
+ # Use bruteforce ellipsis when there are a lot of non-blank characters
318
+ n_nonblank = len("".join(repr_.split()))
319
+ if n_nonblank > N_CHAR_MAX:
320
+ lim = N_CHAR_MAX // 2  # approx. number of chars to keep on both ends
321
+ regex = r"^(\s*\S){%d}" % lim
322
+ # The regex '^(\s*\S){%d}' % n
323
+ # matches from the start of the string until the nth non-blank
324
+ # character:
325
+ # - ^ matches the start of string
326
+ # - (pattern){n} matches n repetitions of pattern
327
+ # - \s*\S matches a non-blank char following zero or more blanks
328
+ left_lim = re.match(regex, repr_).end()
329
+ right_lim = re.match(regex, repr_[::-1]).end()
330
+
331
+ if "\n" in repr_[left_lim:-right_lim]:
332
+ # The left side and right side aren't on the same line.
333
+ # To avoid weird cuts, e.g.:
334
+ # categoric...ore',
335
+ # we need to start the right side with an appropriate newline
336
+ # character so that it renders properly as:
337
+ # categoric...
338
+ # handle_unknown='ignore',
339
+ # so we add [^\n]*\n which matches until the next \n
340
+ regex += r"[^\n]*\n"
341
+ right_lim = re.match(regex, repr_[::-1]).end()
342
+
343
+ ellipsis = "..."
344
+ if left_lim + len(ellipsis) < len(repr_) - right_lim:
345
+ # Only add ellipsis if it results in a shorter repr
346
+ repr_ = repr_[:left_lim] + "..." + repr_[-right_lim:]
347
+
348
+ return repr_
349
+
350
+ def __getstate__(self):
351
+ if getattr(self, "__slots__", None):
352
+ raise TypeError(
353
+ "You cannot use `__slots__` in objects inheriting from "
354
+ "`sklearn.base.BaseEstimator`."
355
+ )
356
+
357
+ try:
358
+ state = super().__getstate__()
359
+ if state is None:
360
+ # For Python 3.11+, an empty instance (no `__slots__`
361
+ # and no `__dict__`) will return a state equal to `None`.
362
+ state = self.__dict__.copy()
363
+ except AttributeError:
364
+ # Python < 3.11
365
+ state = self.__dict__.copy()
366
+
367
+ if type(self).__module__.startswith("sklearn."):
368
+ return dict(state.items(), _sklearn_version=__version__)
369
+ else:
370
+ return state
371
+
372
+ def __setstate__(self, state):
373
+ if type(self).__module__.startswith("sklearn."):
374
+ pickle_version = state.pop("_sklearn_version", "pre-0.18")
375
+ if pickle_version != __version__:
376
+ warnings.warn(
377
+ InconsistentVersionWarning(
378
+ estimator_name=self.__class__.__name__,
379
+ current_sklearn_version=__version__,
380
+ original_sklearn_version=pickle_version,
381
+ ),
382
+ )
383
+ try:
384
+ super().__setstate__(state)
385
+ except AttributeError:
386
+ self.__dict__.update(state)
387
+
388
+ def _more_tags(self):
389
+ return _DEFAULT_TAGS
390
+
391
+ def _get_tags(self):
392
+ collected_tags = {}
393
+ for base_class in reversed(inspect.getmro(self.__class__)):
394
+ if hasattr(base_class, "_more_tags"):
395
+ # need the if because mixins might not have _more_tags
396
+ # but might do redundant work in estimators
397
+ # (i.e. calling more tags on BaseEstimator multiple times)
398
+ more_tags = base_class._more_tags(self)
399
+ collected_tags.update(more_tags)
400
+ return collected_tags
401
+
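+ # Hedged example (editor's addition): tags returned by `_more_tags`
+ # overrides are collected along the MRO, so a subclass can amend the
+ # defaults, e.g.:
+ #
+ #   >>> class NoValidationEstimator(BaseEstimator):
+ #   ...     def _more_tags(self):
+ #   ...         return {"no_validation": True}
+ #   >>> NoValidationEstimator()._get_tags()["no_validation"]
+ #   True
+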
402
+ def _check_n_features(self, X, reset):
403
+ """Set the `n_features_in_` attribute, or check against it.
404
+
405
+ Parameters
406
+ ----------
407
+ X : {ndarray, sparse matrix} of shape (n_samples, n_features)
408
+ The input samples.
409
+ reset : bool
410
+ If True, the `n_features_in_` attribute is set to `X.shape[1]`.
411
+ If False and the attribute exists, then check that it is equal to
412
+ `X.shape[1]`. If False and the attribute does *not* exist, then
413
+ the check is skipped.
414
+ .. note::
415
+ It is recommended to call reset=True in `fit` and in the first
416
+ call to `partial_fit`. All other methods that validate `X`
417
+ should set `reset=False`.
418
+ """
419
+ try:
420
+ n_features = _num_features(X)
421
+ except TypeError as e:
422
+ if not reset and hasattr(self, "n_features_in_"):
423
+ raise ValueError(
424
+ "X does not contain any features, but "
425
+ f"{self.__class__.__name__} is expecting "
426
+ f"{self.n_features_in_} features"
427
+ ) from e
428
+ # If the number of features is not defined and reset=True,
429
+ # then we skip this check
430
+ return
431
+
432
+ if reset:
433
+ self.n_features_in_ = n_features
434
+ return
435
+
436
+ if not hasattr(self, "n_features_in_"):
437
+ # Skip this check if the expected number of input features
438
+ # was not recorded by calling fit first. This is typically the case
439
+ # for stateless transformers.
440
+ return
441
+
442
+ if n_features != self.n_features_in_:
443
+ raise ValueError(
444
+ f"X has {n_features} features, but {self.__class__.__name__} "
445
+ f"is expecting {self.n_features_in_} features as input."
446
+ )
447
+
448
+ def _check_feature_names(self, X, *, reset):
449
+ """Set or check the `feature_names_in_` attribute.
450
+
451
+ .. versionadded:: 1.0
452
+
453
+ Parameters
454
+ ----------
455
+ X : {ndarray, dataframe} of shape (n_samples, n_features)
456
+ The input samples.
457
+
458
+ reset : bool
459
+ Whether to reset the `feature_names_in_` attribute.
460
+ If False, the input will be checked for consistency with
461
+ feature names of data provided when reset was last True.
462
+ .. note::
463
+ It is recommended to call `reset=True` in `fit` and in the first
464
+ call to `partial_fit`. All other methods that validate `X`
465
+ should set `reset=False`.
466
+ """
467
+
468
+ if reset:
469
+ feature_names_in = _get_feature_names(X)
470
+ if feature_names_in is not None:
471
+ self.feature_names_in_ = feature_names_in
472
+ elif hasattr(self, "feature_names_in_"):
473
+ # Delete the attribute when the estimator is fitted on a new dataset
474
+ # that has no feature names.
475
+ delattr(self, "feature_names_in_")
476
+ return
477
+
478
+ fitted_feature_names = getattr(self, "feature_names_in_", None)
479
+ X_feature_names = _get_feature_names(X)
480
+
481
+ if fitted_feature_names is None and X_feature_names is None:
482
+ # no feature names seen in fit and in X
483
+ return
484
+
485
+ if X_feature_names is not None and fitted_feature_names is None:
486
+ warnings.warn(
487
+ f"X has feature names, but {self.__class__.__name__} was fitted without"
488
+ " feature names"
489
+ )
490
+ return
491
+
492
+ if X_feature_names is None and fitted_feature_names is not None:
493
+ warnings.warn(
494
+ "X does not have valid feature names, but"
495
+ f" {self.__class__.__name__} was fitted with feature names"
496
+ )
497
+ return
498
+
499
+ # validate the feature names against the `feature_names_in_` attribute
500
+ if len(fitted_feature_names) != len(X_feature_names) or np.any(
501
+ fitted_feature_names != X_feature_names
502
+ ):
503
+ message = (
504
+ "The feature names should match those that were passed during fit.\n"
505
+ )
506
+ fitted_feature_names_set = set(fitted_feature_names)
507
+ X_feature_names_set = set(X_feature_names)
508
+
509
+ unexpected_names = sorted(X_feature_names_set - fitted_feature_names_set)
510
+ missing_names = sorted(fitted_feature_names_set - X_feature_names_set)
511
+
512
+ def add_names(names):
513
+ output = ""
514
+ max_n_names = 5
515
+ for i, name in enumerate(names):
516
+ if i >= max_n_names:
517
+ output += "- ...\n"
518
+ break
519
+ output += f"- {name}\n"
520
+ return output
521
+
522
+ if unexpected_names:
523
+ message += "Feature names unseen at fit time:\n"
524
+ message += add_names(unexpected_names)
525
+
526
+ if missing_names:
527
+ message += "Feature names seen at fit time, yet now missing:\n"
528
+ message += add_names(missing_names)
529
+
530
+ if not missing_names and not unexpected_names:
531
+ message += (
532
+ "Feature names must be in the same order as they were in fit.\n"
533
+ )
534
+
535
+ raise ValueError(message)
536
+
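+ # Hedged example (editor's addition; assumes pandas is available): in recent
+ # scikit-learn releases, fitting on a DataFrame records the column names, and
+ # a later call with different names raises the ValueError built above, e.g.:
+ #
+ #   >>> import pandas as pd
+ #   >>> from sklearn.linear_model import LinearRegression
+ #   >>> X = pd.DataFrame({"a": [1.0, 2.0], "b": [3.0, 4.0]})
+ #   >>> est = LinearRegression().fit(X, [0.0, 1.0])
+ #   >>> est.predict(X.rename(columns={"b": "c"}))  # raises ValueError
+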
537
+ def _validate_data(
538
+ self,
539
+ X="no_validation",
540
+ y="no_validation",
541
+ reset=True,
542
+ validate_separately=False,
543
+ cast_to_ndarray=True,
544
+ **check_params,
545
+ ):
546
+ """Validate input data and set or check the `n_features_in_` attribute.
547
+
548
+ Parameters
549
+ ----------
550
+ X : {array-like, sparse matrix, dataframe} of shape \
551
+ (n_samples, n_features), default='no_validation'
552
+ The input samples.
553
+ If `'no_validation'`, no validation is performed on `X`. This is
554
+ useful for meta-estimators which can delegate input validation to
555
+ their underlying estimator(s). In that case `y` must be passed and
556
+ the only accepted `check_params` are `multi_output` and
557
+ `y_numeric`.
558
+
559
+ y : array-like of shape (n_samples,), default='no_validation'
560
+ The targets.
561
+
562
+ - If `None`, `check_array` is called on `X`. If the estimator's
563
+ requires_y tag is True, then an error will be raised.
564
+ - If `'no_validation'`, `check_array` is called on `X` and the
565
+ estimator's requires_y tag is ignored. This is a default
566
+ placeholder and is never meant to be explicitly set. In that case
567
+ `X` must be passed.
568
+ - Otherwise, only `y` with `_check_y` or both `X` and `y` are
569
+ checked with either `check_array` or `check_X_y` depending on
570
+ `validate_separately`.
571
+
572
+ reset : bool, default=True
573
+ Whether to reset the `n_features_in_` attribute.
574
+ If False, the input will be checked for consistency with data
575
+ provided when reset was last True.
576
+ .. note::
577
+ It is recommended to call reset=True in `fit` and in the first
578
+ call to `partial_fit`. All other methods that validate `X`
579
+ should set `reset=False`.
580
+
581
+ validate_separately : False or tuple of dicts, default=False
582
+ Only used if y is not None.
583
+ If False, call check_X_y(). Else, it must be a tuple of kwargs
584
+ to be used for calling check_array() on X and y respectively.
585
+
586
+ `estimator=self` is automatically added to these dicts to generate
587
+ more informative error message in case of invalid input data.
588
+
589
+ cast_to_ndarray : bool, default=True
590
+ Cast `X` and `y` to ndarray with checks in `check_params`. If
591
+ `False`, `X` and `y` are unchanged and only `feature_names_in_` and
592
+ `n_features_in_` are checked.
593
+
594
+ **check_params : kwargs
595
+ Parameters passed to :func:`sklearn.utils.check_array` or
596
+ :func:`sklearn.utils.check_X_y`. Ignored if validate_separately
597
+ is not False.
598
+
599
+ `estimator=self` is automatically added to these params to generate
600
+ more informative error message in case of invalid input data.
601
+
602
+ Returns
603
+ -------
604
+ out : {ndarray, sparse matrix} or tuple of these
605
+ The validated input. A tuple is returned if both `X` and `y` are
606
+ validated.
607
+ """
608
+ self._check_feature_names(X, reset=reset)
609
+
610
+ if y is None and self._get_tags()["requires_y"]:
611
+ raise ValueError(
612
+ f"This {self.__class__.__name__} estimator "
613
+ "requires y to be passed, but the target y is None."
614
+ )
615
+
616
+ no_val_X = isinstance(X, str) and X == "no_validation"
617
+ no_val_y = y is None or isinstance(y, str) and y == "no_validation"
618
+
619
+ if no_val_X and no_val_y:
620
+ raise ValueError("Validation should be done on X, y or both.")
621
+
622
+ default_check_params = {"estimator": self}
623
+ check_params = {**default_check_params, **check_params}
624
+
625
+ if not cast_to_ndarray:
626
+ if not no_val_X and no_val_y:
627
+ out = X
628
+ elif no_val_X and not no_val_y:
629
+ out = y
630
+ else:
631
+ out = X, y
632
+ elif not no_val_X and no_val_y:
633
+ out = check_array(X, input_name="X", **check_params)
634
+ elif no_val_X and not no_val_y:
635
+ out = _check_y(y, **check_params)
636
+ else:
637
+ if validate_separately:
638
+ # We need this because some estimators validate X and y
639
+ # separately, and in general, separately calling check_array()
640
+ # on X and y isn't equivalent to just calling check_X_y()
641
+ # :(
642
+ check_X_params, check_y_params = validate_separately
643
+ if "estimator" not in check_X_params:
644
+ check_X_params = {**default_check_params, **check_X_params}
645
+ X = check_array(X, input_name="X", **check_X_params)
646
+ if "estimator" not in check_y_params:
647
+ check_y_params = {**default_check_params, **check_y_params}
648
+ y = check_array(y, input_name="y", **check_y_params)
649
+ else:
650
+ X, y = check_X_y(X, y, **check_params)
651
+ out = X, y
652
+
653
+ if not no_val_X and check_params.get("ensure_2d", True):
654
+ self._check_n_features(X, reset=reset)
655
+
656
+ return out
657
+
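+ # Hedged usage sketch (editor's addition): a custom estimator typically calls
+ # `_validate_data` at the start of `fit` with the default `reset=True`, and
+ # again in `predict`-like methods with `reset=False`, e.g.:
+ #
+ #   >>> class MyEstimator(BaseEstimator):
+ #   ...     def fit(self, X, y):
+ #   ...         X, y = self._validate_data(X, y, dtype="numeric")
+ #   ...         return self
+ #   ...     def predict(self, X):
+ #   ...         X = self._validate_data(X, reset=False)
+ #   ...         return X.sum(axis=1)
+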
658
+ def _validate_params(self):
659
+ """Validate types and values of constructor parameters
660
+
661
+ The expected type and values must be defined in the `_parameter_constraints`
662
+ class attribute, which is a dictionary `param_name: list of constraints`. See
663
+ the docstring of `validate_parameter_constraints` for a description of the
664
+ accepted constraints.
665
+ """
666
+ validate_parameter_constraints(
667
+ self._parameter_constraints,
668
+ self.get_params(deep=False),
669
+ caller_name=self.__class__.__name__,
670
+ )
671
+
672
+ @property
673
+ def _repr_html_(self):
674
+ """HTML representation of estimator.
675
+
676
+ This is redundant with the logic of `_repr_mimebundle_`. The latter
677
+ should be favored in the long term; `_repr_html_` is only
678
+ implemented for consumers who do not interpret `_repr_mimebundle_`.
679
+ """
680
+ if get_config()["display"] != "diagram":
681
+ raise AttributeError(
682
+ "_repr_html_ is only defined when the "
683
+ "'display' configuration option is set to "
684
+ "'diagram'"
685
+ )
686
+ return self._repr_html_inner
687
+
688
+ def _repr_html_inner(self):
689
+ """This function is returned by the @property `_repr_html_` to make
690
+ `hasattr(estimator, "_repr_html_")` return `True` or `False` depending
691
+ on `get_config()["display"]`.
692
+ """
693
+ return estimator_html_repr(self)
694
+
695
+ def _repr_mimebundle_(self, **kwargs):
696
+ """Mime bundle used by jupyter kernels to display estimator"""
697
+ output = {"text/plain": repr(self)}
698
+ if get_config()["display"] == "diagram":
699
+ output["text/html"] = estimator_html_repr(self)
700
+ return output
701
+
702
+
703
+ class ClassifierMixin:
704
+ """Mixin class for all classifiers in scikit-learn.
705
+
706
+ This mixin defines the following functionality:
707
+
708
+ - `_estimator_type` class attribute defaulting to `"classifier"`;
709
+ - `score` method that defaults to :func:`~sklearn.metrics.accuracy_score`;
710
+ - enforcement that `fit` requires `y` to be passed, through the `requires_y` tag.
711
+
712
+ Read more in the :ref:`User Guide <rolling_your_own_estimator>`.
713
+
714
+ Examples
715
+ --------
716
+ >>> import numpy as np
717
+ >>> from sklearn.base import BaseEstimator, ClassifierMixin
718
+ >>> # Mixin classes should always be on the left-hand side for a correct MRO
719
+ >>> class MyEstimator(ClassifierMixin, BaseEstimator):
720
+ ... def __init__(self, *, param=1):
721
+ ... self.param = param
722
+ ... def fit(self, X, y=None):
723
+ ... self.is_fitted_ = True
724
+ ... return self
725
+ ... def predict(self, X):
726
+ ... return np.full(shape=X.shape[0], fill_value=self.param)
727
+ >>> estimator = MyEstimator(param=1)
728
+ >>> X = np.array([[1, 2], [2, 3], [3, 4]])
729
+ >>> y = np.array([1, 0, 1])
730
+ >>> estimator.fit(X, y).predict(X)
731
+ array([1, 1, 1])
732
+ >>> estimator.score(X, y)
733
+ 0.66...
734
+ """
735
+
736
+ _estimator_type = "classifier"
737
+
738
+ def score(self, X, y, sample_weight=None):
739
+ """
740
+ Return the mean accuracy on the given test data and labels.
741
+
742
+ In multi-label classification, this is the subset accuracy
743
+ which is a harsh metric since you require for each sample that
744
+ each label set be correctly predicted.
745
+
746
+ Parameters
747
+ ----------
748
+ X : array-like of shape (n_samples, n_features)
749
+ Test samples.
750
+
751
+ y : array-like of shape (n_samples,) or (n_samples, n_outputs)
752
+ True labels for `X`.
753
+
754
+ sample_weight : array-like of shape (n_samples,), default=None
755
+ Sample weights.
756
+
757
+ Returns
758
+ -------
759
+ score : float
760
+ Mean accuracy of ``self.predict(X)`` w.r.t. `y`.
761
+ """
762
+ from .metrics import accuracy_score
763
+
764
+ return accuracy_score(y, self.predict(X), sample_weight=sample_weight)
765
+
766
+ def _more_tags(self):
767
+ return {"requires_y": True}
768
+
769
+
770
+ class RegressorMixin:
771
+ """Mixin class for all regression estimators in scikit-learn.
772
+
773
+ This mixin defines the following functionality:
774
+
775
+ - `_estimator_type` class attribute defaulting to `"regressor"`;
776
+ - `score` method that defaults to :func:`~sklearn.metrics.r2_score`;
777
+ - enforcement that `fit` requires `y` to be passed, through the `requires_y` tag.
778
+
779
+ Read more in the :ref:`User Guide <rolling_your_own_estimator>`.
780
+
781
+ Examples
782
+ --------
783
+ >>> import numpy as np
784
+ >>> from sklearn.base import BaseEstimator, RegressorMixin
785
+ >>> # Mixin classes should always be on the left-hand side for a correct MRO
786
+ >>> class MyEstimator(RegressorMixin, BaseEstimator):
787
+ ... def __init__(self, *, param=1):
788
+ ... self.param = param
789
+ ... def fit(self, X, y=None):
790
+ ... self.is_fitted_ = True
791
+ ... return self
792
+ ... def predict(self, X):
793
+ ... return np.full(shape=X.shape[0], fill_value=self.param)
794
+ >>> estimator = MyEstimator(param=0)
795
+ >>> X = np.array([[1, 2], [2, 3], [3, 4]])
796
+ >>> y = np.array([-1, 0, 1])
797
+ >>> estimator.fit(X, y).predict(X)
798
+ array([0, 0, 0])
799
+ >>> estimator.score(X, y)
800
+ 0.0
801
+ """
802
+
803
+ _estimator_type = "regressor"
804
+
805
+ def score(self, X, y, sample_weight=None):
806
+ """Return the coefficient of determination of the prediction.
807
+
808
+ The coefficient of determination :math:`R^2` is defined as
809
+ :math:`(1 - \\frac{u}{v})`, where :math:`u` is the residual
810
+ sum of squares ``((y_true - y_pred)** 2).sum()`` and :math:`v`
811
+ is the total sum of squares ``((y_true - y_true.mean()) ** 2).sum()``.
812
+ The best possible score is 1.0 and it can be negative (because the
813
+ model can be arbitrarily worse). A constant model that always predicts
814
+ the expected value of `y`, disregarding the input features, would get
815
+ a :math:`R^2` score of 0.0.
816
+
817
+ Parameters
818
+ ----------
819
+ X : array-like of shape (n_samples, n_features)
820
+ Test samples. For some estimators this may be a precomputed
821
+ kernel matrix or a list of generic objects instead with shape
822
+ ``(n_samples, n_samples_fitted)``, where ``n_samples_fitted``
823
+ is the number of samples used in the fitting for the estimator.
824
+
825
+ y : array-like of shape (n_samples,) or (n_samples, n_outputs)
826
+ True values for `X`.
827
+
828
+ sample_weight : array-like of shape (n_samples,), default=None
829
+ Sample weights.
830
+
831
+ Returns
832
+ -------
833
+ score : float
834
+ :math:`R^2` of ``self.predict(X)`` w.r.t. `y`.
835
+
836
+ Notes
837
+ -----
838
+ The :math:`R^2` score used when calling ``score`` on a regressor uses
839
+ ``multioutput='uniform_average'`` from version 0.23 to keep consistent
840
+ with default value of :func:`~sklearn.metrics.r2_score`.
841
+ This influences the ``score`` method of all the multioutput
842
+ regressors (except for
843
+ :class:`~sklearn.multioutput.MultiOutputRegressor`).
844
+ """
845
+
846
+ from .metrics import r2_score
847
+
848
+ y_pred = self.predict(X)
849
+ return r2_score(y, y_pred, sample_weight=sample_weight)
850
+
851
+ def _more_tags(self):
852
+ return {"requires_y": True}
853
+
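+ # Worked check (editor's addition) of the R^2 formula in the docstring above,
+ # using the example values y = [-1, 0, 1] with constant predictions [0, 0, 0]:
+ # u = (-1-0)**2 + (0-0)**2 + (1-0)**2 = 2 and, since mean(y) = 0, v = 2 as
+ # well, so R^2 = 1 - u/v = 0.0, matching the `estimator.score(X, y)` output.
+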
854
+
855
+ class ClusterMixin:
856
+ """Mixin class for all cluster estimators in scikit-learn.
857
+
858
+ This mixin defines the following functionality:
+
+ - `_estimator_type` class attribute defaulting to `"clusterer"`;
859
+ - `fit_predict` method returning the cluster labels associated to each sample.
860
+
861
+ Examples
862
+ --------
863
+ >>> import numpy as np
864
+ >>> from sklearn.base import BaseEstimator, ClusterMixin
865
+ >>> class MyClusterer(ClusterMixin, BaseEstimator):
866
+ ... def fit(self, X, y=None):
867
+ ... self.labels_ = np.ones(shape=(len(X),), dtype=np.int64)
868
+ ... return self
869
+ >>> X = [[1, 2], [2, 3], [3, 4]]
870
+ >>> MyClusterer().fit_predict(X)
871
+ array([1, 1, 1])
872
+ """
873
+
874
+ _estimator_type = "clusterer"
875
+
876
+ def fit_predict(self, X, y=None, **kwargs):
877
+ """
878
+ Perform clustering on `X` and return cluster labels.
879
+
880
+ Parameters
881
+ ----------
882
+ X : array-like of shape (n_samples, n_features)
883
+ Input data.
884
+
885
+ y : Ignored
886
+ Not used, present for API consistency by convention.
887
+
888
+ **kwargs : dict
889
+ Arguments to be passed to ``fit``.
890
+
891
+ .. versionadded:: 1.4
892
+
893
+ Returns
894
+ -------
895
+ labels : ndarray of shape (n_samples,), dtype=np.int64
896
+ Cluster labels.
897
+ """
898
+ # non-optimized default implementation; override when a better
899
+ # method is possible for a given clustering algorithm
900
+ self.fit(X, **kwargs)
901
+ return self.labels_
902
+
903
+ def _more_tags(self):
904
+ return {"preserves_dtype": []}
905
+
906
+
907
+ class BiclusterMixin:
908
+ """Mixin class for all bicluster estimators in scikit-learn.
909
+
910
+ This mixin defines the following functionality:
911
+
912
+ - `biclusters_` property that returns the row and column indicators;
913
+ - `get_indices` method that returns the row and column indices of a bicluster;
914
+ - `get_shape` method that returns the shape of a bicluster;
915
+ - `get_submatrix` method that returns the submatrix corresponding to a bicluster.
916
+
917
+ Examples
918
+ --------
919
+ >>> import numpy as np
920
+ >>> from sklearn.base import BaseEstimator, BiclusterMixin
921
+ >>> class DummyBiClustering(BiclusterMixin, BaseEstimator):
922
+ ... def fit(self, X, y=None):
923
+ ... self.rows_ = np.ones(shape=(1, X.shape[0]), dtype=bool)
924
+ ... self.columns_ = np.ones(shape=(1, X.shape[1]), dtype=bool)
925
+ ... return self
926
+ >>> X = np.array([[1, 1], [2, 1], [1, 0],
927
+ ... [4, 7], [3, 5], [3, 6]])
928
+ >>> bicluster = DummyBiClustering().fit(X)
929
+ >>> hasattr(bicluster, "biclusters_")
930
+ True
931
+ >>> bicluster.get_indices(0)
932
+ (array([0, 1, 2, 3, 4, 5]), array([0, 1]))
933
+ """
934
+
935
+ @property
936
+ def biclusters_(self):
937
+ """Convenient way to get row and column indicators together.
938
+
939
+ Returns the ``rows_`` and ``columns_`` members.
940
+ """
941
+ return self.rows_, self.columns_
942
+
943
+ def get_indices(self, i):
944
+ """Row and column indices of the `i`'th bicluster.
945
+
946
+ Only works if ``rows_`` and ``columns_`` attributes exist.
947
+
948
+ Parameters
949
+ ----------
950
+ i : int
951
+ The index of the cluster.
952
+
953
+ Returns
954
+ -------
955
+ row_ind : ndarray, dtype=np.intp
956
+ Indices of rows in the dataset that belong to the bicluster.
957
+ col_ind : ndarray, dtype=np.intp
958
+ Indices of columns in the dataset that belong to the bicluster.
959
+ """
960
+ rows = self.rows_[i]
961
+ columns = self.columns_[i]
962
+ return np.nonzero(rows)[0], np.nonzero(columns)[0]
963
+
964
+ def get_shape(self, i):
965
+ """Shape of the `i`'th bicluster.
966
+
967
+ Parameters
968
+ ----------
969
+ i : int
970
+ The index of the cluster.
971
+
972
+ Returns
973
+ -------
974
+ n_rows : int
975
+ Number of rows in the bicluster.
976
+
977
+ n_cols : int
978
+ Number of columns in the bicluster.
979
+ """
980
+ indices = self.get_indices(i)
981
+ return tuple(len(i) for i in indices)
982
+
983
+ def get_submatrix(self, i, data):
984
+ """Return the submatrix corresponding to bicluster `i`.
985
+
986
+ Parameters
987
+ ----------
988
+ i : int
989
+ The index of the cluster.
990
+ data : array-like of shape (n_samples, n_features)
991
+ The data.
992
+
993
+ Returns
994
+ -------
995
+ submatrix : ndarray of shape (n_rows, n_cols)
996
+ The submatrix corresponding to bicluster `i`.
997
+
998
+ Notes
999
+ -----
1000
+ Works with sparse matrices. Only works if ``rows_`` and
1001
+ ``columns_`` attributes exist.
1002
+ """
1003
+ from .utils.validation import check_array
1004
+
1005
+ data = check_array(data, accept_sparse="csr")
1006
+ row_ind, col_ind = self.get_indices(i)
1007
+ return data[row_ind[:, np.newaxis], col_ind]
1008
+
1009
+
1010
+ class TransformerMixin(_SetOutputMixin):
1011
+ """Mixin class for all transformers in scikit-learn.
1012
+
1013
+ This mixin defines the following functionality:
1014
+
1015
+ - a `fit_transform` method that delegates to `fit` and `transform`;
1016
+ - a `set_output` method to output `X` as a specific container type.
1017
+
1018
+ If :term:`get_feature_names_out` is defined, then :class:`BaseEstimator` will
1019
+ automatically wrap `transform` and `fit_transform` to follow the `set_output`
1020
+ API. See the :ref:`developer_api_set_output` for details.
1021
+
1022
+ :class:`OneToOneFeatureMixin` and
1023
+ :class:`ClassNamePrefixFeaturesOutMixin` are helpful mixins for
1024
+ defining :term:`get_feature_names_out`.
1025
+
1026
+ Examples
1027
+ --------
1028
+ >>> import numpy as np
1029
+ >>> from sklearn.base import BaseEstimator, TransformerMixin
1030
+ >>> class MyTransformer(TransformerMixin, BaseEstimator):
1031
+ ... def __init__(self, *, param=1):
1032
+ ... self.param = param
1033
+ ... def fit(self, X, y=None):
1034
+ ... return self
1035
+ ... def transform(self, X):
1036
+ ... return np.full(shape=len(X), fill_value=self.param)
1037
+ >>> transformer = MyTransformer()
1038
+ >>> X = [[1, 2], [2, 3], [3, 4]]
1039
+ >>> transformer.fit_transform(X)
1040
+ array([1, 1, 1])
1041
+ """
1042
+
1043
+ def fit_transform(self, X, y=None, **fit_params):
1044
+ """
1045
+ Fit to data, then transform it.
1046
+
1047
+ Fits transformer to `X` and `y` with optional parameters `fit_params`
1048
+ and returns a transformed version of `X`.
1049
+
1050
+ Parameters
1051
+ ----------
1052
+ X : array-like of shape (n_samples, n_features)
1053
+ Input samples.
1054
+
1055
+ y : array-like of shape (n_samples,) or (n_samples, n_outputs), \
1056
+ default=None
1057
+ Target values (None for unsupervised transformations).
1058
+
1059
+ **fit_params : dict
1060
+ Additional fit parameters.
1061
+
1062
+ Returns
1063
+ -------
1064
+ X_new : ndarray of shape (n_samples, n_features_new)
1065
+ Transformed array.
1066
+ """
1067
+ # non-optimized default implementation; override when a better
1068
+ # method is possible for a given estimator
1069
+
1070
+ # we do not route parameters here, since consumers don't route. But
1071
+ # since it's possible for a `transform` method to also consume
1072
+ # metadata, we check if that's the case, and we raise a warning telling
1073
+ # users that they should implement a custom `fit_transform` method
1074
+ # to forward metadata to `transform` as well.
1075
+ #
1076
+ # For that, we calculate routing and check if anything would be routed
1077
+ # to `transform` if we were to route them.
1078
+ if _routing_enabled():
1079
+ transform_params = self.get_metadata_routing().consumes(
1080
+ method="transform", params=fit_params.keys()
1081
+ )
1082
+ if transform_params:
1083
+ warnings.warn(
1084
+ (
1085
+ f"This object ({self.__class__.__name__}) has a `transform`"
1086
+ " method which consumes metadata, but `fit_transform` does not"
1087
+ " forward metadata to `transform`. Please implement a custom"
1088
+ " `fit_transform` method to forward metadata to `transform` as"
1089
+ " well. Alternatively, you can explicitly do"
1090
+ " `set_transform_request`and set all values to `False` to"
1091
+ " disable metadata routed to `transform`, if that's an option."
1092
+ ),
1093
+ UserWarning,
1094
+ )
1095
+
1096
+ if y is None:
1097
+ # fit method of arity 1 (unsupervised transformation)
1098
+ return self.fit(X, **fit_params).transform(X)
1099
+ else:
1100
+ # fit method of arity 2 (supervised transformation)
1101
+ return self.fit(X, y, **fit_params).transform(X)
1102
+
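+ # Hedged example (editor's addition): for a transformer without a specialized
+ # implementation, `fit_transform(X)` is equivalent to chaining the two calls:
+ #
+ #   >>> import numpy as np
+ #   >>> from sklearn.preprocessing import StandardScaler
+ #   >>> X = np.array([[0.0], [2.0]])
+ #   >>> a = StandardScaler().fit_transform(X)
+ #   >>> b = StandardScaler().fit(X).transform(X)
+ #   >>> np.allclose(a, b)
+ #   True
+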
1103
+
1104
+ class OneToOneFeatureMixin:
1105
+ """Provides `get_feature_names_out` for simple transformers.
1106
+
1107
+ This mixin assumes there's a 1-to-1 correspondence between input features
1108
+ and output features, such as :class:`~sklearn.preprocessing.StandardScaler`.
1109
+
1110
+ Examples
1111
+ --------
1112
+ >>> import numpy as np
1113
+ >>> from sklearn.base import OneToOneFeatureMixin
1114
+ >>> class MyEstimator(OneToOneFeatureMixin):
1115
+ ... def fit(self, X, y=None):
1116
+ ... self.n_features_in_ = X.shape[1]
1117
+ ... return self
1118
+ >>> X = np.array([[1, 2], [3, 4]])
1119
+ >>> MyEstimator().fit(X).get_feature_names_out()
1120
+ array(['x0', 'x1'], dtype=object)
1121
+ """
1122
+
1123
+ def get_feature_names_out(self, input_features=None):
1124
+ """Get output feature names for transformation.
1125
+
1126
+ Parameters
1127
+ ----------
1128
+ input_features : array-like of str or None, default=None
1129
+ Input features.
1130
+
1131
+ - If `input_features` is `None`, then `feature_names_in_` is
1132
+ used as feature names in. If `feature_names_in_` is not defined,
1133
+ then the following input feature names are generated:
1134
+ `["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
1135
+ - If `input_features` is an array-like, then `input_features` must
1136
+ match `feature_names_in_` if `feature_names_in_` is defined.
1137
+
1138
+ Returns
1139
+ -------
1140
+ feature_names_out : ndarray of str objects
1141
+ Same as input features.
1142
+ """
1143
+ check_is_fitted(self, "n_features_in_")
1144
+ return _check_feature_names_in(self, input_features)
1145
+
1146
+
1147
+ class ClassNamePrefixFeaturesOutMixin:
1148
+ """Mixin class for transformers that generate their own names by prefixing.
1149
+
1150
+ This mixin is useful when the transformer needs to generate its own feature
1151
+ names out, such as :class:`~sklearn.decomposition.PCA`. For example, if
1152
+ :class:`~sklearn.decomposition.PCA` outputs 3 features, then the generated feature
1153
+ names out are: `["pca0", "pca1", "pca2"]`.
1154
+
1155
+ This mixin assumes that a `_n_features_out` attribute is defined when the
1156
+ transformer is fitted. `_n_features_out` is the number of output features
1157
+ that the transformer will return in `transform` or `fit_transform`.
1158
+
1159
+ Examples
1160
+ --------
1161
+ >>> import numpy as np
1162
+ >>> from sklearn.base import ClassNamePrefixFeaturesOutMixin
1163
+ >>> class MyEstimator(ClassNamePrefixFeaturesOutMixin):
1164
+ ... def fit(self, X, y=None):
1165
+ ... self._n_features_out = X.shape[1]
1166
+ ... return self
1167
+ >>> X = np.array([[1, 2], [3, 4]])
1168
+ >>> MyEstimator().fit(X).get_feature_names_out()
1169
+ array(['myestimator0', 'myestimator1'], dtype=object)
1170
+ """
1171
+
1172
+ def get_feature_names_out(self, input_features=None):
1173
+ """Get output feature names for transformation.
1174
+
1175
+ The feature names out will prefixed by the lowercased class name. For
1176
+ example, if the transformer outputs 3 features, then the feature names
1177
+ out are: `["class_name0", "class_name1", "class_name2"]`.
1178
+
1179
+ Parameters
1180
+ ----------
1181
+ input_features : array-like of str or None, default=None
1182
+ Only used to validate feature names with the names seen in `fit`.
1183
+
1184
+ Returns
1185
+ -------
1186
+ feature_names_out : ndarray of str objects
1187
+ Transformed feature names.
1188
+ """
1189
+ check_is_fitted(self, "_n_features_out")
1190
+ return _generate_get_feature_names_out(
1191
+ self, self._n_features_out, input_features=input_features
1192
+ )
1193
+
1194
+
1195
+ class DensityMixin:
1196
+ """Mixin class for all density estimators in scikit-learn.
1197
+
1198
+ This mixin defines the following functionality:
1199
+
1200
+ - `_estimator_type` class attribute defaulting to `"DensityEstimator"`;
1201
+ - `score` method that defaults to a no-op.
1202
+
1203
+ Examples
1204
+ --------
1205
+ >>> from sklearn.base import DensityMixin
1206
+ >>> class MyEstimator(DensityMixin):
1207
+ ... def fit(self, X, y=None):
1208
+ ... self.is_fitted_ = True
1209
+ ... return self
1210
+ >>> estimator = MyEstimator()
1211
+ >>> hasattr(estimator, "score")
1212
+ True
1213
+ """
1214
+
1215
+ _estimator_type = "DensityEstimator"
1216
+
1217
+ def score(self, X, y=None):
1218
+ """Return the score of the model on the data `X`.
1219
+
1220
+ Parameters
1221
+ ----------
1222
+ X : array-like of shape (n_samples, n_features)
1223
+ Test samples.
1224
+
1225
+ y : Ignored
1226
+ Not used, present for API consistency by convention.
1227
+
1228
+ Returns
1229
+ -------
1230
+ score : float
1231
+ """
1232
+ pass
1233
+
1234
+
1235
+ class OutlierMixin:
1236
+ """Mixin class for all outlier detection estimators in scikit-learn.
1237
+
1238
+ This mixin defines the following functionality:
1239
+
1240
+ - `_estimator_type` class attribute defaulting to `outlier_detector`;
1241
+ - `fit_predict` method that defaults to calling `fit` then `predict`.
1242
+
1243
+ Examples
1244
+ --------
1245
+ >>> import numpy as np
1246
+ >>> from sklearn.base import BaseEstimator, OutlierMixin
1247
+ >>> class MyEstimator(OutlierMixin):
1248
+ ... def fit(self, X, y=None):
1249
+ ... self.is_fitted_ = True
1250
+ ... return self
1251
+ ... def predict(self, X):
1252
+ ... return np.ones(shape=len(X))
1253
+ >>> estimator = MyEstimator()
1254
+ >>> X = np.array([[1, 2], [2, 3], [3, 4]])
1255
+ >>> estimator.fit_predict(X)
1256
+ array([1., 1., 1.])
1257
+ """
1258
+
1259
+ _estimator_type = "outlier_detector"
1260
+
1261
+ def fit_predict(self, X, y=None, **kwargs):
1262
+ """Perform fit on X and returns labels for X.
1263
+
1264
+ Returns -1 for outliers and 1 for inliers.
1265
+
1266
+ Parameters
1267
+ ----------
1268
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
1269
+ The input samples.
1270
+
1271
+ y : Ignored
1272
+ Not used, present for API consistency by convention.
1273
+
1274
+ **kwargs : dict
1275
+ Arguments to be passed to ``fit``.
1276
+
1277
+ .. versionadded:: 1.4
1278
+
1279
+ Returns
1280
+ -------
1281
+ y : ndarray of shape (n_samples,)
1282
+ 1 for inliers, -1 for outliers.
1283
+ """
1284
+ # we do not route parameters here, since consumers don't route. But
1285
+ # since it's possible for a `predict` method to also consume
1286
+ # metadata, we check if that's the case, and we raise a warning telling
1287
+ # users that they should implement a custom `fit_predict` method
1288
+ # to forward metadata to `predict` as well.
1289
+ #
1290
+ # For that, we calculate routing and check if anything would be routed
1291
+ # to `predict` if we were to route them.
1292
+ if _routing_enabled():
1293
+ transform_params = self.get_metadata_routing().consumes(
1294
+ method="predict", params=kwargs.keys()
1295
+ )
1296
+ if transform_params:
1297
+ warnings.warn(
1298
+ (
1299
+ f"This object ({self.__class__.__name__}) has a `predict` "
1300
+ "method which consumes metadata, but `fit_predict` does not "
1301
+ "forward metadata to `predict`. Please implement a custom "
1302
+ "`fit_predict` method to forward metadata to `predict` as well."
1303
+ "Alternatively, you can explicitly do `set_predict_request`"
1304
+ "and set all values to `False` to disable metadata routed to "
1305
+ "`predict`, if that's an option."
1306
+ ),
1307
+ UserWarning,
1308
+ )
1309
+
1310
+ # override for transductive outlier detectors like LocalOutlierFactor
1311
+ return self.fit(X, **kwargs).predict(X)
1312
+
1313
+
1314
+ class MetaEstimatorMixin:
1315
+ """Mixin class for all meta estimators in scikit-learn.
1316
+
1317
+ This mixin defines the following functionality:
1318
+
1319
+ - define `_required_parameters` that specifies the mandatory `estimator` parameter.
1320
+
1321
+ Examples
1322
+ --------
1323
+ >>> from sklearn.base import MetaEstimatorMixin
1324
+ >>> from sklearn.datasets import load_iris
1325
+ >>> from sklearn.linear_model import LogisticRegression
1326
+ >>> class MyEstimator(MetaEstimatorMixin):
1327
+ ... def __init__(self, *, estimator=None):
1328
+ ... self.estimator = estimator
1329
+ ... def fit(self, X, y=None):
1330
+ ... if self.estimator is None:
1331
+ ... self.estimator_ = LogisticRegression()
1332
+ ... else:
1333
+ ... self.estimator_ = self.estimator
1334
+ ... return self
1335
+ >>> X, y = load_iris(return_X_y=True)
1336
+ >>> estimator = MyEstimator().fit(X, y)
1337
+ >>> estimator.estimator_
1338
+ LogisticRegression()
1339
+ """
1340
+
1341
+ _required_parameters = ["estimator"]
1342
+
1343
+
1344
+ class MultiOutputMixin:
1345
+ """Mixin to mark estimators that support multioutput."""
1346
+
1347
+ def _more_tags(self):
1348
+ return {"multioutput": True}
1349
+
1350
+
1351
+ class _UnstableArchMixin:
1352
+ """Mark estimators that are non-determinstic on 32bit or PowerPC"""
1353
+
1354
+ def _more_tags(self):
1355
+ return {
1356
+ "non_deterministic": _IS_32BIT or platform.machine().startswith(
1357
+ ("ppc", "powerpc")
1358
+ )
1359
+ }
1360
+
1361
+
1362
+ def is_classifier(estimator):
1363
+ """Return True if the given estimator is (probably) a classifier.
1364
+
1365
+ Parameters
1366
+ ----------
1367
+ estimator : object
1368
+ Estimator object to test.
1369
+
1370
+ Returns
1371
+ -------
1372
+ out : bool
1373
+ True if estimator is a classifier and False otherwise.
1374
+
1375
+ Examples
1376
+ --------
1377
+ >>> from sklearn.base import is_classifier
1378
+ >>> from sklearn.svm import SVC, SVR
1379
+ >>> classifier = SVC()
1380
+ >>> regressor = SVR()
1381
+ >>> is_classifier(classifier)
1382
+ True
1383
+ >>> is_classifier(regressor)
1384
+ False
1385
+ """
1386
+ return getattr(estimator, "_estimator_type", None) == "classifier"
1387
+
1388
+
1389
+ def is_regressor(estimator):
1390
+ """Return True if the given estimator is (probably) a regressor.
1391
+
1392
+ Parameters
1393
+ ----------
1394
+ estimator : estimator instance
1395
+ Estimator object to test.
1396
+
1397
+ Returns
1398
+ -------
1399
+ out : bool
1400
+ True if estimator is a regressor and False otherwise.
1401
+
1402
+ Examples
1403
+ --------
1404
+ >>> from sklearn.base import is_regressor
1405
+ >>> from sklearn.svm import SVC, SVR
1406
+ >>> classifier = SVC()
1407
+ >>> regressor = SVR()
1408
+ >>> is_regressor(classifier)
1409
+ False
1410
+ >>> is_regressor(regressor)
1411
+ True
1412
+ """
1413
+ return getattr(estimator, "_estimator_type", None) == "regressor"
1414
+
1415
+
1416
+ def is_outlier_detector(estimator):
1417
+ """Return True if the given estimator is (probably) an outlier detector.
1418
+
1419
+ Parameters
1420
+ ----------
1421
+ estimator : estimator instance
1422
+ Estimator object to test.
1423
+
1424
+ Returns
1425
+ -------
1426
+ out : bool
1427
+ True if estimator is an outlier detector and False otherwise.
1428
+ """
1429
+ return getattr(estimator, "_estimator_type", None) == "outlier_detector"
1430
+
1431
+
1432
+ def _fit_context(*, prefer_skip_nested_validation):
1433
+ """Decorator to run the fit methods of estimators within context managers.
1434
+
1435
+ Parameters
1436
+ ----------
1437
+ prefer_skip_nested_validation : bool
1438
+ If True, the validation of parameters of inner estimators or functions
1439
+ called during fit will be skipped.
1440
+
1441
+ This is useful to avoid validating many times the parameters passed by the
1442
+ user from the public facing API. It's also useful to avoid validating
1443
+ parameters that we pass internally to inner functions that are guaranteed to
1444
+ be valid by the test suite.
1445
+
1446
+ It should be set to True for most estimators, except for those that receive
1447
+ non-validated objects as parameters, such as meta-estimators that are given
1448
+ estimator objects.
1449
+
1450
+ Returns
1451
+ -------
1452
+ decorated_fit : method
1453
+ The decorated fit method.
1454
+ """
1455
+
1456
+ def decorator(fit_method):
1457
+ @functools.wraps(fit_method)
1458
+ def wrapper(estimator, *args, **kwargs):
1459
+ global_skip_validation = get_config()["skip_parameter_validation"]
1460
+
1461
+ # we don't want to validate again for each call to partial_fit
1462
+ partial_fit_and_fitted = (
1463
+ fit_method.__name__ == "partial_fit" and _is_fitted(estimator)
1464
+ )
1465
+
1466
+ if not global_skip_validation and not partial_fit_and_fitted:
1467
+ estimator._validate_params()
1468
+
1469
+ with config_context(
1470
+ skip_parameter_validation=(
1471
+ prefer_skip_nested_validation or global_skip_validation
1472
+ )
1473
+ ):
1474
+ return fit_method(estimator, *args, **kwargs)
1475
+
1476
+ return wrapper
1477
+
1478
+ return decorator
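+
+ # Hedged usage sketch (editor's addition; `MyEstimator` and `alpha` are
+ # invented for illustration): decorating `fit` validates the declared
+ # parameter constraints once on entry and configures nested validation:
+ #
+ #   >>> from numbers import Real
+ #   >>> from sklearn.utils._param_validation import Interval
+ #   >>> class MyEstimator(BaseEstimator):
+ #   ...     _parameter_constraints = {
+ #   ...         "alpha": [Interval(Real, 0, None, closed="left")]
+ #   ...     }
+ #   ...     def __init__(self, alpha=1.0):
+ #   ...         self.alpha = alpha
+ #   ...     @_fit_context(prefer_skip_nested_validation=True)
+ #   ...     def fit(self, X, y=None):
+ #   ...         return self
+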
venv/lib/python3.10/site-packages/sklearn/calibration.py ADDED
@@ -0,0 +1,1410 @@
1
+ """Calibration of predicted probabilities."""
2
+
3
+ # Author: Alexandre Gramfort <[email protected]>
4
+ # Balazs Kegl <[email protected]>
5
+ # Jan Hendrik Metzen <[email protected]>
6
+ # Mathieu Blondel <[email protected]>
7
+ #
8
+ # License: BSD 3 clause
9
+
10
+ import warnings
11
+ from inspect import signature
12
+ from math import log
13
+ from numbers import Integral, Real
14
+
15
+ import numpy as np
16
+ from scipy.optimize import minimize
17
+ from scipy.special import expit
18
+
19
+ from sklearn.utils import Bunch
20
+
21
+ from ._loss import HalfBinomialLoss
22
+ from .base import (
23
+ BaseEstimator,
24
+ ClassifierMixin,
25
+ MetaEstimatorMixin,
26
+ RegressorMixin,
27
+ _fit_context,
28
+ clone,
29
+ )
30
+ from .isotonic import IsotonicRegression
31
+ from .model_selection import check_cv, cross_val_predict
32
+ from .preprocessing import LabelEncoder, label_binarize
33
+ from .svm import LinearSVC
34
+ from .utils import (
35
+ _safe_indexing,
36
+ column_or_1d,
37
+ indexable,
38
+ )
39
+ from .utils._param_validation import (
40
+ HasMethods,
41
+ Interval,
42
+ StrOptions,
43
+ validate_params,
44
+ )
45
+ from .utils._plotting import _BinaryClassifierCurveDisplayMixin
46
+ from .utils._response import _get_response_values, _process_predict_proba
47
+ from .utils.metadata_routing import (
48
+ MetadataRouter,
49
+ MethodMapping,
50
+ _routing_enabled,
51
+ process_routing,
52
+ )
53
+ from .utils.multiclass import check_classification_targets
54
+ from .utils.parallel import Parallel, delayed
55
+ from .utils.validation import (
56
+ _check_method_params,
57
+ _check_pos_label_consistency,
58
+ _check_response_method,
59
+ _check_sample_weight,
60
+ _num_samples,
61
+ check_consistent_length,
62
+ check_is_fitted,
63
+ )
64
+
65
+
66
+ class CalibratedClassifierCV(ClassifierMixin, MetaEstimatorMixin, BaseEstimator):
67
+ """Probability calibration with isotonic regression or logistic regression.
68
+
69
+ This class uses cross-validation to both estimate the parameters of a
70
+ classifier and subsequently calibrate a classifier. With default
71
+ `ensemble=True`, for each cv split it
72
+ fits a copy of the base estimator to the training subset, and calibrates it
73
+ using the testing subset. For prediction, predicted probabilities are
74
+ averaged across these individual calibrated classifiers. When
75
+ `ensemble=False`, cross-validation is used to obtain unbiased predictions,
76
+ via :func:`~sklearn.model_selection.cross_val_predict`, which are then
77
+ used for calibration. For prediction, the base estimator, trained using all
78
+ the data, is used. This is the prediction method implemented when
79
+ `probabilities=True` for :class:`~sklearn.svm.SVC` and :class:`~sklearn.svm.NuSVC`
80
+ estimators (see :ref:`User Guide <scores_probabilities>` for details).
81
+
82
+ Already fitted classifiers can be calibrated via the parameter
83
+ `cv="prefit"`. In this case, no cross-validation is used and all provided
84
+ data is used for calibration. The user has to take care manually that data
85
+ for model fitting and calibration are disjoint.
86
+
87
+ The calibration is based on the :term:`decision_function` method of the
88
+ `estimator` if it exists, else on :term:`predict_proba`.
89
+
90
+ Read more in the :ref:`User Guide <calibration>`.
91
+
92
+ Parameters
93
+ ----------
94
+ estimator : estimator instance, default=None
95
+ The classifier whose output need to be calibrated to provide more
96
+ accurate `predict_proba` outputs. The default classifier is
97
+ a :class:`~sklearn.svm.LinearSVC`.
98
+
99
+ .. versionadded:: 1.2
100
+
101
+ method : {'sigmoid', 'isotonic'}, default='sigmoid'
102
+ The method to use for calibration. Can be 'sigmoid' which
103
+ corresponds to Platt's method (i.e. a logistic regression model) or
104
+ 'isotonic' which is a non-parametric approach. It is not advised to
105
+ use isotonic calibration with too few calibration samples
106
+ ``(<<1000)`` since it tends to overfit.
107
+
108
+ cv : int, cross-validation generator, iterable or "prefit", \
109
+ default=None
110
+ Determines the cross-validation splitting strategy.
111
+ Possible inputs for cv are:
112
+
113
+ - None, to use the default 5-fold cross-validation,
114
+ - integer, to specify the number of folds.
115
+ - :term:`CV splitter`,
116
+ - An iterable yielding (train, test) splits as arrays of indices.
117
+
118
+ For integer/None inputs, if ``y`` is binary or multiclass,
119
+ :class:`~sklearn.model_selection.StratifiedKFold` is used. If ``y`` is
120
+ neither binary nor multiclass, :class:`~sklearn.model_selection.KFold`
121
+ is used.
122
+
123
+ Refer to the :ref:`User Guide <cross_validation>` for the various
124
+ cross-validation strategies that can be used here.
125
+
126
+ If "prefit" is passed, it is assumed that `estimator` has been
127
+ fitted already and all data is used for calibration.
128
+
129
+ .. versionchanged:: 0.22
130
+ ``cv`` default value if None changed from 3-fold to 5-fold.
131
+
132
+ n_jobs : int, default=None
133
+ Number of jobs to run in parallel.
134
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
135
+ ``-1`` means using all processors.
136
+
137
+ Base estimator clones are fitted in parallel across cross-validation
138
+ iterations. Therefore parallelism happens only when `cv != "prefit"`.
139
+
140
+ See :term:`Glossary <n_jobs>` for more details.
141
+
142
+ .. versionadded:: 0.24
143
+
144
+ ensemble : bool, default=True
145
+ Determines how the calibrator is fitted when `cv` is not `'prefit'`.
146
+ Ignored if `cv='prefit'`.
147
+
148
+ If `True`, the `estimator` is fitted using training data, and
149
+ calibrated using testing data, for each `cv` fold. The final estimator
150
+ is an ensemble of `n_cv` fitted classifier and calibrator pairs, where
151
+ `n_cv` is the number of cross-validation folds. The output is the
152
+ average predicted probabilities of all pairs.
153
+
154
+ If `False`, `cv` is used to compute unbiased predictions, via
155
+ :func:`~sklearn.model_selection.cross_val_predict`, which are then
156
+ used for calibration. At prediction time, the classifier used is the
157
+ `estimator` trained on all the data.
158
+ Note that this method is also internally implemented in
159
+ :mod:`sklearn.svm` estimators with the `probabilities=True` parameter.
160
+
161
+ .. versionadded:: 0.24
162
+
163
+ Attributes
164
+ ----------
165
+ classes_ : ndarray of shape (n_classes,)
166
+ The class labels.
167
+
168
+ n_features_in_ : int
169
+ Number of features seen during :term:`fit`. Only defined if the
170
+ underlying estimator exposes such an attribute when fit.
171
+
172
+ .. versionadded:: 0.24
173
+
174
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
175
+ Names of features seen during :term:`fit`. Only defined if the
176
+ underlying estimator exposes such an attribute when fit.
177
+
178
+ .. versionadded:: 1.0
179
+
180
+ calibrated_classifiers_ : list (len() equal to cv or 1 if `cv="prefit"` \
181
+ or `ensemble=False`)
182
+ The list of classifier and calibrator pairs.
183
+
184
+ - When `cv="prefit"`, the fitted `estimator` and fitted
185
+ calibrator.
186
+ - When `cv` is not "prefit" and `ensemble=True`, `n_cv` fitted
187
+ `estimator` and calibrator pairs. `n_cv` is the number of
188
+ cross-validation folds.
189
+ - When `cv` is not "prefit" and `ensemble=False`, the `estimator`,
190
+ fitted on all the data, and fitted calibrator.
191
+
192
+ .. versionchanged:: 0.24
193
+ Single calibrated classifier case when `ensemble=False`.
194
+
195
+ See Also
196
+ --------
197
+ calibration_curve : Compute true and predicted probabilities
198
+ for a calibration curve.
199
+
200
+ References
201
+ ----------
202
+ .. [1] Obtaining calibrated probability estimates from decision trees
203
+ and naive Bayesian classifiers, B. Zadrozny & C. Elkan, ICML 2001
204
+
205
+ .. [2] Transforming Classifier Scores into Accurate Multiclass
206
+ Probability Estimates, B. Zadrozny & C. Elkan, (KDD 2002)
207
+
208
+ .. [3] Probabilistic Outputs for Support Vector Machines and Comparisons to
209
+ Regularized Likelihood Methods, J. Platt, (1999)
210
+
211
+ .. [4] Predicting Good Probabilities with Supervised Learning,
212
+ A. Niculescu-Mizil & R. Caruana, ICML 2005
213
+
214
+ Examples
215
+ --------
216
+ >>> from sklearn.datasets import make_classification
217
+ >>> from sklearn.naive_bayes import GaussianNB
218
+ >>> from sklearn.calibration import CalibratedClassifierCV
219
+ >>> X, y = make_classification(n_samples=100, n_features=2,
220
+ ... n_redundant=0, random_state=42)
221
+ >>> base_clf = GaussianNB()
222
+ >>> calibrated_clf = CalibratedClassifierCV(base_clf, cv=3)
223
+ >>> calibrated_clf.fit(X, y)
224
+ CalibratedClassifierCV(...)
225
+ >>> len(calibrated_clf.calibrated_classifiers_)
226
+ 3
227
+ >>> calibrated_clf.predict_proba(X)[:5, :]
228
+ array([[0.110..., 0.889...],
229
+ [0.072..., 0.927...],
230
+ [0.928..., 0.071...],
231
+ [0.928..., 0.071...],
232
+ [0.071..., 0.928...]])
233
+ >>> from sklearn.model_selection import train_test_split
234
+ >>> X, y = make_classification(n_samples=100, n_features=2,
235
+ ... n_redundant=0, random_state=42)
236
+ >>> X_train, X_calib, y_train, y_calib = train_test_split(
237
+ ... X, y, random_state=42
238
+ ... )
239
+ >>> base_clf = GaussianNB()
240
+ >>> base_clf.fit(X_train, y_train)
241
+ GaussianNB()
242
+ >>> calibrated_clf = CalibratedClassifierCV(base_clf, cv="prefit")
243
+ >>> calibrated_clf.fit(X_calib, y_calib)
244
+ CalibratedClassifierCV(...)
245
+ >>> len(calibrated_clf.calibrated_classifiers_)
246
+ 1
247
+ >>> calibrated_clf.predict_proba([[-0.5, 0.5]])
248
+ array([[0.936..., 0.063...]])
249
+ """
250
+
251
+ _parameter_constraints: dict = {
252
+ "estimator": [
253
+ HasMethods(["fit", "predict_proba"]),
254
+ HasMethods(["fit", "decision_function"]),
255
+ None,
256
+ ],
257
+ "method": [StrOptions({"isotonic", "sigmoid"})],
258
+ "cv": ["cv_object", StrOptions({"prefit"})],
259
+ "n_jobs": [Integral, None],
260
+ "ensemble": ["boolean"],
261
+ }
262
+
263
+ def __init__(
264
+ self,
265
+ estimator=None,
266
+ *,
267
+ method="sigmoid",
268
+ cv=None,
269
+ n_jobs=None,
270
+ ensemble=True,
271
+ ):
272
+ self.estimator = estimator
273
+ self.method = method
274
+ self.cv = cv
275
+ self.n_jobs = n_jobs
276
+ self.ensemble = ensemble
277
+
278
+ def _get_estimator(self):
279
+ """Resolve which estimator to return (default is LinearSVC)"""
280
+ if self.estimator is None:
281
+ # we want all classifiers that don't expose a random_state
282
+ # to be deterministic (and we don't want to expose this one).
283
+ estimator = LinearSVC(random_state=0, dual="auto")
284
+ if _routing_enabled():
285
+ estimator.set_fit_request(sample_weight=True)
286
+ else:
287
+ estimator = self.estimator
288
+
289
+ return estimator
290
+
291
+ @_fit_context(
292
+ # CalibratedClassifierCV.estimator is not validated yet
293
+ prefer_skip_nested_validation=False
294
+ )
295
+ def fit(self, X, y, sample_weight=None, **fit_params):
296
+ """Fit the calibrated model.
297
+
298
+ Parameters
299
+ ----------
300
+ X : array-like of shape (n_samples, n_features)
301
+ Training data.
302
+
303
+ y : array-like of shape (n_samples,)
304
+ Target values.
305
+
306
+ sample_weight : array-like of shape (n_samples,), default=None
307
+ Sample weights. If None, then samples are equally weighted.
308
+
309
+ **fit_params : dict
310
+ Parameters to pass to the `fit` method of the underlying
311
+ classifier.
312
+
313
+ Returns
314
+ -------
315
+ self : object
316
+ Returns an instance of self.
317
+ """
318
+ check_classification_targets(y)
319
+ X, y = indexable(X, y)
320
+ if sample_weight is not None:
321
+ sample_weight = _check_sample_weight(sample_weight, X)
322
+
323
+ estimator = self._get_estimator()
324
+
325
+ self.calibrated_classifiers_ = []
326
+ if self.cv == "prefit":
327
+ # `classes_` should be consistent with that of estimator
328
+ check_is_fitted(self.estimator, attributes=["classes_"])
329
+ self.classes_ = self.estimator.classes_
330
+
331
+ predictions, _ = _get_response_values(
332
+ estimator,
333
+ X,
334
+ response_method=["decision_function", "predict_proba"],
335
+ )
336
+ if predictions.ndim == 1:
337
+ # Reshape binary output from `(n_samples,)` to `(n_samples, 1)`
338
+ predictions = predictions.reshape(-1, 1)
339
+
340
+ calibrated_classifier = _fit_calibrator(
341
+ estimator,
342
+ predictions,
343
+ y,
344
+ self.classes_,
345
+ self.method,
346
+ sample_weight,
347
+ )
348
+ self.calibrated_classifiers_.append(calibrated_classifier)
349
+ else:
350
+ # Set `classes_` using all `y`
351
+ label_encoder_ = LabelEncoder().fit(y)
352
+ self.classes_ = label_encoder_.classes_
353
+
354
+ if _routing_enabled():
355
+ routed_params = process_routing(
356
+ self,
357
+ "fit",
358
+ sample_weight=sample_weight,
359
+ **fit_params,
360
+ )
361
+ else:
362
+ # sample_weight checks
363
+ fit_parameters = signature(estimator.fit).parameters
364
+ supports_sw = "sample_weight" in fit_parameters
365
+ if sample_weight is not None and not supports_sw:
366
+ estimator_name = type(estimator).__name__
367
+ warnings.warn(
368
+ f"Since {estimator_name} does not appear to accept"
369
+ " sample_weight, sample weights will only be used for the"
370
+ " calibration itself. This can be caused by a limitation of"
371
+ " the current scikit-learn API. See the following issue for"
372
+ " more details:"
373
+ " https://github.com/scikit-learn/scikit-learn/issues/21134."
374
+ " Be warned that the result of the calibration is likely to be"
375
+ " incorrect."
376
+ )
377
+ routed_params = Bunch()
378
+ routed_params.splitter = Bunch(split={}) # no routing for splitter
379
+ routed_params.estimator = Bunch(fit=fit_params)
380
+ if sample_weight is not None and supports_sw:
381
+ routed_params.estimator.fit["sample_weight"] = sample_weight
382
+
383
+ # Check that each cross-validation fold can have at least one
384
+ # example per class
385
+ if isinstance(self.cv, int):
386
+ n_folds = self.cv
387
+ elif hasattr(self.cv, "n_splits"):
388
+ n_folds = self.cv.n_splits
389
+ else:
390
+ n_folds = None
391
+ if n_folds and np.any(
392
+ [np.sum(y == class_) < n_folds for class_ in self.classes_]
393
+ ):
394
+ raise ValueError(
395
+ f"Requesting {n_folds}-fold "
396
+ "cross-validation but provided less than "
397
+ f"{n_folds} examples for at least one class."
398
+ )
399
+ cv = check_cv(self.cv, y, classifier=True)
400
+
401
+ if self.ensemble:
402
+ parallel = Parallel(n_jobs=self.n_jobs)
403
+ self.calibrated_classifiers_ = parallel(
404
+ delayed(_fit_classifier_calibrator_pair)(
405
+ clone(estimator),
406
+ X,
407
+ y,
408
+ train=train,
409
+ test=test,
410
+ method=self.method,
411
+ classes=self.classes_,
412
+ sample_weight=sample_weight,
413
+ fit_params=routed_params.estimator.fit,
414
+ )
415
+ for train, test in cv.split(X, y, **routed_params.splitter.split)
416
+ )
417
+ else:
418
+ this_estimator = clone(estimator)
419
+ method_name = _check_response_method(
420
+ this_estimator,
421
+ ["decision_function", "predict_proba"],
422
+ ).__name__
423
+ predictions = cross_val_predict(
424
+ estimator=this_estimator,
425
+ X=X,
426
+ y=y,
427
+ cv=cv,
428
+ method=method_name,
429
+ n_jobs=self.n_jobs,
430
+ params=routed_params.estimator.fit,
431
+ )
432
+ if len(self.classes_) == 2:
433
+ # Ensure shape (n_samples, 1) in the binary case
434
+ if method_name == "predict_proba":
435
+ # Select the probability column of the positive class
436
+ predictions = _process_predict_proba(
437
+ y_pred=predictions,
438
+ target_type="binary",
439
+ classes=self.classes_,
440
+ pos_label=self.classes_[1],
441
+ )
442
+ predictions = predictions.reshape(-1, 1)
443
+
444
+ this_estimator.fit(X, y, **routed_params.estimator.fit)
445
+ # Note: Here we don't pass on fit_params because the supported
446
+ # calibrators don't support fit_params anyway
447
+ calibrated_classifier = _fit_calibrator(
448
+ this_estimator,
449
+ predictions,
450
+ y,
451
+ self.classes_,
452
+ self.method,
453
+ sample_weight,
454
+ )
455
+ self.calibrated_classifiers_.append(calibrated_classifier)
456
+
457
+ first_clf = self.calibrated_classifiers_[0].estimator
458
+ if hasattr(first_clf, "n_features_in_"):
459
+ self.n_features_in_ = first_clf.n_features_in_
460
+ if hasattr(first_clf, "feature_names_in_"):
461
+ self.feature_names_in_ = first_clf.feature_names_in_
462
+ return self
463
+
464
+ def predict_proba(self, X):
465
+ """Calibrated probabilities of classification.
466
+
467
+ This function returns calibrated probabilities of classification
468
+ according to each class on an array of test vectors X.
469
+
470
+ Parameters
471
+ ----------
472
+ X : array-like of shape (n_samples, n_features)
473
+ The samples, as accepted by `estimator.predict_proba`.
474
+
475
+ Returns
476
+ -------
477
+ C : ndarray of shape (n_samples, n_classes)
478
+ The predicted probabilities.
479
+ """
480
+ check_is_fitted(self)
481
+ # Compute the arithmetic mean of the predictions of the calibrated
482
+ # classifiers
483
+ mean_proba = np.zeros((_num_samples(X), len(self.classes_)))
484
+ for calibrated_classifier in self.calibrated_classifiers_:
485
+ proba = calibrated_classifier.predict_proba(X)
486
+ mean_proba += proba
487
+
488
+ mean_proba /= len(self.calibrated_classifiers_)
489
+
490
+ return mean_proba
491
+
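+ # A minimal illustrative sketch (not part of the class) of the averaging
+ # performed by `predict_proba` above, with two assumed member outputs:
+ # the ensemble probability is the arithmetic mean over the fitted
+ # classifier/calibrator pairs.
+ #
+ #     import numpy as np
+ #     member_probas = [np.array([[0.2, 0.8]]), np.array([[0.4, 0.6]])]
+ #     mean_proba = sum(member_probas) / len(member_probas)   # [[0.3, 0.7]]
+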
492
+ def predict(self, X):
493
+ """Predict the target of new samples.
494
+
495
+ The predicted class is the class that has the highest probability,
496
+ and can thus be different from the prediction of the uncalibrated classifier.
497
+
498
+ Parameters
499
+ ----------
500
+ X : array-like of shape (n_samples, n_features)
501
+ The samples, as accepted by `estimator.predict`.
502
+
503
+ Returns
504
+ -------
505
+ C : ndarray of shape (n_samples,)
506
+ The predicted class.
507
+ """
508
+ check_is_fitted(self)
509
+ return self.classes_[np.argmax(self.predict_proba(X), axis=1)]
510
+
511
+ def get_metadata_routing(self):
512
+ """Get metadata routing of this object.
513
+
514
+ Please check :ref:`User Guide <metadata_routing>` on how the routing
515
+ mechanism works.
516
+
517
+ Returns
518
+ -------
519
+ routing : MetadataRouter
520
+ A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
521
+ routing information.
522
+ """
523
+ router = (
524
+ MetadataRouter(owner=self.__class__.__name__)
525
+ .add_self_request(self)
526
+ .add(
527
+ estimator=self._get_estimator(),
528
+ method_mapping=MethodMapping().add(callee="fit", caller="fit"),
529
+ )
530
+ .add(
531
+ splitter=self.cv,
532
+ method_mapping=MethodMapping().add(callee="split", caller="fit"),
533
+ )
534
+ )
535
+ return router
536
+
537
+ def _more_tags(self):
538
+ return {
539
+ "_xfail_checks": {
540
+ "check_sample_weights_invariance": (
541
+ "Due to the cross-validation and sample ordering, removing a sample"
542
+ " is not strictly equal to putting is weight to zero. Specific unit"
543
+ " tests are added for CalibratedClassifierCV specifically."
544
+ ),
545
+ }
546
+ }
547
+
548
+
549
+ def _fit_classifier_calibrator_pair(
550
+ estimator,
551
+ X,
552
+ y,
553
+ train,
554
+ test,
555
+ method,
556
+ classes,
557
+ sample_weight=None,
558
+ fit_params=None,
559
+ ):
560
+ """Fit a classifier/calibration pair on a given train/test split.
561
+
562
+ Fit the classifier on the train set, compute its predictions on the test
563
+ set and use the predictions as input to fit the calibrator along with the
564
+ test labels.
565
+
566
+ Parameters
567
+ ----------
568
+ estimator : estimator instance
569
+ Cloned base estimator.
570
+
571
+ X : array-like, shape (n_samples, n_features)
572
+ Sample data.
573
+
574
+ y : array-like, shape (n_samples,)
575
+ Targets.
576
+
577
+ train : ndarray, shape (n_train_indices,)
578
+ Indices of the training subset.
579
+
580
+ test : ndarray, shape (n_test_indices,)
581
+ Indices of the testing subset.
582
+
583
+ method : {'sigmoid', 'isotonic'}
584
+ Method to use for calibration.
585
+
586
+ classes : ndarray, shape (n_classes,)
587
+ The target classes.
588
+
589
+ sample_weight : array-like, default=None
590
+ Sample weights for `X`.
591
+
592
+ fit_params : dict, default=None
593
+ Parameters to pass to the `fit` method of the underlying
594
+ classifier.
595
+
596
+ Returns
597
+ -------
598
+ calibrated_classifier : _CalibratedClassifier instance
599
+ """
600
+ fit_params_train = _check_method_params(X, params=fit_params, indices=train)
601
+ X_train, y_train = _safe_indexing(X, train), _safe_indexing(y, train)
602
+ X_test, y_test = _safe_indexing(X, test), _safe_indexing(y, test)
603
+
604
+ estimator.fit(X_train, y_train, **fit_params_train)
605
+
606
+ predictions, _ = _get_response_values(
607
+ estimator,
608
+ X_test,
609
+ response_method=["decision_function", "predict_proba"],
610
+ )
611
+ if predictions.ndim == 1:
612
+ # Reshape binary output from `(n_samples,)` to `(n_samples, 1)`
613
+ predictions = predictions.reshape(-1, 1)
614
+
615
+ sw_test = None if sample_weight is None else _safe_indexing(sample_weight, test)
616
+ calibrated_classifier = _fit_calibrator(
617
+ estimator, predictions, y_test, classes, method, sample_weight=sw_test
618
+ )
619
+ return calibrated_classifier
620
+
621
+
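+ # A minimal illustrative sketch (assumptions only, not part of the
+ # module) of the cross-fitting performed by
+ # `_fit_classifier_calibrator_pair` above, with an assumed index split:
+ # the classifier sees only the train indices, while the calibrator is
+ # fitted on the classifier's held-out scores for the test indices.
+ #
+ #     import numpy as np
+ #     from sklearn.svm import LinearSVC
+ #
+ #     rng = np.random.RandomState(0)
+ #     X = rng.randn(40, 2)
+ #     y = (X[:, 0] > 0).astype(int)
+ #     train, test = np.arange(20), np.arange(20, 40)
+ #     clf = LinearSVC(dual="auto").fit(X[train], y[train])
+ #     held_out_scores = clf.decision_function(X[test])  # input to the calibrator
+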
622
+ def _fit_calibrator(clf, predictions, y, classes, method, sample_weight=None):
623
+ """Fit calibrator(s) and return a `_CalibratedClassifier`
624
+ instance.
625
+
626
+ `n_classes` (i.e. `len(clf.classes_)`) calibrators are fitted.
627
+ However, if `n_classes` equals 2, one calibrator is fitted.
628
+
629
+ Parameters
630
+ ----------
631
+ clf : estimator instance
632
+ Fitted classifier.
633
+
634
+ predictions : array-like, shape (n_samples, n_classes) or (n_samples, 1) \
635
+ when binary.
636
+ Raw predictions returned by the un-calibrated base classifier.
637
+
638
+ y : array-like, shape (n_samples,)
639
+ The targets.
640
+
641
+ classes : ndarray, shape (n_classes,)
642
+ All the prediction classes.
643
+
644
+ method : {'sigmoid', 'isotonic'}
645
+ The method to use for calibration.
646
+
647
+ sample_weight : ndarray, shape (n_samples,), default=None
648
+ Sample weights. If None, then samples are equally weighted.
649
+
650
+ Returns
651
+ -------
652
+ pipeline : _CalibratedClassifier instance
653
+ """
654
+ Y = label_binarize(y, classes=classes)
655
+ label_encoder = LabelEncoder().fit(classes)
656
+ pos_class_indices = label_encoder.transform(clf.classes_)
657
+ calibrators = []
658
+ for class_idx, this_pred in zip(pos_class_indices, predictions.T):
659
+ if method == "isotonic":
660
+ calibrator = IsotonicRegression(out_of_bounds="clip")
661
+ else: # "sigmoid"
662
+ calibrator = _SigmoidCalibration()
663
+ calibrator.fit(this_pred, Y[:, class_idx], sample_weight)
664
+ calibrators.append(calibrator)
665
+
666
+ pipeline = _CalibratedClassifier(clf, calibrators, method=method, classes=classes)
667
+ return pipeline
668
+
669
+
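+ # A minimal illustrative sketch of the one-vs-rest fitting done by
+ # `_fit_calibrator` above, with made-up multiclass scores: the targets
+ # are binarized and one calibrator is fitted per column of
+ # `predictions` (a single column, hence a single calibrator, in the
+ # binary case).
+ #
+ #     import numpy as np
+ #     from sklearn.isotonic import IsotonicRegression
+ #     from sklearn.preprocessing import label_binarize
+ #
+ #     rng = np.random.RandomState(0)
+ #     y = np.array([0, 1, 2, 1, 0, 2])
+ #     classes = np.array([0, 1, 2])
+ #     predictions = rng.rand(6, 3)          # e.g. uncalibrated predict_proba
+ #     Y = label_binarize(y, classes=classes)
+ #     calibrators = [
+ #         IsotonicRegression(out_of_bounds="clip").fit(pred_col, Y[:, k])
+ #         for k, pred_col in enumerate(predictions.T)
+ #     ]
+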
670
+ class _CalibratedClassifier:
671
+ """Pipeline-like chaining a fitted classifier and its fitted calibrators.
672
+
673
+ Parameters
674
+ ----------
675
+ estimator : estimator instance
676
+ Fitted classifier.
677
+
678
+ calibrators : list of fitted estimator instances
679
+ List of fitted calibrators (either 'IsotonicRegression' or
680
+ '_SigmoidCalibration'). The number of calibrators equals the number of
681
+ classes. However, if there are 2 classes, the list contains only one
682
+ fitted calibrator.
683
+
684
+ classes : array-like of shape (n_classes,)
685
+ All the prediction classes.
686
+
687
+ method : {'sigmoid', 'isotonic'}, default='sigmoid'
688
+ The method to use for calibration. Can be 'sigmoid' which
689
+ corresponds to Platt's method or 'isotonic' which is a
690
+ non-parametric approach based on isotonic regression.
691
+ """
692
+
693
+ def __init__(self, estimator, calibrators, *, classes, method="sigmoid"):
694
+ self.estimator = estimator
695
+ self.calibrators = calibrators
696
+ self.classes = classes
697
+ self.method = method
698
+
699
+ def predict_proba(self, X):
700
+ """Calculate calibrated probabilities.
701
+
702
+ Calculates calibrated classification probabilities
703
+ for each class, in a one-vs-all manner, for `X`.
704
+
705
+ Parameters
706
+ ----------
707
+ X : ndarray of shape (n_samples, n_features)
708
+ The sample data.
709
+
710
+ Returns
711
+ -------
712
+ proba : array, shape (n_samples, n_classes)
713
+ The predicted probabilities. Can be exact zeros.
714
+ """
715
+ predictions, _ = _get_response_values(
716
+ self.estimator,
717
+ X,
718
+ response_method=["decision_function", "predict_proba"],
719
+ )
720
+ if predictions.ndim == 1:
721
+ # Reshape binary output from `(n_samples,)` to `(n_samples, 1)`
722
+ predictions = predictions.reshape(-1, 1)
723
+
724
+ n_classes = len(self.classes)
725
+
726
+ label_encoder = LabelEncoder().fit(self.classes)
727
+ pos_class_indices = label_encoder.transform(self.estimator.classes_)
728
+
729
+ proba = np.zeros((_num_samples(X), n_classes))
730
+ for class_idx, this_pred, calibrator in zip(
731
+ pos_class_indices, predictions.T, self.calibrators
732
+ ):
733
+ if n_classes == 2:
734
+ # When binary, `predictions` consists only of predictions for
735
+ # clf.classes_[1] but `pos_class_indices` = 0
736
+ class_idx += 1
737
+ proba[:, class_idx] = calibrator.predict(this_pred)
738
+
739
+ # Normalize the probabilities
740
+ if n_classes == 2:
741
+ proba[:, 0] = 1.0 - proba[:, 1]
742
+ else:
743
+ denominator = np.sum(proba, axis=1)[:, np.newaxis]
744
+ # In the edge case where for each class calibrator returns a null
745
+ # probability for a given sample, use the uniform distribution
746
+ # instead.
747
+ uniform_proba = np.full_like(proba, 1 / n_classes)
748
+ proba = np.divide(
749
+ proba, denominator, out=uniform_proba, where=denominator != 0
750
+ )
751
+
752
+ # Deal with cases where the predicted probability minimally exceeds 1.0
753
+ proba[(1.0 < proba) & (proba <= 1.0 + 1e-5)] = 1.0
754
+
755
+ return proba
756
+
757
+
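+ # A small illustrative sketch of the multiclass renormalization in
+ # `_CalibratedClassifier.predict_proba` above, under assumed toy values:
+ # rows of `proba` are divided by their sum, and a row whose calibrators
+ # all return 0 falls back to the uniform distribution instead of
+ # dividing by zero.
+ #
+ #     import numpy as np
+ #
+ #     proba = np.array([[0.2, 0.6, 0.2],
+ #                       [0.0, 0.0, 0.0]])   # second row: all-zero edge case
+ #     n_classes = proba.shape[1]
+ #     denominator = proba.sum(axis=1)[:, np.newaxis]
+ #     uniform = np.full_like(proba, 1 / n_classes)
+ #     out = np.divide(proba, denominator, out=uniform, where=denominator != 0)
+ #     # out[0] == [0.2, 0.6, 0.2]; out[1] == [1/3, 1/3, 1/3]
+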
758
+ # The max_abs_prediction_threshold was approximated using
759
+ # logit(np.finfo(np.float64).eps) which is about -36
760
+ def _sigmoid_calibration(
761
+ predictions, y, sample_weight=None, max_abs_prediction_threshold=30
762
+ ):
763
+ """Probability Calibration with sigmoid method (Platt 2000)
764
+
765
+ Parameters
766
+ ----------
767
+ predictions : ndarray of shape (n_samples,)
768
+ The decision function or predict proba for the samples.
769
+
770
+ y : ndarray of shape (n_samples,)
771
+ The targets.
772
+
773
+ sample_weight : array-like of shape (n_samples,), default=None
774
+ Sample weights. If None, then samples are equally weighted.
775
+
776
+ Returns
777
+ -------
778
+ a : float
779
+ The slope.
780
+
781
+ b : float
782
+ The intercept.
783
+
784
+ References
785
+ ----------
786
+ Platt, "Probabilistic Outputs for Support Vector Machines"
787
+ """
788
+ predictions = column_or_1d(predictions)
789
+ y = column_or_1d(y)
790
+
791
+ F = predictions # F follows Platt's notations
792
+
793
+ scale_constant = 1.0
794
+ max_prediction = np.max(np.abs(F))
795
+
796
+ # If the predictions have large values we scale them in order to bring
797
+ # them within a suitable range. This has no effect on the final
798
+ # (prediction) result because linear models like logistic regression
799
+ # without a penalty are invariant to multiplying the features by a
800
+ # constant.
801
+ if max_prediction >= max_abs_prediction_threshold:
802
+ scale_constant = max_prediction
803
+ # We rescale the features in a copy: inplace rescaling could confuse
804
+ # the caller and make the code harder to reason about.
805
+ F = F / scale_constant
806
+
807
+ # Bayesian priors (see Platt end of section 2.2):
808
+ # It corresponds to the number of samples, taking into account the
809
+ # `sample_weight`.
810
+ mask_negative_samples = y <= 0
811
+ if sample_weight is not None:
812
+ prior0 = (sample_weight[mask_negative_samples]).sum()
813
+ prior1 = (sample_weight[~mask_negative_samples]).sum()
814
+ else:
815
+ prior0 = float(np.sum(mask_negative_samples))
816
+ prior1 = y.shape[0] - prior0
817
+ T = np.zeros_like(y, dtype=predictions.dtype)
818
+ T[y > 0] = (prior1 + 1.0) / (prior1 + 2.0)
819
+ T[y <= 0] = 1.0 / (prior0 + 2.0)
820
+
821
+ bin_loss = HalfBinomialLoss()
822
+
823
+ def loss_grad(AB):
824
+ # .astype below is needed to ensure y_true and raw_prediction have the
825
+ # same dtype. With result = np.float64(0) * np.array([1, 2], dtype=np.float32)
826
+ # - in Numpy 2, result.dtype is float64
827
+ # - in Numpy<2, result.dtype is float32
828
+ raw_prediction = -(AB[0] * F + AB[1]).astype(dtype=predictions.dtype)
829
+ l, g = bin_loss.loss_gradient(
830
+ y_true=T,
831
+ raw_prediction=raw_prediction,
832
+ sample_weight=sample_weight,
833
+ )
834
+ loss = l.sum()
835
+ # TODO: Remove casting to np.float64 when minimum supported SciPy is 1.11.2
836
+ # With SciPy >= 1.11.2, the LBFGS implementation will cast to float64
837
+ # https://github.com/scipy/scipy/pull/18825.
838
+ # Here we cast to float64 to support SciPy < 1.11.2
839
+ grad = np.asarray([-g @ F, -g.sum()], dtype=np.float64)
840
+ return loss, grad
841
+
842
+ AB0 = np.array([0.0, log((prior0 + 1.0) / (prior1 + 1.0))])
843
+
844
+ opt_result = minimize(
845
+ loss_grad,
846
+ AB0,
847
+ method="L-BFGS-B",
848
+ jac=True,
849
+ options={
850
+ "gtol": 1e-6,
851
+ "ftol": 64 * np.finfo(float).eps,
852
+ },
853
+ )
854
+ AB_ = opt_result.x
855
+
856
+ # The tuned multiplicative parameter is converted back to the original
857
+ # input feature scale. The offset parameter does not need rescaling since
858
+ # we did not rescale the outcome variable.
859
+ return AB_[0] / scale_constant, AB_[1]
860
+
861
+
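+ # A minimal illustrative sketch (not part of the module) of how the
+ # slope `a` and intercept `b` returned by `_sigmoid_calibration` are
+ # used: calibrated probabilities follow expit(-(a * f + b)), exactly as
+ # in `_SigmoidCalibration.predict` below. The toy scores and labels are
+ # assumptions made for the example.
+ #
+ #     import numpy as np
+ #     from scipy.special import expit
+ #
+ #     scores = np.array([-2.0, -0.5, 0.5, 2.0])  # e.g. decision_function outputs
+ #     labels = np.array([0, 0, 1, 1])
+ #     a, b = _sigmoid_calibration(scores, labels)
+ #     proba_pos = expit(-(a * scores + b))       # calibrated P(y = 1 | f)
+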
862
+ class _SigmoidCalibration(RegressorMixin, BaseEstimator):
863
+ """Sigmoid regression model.
864
+
865
+ Attributes
866
+ ----------
867
+ a_ : float
868
+ The slope.
869
+
870
+ b_ : float
871
+ The intercept.
872
+ """
873
+
874
+ def fit(self, X, y, sample_weight=None):
875
+ """Fit the model using X, y as training data.
876
+
877
+ Parameters
878
+ ----------
879
+ X : array-like of shape (n_samples,)
880
+ Training data.
881
+
882
+ y : array-like of shape (n_samples,)
883
+ Training target.
884
+
885
+ sample_weight : array-like of shape (n_samples,), default=None
886
+ Sample weights. If None, then samples are equally weighted.
887
+
888
+ Returns
889
+ -------
890
+ self : object
891
+ Returns an instance of self.
892
+ """
893
+ X = column_or_1d(X)
894
+ y = column_or_1d(y)
895
+ X, y = indexable(X, y)
896
+
897
+ self.a_, self.b_ = _sigmoid_calibration(X, y, sample_weight)
898
+ return self
899
+
900
+ def predict(self, T):
901
+ """Predict new data by linear interpolation.
902
+
903
+ Parameters
904
+ ----------
905
+ T : array-like of shape (n_samples,)
906
+ Data to predict from.
907
+
908
+ Returns
909
+ -------
910
+ T_ : ndarray of shape (n_samples,)
911
+ The predicted data.
912
+ """
913
+ T = column_or_1d(T)
914
+ return expit(-(self.a_ * T + self.b_))
915
+
916
+
917
+ @validate_params(
918
+ {
919
+ "y_true": ["array-like"],
920
+ "y_prob": ["array-like"],
921
+ "pos_label": [Real, str, "boolean", None],
922
+ "n_bins": [Interval(Integral, 1, None, closed="left")],
923
+ "strategy": [StrOptions({"uniform", "quantile"})],
924
+ },
925
+ prefer_skip_nested_validation=True,
926
+ )
927
+ def calibration_curve(
928
+ y_true,
929
+ y_prob,
930
+ *,
931
+ pos_label=None,
932
+ n_bins=5,
933
+ strategy="uniform",
934
+ ):
935
+ """Compute true and predicted probabilities for a calibration curve.
936
+
937
+ The method assumes the inputs come from a binary classifier, and
938
+ discretizes the [0, 1] interval into bins.
939
+
940
+ Calibration curves may also be referred to as reliability diagrams.
941
+
942
+ Read more in the :ref:`User Guide <calibration>`.
943
+
944
+ Parameters
945
+ ----------
946
+ y_true : array-like of shape (n_samples,)
947
+ True targets.
948
+
949
+ y_prob : array-like of shape (n_samples,)
950
+ Probabilities of the positive class.
951
+
952
+ pos_label : int, float, bool or str, default=None
953
+ The label of the positive class.
954
+
955
+ .. versionadded:: 1.1
956
+
957
+ n_bins : int, default=5
958
+ Number of bins to discretize the [0, 1] interval. A bigger number
959
+ requires more data. Bins with no samples (i.e. without
960
+ corresponding values in `y_prob`) will not be returned, thus the
961
+ returned arrays may have fewer than `n_bins` values.
962
+
963
+ strategy : {'uniform', 'quantile'}, default='uniform'
964
+ Strategy used to define the widths of the bins.
965
+
966
+ uniform
967
+ The bins have identical widths.
968
+ quantile
969
+ The bins have the same number of samples and depend on `y_prob`.
970
+
971
+ Returns
972
+ -------
973
+ prob_true : ndarray of shape (n_bins,) or smaller
974
+ The proportion of samples whose class is the positive class, in each
975
+ bin (fraction of positives).
976
+
977
+ prob_pred : ndarray of shape (n_bins,) or smaller
978
+ The mean predicted probability in each bin.
979
+
980
+ References
981
+ ----------
982
+ Alexandru Niculescu-Mizil and Rich Caruana (2005) Predicting Good
983
+ Probabilities With Supervised Learning, in Proceedings of the 22nd
984
+ International Conference on Machine Learning (ICML).
985
+ See section 4 (Qualitative Analysis of Predictions).
986
+
987
+ Examples
988
+ --------
989
+ >>> import numpy as np
990
+ >>> from sklearn.calibration import calibration_curve
991
+ >>> y_true = np.array([0, 0, 0, 0, 1, 1, 1, 1, 1])
992
+ >>> y_pred = np.array([0.1, 0.2, 0.3, 0.4, 0.65, 0.7, 0.8, 0.9, 1.])
993
+ >>> prob_true, prob_pred = calibration_curve(y_true, y_pred, n_bins=3)
994
+ >>> prob_true
995
+ array([0. , 0.5, 1. ])
996
+ >>> prob_pred
997
+ array([0.2 , 0.525, 0.85 ])
998
+ """
999
+ y_true = column_or_1d(y_true)
1000
+ y_prob = column_or_1d(y_prob)
1001
+ check_consistent_length(y_true, y_prob)
1002
+ pos_label = _check_pos_label_consistency(pos_label, y_true)
1003
+
1004
+ if y_prob.min() < 0 or y_prob.max() > 1:
1005
+ raise ValueError("y_prob has values outside [0, 1].")
1006
+
1007
+ labels = np.unique(y_true)
1008
+ if len(labels) > 2:
1009
+ raise ValueError(
1010
+ f"Only binary classification is supported. Provided labels {labels}."
1011
+ )
1012
+ y_true = y_true == pos_label
1013
+
1014
+ if strategy == "quantile": # Determine bin edges by distribution of data
1015
+ quantiles = np.linspace(0, 1, n_bins + 1)
1016
+ bins = np.percentile(y_prob, quantiles * 100)
1017
+ elif strategy == "uniform":
1018
+ bins = np.linspace(0.0, 1.0, n_bins + 1)
1019
+ else:
1020
+ raise ValueError(
1021
+ "Invalid entry to 'strategy' input. Strategy "
1022
+ "must be either 'quantile' or 'uniform'."
1023
+ )
1024
+
1025
+ binids = np.searchsorted(bins[1:-1], y_prob)
1026
+
1027
+ bin_sums = np.bincount(binids, weights=y_prob, minlength=len(bins))
1028
+ bin_true = np.bincount(binids, weights=y_true, minlength=len(bins))
1029
+ bin_total = np.bincount(binids, minlength=len(bins))
1030
+
1031
+ nonzero = bin_total != 0
1032
+ prob_true = bin_true[nonzero] / bin_total[nonzero]
1033
+ prob_pred = bin_sums[nonzero] / bin_total[nonzero]
1034
+
1035
+ return prob_true, prob_pred
1036
+
1037
+
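+ # A small illustrative sketch of the binning inside `calibration_curve`
+ # above, with toy probabilities: the interior bin edges are passed to
+ # `np.searchsorted` so each probability maps to a bin index, and per-bin
+ # counts come from `np.bincount`.
+ #
+ #     import numpy as np
+ #
+ #     y_prob = np.array([0.05, 0.15, 0.45, 0.95])
+ #     bins = np.linspace(0.0, 1.0, 5 + 1)           # n_bins=5, 'uniform' strategy
+ #     binids = np.searchsorted(bins[1:-1], y_prob)  # -> array([0, 0, 2, 4])
+ #     bin_total = np.bincount(binids, minlength=len(bins))
+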
1038
+ class CalibrationDisplay(_BinaryClassifierCurveDisplayMixin):
1039
+ """Calibration curve (also known as reliability diagram) visualization.
1040
+
1041
+ It is recommended to use
1042
+ :func:`~sklearn.calibration.CalibrationDisplay.from_estimator` or
1043
+ :func:`~sklearn.calibration.CalibrationDisplay.from_predictions`
1044
+ to create a `CalibrationDisplay`. All parameters are stored as attributes.
1045
+
1046
+ Read more about calibration in the :ref:`User Guide <calibration>` and
1047
+ more about the scikit-learn visualization API in :ref:`visualizations`.
1048
+
1049
+ .. versionadded:: 1.0
1050
+
1051
+ Parameters
1052
+ ----------
1053
+ prob_true : ndarray of shape (n_bins,)
1054
+ The proportion of samples whose class is the positive class (fraction
1055
+ of positives), in each bin.
1056
+
1057
+ prob_pred : ndarray of shape (n_bins,)
1058
+ The mean predicted probability in each bin.
1059
+
1060
+ y_prob : ndarray of shape (n_samples,)
1061
+ Probability estimates for the positive class, for each sample.
1062
+
1063
+ estimator_name : str, default=None
1064
+ Name of estimator. If None, the estimator name is not shown.
1065
+
1066
+ pos_label : int, float, bool or str, default=None
1067
+ The positive class when computing the calibration curve.
1068
+ By default, `pos_label` is set to `estimator.classes_[1]` when using
1069
+ `from_estimator` and set to 1 when using `from_predictions`.
1070
+
1071
+ .. versionadded:: 1.1
1072
+
1073
+ Attributes
1074
+ ----------
1075
+ line_ : matplotlib Artist
1076
+ Calibration curve.
1077
+
1078
+ ax_ : matplotlib Axes
1079
+ Axes with calibration curve.
1080
+
1081
+ figure_ : matplotlib Figure
1082
+ Figure containing the curve.
1083
+
1084
+ See Also
1085
+ --------
1086
+ calibration_curve : Compute true and predicted probabilities for a
1087
+ calibration curve.
1088
+ CalibrationDisplay.from_predictions : Plot calibration curve using true
1089
+ and predicted labels.
1090
+ CalibrationDisplay.from_estimator : Plot calibration curve using an
1091
+ estimator and data.
1092
+
1093
+ Examples
1094
+ --------
1095
+ >>> from sklearn.datasets import make_classification
1096
+ >>> from sklearn.model_selection import train_test_split
1097
+ >>> from sklearn.linear_model import LogisticRegression
1098
+ >>> from sklearn.calibration import calibration_curve, CalibrationDisplay
1099
+ >>> X, y = make_classification(random_state=0)
1100
+ >>> X_train, X_test, y_train, y_test = train_test_split(
1101
+ ... X, y, random_state=0)
1102
+ >>> clf = LogisticRegression(random_state=0)
1103
+ >>> clf.fit(X_train, y_train)
1104
+ LogisticRegression(random_state=0)
1105
+ >>> y_prob = clf.predict_proba(X_test)[:, 1]
1106
+ >>> prob_true, prob_pred = calibration_curve(y_test, y_prob, n_bins=10)
1107
+ >>> disp = CalibrationDisplay(prob_true, prob_pred, y_prob)
1108
+ >>> disp.plot()
1109
+ <...>
1110
+ """
1111
+
1112
+ def __init__(
1113
+ self, prob_true, prob_pred, y_prob, *, estimator_name=None, pos_label=None
1114
+ ):
1115
+ self.prob_true = prob_true
1116
+ self.prob_pred = prob_pred
1117
+ self.y_prob = y_prob
1118
+ self.estimator_name = estimator_name
1119
+ self.pos_label = pos_label
1120
+
1121
+ def plot(self, *, ax=None, name=None, ref_line=True, **kwargs):
1122
+ """Plot visualization.
1123
+
1124
+ Extra keyword arguments will be passed to
1125
+ :func:`matplotlib.pyplot.plot`.
1126
+
1127
+ Parameters
1128
+ ----------
1129
+ ax : Matplotlib Axes, default=None
1130
+ Axes object to plot on. If `None`, a new figure and axes are
1131
+ created.
1132
+
1133
+ name : str, default=None
1134
+ Name for labeling curve. If `None`, use `estimator_name` if
1135
+ not `None`, otherwise no labeling is shown.
1136
+
1137
+ ref_line : bool, default=True
1138
+ If `True`, plots a reference line representing a perfectly
1139
+ calibrated classifier.
1140
+
1141
+ **kwargs : dict
1142
+ Keyword arguments to be passed to :func:`matplotlib.pyplot.plot`.
1143
+
1144
+ Returns
1145
+ -------
1146
+ display : :class:`~sklearn.calibration.CalibrationDisplay`
1147
+ Object that stores computed values.
1148
+ """
1149
+ self.ax_, self.figure_, name = self._validate_plot_params(ax=ax, name=name)
1150
+
1151
+ info_pos_label = (
1152
+ f"(Positive class: {self.pos_label})" if self.pos_label is not None else ""
1153
+ )
1154
+
1155
+ line_kwargs = {"marker": "s", "linestyle": "-"}
1156
+ if name is not None:
1157
+ line_kwargs["label"] = name
1158
+ line_kwargs.update(**kwargs)
1159
+
1160
+ ref_line_label = "Perfectly calibrated"
1161
+ existing_ref_line = ref_line_label in self.ax_.get_legend_handles_labels()[1]
1162
+ if ref_line and not existing_ref_line:
1163
+ self.ax_.plot([0, 1], [0, 1], "k:", label=ref_line_label)
1164
+ self.line_ = self.ax_.plot(self.prob_pred, self.prob_true, **line_kwargs)[0]
1165
+
1166
+ # We always have to show the legend for at least the reference line
1167
+ self.ax_.legend(loc="lower right")
1168
+
1169
+ xlabel = f"Mean predicted probability {info_pos_label}"
1170
+ ylabel = f"Fraction of positives {info_pos_label}"
1171
+ self.ax_.set(xlabel=xlabel, ylabel=ylabel)
1172
+
1173
+ return self
1174
+
1175
+ @classmethod
1176
+ def from_estimator(
1177
+ cls,
1178
+ estimator,
1179
+ X,
1180
+ y,
1181
+ *,
1182
+ n_bins=5,
1183
+ strategy="uniform",
1184
+ pos_label=None,
1185
+ name=None,
1186
+ ref_line=True,
1187
+ ax=None,
1188
+ **kwargs,
1189
+ ):
1190
+ """Plot calibration curve using a binary classifier and data.
1191
+
1192
+ A calibration curve, also known as a reliability diagram, uses inputs
1193
+ from a binary classifier and plots the average predicted probability
1194
+ for each bin against the fraction of positive samples, on the
1195
+ y-axis.
1196
+
1197
+ Extra keyword arguments will be passed to
1198
+ :func:`matplotlib.pyplot.plot`.
1199
+
1200
+ Read more about calibration in the :ref:`User Guide <calibration>` and
1201
+ more about the scikit-learn visualization API in :ref:`visualizations`.
1202
+
1203
+ .. versionadded:: 1.0
1204
+
1205
+ Parameters
1206
+ ----------
1207
+ estimator : estimator instance
1208
+ Fitted classifier or a fitted :class:`~sklearn.pipeline.Pipeline`
1209
+ in which the last estimator is a classifier. The classifier must
1210
+ have a :term:`predict_proba` method.
1211
+
1212
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
1213
+ Input values.
1214
+
1215
+ y : array-like of shape (n_samples,)
1216
+ Binary target values.
1217
+
1218
+ n_bins : int, default=5
1219
+ Number of bins to discretize the [0, 1] interval into when
1220
+ calculating the calibration curve. A bigger number requires more
1221
+ data.
1222
+
1223
+ strategy : {'uniform', 'quantile'}, default='uniform'
1224
+ Strategy used to define the widths of the bins.
1225
+
1226
+ - `'uniform'`: The bins have identical widths.
1227
+ - `'quantile'`: The bins have the same number of samples and depend
1228
+ on predicted probabilities.
1229
+
1230
+ pos_label : int, float, bool or str, default=None
1231
+ The positive class when computing the calibration curve.
1232
+ By default, `estimator.classes_[1]` is considered the
1233
+ positive class.
1234
+
1235
+ .. versionadded:: 1.1
1236
+
1237
+ name : str, default=None
1238
+ Name for labeling curve. If `None`, the name of the estimator is
1239
+ used.
1240
+
1241
+ ref_line : bool, default=True
1242
+ If `True`, plots a reference line representing a perfectly
1243
+ calibrated classifier.
1244
+
1245
+ ax : matplotlib axes, default=None
1246
+ Axes object to plot on. If `None`, a new figure and axes are
1247
+ created.
1248
+
1249
+ **kwargs : dict
1250
+ Keyword arguments to be passed to :func:`matplotlib.pyplot.plot`.
1251
+
1252
+ Returns
1253
+ -------
1254
+ display : :class:`~sklearn.calibration.CalibrationDisplay`.
1255
+ Object that stores computed values.
1256
+
1257
+ See Also
1258
+ --------
1259
+ CalibrationDisplay.from_predictions : Plot calibration curve using true
1260
+ and predicted labels.
1261
+
1262
+ Examples
1263
+ --------
1264
+ >>> import matplotlib.pyplot as plt
1265
+ >>> from sklearn.datasets import make_classification
1266
+ >>> from sklearn.model_selection import train_test_split
1267
+ >>> from sklearn.linear_model import LogisticRegression
1268
+ >>> from sklearn.calibration import CalibrationDisplay
1269
+ >>> X, y = make_classification(random_state=0)
1270
+ >>> X_train, X_test, y_train, y_test = train_test_split(
1271
+ ... X, y, random_state=0)
1272
+ >>> clf = LogisticRegression(random_state=0)
1273
+ >>> clf.fit(X_train, y_train)
1274
+ LogisticRegression(random_state=0)
1275
+ >>> disp = CalibrationDisplay.from_estimator(clf, X_test, y_test)
1276
+ >>> plt.show()
1277
+ """
1278
+ y_prob, pos_label, name = cls._validate_and_get_response_values(
1279
+ estimator,
1280
+ X,
1281
+ y,
1282
+ response_method="predict_proba",
1283
+ pos_label=pos_label,
1284
+ name=name,
1285
+ )
1286
+
1287
+ return cls.from_predictions(
1288
+ y,
1289
+ y_prob,
1290
+ n_bins=n_bins,
1291
+ strategy=strategy,
1292
+ pos_label=pos_label,
1293
+ name=name,
1294
+ ref_line=ref_line,
1295
+ ax=ax,
1296
+ **kwargs,
1297
+ )
1298
+
1299
+ @classmethod
1300
+ def from_predictions(
1301
+ cls,
1302
+ y_true,
1303
+ y_prob,
1304
+ *,
1305
+ n_bins=5,
1306
+ strategy="uniform",
1307
+ pos_label=None,
1308
+ name=None,
1309
+ ref_line=True,
1310
+ ax=None,
1311
+ **kwargs,
1312
+ ):
1313
+ """Plot calibration curve using true labels and predicted probabilities.
1314
+
1315
+ A calibration curve, also known as a reliability diagram, uses inputs
1316
+ from a binary classifier and plots the average predicted probability
1317
+ for each bin against the fraction of positive samples, on the
1318
+ y-axis.
1319
+
1320
+ Extra keyword arguments will be passed to
1321
+ :func:`matplotlib.pyplot.plot`.
1322
+
1323
+ Read more about calibration in the :ref:`User Guide <calibration>` and
1324
+ more about the scikit-learn visualization API in :ref:`visualizations`.
1325
+
1326
+ .. versionadded:: 1.0
1327
+
1328
+ Parameters
1329
+ ----------
1330
+ y_true : array-like of shape (n_samples,)
1331
+ True labels.
1332
+
1333
+ y_prob : array-like of shape (n_samples,)
1334
+ The predicted probabilities of the positive class.
1335
+
1336
+ n_bins : int, default=5
1337
+ Number of bins to discretize the [0, 1] interval into when
1338
+ calculating the calibration curve. A bigger number requires more
1339
+ data.
1340
+
1341
+ strategy : {'uniform', 'quantile'}, default='uniform'
1342
+ Strategy used to define the widths of the bins.
1343
+
1344
+ - `'uniform'`: The bins have identical widths.
1345
+ - `'quantile'`: The bins have the same number of samples and depend
1346
+ on predicted probabilities.
1347
+
1348
+ pos_label : int, float, bool or str, default=None
1349
+ The positive class when computing the calibration curve.
1350
+ By default `pos_label` is set to 1.
1351
+
1352
+ .. versionadded:: 1.1
1353
+
1354
+ name : str, default=None
1355
+ Name for labeling curve.
1356
+
1357
+ ref_line : bool, default=True
1358
+ If `True`, plots a reference line representing a perfectly
1359
+ calibrated classifier.
1360
+
1361
+ ax : matplotlib axes, default=None
1362
+ Axes object to plot on. If `None`, a new figure and axes are
1363
+ created.
1364
+
1365
+ **kwargs : dict
1366
+ Keyword arguments to be passed to :func:`matplotlib.pyplot.plot`.
1367
+
1368
+ Returns
1369
+ -------
1370
+ display : :class:`~sklearn.calibration.CalibrationDisplay`.
1371
+ Object that stores computed values.
1372
+
1373
+ See Also
1374
+ --------
1375
+ CalibrationDisplay.from_estimator : Plot calibration curve using an
1376
+ estimator and data.
1377
+
1378
+ Examples
1379
+ --------
1380
+ >>> import matplotlib.pyplot as plt
1381
+ >>> from sklearn.datasets import make_classification
1382
+ >>> from sklearn.model_selection import train_test_split
1383
+ >>> from sklearn.linear_model import LogisticRegression
1384
+ >>> from sklearn.calibration import CalibrationDisplay
1385
+ >>> X, y = make_classification(random_state=0)
1386
+ >>> X_train, X_test, y_train, y_test = train_test_split(
1387
+ ... X, y, random_state=0)
1388
+ >>> clf = LogisticRegression(random_state=0)
1389
+ >>> clf.fit(X_train, y_train)
1390
+ LogisticRegression(random_state=0)
1391
+ >>> y_prob = clf.predict_proba(X_test)[:, 1]
1392
+ >>> disp = CalibrationDisplay.from_predictions(y_test, y_prob)
1393
+ >>> plt.show()
1394
+ """
1395
+ pos_label_validated, name = cls._validate_from_predictions_params(
1396
+ y_true, y_prob, sample_weight=None, pos_label=pos_label, name=name
1397
+ )
1398
+
1399
+ prob_true, prob_pred = calibration_curve(
1400
+ y_true, y_prob, n_bins=n_bins, strategy=strategy, pos_label=pos_label
1401
+ )
1402
+
1403
+ disp = cls(
1404
+ prob_true=prob_true,
1405
+ prob_pred=prob_pred,
1406
+ y_prob=y_prob,
1407
+ estimator_name=name,
1408
+ pos_label=pos_label_validated,
1409
+ )
1410
+ return disp.plot(ax=ax, ref_line=ref_line, **kwargs)
venv/lib/python3.10/site-packages/sklearn/conftest.py ADDED
@@ -0,0 +1,309 @@
1
+ import builtins
2
+ import platform
3
+ import sys
4
+ from contextlib import suppress
5
+ from functools import wraps
6
+ from os import environ
7
+ from unittest import SkipTest
8
+
9
+ import joblib
10
+ import numpy as np
11
+ import pytest
12
+ from _pytest.doctest import DoctestItem
13
+ from threadpoolctl import threadpool_limits
14
+
15
+ from sklearn import config_context, set_config
16
+ from sklearn._min_dependencies import PYTEST_MIN_VERSION
17
+ from sklearn.datasets import (
18
+ fetch_20newsgroups,
19
+ fetch_20newsgroups_vectorized,
20
+ fetch_california_housing,
21
+ fetch_covtype,
22
+ fetch_kddcup99,
23
+ fetch_olivetti_faces,
24
+ fetch_rcv1,
25
+ fetch_species_distributions,
26
+ )
27
+ from sklearn.tests import random_seed
28
+ from sklearn.utils import _IS_32BIT
29
+ from sklearn.utils._testing import get_pytest_filterwarning_lines
30
+ from sklearn.utils.fixes import (
31
+ np_base_version,
32
+ parse_version,
33
+ sp_version,
34
+ )
35
+
36
+ if parse_version(pytest.__version__) < parse_version(PYTEST_MIN_VERSION):
37
+ raise ImportError(
38
+ f"Your version of pytest is too old. Got version {pytest.__version__}, you"
39
+ f" should have pytest >= {PYTEST_MIN_VERSION} installed."
40
+ )
41
+
42
+ scipy_datasets_require_network = sp_version >= parse_version("1.10")
43
+
44
+
45
+ @pytest.fixture
46
+ def enable_slep006():
47
+ """Enable SLEP006 for all tests."""
48
+ with config_context(enable_metadata_routing=True):
49
+ yield
50
+
51
+
52
+ def raccoon_face_or_skip():
53
+ # SciPy >= 1.10 requires network access to get the data
54
+ if scipy_datasets_require_network:
55
+ run_network_tests = environ.get("SKLEARN_SKIP_NETWORK_TESTS", "1") == "0"
56
+ if not run_network_tests:
57
+ raise SkipTest("test is enabled when SKLEARN_SKIP_NETWORK_TESTS=0")
58
+
59
+ try:
60
+ import pooch # noqa
61
+ except ImportError:
62
+ raise SkipTest("test requires pooch to be installed")
63
+
64
+ from scipy.datasets import face
65
+ else:
66
+ from scipy.misc import face
67
+
68
+ return face(gray=True)
69
+
70
+
71
+ dataset_fetchers = {
72
+ "fetch_20newsgroups_fxt": fetch_20newsgroups,
73
+ "fetch_20newsgroups_vectorized_fxt": fetch_20newsgroups_vectorized,
74
+ "fetch_california_housing_fxt": fetch_california_housing,
75
+ "fetch_covtype_fxt": fetch_covtype,
76
+ "fetch_kddcup99_fxt": fetch_kddcup99,
77
+ "fetch_olivetti_faces_fxt": fetch_olivetti_faces,
78
+ "fetch_rcv1_fxt": fetch_rcv1,
79
+ "fetch_species_distributions_fxt": fetch_species_distributions,
80
+ }
81
+
82
+ if scipy_datasets_require_network:
83
+ dataset_fetchers["raccoon_face_fxt"] = raccoon_face_or_skip
84
+
85
+ _SKIP32_MARK = pytest.mark.skipif(
86
+ environ.get("SKLEARN_RUN_FLOAT32_TESTS", "0") != "1",
87
+ reason="Set SKLEARN_RUN_FLOAT32_TESTS=1 to run float32 dtype tests",
88
+ )
89
+
90
+
91
+ # Global fixtures
92
+ @pytest.fixture(params=[pytest.param(np.float32, marks=_SKIP32_MARK), np.float64])
93
+ def global_dtype(request):
94
+ yield request.param
95
+
96
+
97
+ def _fetch_fixture(f):
98
+ """Fetch dataset (download if missing and requested by environment)."""
99
+ download_if_missing = environ.get("SKLEARN_SKIP_NETWORK_TESTS", "1") == "0"
100
+
101
+ @wraps(f)
102
+ def wrapped(*args, **kwargs):
103
+ kwargs["download_if_missing"] = download_if_missing
104
+ try:
105
+ return f(*args, **kwargs)
106
+ except OSError as e:
107
+ if str(e) != "Data not found and `download_if_missing` is False":
108
+ raise
109
+ pytest.skip("test is enabled when SKLEARN_SKIP_NETWORK_TESTS=0")
110
+
111
+ return pytest.fixture(lambda: wrapped)
112
+
113
+
114
+ # Adds fixtures for fetching data
115
+ fetch_20newsgroups_fxt = _fetch_fixture(fetch_20newsgroups)
116
+ fetch_20newsgroups_vectorized_fxt = _fetch_fixture(fetch_20newsgroups_vectorized)
117
+ fetch_california_housing_fxt = _fetch_fixture(fetch_california_housing)
118
+ fetch_covtype_fxt = _fetch_fixture(fetch_covtype)
119
+ fetch_kddcup99_fxt = _fetch_fixture(fetch_kddcup99)
120
+ fetch_olivetti_faces_fxt = _fetch_fixture(fetch_olivetti_faces)
121
+ fetch_rcv1_fxt = _fetch_fixture(fetch_rcv1)
122
+ fetch_species_distributions_fxt = _fetch_fixture(fetch_species_distributions)
123
+ raccoon_face_fxt = pytest.fixture(raccoon_face_or_skip)
124
+
125
+
126
+ def pytest_collection_modifyitems(config, items):
127
+ """Called after collect is completed.
128
+
129
+ Parameters
130
+ ----------
131
+ config : pytest config
132
+ items : list of collected items
133
+ """
134
+ run_network_tests = environ.get("SKLEARN_SKIP_NETWORK_TESTS", "1") == "0"
135
+ skip_network = pytest.mark.skip(
136
+ reason="test is enabled when SKLEARN_SKIP_NETWORK_TESTS=0"
137
+ )
138
+
139
+ # download datasets during collection to avoid thread-unsafe behavior
140
+ # when running pytest in parallel with pytest-xdist
141
+ dataset_features_set = set(dataset_fetchers)
142
+ datasets_to_download = set()
143
+
144
+ for item in items:
145
+ if isinstance(item, DoctestItem) and "fetch_" in item.name:
146
+ fetcher_function_name = item.name.split(".")[-1]
147
+ dataset_fetchers_key = f"{fetcher_function_name}_fxt"
148
+ dataset_to_fetch = set([dataset_fetchers_key]) & dataset_features_set
149
+ elif not hasattr(item, "fixturenames"):
150
+ continue
151
+ else:
152
+ item_fixtures = set(item.fixturenames)
153
+ dataset_to_fetch = item_fixtures & dataset_features_set
154
+
155
+ if not dataset_to_fetch:
156
+ continue
157
+
158
+ if run_network_tests:
159
+ datasets_to_download |= dataset_to_fetch
160
+ else:
161
+ # network tests are skipped
162
+ item.add_marker(skip_network)
163
+
164
+ # Only download datasets on the first worker spawned by pytest-xdist
165
+ # to avoid thread-unsafe behavior. If pytest-xdist is not used, we still
166
+ # download before tests run.
167
+ worker_id = environ.get("PYTEST_XDIST_WORKER", "gw0")
168
+ if worker_id == "gw0" and run_network_tests:
169
+ for name in datasets_to_download:
170
+ with suppress(SkipTest):
171
+ dataset_fetchers[name]()
172
+
173
+ for item in items:
174
+ # Known failure with GradientBoostingClassifier on ARM64
175
+ if (
176
+ item.name.endswith("GradientBoostingClassifier")
177
+ and platform.machine() == "aarch64"
178
+ ):
179
+ marker = pytest.mark.xfail(
180
+ reason=(
181
+ "know failure. See "
182
+ "https://github.com/scikit-learn/scikit-learn/issues/17797" # noqa
183
+ )
184
+ )
185
+ item.add_marker(marker)
186
+
187
+ skip_doctests = False
188
+ try:
189
+ import matplotlib # noqa
190
+ except ImportError:
191
+ skip_doctests = True
192
+ reason = "matplotlib is required to run the doctests"
193
+
194
+ if _IS_32BIT:
195
+ reason = "doctest are only run when the default numpy int is 64 bits."
196
+ skip_doctests = True
197
+ elif sys.platform.startswith("win32"):
198
+ reason = (
199
+ "doctests are not run for Windows because numpy arrays "
200
+ "repr is inconsistent across platforms."
201
+ )
202
+ skip_doctests = True
203
+
204
+ if np_base_version >= parse_version("2"):
205
+ reason = "Due to NEP 51 numpy scalar repr has changed in numpy 2"
206
+ skip_doctests = True
207
+
208
+ # Normally doctest has the entire module's scope. Here we set globs to an empty dict
209
+ # to remove the module's scope:
210
+ # https://docs.python.org/3/library/doctest.html#what-s-the-execution-context
211
+ for item in items:
212
+ if isinstance(item, DoctestItem):
213
+ item.dtest.globs = {}
214
+
215
+ if skip_doctests:
216
+ skip_marker = pytest.mark.skip(reason=reason)
217
+
218
+ for item in items:
219
+ if isinstance(item, DoctestItem):
220
+ # work around an internal error in pytest when adding a skip
221
+ # mark to a doctest in a contextmanager, see
222
+ # https://github.com/pytest-dev/pytest/issues/8796 for more
223
+ # details.
224
+ if item.name != "sklearn._config.config_context":
225
+ item.add_marker(skip_marker)
226
+ try:
227
+ import PIL # noqa
228
+
229
+ pillow_installed = True
230
+ except ImportError:
231
+ pillow_installed = False
232
+
233
+ if not pillow_installed:
234
+ skip_marker = pytest.mark.skip(reason="pillow (or PIL) not installed!")
235
+ for item in items:
236
+ if item.name in [
237
+ "sklearn.feature_extraction.image.PatchExtractor",
238
+ "sklearn.feature_extraction.image.extract_patches_2d",
239
+ ]:
240
+ item.add_marker(skip_marker)
241
+
242
+
243
+ @pytest.fixture(scope="function")
244
+ def pyplot():
245
+ """Setup and teardown fixture for matplotlib.
246
+
247
+ This fixture checks if we can import matplotlib. If not, the tests will be
248
+ skipped. Otherwise, we close the figures before and after running the
249
+ functions.
250
+
251
+ Returns
252
+ -------
253
+ pyplot : module
254
+ The ``matplotlib.pyplot`` module.
255
+ """
256
+ pyplot = pytest.importorskip("matplotlib.pyplot")
257
+ pyplot.close("all")
258
+ yield pyplot
259
+ pyplot.close("all")
260
+
261
+
262
+ def pytest_configure(config):
263
+ # Use matplotlib agg backend during the tests including doctests
264
+ try:
265
+ import matplotlib
266
+
267
+ matplotlib.use("agg")
268
+ except ImportError:
269
+ pass
270
+
271
+ allowed_parallelism = joblib.cpu_count(only_physical_cores=True)
272
+ xdist_worker_count = environ.get("PYTEST_XDIST_WORKER_COUNT")
273
+ if xdist_worker_count is not None:
274
+ # Set the number of OpenMP and BLAS threads based on the number of workers
275
+ # xdist is using to prevent oversubscription.
276
+ allowed_parallelism = max(allowed_parallelism // int(xdist_worker_count), 1)
277
+ threadpool_limits(allowed_parallelism)
278
+
279
+ # Register global_random_seed plugin if it is not already registered
280
+ if not config.pluginmanager.hasplugin("sklearn.tests.random_seed"):
281
+ config.pluginmanager.register(random_seed)
282
+
283
+ if environ.get("SKLEARN_WARNINGS_AS_ERRORS", "0") != "0":
284
+ # This seems like the only way to programmatically change the config
285
+ # filterwarnings. This was suggested in
286
+ # https://github.com/pytest-dev/pytest/issues/3311#issuecomment-373177592
287
+ for line in get_pytest_filterwarning_lines():
288
+ config.addinivalue_line("filterwarnings", line)
289
+
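+ # A short illustrative sketch of the oversubscription guard in
+ # `pytest_configure` above, under assumed numbers: with 8 physical cores
+ # and a PYTEST_XDIST_WORKER_COUNT of 4, each xdist worker is capped at
+ # max(8 // 4, 1) == 2 BLAS/OpenMP threads.
+ #
+ #     allowed_parallelism = 8       # e.g. joblib.cpu_count(only_physical_cores=True)
+ #     xdist_worker_count = "4"      # e.g. value of PYTEST_XDIST_WORKER_COUNT
+ #     allowed_parallelism = max(allowed_parallelism // int(xdist_worker_count), 1)
+ #     assert allowed_parallelism == 2
+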
290
+
291
+ @pytest.fixture
292
+ def hide_available_pandas(monkeypatch):
293
+ """Pretend pandas was not installed."""
294
+ import_orig = builtins.__import__
295
+
296
+ def mocked_import(name, *args, **kwargs):
297
+ if name == "pandas":
298
+ raise ImportError()
299
+ return import_orig(name, *args, **kwargs)
300
+
301
+ monkeypatch.setattr(builtins, "__import__", mocked_import)
302
+
303
+
304
+ @pytest.fixture
305
+ def print_changed_only_false():
306
+ """Set `print_changed_only` to False for the duration of the test."""
307
+ set_config(print_changed_only=False)
308
+ yield
309
+ set_config(print_changed_only=True) # reset to default
venv/lib/python3.10/site-packages/sklearn/discriminant_analysis.py ADDED
@@ -0,0 +1,1047 @@
1
+ """
2
+ Linear Discriminant Analysis and Quadratic Discriminant Analysis
3
+ """
4
+
5
+ # Authors: Clemens Brunner
6
+ # Martin Billinger
7
+ # Matthieu Perrot
8
+ # Mathieu Blondel
9
+
10
+ # License: BSD 3-Clause
11
+
12
+ import warnings
13
+ from numbers import Integral, Real
14
+
15
+ import numpy as np
16
+ import scipy.linalg
17
+ from scipy import linalg
18
+
19
+ from .base import (
20
+ BaseEstimator,
21
+ ClassifierMixin,
22
+ ClassNamePrefixFeaturesOutMixin,
23
+ TransformerMixin,
24
+ _fit_context,
25
+ )
26
+ from .covariance import empirical_covariance, ledoit_wolf, shrunk_covariance
27
+ from .linear_model._base import LinearClassifierMixin
28
+ from .preprocessing import StandardScaler
29
+ from .utils._array_api import _expit, device, get_namespace, size
30
+ from .utils._param_validation import HasMethods, Interval, StrOptions
31
+ from .utils.extmath import softmax
32
+ from .utils.multiclass import check_classification_targets, unique_labels
33
+ from .utils.validation import check_is_fitted
34
+
35
+ __all__ = ["LinearDiscriminantAnalysis", "QuadraticDiscriminantAnalysis"]
36
+
37
+
38
+ def _cov(X, shrinkage=None, covariance_estimator=None):
39
+ """Estimate covariance matrix (using optional covariance_estimator).
40
+ Parameters
41
+ ----------
42
+ X : array-like of shape (n_samples, n_features)
43
+ Input data.
44
+
45
+ shrinkage : {'empirical', 'auto'} or float, default=None
46
+ Shrinkage parameter, possible values:
47
+ - None or 'empirical': no shrinkage (default).
48
+ - 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
49
+ - float between 0 and 1: fixed shrinkage parameter.
50
+
51
+ Shrinkage parameter is ignored if `covariance_estimator`
52
+ is not None.
53
+
54
+ covariance_estimator : estimator, default=None
55
+ If not None, `covariance_estimator` is used to estimate
56
+ the covariance matrices instead of relying on the empirical
57
+ covariance estimator (with potential shrinkage).
58
+ The object should have a fit method and a ``covariance_`` attribute
59
+ like the estimators in :mod:`sklearn.covariance`.
+ If None, the shrinkage parameter drives the estimate.
61
+
62
+ .. versionadded:: 0.24
63
+
64
+ Returns
65
+ -------
66
+ s : ndarray of shape (n_features, n_features)
67
+ Estimated covariance matrix.
68
+ """
69
+ if covariance_estimator is None:
70
+ shrinkage = "empirical" if shrinkage is None else shrinkage
71
+ if isinstance(shrinkage, str):
72
+ if shrinkage == "auto":
73
+ sc = StandardScaler() # standardize features
74
+ X = sc.fit_transform(X)
75
+ s = ledoit_wolf(X)[0]
76
+ # rescale
77
+ s = sc.scale_[:, np.newaxis] * s * sc.scale_[np.newaxis, :]
78
+ elif shrinkage == "empirical":
79
+ s = empirical_covariance(X)
80
+ elif isinstance(shrinkage, Real):
81
+ s = shrunk_covariance(empirical_covariance(X), shrinkage)
82
+ else:
83
+ if shrinkage is not None and shrinkage != 0:
84
+ raise ValueError(
85
+ "covariance_estimator and shrinkage parameters "
86
+ "are not None. Only one of the two can be set."
87
+ )
88
+ covariance_estimator.fit(X)
89
+ if not hasattr(covariance_estimator, "covariance_"):
90
+ raise ValueError(
91
+ "%s does not have a covariance_ attribute"
92
+ % covariance_estimator.__class__.__name__
93
+ )
94
+ s = covariance_estimator.covariance_
95
+ return s
96
+
97
+
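+ # An illustrative sketch of the 'auto' shrinkage branch in `_cov` above,
+ # with random toy data: Ledoit-Wolf shrinkage is applied to standardized
+ # features, and the result is rescaled back to the original units by the
+ # outer product of the per-feature scales.
+ #
+ #     import numpy as np
+ #     from sklearn.covariance import ledoit_wolf
+ #     from sklearn.preprocessing import StandardScaler
+ #
+ #     X = np.random.RandomState(0).randn(50, 3)
+ #     sc = StandardScaler()
+ #     Xs = sc.fit_transform(X)
+ #     s = ledoit_wolf(Xs)[0]
+ #     s = sc.scale_[:, np.newaxis] * s * sc.scale_[np.newaxis, :]
+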
98
+ def _class_means(X, y):
99
+ """Compute class means.
100
+
101
+ Parameters
102
+ ----------
103
+ X : array-like of shape (n_samples, n_features)
104
+ Input data.
105
+
106
+ y : array-like of shape (n_samples,) or (n_samples, n_targets)
107
+ Target values.
108
+
109
+ Returns
110
+ -------
111
+ means : array-like of shape (n_classes, n_features)
112
+ Class means.
113
+ """
114
+ xp, is_array_api_compliant = get_namespace(X)
115
+ classes, y = xp.unique_inverse(y)
116
+ means = xp.zeros((classes.shape[0], X.shape[1]), device=device(X), dtype=X.dtype)
117
+
118
+ if is_array_api_compliant:
119
+ for i in range(classes.shape[0]):
120
+ means[i, :] = xp.mean(X[y == i], axis=0)
121
+ else:
122
+ # TODO: Explore the choice of using bincount + add.at, as it seems
+ # suboptimal performance-wise
124
+ cnt = np.bincount(y)
125
+ np.add.at(means, y, X)
126
+ means /= cnt[:, None]
127
+ return means
128
+
129
+
130
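A small NumPy sketch of the bincount + add.at fast path above (illustrative;
assumes y is already encoded as integers 0..n_classes-1):

    import numpy as np

    X = np.array([[0.0, 1.0], [2.0, 3.0], [4.0, 5.0], [6.0, 7.0]])
    y = np.array([0, 0, 1, 1])

    means = np.zeros((2, X.shape[1]))
    np.add.at(means, y, X)             # accumulate each row into its class slot
    means /= np.bincount(y)[:, None]   # divide by per-class sample counts
    # means is now [[1., 2.], [5., 6.]]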
+ def _class_cov(X, y, priors, shrinkage=None, covariance_estimator=None):
131
+ """Compute weighted within-class covariance matrix.
132
+
133
+ The per-class covariance matrices are weighted by the class priors.
134
+
135
+ Parameters
136
+ ----------
137
+ X : array-like of shape (n_samples, n_features)
138
+ Input data.
139
+
140
+ y : array-like of shape (n_samples,) or (n_samples, n_targets)
141
+ Target values.
142
+
143
+ priors : array-like of shape (n_classes,)
144
+ Class priors.
145
+
146
+ shrinkage : 'auto' or float, default=None
147
+ Shrinkage parameter, possible values:
148
+ - None: no shrinkage (default).
149
+ - 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
150
+ - float between 0 and 1: fixed shrinkage parameter.
151
+
152
+ Shrinkage parameter is ignored if `covariance_estimator` is not None.
153
+
154
+ covariance_estimator : estimator, default=None
155
+ If not None, `covariance_estimator` is used to estimate
156
+ the covariance matrices instead of relying on the empirical
157
+ covariance estimator (with potential shrinkage).
158
+ The object should have a fit method and a ``covariance_`` attribute
159
+ like the estimators in sklearn.covariance.
160
+ If None, the shrinkage parameter drives the estimate.
161
+
162
+ .. versionadded:: 0.24
163
+
164
+ Returns
165
+ -------
166
+ cov : array-like of shape (n_features, n_features)
167
+ Weighted within-class covariance matrix.
168
+ """
169
+ classes = np.unique(y)
170
+ cov = np.zeros(shape=(X.shape[1], X.shape[1]))
171
+ for idx, group in enumerate(classes):
172
+ Xg = X[y == group, :]
173
+ cov += priors[idx] * np.atleast_2d(_cov(Xg, shrinkage, covariance_estimator))
174
+ return cov
175
+
176
+
177
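A sketch of the prior-weighted pooling `_class_cov` performs (assumes scikit-learn
is installed; empirical estimator, no shrinkage):

    import numpy as np
    from sklearn.covariance import empirical_covariance

    rng = np.random.RandomState(0)
    X = rng.randn(60, 2)
    y = np.repeat([0, 1, 2], 20)
    priors = np.bincount(y) / y.size

    within = sum(
        p * empirical_covariance(X[y == k]) for k, p in enumerate(priors)
    )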
+ class LinearDiscriminantAnalysis(
178
+ ClassNamePrefixFeaturesOutMixin,
179
+ LinearClassifierMixin,
180
+ TransformerMixin,
181
+ BaseEstimator,
182
+ ):
183
+ """Linear Discriminant Analysis.
184
+
185
+ A classifier with a linear decision boundary, generated by fitting class
186
+ conditional densities to the data and using Bayes' rule.
187
+
188
+ The model fits a Gaussian density to each class, assuming that all classes
189
+ share the same covariance matrix.
190
+
191
+ The fitted model can also be used to reduce the dimensionality of the input
192
+ by projecting it to the most discriminative directions, using the
193
+ `transform` method.
194
+
195
+ .. versionadded:: 0.17
196
+ *LinearDiscriminantAnalysis*.
197
+
198
+ Read more in the :ref:`User Guide <lda_qda>`.
199
+
200
+ Parameters
201
+ ----------
202
+ solver : {'svd', 'lsqr', 'eigen'}, default='svd'
203
+ Solver to use, possible values:
204
+ - 'svd': Singular value decomposition (default).
205
+ Does not compute the covariance matrix, therefore this solver is
206
+ recommended for data with a large number of features.
207
+ - 'lsqr': Least squares solution.
208
+ Can be combined with shrinkage or custom covariance estimator.
209
+ - 'eigen': Eigenvalue decomposition.
210
+ Can be combined with shrinkage or custom covariance estimator.
211
+
212
+ .. versionchanged:: 1.2
213
+ `solver="svd"` now has experimental Array API support. See the
214
+ :ref:`Array API User Guide <array_api>` for more details.
215
+
216
+ shrinkage : 'auto' or float, default=None
217
+ Shrinkage parameter, possible values:
218
+ - None: no shrinkage (default).
219
+ - 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
220
+ - float between 0 and 1: fixed shrinkage parameter.
221
+
222
+ This should be left to None if `covariance_estimator` is used.
223
+ Note that shrinkage works only with 'lsqr' and 'eigen' solvers.
224
+
225
+ priors : array-like of shape (n_classes,), default=None
226
+ The class prior probabilities. By default, the class proportions are
227
+ inferred from the training data.
228
+
229
+ n_components : int, default=None
230
+ Number of components (<= min(n_classes - 1, n_features)) for
231
+ dimensionality reduction. If None, will be set to
232
+ min(n_classes - 1, n_features). This parameter only affects the
233
+ `transform` method.
234
+
235
+ store_covariance : bool, default=False
236
+ If True, explicitly compute the weighted within-class covariance
237
+ matrix when solver is 'svd'. The matrix is always computed
238
+ and stored for the other solvers.
239
+
240
+ .. versionadded:: 0.17
241
+
242
+ tol : float, default=1.0e-4
243
+ Absolute threshold for a singular value of X to be considered
244
+ significant, used to estimate the rank of X. Dimensions whose
245
+ singular values are non-significant are discarded. Only used if
246
+ solver is 'svd'.
247
+
248
+ .. versionadded:: 0.17
249
+
250
+ covariance_estimator : covariance estimator, default=None
251
+ If not None, `covariance_estimator` is used to estimate
252
+ the covariance matrices instead of relying on the empirical
253
+ covariance estimator (with potential shrinkage).
254
+ The object should have a fit method and a ``covariance_`` attribute
255
+ like the estimators in :mod:`sklearn.covariance`.
256
+ If None, the shrinkage parameter drives the estimate.
257
+
258
+ This should be left to None if `shrinkage` is used.
259
+ Note that `covariance_estimator` works only with 'lsqr' and 'eigen'
260
+ solvers.
261
+
262
+ .. versionadded:: 0.24
263
+
264
+ Attributes
265
+ ----------
266
+ coef_ : ndarray of shape (n_features,) or (n_classes, n_features)
267
+ Weight vector(s).
268
+
269
+ intercept_ : ndarray of shape (n_classes,)
270
+ Intercept term.
271
+
272
+ covariance_ : array-like of shape (n_features, n_features)
273
+ Weighted within-class covariance matrix. It corresponds to
274
+ `sum_k prior_k * C_k` where `C_k` is the covariance matrix of the
275
+ samples in class `k`. The `C_k` are estimated using the (potentially
276
+ shrunk) biased estimator of covariance. If solver is 'svd', only
277
+ exists when `store_covariance` is True.
278
+
279
+ explained_variance_ratio_ : ndarray of shape (n_components,)
280
+ Percentage of variance explained by each of the selected components.
281
+ If ``n_components`` is not set then all components are stored and the
282
+ sum of explained variances is equal to 1.0. Only available when eigen
283
+ or svd solver is used.
284
+
285
+ means_ : array-like of shape (n_classes, n_features)
286
+ Class-wise means.
287
+
288
+ priors_ : array-like of shape (n_classes,)
289
+ Class priors (sum to 1).
290
+
291
+ scalings_ : array-like of shape (rank, n_classes - 1)
292
+ Scaling of the features in the space spanned by the class centroids.
293
+ Only available for 'svd' and 'eigen' solvers.
294
+
295
+ xbar_ : array-like of shape (n_features,)
296
+ Overall mean. Only present if solver is 'svd'.
297
+
298
+ classes_ : array-like of shape (n_classes,)
299
+ Unique class labels.
300
+
301
+ n_features_in_ : int
302
+ Number of features seen during :term:`fit`.
303
+
304
+ .. versionadded:: 0.24
305
+
306
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
307
+ Names of features seen during :term:`fit`. Defined only when `X`
308
+ has feature names that are all strings.
309
+
310
+ .. versionadded:: 1.0
311
+
312
+ See Also
313
+ --------
314
+ QuadraticDiscriminantAnalysis : Quadratic Discriminant Analysis.
315
+
316
+ Examples
317
+ --------
318
+ >>> import numpy as np
319
+ >>> from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
320
+ >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
321
+ >>> y = np.array([1, 1, 1, 2, 2, 2])
322
+ >>> clf = LinearDiscriminantAnalysis()
323
+ >>> clf.fit(X, y)
324
+ LinearDiscriminantAnalysis()
325
+ >>> print(clf.predict([[-0.8, -1]]))
326
+ [1]
327
+ """
328
+
329
+ _parameter_constraints: dict = {
330
+ "solver": [StrOptions({"svd", "lsqr", "eigen"})],
331
+ "shrinkage": [StrOptions({"auto"}), Interval(Real, 0, 1, closed="both"), None],
332
+ "n_components": [Interval(Integral, 1, None, closed="left"), None],
333
+ "priors": ["array-like", None],
334
+ "store_covariance": ["boolean"],
335
+ "tol": [Interval(Real, 0, None, closed="left")],
336
+ "covariance_estimator": [HasMethods("fit"), None],
337
+ }
338
+
339
+ def __init__(
340
+ self,
341
+ solver="svd",
342
+ shrinkage=None,
343
+ priors=None,
344
+ n_components=None,
345
+ store_covariance=False,
346
+ tol=1e-4,
347
+ covariance_estimator=None,
348
+ ):
349
+ self.solver = solver
350
+ self.shrinkage = shrinkage
351
+ self.priors = priors
352
+ self.n_components = n_components
353
+ self.store_covariance = store_covariance # used only in svd solver
354
+ self.tol = tol # used only in svd solver
355
+ self.covariance_estimator = covariance_estimator
356
+
357
+ def _solve_lstsq(self, X, y, shrinkage, covariance_estimator):
358
+ """Least squares solver.
359
+
360
+ The least squares solver computes a straightforward solution of the
361
+ optimal decision rule based directly on the discriminant functions. It
362
+ can only be used for classification (with any covariance estimator),
363
+ because estimation of eigenvectors is not performed. Therefore,
364
+ dimensionality reduction with the `transform` method is not
365
+ supported.
366
+
367
+ Parameters
368
+ ----------
369
+ X : array-like of shape (n_samples, n_features)
370
+ Training data.
371
+
372
+ y : array-like of shape (n_samples,) or (n_samples, n_classes)
373
+ Target values.
374
+
375
+ shrinkage : 'auto', float or None
376
+ Shrinkage parameter, possible values:
377
+ - None: no shrinkage.
378
+ - 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
379
+ - float between 0 and 1: fixed shrinkage parameter.
380
+
381
+ Shrinkage parameter is ignored if `covariance_estimator` is
382
+ not None.
383
+
384
+ covariance_estimator : estimator, default=None
385
+ If not None, `covariance_estimator` is used to estimate
386
+ the covariance matrices instead of relying on the empirical
387
+ covariance estimator (with potential shrinkage).
388
+ The object should have a fit method and a ``covariance_`` attribute
389
+ like the estimators in sklearn.covariance.
390
+ If None, the shrinkage parameter drives the estimate.
391
+
392
+ .. versionadded:: 0.24
393
+
394
+ Notes
395
+ -----
396
+ This solver is based on [1]_, section 2.6.2, pp. 39-41.
397
+
398
+ References
399
+ ----------
400
+ .. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
401
+ (Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
402
+ 0-471-05669-3.
403
+ """
404
+ self.means_ = _class_means(X, y)
405
+ self.covariance_ = _class_cov(
406
+ X, y, self.priors_, shrinkage, covariance_estimator
407
+ )
408
+ self.coef_ = linalg.lstsq(self.covariance_, self.means_.T)[0].T
409
+ self.intercept_ = -0.5 * np.diag(np.dot(self.means_, self.coef_.T)) + np.log(
410
+ self.priors_
411
+ )
412
+
413
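A hedged usage sketch: the lstsq path above is reached through the public
estimator with `solver="lsqr"` (illustrative toy data; assumes scikit-learn is
installed):

    import numpy as np
    from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

    X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]], dtype=float)
    y = np.array([1, 1, 1, 2, 2, 2])

    clf = LinearDiscriminantAnalysis(solver="lsqr", shrinkage="auto").fit(X, y)
    # coef_ and intercept_ come from the lstsq solution above; transform()
    # is unavailable for this solver, as documented.
    print(clf.predict([[-0.8, -1.0]]))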
+ def _solve_eigen(self, X, y, shrinkage, covariance_estimator):
414
+ """Eigenvalue solver.
415
+
416
+ The eigenvalue solver computes the optimal solution of the Rayleigh
417
+ coefficient (basically the ratio of between class scatter to within
418
+ class scatter). This solver supports both classification and
419
+ dimensionality reduction (with any covariance estimator).
420
+
421
+ Parameters
422
+ ----------
423
+ X : array-like of shape (n_samples, n_features)
424
+ Training data.
425
+
426
+ y : array-like of shape (n_samples,) or (n_samples, n_targets)
427
+ Target values.
428
+
429
+ shrinkage : 'auto', float or None
430
+ Shrinkage parameter, possible values:
431
+ - None: no shrinkage.
432
+ - 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
433
+ - float between 0 and 1: fixed shrinkage constant.
434
+
435
+ Shrinkage parameter is ignored if `covariance_estimator` is
436
+ not None.
437
+
438
+ covariance_estimator : estimator, default=None
439
+ If not None, `covariance_estimator` is used to estimate
440
+ the covariance matrices instead of relying on the empirical
441
+ covariance estimator (with potential shrinkage).
442
+ The object should have a fit method and a ``covariance_`` attribute
443
+ like the estimators in sklearn.covariance.
444
+ If None, the shrinkage parameter drives the estimate.
445
+
446
+ .. versionadded:: 0.24
447
+
448
+ Notes
449
+ -----
450
+ This solver is based on [1]_, section 3.8.3, pp. 121-124.
451
+
452
+ References
453
+ ----------
454
+ .. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
455
+ (Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
456
+ 0-471-05669-3.
457
+ """
458
+ self.means_ = _class_means(X, y)
459
+ self.covariance_ = _class_cov(
460
+ X, y, self.priors_, shrinkage, covariance_estimator
461
+ )
462
+
463
+ Sw = self.covariance_ # within scatter
464
+ St = _cov(X, shrinkage, covariance_estimator) # total scatter
465
+ Sb = St - Sw # between scatter
466
+
467
+ evals, evecs = linalg.eigh(Sb, Sw)
468
+ self.explained_variance_ratio_ = np.sort(evals / np.sum(evals))[::-1][
469
+ : self._max_components
470
+ ]
471
+ evecs = evecs[:, np.argsort(evals)[::-1]] # sort eigenvectors
472
+
473
+ self.scalings_ = evecs
474
+ self.coef_ = np.dot(self.means_, evecs).dot(evecs.T)
475
+ self.intercept_ = -0.5 * np.diag(np.dot(self.means_, self.coef_.T)) + np.log(
476
+ self.priors_
477
+ )
478
+
479
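The eigen path also accepts a custom covariance estimator in place of shrinkage;
a minimal sketch with the OAS estimator (assumes scikit-learn is installed):

    import numpy as np
    from sklearn.covariance import OAS
    from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

    rng = np.random.RandomState(0)
    X = np.vstack([rng.randn(30, 4) - 1, rng.randn(30, 4) + 1])
    y = np.array([0] * 30 + [1] * 30)

    clf = LinearDiscriminantAnalysis(solver="eigen", covariance_estimator=OAS())
    clf.fit(X, y)
    X_1d = clf.transform(X)  # two classes -> a single discriminative axis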
+ def _solve_svd(self, X, y):
480
+ """SVD solver.
481
+
482
+ Parameters
483
+ ----------
484
+ X : array-like of shape (n_samples, n_features)
485
+ Training data.
486
+
487
+ y : array-like of shape (n_samples,) or (n_samples, n_targets)
488
+ Target values.
489
+ """
490
+ xp, is_array_api_compliant = get_namespace(X)
491
+
492
+ if is_array_api_compliant:
493
+ svd = xp.linalg.svd
494
+ else:
495
+ svd = scipy.linalg.svd
496
+
497
+ n_samples, n_features = X.shape
498
+ n_classes = self.classes_.shape[0]
499
+
500
+ self.means_ = _class_means(X, y)
501
+ if self.store_covariance:
502
+ self.covariance_ = _class_cov(X, y, self.priors_)
503
+
504
+ Xc = []
505
+ for idx, group in enumerate(self.classes_):
506
+ Xg = X[y == group]
507
+ Xc.append(Xg - self.means_[idx, :])
508
+
509
+ self.xbar_ = self.priors_ @ self.means_
510
+
511
+ Xc = xp.concat(Xc, axis=0)
512
+
513
+ # 1) within (univariate) scaling by within-class std-dev
514
+ std = xp.std(Xc, axis=0)
515
+ # avoid division by zero in normalization
516
+ std[std == 0] = 1.0
517
+ fac = xp.asarray(1.0 / (n_samples - n_classes))
518
+
519
+ # 2) Within variance scaling
520
+ X = xp.sqrt(fac) * (Xc / std)
521
+ # SVD of centered (within-)scaled data
522
+ U, S, Vt = svd(X, full_matrices=False)
523
+
524
+ rank = xp.sum(xp.astype(S > self.tol, xp.int32))
525
+ # Scaling of within covariance is: V' 1/S
526
+ scalings = (Vt[:rank, :] / std).T / S[:rank]
527
+ fac = 1.0 if n_classes == 1 else 1.0 / (n_classes - 1)
528
+
529
+ # 3) Between variance scaling
530
+ # Scale weighted centers
531
+ X = (
532
+ (xp.sqrt((n_samples * self.priors_) * fac)) * (self.means_ - self.xbar_).T
533
+ ).T @ scalings
534
+ # Centers live in a space of at most n_classes - 1 dimensions
535
+ # Use SVD to find projection in the space spanned by the
536
+ # (n_classes) centers
537
+ _, S, Vt = svd(X, full_matrices=False)
538
+
539
+ if self._max_components == 0:
540
+ self.explained_variance_ratio_ = xp.empty((0,), dtype=S.dtype)
541
+ else:
542
+ self.explained_variance_ratio_ = (S**2 / xp.sum(S**2))[
543
+ : self._max_components
544
+ ]
545
+
546
+ rank = xp.sum(xp.astype(S > self.tol * S[0], xp.int32))
547
+ self.scalings_ = scalings @ Vt.T[:, :rank]
548
+ coef = (self.means_ - self.xbar_) @ self.scalings_
549
+ self.intercept_ = -0.5 * xp.sum(coef**2, axis=1) + xp.log(self.priors_)
550
+ self.coef_ = coef @ self.scalings_.T
551
+ self.intercept_ -= self.xbar_ @ self.coef_.T
552
+
553
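After the SVD fit above, the decision function is affine in X; a quick consistency
sketch (illustrative; assumes scikit-learn and NumPy are installed):

    import numpy as np
    from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

    rng = np.random.RandomState(0)
    X = np.vstack([rng.randn(20, 3), rng.randn(20, 3) + 2])
    y = np.array([0] * 20 + [1] * 20)

    clf = LinearDiscriminantAnalysis(solver="svd").fit(X, y)
    manual = X @ clf.coef_.T + clf.intercept_
    assert np.allclose(manual.ravel(), clf.decision_function(X))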
+ @_fit_context(
554
+ # LinearDiscriminantAnalysis.covariance_estimator is not validated yet
555
+ prefer_skip_nested_validation=False
556
+ )
557
+ def fit(self, X, y):
558
+ """Fit the Linear Discriminant Analysis model.
559
+
560
+ .. versionchanged:: 0.19
561
+ *store_covariance* has been moved to main constructor.
562
+
563
+ .. versionchanged:: 0.19
564
+ *tol* has been moved to main constructor.
565
+
566
+ Parameters
567
+ ----------
568
+ X : array-like of shape (n_samples, n_features)
569
+ Training data.
570
+
571
+ y : array-like of shape (n_samples,)
572
+ Target values.
573
+
574
+ Returns
575
+ -------
576
+ self : object
577
+ Fitted estimator.
578
+ """
579
+ xp, _ = get_namespace(X)
580
+
581
+ X, y = self._validate_data(
582
+ X, y, ensure_min_samples=2, dtype=[xp.float64, xp.float32]
583
+ )
584
+ self.classes_ = unique_labels(y)
585
+ n_samples, _ = X.shape
586
+ n_classes = self.classes_.shape[0]
587
+
588
+ if n_samples == n_classes:
589
+ raise ValueError(
590
+ "The number of samples must be more than the number of classes."
591
+ )
592
+
593
+ if self.priors is None: # estimate priors from sample
594
+ _, cnts = xp.unique_counts(y) # non-negative ints
595
+ self.priors_ = xp.astype(cnts, X.dtype) / float(y.shape[0])
596
+ else:
597
+ self.priors_ = xp.asarray(self.priors, dtype=X.dtype)
598
+
599
+ if xp.any(self.priors_ < 0):
600
+ raise ValueError("priors must be non-negative")
601
+
602
+ if xp.abs(xp.sum(self.priors_) - 1.0) > 1e-5:
603
+ warnings.warn("The priors do not sum to 1. Renormalizing", UserWarning)
604
+ self.priors_ = self.priors_ / self.priors_.sum()
605
+
606
+ # Maximum number of components no matter what n_components is
607
+ # specified:
608
+ max_components = min(n_classes - 1, X.shape[1])
609
+
610
+ if self.n_components is None:
611
+ self._max_components = max_components
612
+ else:
613
+ if self.n_components > max_components:
614
+ raise ValueError(
615
+ "n_components cannot be larger than min(n_features, n_classes - 1)."
616
+ )
617
+ self._max_components = self.n_components
618
+
619
+ if self.solver == "svd":
620
+ if self.shrinkage is not None:
621
+ raise NotImplementedError("shrinkage not supported with 'svd' solver.")
622
+ if self.covariance_estimator is not None:
623
+ raise ValueError(
624
+ "covariance estimator "
625
+ "is not supported "
626
+ "with svd solver. Try another solver"
627
+ )
628
+ self._solve_svd(X, y)
629
+ elif self.solver == "lsqr":
630
+ self._solve_lstsq(
631
+ X,
632
+ y,
633
+ shrinkage=self.shrinkage,
634
+ covariance_estimator=self.covariance_estimator,
635
+ )
636
+ elif self.solver == "eigen":
637
+ self._solve_eigen(
638
+ X,
639
+ y,
640
+ shrinkage=self.shrinkage,
641
+ covariance_estimator=self.covariance_estimator,
642
+ )
643
+ if size(self.classes_) == 2: # treat binary case as a special case
644
+ coef_ = xp.asarray(self.coef_[1, :] - self.coef_[0, :], dtype=X.dtype)
645
+ self.coef_ = xp.reshape(coef_, (1, -1))
646
+ intercept_ = xp.asarray(
647
+ self.intercept_[1] - self.intercept_[0], dtype=X.dtype
648
+ )
649
+ self.intercept_ = xp.reshape(intercept_, (1,))
650
+ self._n_features_out = self._max_components
651
+ return self
652
+
653
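A sketch of the component cap enforced in fit above: with 3 classes, at most
n_classes - 1 = 2 discriminative axes survive regardless of n_features
(illustrative; assumes scikit-learn is installed):

    import numpy as np
    from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

    rng = np.random.RandomState(0)
    X = rng.randn(90, 10)
    y = np.repeat([0, 1, 2], 30)

    lda = LinearDiscriminantAnalysis(n_components=2).fit(X, y)
    print(lda.explained_variance_ratio_.shape)  # (2,)
    # n_components=3 would raise ValueError, per the check in fit above.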
+ def transform(self, X):
654
+ """Project data to maximize class separation.
655
+
656
+ Parameters
657
+ ----------
658
+ X : array-like of shape (n_samples, n_features)
659
+ Input data.
660
+
661
+ Returns
662
+ -------
663
+ X_new : ndarray of shape (n_samples, n_components) or \
664
+ (n_samples, min(rank, n_components))
665
+ Transformed data. In the case of the 'svd' solver, the shape
666
+ is (n_samples, min(rank, n_components)).
667
+ """
668
+ if self.solver == "lsqr":
669
+ raise NotImplementedError(
670
+ "transform not implemented for 'lsqr' solver (use 'svd' or 'eigen')."
671
+ )
672
+ check_is_fitted(self)
673
+ xp, _ = get_namespace(X)
674
+ X = self._validate_data(X, reset=False)
675
+
676
+ if self.solver == "svd":
677
+ X_new = (X - self.xbar_) @ self.scalings_
678
+ elif self.solver == "eigen":
679
+ X_new = X @ self.scalings_
680
+
681
+ return X_new[:, : self._max_components]
682
+
683
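A usage sketch of transform for supervised dimensionality reduction (assumes
scikit-learn and its bundled iris dataset are available):

    from sklearn.datasets import load_iris
    from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

    X, y = load_iris(return_X_y=True)
    X_2d = LinearDiscriminantAnalysis(n_components=2).fit(X, y).transform(X)
    print(X_2d.shape)  # (150, 2)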
+ def predict_proba(self, X):
684
+ """Estimate probability.
685
+
686
+ Parameters
687
+ ----------
688
+ X : array-like of shape (n_samples, n_features)
689
+ Input data.
690
+
691
+ Returns
692
+ -------
693
+ C : ndarray of shape (n_samples, n_classes)
694
+ Estimated probabilities.
695
+ """
696
+ check_is_fitted(self)
697
+ xp, is_array_api_compliant = get_namespace(X)
698
+ decision = self.decision_function(X)
699
+ if size(self.classes_) == 2:
700
+ proba = _expit(decision)
701
+ return xp.stack([1 - proba, proba], axis=1)
702
+ else:
703
+ return softmax(decision)
704
+
705
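In the binary branch above, the probabilities are the logistic sigmoid of the
decision values; a quick check (illustrative; assumes scikit-learn and SciPy are
installed):

    import numpy as np
    from scipy.special import expit
    from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

    rng = np.random.RandomState(0)
    X = np.vstack([rng.randn(20, 2), rng.randn(20, 2) + 2])
    y = np.array([0] * 20 + [1] * 20)

    clf = LinearDiscriminantAnalysis().fit(X, y)
    assert np.allclose(clf.predict_proba(X)[:, 1], expit(clf.decision_function(X)))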
+ def predict_log_proba(self, X):
706
+ """Estimate log probability.
707
+
708
+ Parameters
709
+ ----------
710
+ X : array-like of shape (n_samples, n_features)
711
+ Input data.
712
+
713
+ Returns
714
+ -------
715
+ C : ndarray of shape (n_samples, n_classes)
716
+ Estimated log probabilities.
717
+ """
718
+ xp, _ = get_namespace(X)
719
+ prediction = self.predict_proba(X)
720
+
721
+ info = xp.finfo(prediction.dtype)
722
+ if hasattr(info, "smallest_normal"):
723
+ smallest_normal = info.smallest_normal
724
+ else:
725
+ # smallest_normal was introduced in NumPy 1.22
726
+ smallest_normal = info.tiny
727
+
728
+ prediction[prediction == 0.0] += smallest_normal
729
+ return xp.log(prediction)
730
+
731
+ def decision_function(self, X):
732
+ """Apply decision function to an array of samples.
733
+
734
+ The decision function is equal (up to a constant factor) to the
735
+ log-posterior of the model, i.e. `log p(y = k | x)`. In a binary
736
+ classification setting this instead corresponds to the difference
737
+ `log p(y = 1 | x) - log p(y = 0 | x)`. See :ref:`lda_qda_math`.
738
+
739
+ Parameters
740
+ ----------
741
+ X : array-like of shape (n_samples, n_features)
742
+ Array of samples (test vectors).
743
+
744
+ Returns
745
+ -------
746
+ C : ndarray of shape (n_samples,) or (n_samples, n_classes)
747
+ Decision function values related to each class, per sample.
748
+ In the two-class case, the shape is (n_samples,), giving the
749
+ log likelihood ratio of the positive class.
750
+ """
751
+ # Only override for the doc
752
+ return super().decision_function(X)
753
+
754
+ def _more_tags(self):
755
+ return {"array_api_support": True}
756
+
757
+
758
+ class QuadraticDiscriminantAnalysis(ClassifierMixin, BaseEstimator):
759
+ """Quadratic Discriminant Analysis.
760
+
761
+ A classifier with a quadratic decision boundary, generated
762
+ by fitting class conditional densities to the data
763
+ and using Bayes' rule.
764
+
765
+ The model fits a Gaussian density to each class.
766
+
767
+ .. versionadded:: 0.17
768
+ *QuadraticDiscriminantAnalysis*
769
+
770
+ Read more in the :ref:`User Guide <lda_qda>`.
771
+
772
+ Parameters
773
+ ----------
774
+ priors : array-like of shape (n_classes,), default=None
775
+ Class priors. By default, the class proportions are inferred from the
776
+ training data.
777
+
778
+ reg_param : float, default=0.0
779
+ Regularizes the per-class covariance estimates by transforming S2 as
780
+ ``S2 = (1 - reg_param) * S2 + reg_param * np.eye(n_features)``,
781
+ where S2 corresponds to the `scaling_` attribute of a given class.
782
+
783
+ store_covariance : bool, default=False
784
+ If True, the class covariance matrices are explicitly computed and
785
+ stored in the `self.covariance_` attribute.
786
+
787
+ .. versionadded:: 0.17
788
+
789
+ tol : float, default=1.0e-4
790
+ Absolute threshold for a singular value to be considered significant,
791
+ used to estimate the rank of `Xk` where `Xk` is the centered matrix
792
+ of samples in class k. This parameter does not affect the
793
+ predictions. It only controls a warning that is raised when features
794
+ are considered to be colinear.
795
+
796
+ .. versionadded:: 0.17
797
+
798
+ Attributes
799
+ ----------
800
+ covariance_ : list of len n_classes of ndarray \
801
+ of shape (n_features, n_features)
802
+ For each class, gives the covariance matrix estimated using the
803
+ samples of that class. The estimations are unbiased. Only present if
804
+ `store_covariance` is True.
805
+
806
+ means_ : array-like of shape (n_classes, n_features)
807
+ Class-wise means.
808
+
809
+ priors_ : array-like of shape (n_classes,)
810
+ Class priors (sum to 1).
811
+
812
+ rotations_ : list of len n_classes of ndarray of shape (n_features, n_k)
813
+ For each class k an array of shape (n_features, n_k), where
814
+ ``n_k = min(n_features, number of elements in class k)``.
815
+ It is the rotation of the Gaussian distribution, i.e. its
816
+ principal axis. It corresponds to `V`, the matrix of eigenvectors
817
+ coming from the SVD of `Xk = U S Vt` where `Xk` is the centered
818
+ matrix of samples from class k.
819
+
820
+ scalings_ : list of len n_classes of ndarray of shape (n_k,)
821
+ For each class, contains the scaling of
822
+ the Gaussian distributions along its principal axes, i.e. the
823
+ variance in the rotated coordinate system. It corresponds to `S^2 /
824
+ (n_samples - 1)`, where `S` is the diagonal matrix of singular values
825
+ from the SVD of `Xk`, where `Xk` is the centered matrix of samples
826
+ from class k.
827
+
828
+ classes_ : ndarray of shape (n_classes,)
829
+ Unique class labels.
830
+
831
+ n_features_in_ : int
832
+ Number of features seen during :term:`fit`.
833
+
834
+ .. versionadded:: 0.24
835
+
836
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
837
+ Names of features seen during :term:`fit`. Defined only when `X`
838
+ has feature names that are all strings.
839
+
840
+ .. versionadded:: 1.0
841
+
842
+ See Also
843
+ --------
844
+ LinearDiscriminantAnalysis : Linear Discriminant Analysis.
845
+
846
+ Examples
847
+ --------
848
+ >>> from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
849
+ >>> import numpy as np
850
+ >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
851
+ >>> y = np.array([1, 1, 1, 2, 2, 2])
852
+ >>> clf = QuadraticDiscriminantAnalysis()
853
+ >>> clf.fit(X, y)
854
+ QuadraticDiscriminantAnalysis()
855
+ >>> print(clf.predict([[-0.8, -1]]))
856
+ [1]
857
+ """
858
+
859
+ _parameter_constraints: dict = {
860
+ "priors": ["array-like", None],
861
+ "reg_param": [Interval(Real, 0, 1, closed="both")],
862
+ "store_covariance": ["boolean"],
863
+ "tol": [Interval(Real, 0, None, closed="left")],
864
+ }
865
+
866
+ def __init__(
867
+ self, *, priors=None, reg_param=0.0, store_covariance=False, tol=1.0e-4
868
+ ):
869
+ self.priors = priors
870
+ self.reg_param = reg_param
871
+ self.store_covariance = store_covariance
872
+ self.tol = tol
873
+
874
+ @_fit_context(prefer_skip_nested_validation=True)
875
+ def fit(self, X, y):
876
+ """Fit the model according to the given training data and parameters.
877
+
878
+ .. versionchanged:: 0.19
879
+ ``store_covariances`` has been moved to main constructor as
880
+ ``store_covariance``
881
+
882
+ .. versionchanged:: 0.19
883
+ ``tol`` has been moved to main constructor.
884
+
885
+ Parameters
886
+ ----------
887
+ X : array-like of shape (n_samples, n_features)
888
+ Training vector, where `n_samples` is the number of samples and
889
+ `n_features` is the number of features.
890
+
891
+ y : array-like of shape (n_samples,)
892
+ Target values (integers).
893
+
894
+ Returns
895
+ -------
896
+ self : object
897
+ Fitted estimator.
898
+ """
899
+ X, y = self._validate_data(X, y)
900
+ check_classification_targets(y)
901
+ self.classes_, y = np.unique(y, return_inverse=True)
902
+ n_samples, n_features = X.shape
903
+ n_classes = len(self.classes_)
904
+ if n_classes < 2:
905
+ raise ValueError(
906
+ "The number of classes has to be greater than one; got %d class"
907
+ % (n_classes)
908
+ )
909
+ if self.priors is None:
910
+ self.priors_ = np.bincount(y) / float(n_samples)
911
+ else:
912
+ self.priors_ = np.array(self.priors)
913
+
914
+ cov = None
915
+ store_covariance = self.store_covariance
916
+ if store_covariance:
917
+ cov = []
918
+ means = []
919
+ scalings = []
920
+ rotations = []
921
+ for ind in range(n_classes):
922
+ Xg = X[y == ind, :]
923
+ meang = Xg.mean(0)
924
+ means.append(meang)
925
+ if len(Xg) == 1:
926
+ raise ValueError(
927
+ "y has only 1 sample in class %s, covariance is ill defined."
928
+ % str(self.classes_[ind])
929
+ )
930
+ Xgc = Xg - meang
931
+ # Xgc = U * S * V.T
932
+ _, S, Vt = np.linalg.svd(Xgc, full_matrices=False)
933
+ rank = np.sum(S > self.tol)
934
+ if rank < n_features:
935
+ warnings.warn("Variables are collinear")
936
+ S2 = (S**2) / (len(Xg) - 1)
937
+ S2 = ((1 - self.reg_param) * S2) + self.reg_param
938
+ if self.store_covariance or store_covariance:
939
+ # cov = V * (S^2 / (n-1)) * V.T
940
+ cov.append(np.dot(S2 * Vt.T, Vt))
941
+ scalings.append(S2)
942
+ rotations.append(Vt.T)
943
+ if self.store_covariance or store_covariance:
944
+ self.covariance_ = cov
945
+ self.means_ = np.asarray(means)
946
+ self.scalings_ = scalings
947
+ self.rotations_ = rotations
948
+ return self
949
+
950
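A hedged sketch of the reg_param regularization handled in fit above: the
per-class variances S2 are shrunk toward 1, i.e. toward the identity in the
rotated basis, which stabilizes near-singular classes (illustrative; assumes
scikit-learn is installed):

    import numpy as np
    from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis

    rng = np.random.RandomState(0)
    X = np.vstack([rng.randn(10, 5), rng.randn(10, 5) + 1])
    y = np.array([0] * 10 + [1] * 10)

    qda = QuadraticDiscriminantAnalysis(reg_param=0.1).fit(X, y)
    print(qda.predict(X[:3]))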
+ def _decision_function(self, X):
951
+ # return log posterior, see eq (4.12) p. 110 of the ESL.
952
+ check_is_fitted(self)
953
+
954
+ X = self._validate_data(X, reset=False)
955
+ norm2 = []
956
+ for i in range(len(self.classes_)):
957
+ R = self.rotations_[i]
958
+ S = self.scalings_[i]
959
+ Xm = X - self.means_[i]
960
+ X2 = np.dot(Xm, R * (S ** (-0.5)))
961
+ norm2.append(np.sum(X2**2, axis=1))
962
+ norm2 = np.array(norm2).T # shape = [len(X), n_classes]
963
+ u = np.asarray([np.sum(np.log(s)) for s in self.scalings_])
964
+ return -0.5 * (norm2 + u) + np.log(self.priors_)
965
+
966
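The quadratic term above is a Mahalanobis distance evaluated in each class's
rotated/scaled basis; a standalone NumPy sketch of the same per-class computation
(illustrative; mirrors the fitted-attribute conventions above):

    import numpy as np

    def class_log_posterior(X, mean_k, rotation_k, scaling_k, prior_k):
        # -0.5 * (||(X - mean) V S^-1/2||^2 + sum(log S)) + log prior
        Xm = X - mean_k
        X2 = Xm @ (rotation_k * scaling_k ** -0.5)
        return -0.5 * (np.sum(X2**2, axis=1) + np.sum(np.log(scaling_k))) + np.log(prior_k)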
+ def decision_function(self, X):
967
+ """Apply decision function to an array of samples.
968
+
969
+ The decision function is equal (up to a constant factor) to the
970
+ log-posterior of the model, i.e. `log p(y = k | x)`. In a binary
971
+ classification setting this instead corresponds to the difference
972
+ `log p(y = 1 | x) - log p(y = 0 | x)`. See :ref:`lda_qda_math`.
973
+
974
+ Parameters
975
+ ----------
976
+ X : array-like of shape (n_samples, n_features)
977
+ Array of samples (test vectors).
978
+
979
+ Returns
980
+ -------
981
+ C : ndarray of shape (n_samples,) or (n_samples, n_classes)
982
+ Decision function values related to each class, per sample.
983
+ In the two-class case, the shape is (n_samples,), giving the
984
+ log likelihood ratio of the positive class.
985
+ """
986
+ dec_func = self._decision_function(X)
987
+ # handle special case of two classes
988
+ if len(self.classes_) == 2:
989
+ return dec_func[:, 1] - dec_func[:, 0]
990
+ return dec_func
991
+
992
+ def predict(self, X):
993
+ """Perform classification on an array of test vectors X.
994
+
995
+ The predicted class C for each sample in X is returned.
996
+
997
+ Parameters
998
+ ----------
999
+ X : array-like of shape (n_samples, n_features)
1000
+ Vector to be scored, where `n_samples` is the number of samples and
1001
+ `n_features` is the number of features.
1002
+
1003
+ Returns
1004
+ -------
1005
+ C : ndarray of shape (n_samples,)
1006
+ Predicted class labels.
1007
+ """
1008
+ d = self._decision_function(X)
1009
+ y_pred = self.classes_.take(d.argmax(1))
1010
+ return y_pred
1011
+
1012
+ def predict_proba(self, X):
1013
+ """Return posterior probabilities of classification.
1014
+
1015
+ Parameters
1016
+ ----------
1017
+ X : array-like of shape (n_samples, n_features)
1018
+ Array of samples/test vectors.
1019
+
1020
+ Returns
1021
+ -------
1022
+ C : ndarray of shape (n_samples, n_classes)
1023
+ Posterior probabilities of classification per class.
1024
+ """
1025
+ values = self._decision_function(X)
1026
+ # compute the likelihood of the underlying gaussian models
1027
+ # up to a multiplicative constant.
1028
+ likelihood = np.exp(values - values.max(axis=1)[:, np.newaxis])
1029
+ # compute posterior probabilities
1030
+ return likelihood / likelihood.sum(axis=1)[:, np.newaxis]
1031
+
1032
+ def predict_log_proba(self, X):
1033
+ """Return log of posterior probabilities of classification.
1034
+
1035
+ Parameters
1036
+ ----------
1037
+ X : array-like of shape (n_samples, n_features)
1038
+ Array of samples/test vectors.
1039
+
1040
+ Returns
1041
+ -------
1042
+ C : ndarray of shape (n_samples, n_classes)
1043
+ Posterior log-probabilities of classification per class.
1044
+ """
1045
+ # XXX : can do better to avoid precision overflows
1046
+ probas_ = self.predict_proba(X)
1047
+ return np.log(probas_)
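Regarding the "XXX" note above: a common, numerically safer alternative is to
derive log-probabilities directly from the log-posterior values with logsumexp
rather than exponentiating first (a sketch, assuming SciPy is installed; this is
not the upstream implementation):

    from scipy.special import logsumexp

    def stable_log_proba(values):
        # values: output of _decision_function, shape (n_samples, n_classes)
        return values - logsumexp(values, axis=1, keepdims=True)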
venv/lib/python3.10/site-packages/sklearn/dummy.py ADDED
@@ -0,0 +1,682 @@
1
+ # Author: Mathieu Blondel <[email protected]>
2
+ # Arnaud Joly <[email protected]>
3
+ # Maheshakya Wijewardena <[email protected]>
4
+ # License: BSD 3 clause
5
+
6
+ import warnings
7
+ from numbers import Integral, Real
8
+
9
+ import numpy as np
10
+ import scipy.sparse as sp
11
+
12
+ from .base import (
13
+ BaseEstimator,
14
+ ClassifierMixin,
15
+ MultiOutputMixin,
16
+ RegressorMixin,
17
+ _fit_context,
18
+ )
19
+ from .utils import check_random_state
20
+ from .utils._param_validation import Interval, StrOptions
21
+ from .utils.multiclass import class_distribution
22
+ from .utils.random import _random_choice_csc
23
+ from .utils.stats import _weighted_percentile
24
+ from .utils.validation import (
25
+ _check_sample_weight,
26
+ _num_samples,
27
+ check_array,
28
+ check_consistent_length,
29
+ check_is_fitted,
30
+ )
31
+
32
+
33
+ class DummyClassifier(MultiOutputMixin, ClassifierMixin, BaseEstimator):
34
+ """DummyClassifier makes predictions that ignore the input features.
35
+
36
+ This classifier serves as a simple baseline to compare against other more
37
+ complex classifiers.
38
+
39
+ The specific behavior of the baseline is selected with the `strategy`
40
+ parameter.
41
+
42
+ All strategies make predictions that ignore the input feature values passed
43
+ as the `X` argument to `fit` and `predict`. The predictions, however,
44
+ typically depend on values observed in the `y` parameter passed to `fit`.
45
+
46
+ Note that the "stratified" and "uniform" strategies lead to
47
+ non-deterministic predictions that can be rendered deterministic by setting
48
+ the `random_state` parameter if needed. The other strategies are naturally
49
+ deterministic and, once fit, always return the same constant prediction
50
+ for any value of `X`.
51
+
52
+ Read more in the :ref:`User Guide <dummy_estimators>`.
53
+
54
+ .. versionadded:: 0.13
55
+
56
+ Parameters
57
+ ----------
58
+ strategy : {"most_frequent", "prior", "stratified", "uniform", \
59
+ "constant"}, default="prior"
60
+ Strategy to use to generate predictions.
61
+
62
+ * "most_frequent": the `predict` method always returns the most
63
+ frequent class label in the observed `y` argument passed to `fit`.
64
+ The `predict_proba` method returns the matching one-hot encoded
65
+ vector.
66
+ * "prior": the `predict` method always returns the most frequent
67
+ class label in the observed `y` argument passed to `fit` (like
68
+ "most_frequent"). ``predict_proba`` always returns the empirical
69
+ class distribution of `y` also known as the empirical class prior
70
+ distribution.
71
+ * "stratified": the `predict_proba` method randomly samples one-hot
72
+ vectors from a multinomial distribution parametrized by the empirical
73
+ class prior probabilities.
74
+ The `predict` method returns the class label which got probability
75
+ one in the one-hot vector of `predict_proba`.
76
+ Each sampled row of both methods is therefore independent and
77
+ identically distributed.
78
+ * "uniform": generates predictions uniformly at random from the list
79
+ of unique classes observed in `y`, i.e. each class has equal
80
+ probability.
81
+ * "constant": always predicts a constant label that is provided by
82
+ the user. This is useful for metrics that evaluate a non-majority
83
+ class.
84
+
85
+ .. versionchanged:: 0.24
86
+ The default value of `strategy` has changed to "prior" in version
87
+ 0.24.
88
+
89
+ random_state : int, RandomState instance or None, default=None
90
+ Controls the randomness to generate the predictions when
91
+ ``strategy='stratified'`` or ``strategy='uniform'``.
92
+ Pass an int for reproducible output across multiple function calls.
93
+ See :term:`Glossary <random_state>`.
94
+
95
+ constant : int or str or array-like of shape (n_outputs,), default=None
96
+ The explicit constant as predicted by the "constant" strategy. This
97
+ parameter is useful only for the "constant" strategy.
98
+
99
+ Attributes
100
+ ----------
101
+ classes_ : ndarray of shape (n_classes,) or list of such arrays
102
+ Unique class labels observed in `y`. For multi-output classification
103
+ problems, this attribute is a list of arrays as each output has an
104
+ independent set of possible classes.
105
+
106
+ n_classes_ : int or list of int
107
+ Number of labels for each output.
108
+
109
+ class_prior_ : ndarray of shape (n_classes,) or list of such arrays
110
+ Frequency of each class observed in `y`. For multioutput classification
111
+ problems, this is computed independently for each output.
112
+
113
+ n_outputs_ : int
114
+ Number of outputs.
115
+
116
+ sparse_output_ : bool
117
+ True if the array returned from predict is to be in sparse CSC format.
118
+ Is automatically set to True if the input `y` is passed in sparse
119
+ format.
120
+
121
+ See Also
122
+ --------
123
+ DummyRegressor : Regressor that makes predictions using simple rules.
124
+
125
+ Examples
126
+ --------
127
+ >>> import numpy as np
128
+ >>> from sklearn.dummy import DummyClassifier
129
+ >>> X = np.array([-1, 1, 1, 1])
130
+ >>> y = np.array([0, 1, 1, 1])
131
+ >>> dummy_clf = DummyClassifier(strategy="most_frequent")
132
+ >>> dummy_clf.fit(X, y)
133
+ DummyClassifier(strategy='most_frequent')
134
+ >>> dummy_clf.predict(X)
135
+ array([1, 1, 1, 1])
136
+ >>> dummy_clf.score(X, y)
137
+ 0.75
138
+ """
139
+
140
+ _parameter_constraints: dict = {
141
+ "strategy": [
142
+ StrOptions({"most_frequent", "prior", "stratified", "uniform", "constant"})
143
+ ],
144
+ "random_state": ["random_state"],
145
+ "constant": [Integral, str, "array-like", None],
146
+ }
147
+
148
+ def __init__(self, *, strategy="prior", random_state=None, constant=None):
149
+ self.strategy = strategy
150
+ self.random_state = random_state
151
+ self.constant = constant
152
+
153
+ @_fit_context(prefer_skip_nested_validation=True)
154
+ def fit(self, X, y, sample_weight=None):
155
+ """Fit the baseline classifier.
156
+
157
+ Parameters
158
+ ----------
159
+ X : array-like of shape (n_samples, n_features)
160
+ Training data.
161
+
162
+ y : array-like of shape (n_samples,) or (n_samples, n_outputs)
163
+ Target values.
164
+
165
+ sample_weight : array-like of shape (n_samples,), default=None
166
+ Sample weights.
167
+
168
+ Returns
169
+ -------
170
+ self : object
171
+ Returns the instance itself.
172
+ """
173
+ self._strategy = self.strategy
174
+
175
+ if self._strategy == "uniform" and sp.issparse(y):
176
+ y = y.toarray()
177
+ warnings.warn(
178
+ (
179
+ "A local copy of the target data has been converted "
180
+ "to a numpy array. Predicting on sparse target data "
181
+ "with the uniform strategy would not save memory "
182
+ "and would be slower."
183
+ ),
184
+ UserWarning,
185
+ )
186
+
187
+ self.sparse_output_ = sp.issparse(y)
188
+
189
+ if not self.sparse_output_:
190
+ y = np.asarray(y)
191
+ y = np.atleast_1d(y)
192
+
193
+ if y.ndim == 1:
194
+ y = np.reshape(y, (-1, 1))
195
+
196
+ self.n_outputs_ = y.shape[1]
197
+
198
+ check_consistent_length(X, y)
199
+
200
+ if sample_weight is not None:
201
+ sample_weight = _check_sample_weight(sample_weight, X)
202
+
203
+ if self._strategy == "constant":
204
+ if self.constant is None:
205
+ raise ValueError(
206
+ "Constant target value has to be specified "
207
+ "when the constant strategy is used."
208
+ )
209
+ else:
210
+ constant = np.reshape(np.atleast_1d(self.constant), (-1, 1))
211
+ if constant.shape[0] != self.n_outputs_:
212
+ raise ValueError(
213
+ "Constant target value should have shape (%d, 1)."
214
+ % self.n_outputs_
215
+ )
216
+
217
+ (self.classes_, self.n_classes_, self.class_prior_) = class_distribution(
218
+ y, sample_weight
219
+ )
220
+
221
+ if self._strategy == "constant":
222
+ for k in range(self.n_outputs_):
223
+ if not any(constant[k][0] == c for c in self.classes_[k]):
224
+ # Checking in case of constant strategy if the constant
225
+ # provided by the user is in y.
226
+ err_msg = (
227
+ "The constant target value must be present in "
228
+ "the training data. You provided constant={}. "
229
+ "Possible values are: {}.".format(
230
+ self.constant, self.classes_[k].tolist()
231
+ )
232
+ )
233
+ raise ValueError(err_msg)
234
+
235
+ if self.n_outputs_ == 1:
236
+ self.n_classes_ = self.n_classes_[0]
237
+ self.classes_ = self.classes_[0]
238
+ self.class_prior_ = self.class_prior_[0]
239
+
240
+ return self
241
+
242
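A usage sketch: once fit, the classifier ignores X entirely and predicts from the
stored class distribution (illustrative; assumes scikit-learn is installed):

    import numpy as np
    from sklearn.dummy import DummyClassifier

    X = np.zeros((6, 1))  # features are ignored by design
    y = np.array([0, 0, 0, 0, 1, 1])

    baseline = DummyClassifier(strategy="prior").fit(X, y)
    print(baseline.predict(X))           # majority class: [0 0 0 0 0 0]
    print(baseline.predict_proba(X)[0])  # empirical prior, about [0.667 0.333]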
+ def predict(self, X):
243
+ """Perform classification on test vectors X.
244
+
245
+ Parameters
246
+ ----------
247
+ X : array-like of shape (n_samples, n_features)
248
+ Test data.
249
+
250
+ Returns
251
+ -------
252
+ y : array-like of shape (n_samples,) or (n_samples, n_outputs)
253
+ Predicted target values for X.
254
+ """
255
+ check_is_fitted(self)
256
+
257
+ # numpy random_state expects Python int and not long as size argument
258
+ # under Windows
259
+ n_samples = _num_samples(X)
260
+ rs = check_random_state(self.random_state)
261
+
262
+ n_classes_ = self.n_classes_
263
+ classes_ = self.classes_
264
+ class_prior_ = self.class_prior_
265
+ constant = self.constant
266
+ if self.n_outputs_ == 1:
267
+ # Get same type even for self.n_outputs_ == 1
268
+ n_classes_ = [n_classes_]
269
+ classes_ = [classes_]
270
+ class_prior_ = [class_prior_]
271
+ constant = [constant]
272
+ # Compute probability only once
273
+ if self._strategy == "stratified":
274
+ proba = self.predict_proba(X)
275
+ if self.n_outputs_ == 1:
276
+ proba = [proba]
277
+
278
+ if self.sparse_output_:
279
+ class_prob = None
280
+ if self._strategy in ("most_frequent", "prior"):
281
+ classes_ = [np.array([cp.argmax()]) for cp in class_prior_]
282
+
283
+ elif self._strategy == "stratified":
284
+ class_prob = class_prior_
285
+
286
+ elif self._strategy == "uniform":
287
+ raise ValueError(
288
+ "Sparse target prediction is not "
289
+ "supported with the uniform strategy"
290
+ )
291
+
292
+ elif self._strategy == "constant":
293
+ classes_ = [np.array([c]) for c in constant]
294
+
295
+ y = _random_choice_csc(n_samples, classes_, class_prob, self.random_state)
296
+ else:
297
+ if self._strategy in ("most_frequent", "prior"):
298
+ y = np.tile(
299
+ [
300
+ classes_[k][class_prior_[k].argmax()]
301
+ for k in range(self.n_outputs_)
302
+ ],
303
+ [n_samples, 1],
304
+ )
305
+
306
+ elif self._strategy == "stratified":
307
+ y = np.vstack(
308
+ [
309
+ classes_[k][proba[k].argmax(axis=1)]
310
+ for k in range(self.n_outputs_)
311
+ ]
312
+ ).T
313
+
314
+ elif self._strategy == "uniform":
315
+ ret = [
316
+ classes_[k][rs.randint(n_classes_[k], size=n_samples)]
317
+ for k in range(self.n_outputs_)
318
+ ]
319
+ y = np.vstack(ret).T
320
+
321
+ elif self._strategy == "constant":
322
+ y = np.tile(self.constant, (n_samples, 1))
323
+
324
+ if self.n_outputs_ == 1:
325
+ y = np.ravel(y)
326
+
327
+ return y
328
+
329
+ def predict_proba(self, X):
330
+ """
331
+ Return probability estimates for the test vectors X.
332
+
333
+ Parameters
334
+ ----------
335
+ X : array-like of shape (n_samples, n_features)
336
+ Test data.
337
+
338
+ Returns
339
+ -------
340
+ P : ndarray of shape (n_samples, n_classes) or list of such arrays
341
+ Returns the probability of the sample for each class in
342
+ the model, where classes are ordered arithmetically, for each
343
+ output.
344
+ """
345
+ check_is_fitted(self)
346
+
347
+ # numpy random_state expects Python int and not long as size argument
348
+ # under Windows
349
+ n_samples = _num_samples(X)
350
+ rs = check_random_state(self.random_state)
351
+
352
+ n_classes_ = self.n_classes_
353
+ classes_ = self.classes_
354
+ class_prior_ = self.class_prior_
355
+ constant = self.constant
356
+ if self.n_outputs_ == 1:
357
+ # Get same type even for self.n_outputs_ == 1
358
+ n_classes_ = [n_classes_]
359
+ classes_ = [classes_]
360
+ class_prior_ = [class_prior_]
361
+ constant = [constant]
362
+
363
+ P = []
364
+ for k in range(self.n_outputs_):
365
+ if self._strategy == "most_frequent":
366
+ ind = class_prior_[k].argmax()
367
+ out = np.zeros((n_samples, n_classes_[k]), dtype=np.float64)
368
+ out[:, ind] = 1.0
369
+ elif self._strategy == "prior":
370
+ out = np.ones((n_samples, 1)) * class_prior_[k]
371
+
372
+ elif self._strategy == "stratified":
373
+ out = rs.multinomial(1, class_prior_[k], size=n_samples)
374
+ out = out.astype(np.float64)
375
+
376
+ elif self._strategy == "uniform":
377
+ out = np.ones((n_samples, n_classes_[k]), dtype=np.float64)
378
+ out /= n_classes_[k]
379
+
380
+ elif self._strategy == "constant":
381
+ ind = np.where(classes_[k] == constant[k])
382
+ out = np.zeros((n_samples, n_classes_[k]), dtype=np.float64)
383
+ out[:, ind] = 1.0
384
+
385
+ P.append(out)
386
+
387
+ if self.n_outputs_ == 1:
388
+ P = P[0]
389
+
390
+ return P
391
+
392
+ def predict_log_proba(self, X):
393
+ """
394
+ Return log probability estimates for the test vectors X.
395
+
396
+ Parameters
397
+ ----------
398
+ X : {array-like, object with finite length or shape}
399
+ Training data.
400
+
401
+ Returns
402
+ -------
403
+ P : ndarray of shape (n_samples, n_classes) or list of such arrays
404
+ Returns the log probability of the sample for each class in
405
+ the model, where classes are ordered arithmetically for each
406
+ output.
407
+ """
408
+ proba = self.predict_proba(X)
409
+ if self.n_outputs_ == 1:
410
+ return np.log(proba)
411
+ else:
412
+ return [np.log(p) for p in proba]
413
+
414
+ def _more_tags(self):
415
+ return {
416
+ "poor_score": True,
417
+ "no_validation": True,
418
+ "_xfail_checks": {
419
+ "check_methods_subset_invariance": "fails for the predict method",
420
+ "check_methods_sample_order_invariance": "fails for the predict method",
421
+ },
422
+ }
423
+
424
+ def score(self, X, y, sample_weight=None):
425
+ """Return the mean accuracy on the given test data and labels.
426
+
427
+ In multi-label classification, this is the subset accuracy
428
+ which is a harsh metric since you require for each sample that
429
+ each label set be correctly predicted.
430
+
431
+ Parameters
432
+ ----------
433
+ X : None or array-like of shape (n_samples, n_features)
434
+ Test samples. Passing None as test samples gives the same result
435
+ as passing real test samples, since DummyClassifier
436
+ operates independently of the sampled observations.
437
+
438
+ y : array-like of shape (n_samples,) or (n_samples, n_outputs)
439
+ True labels for X.
440
+
441
+ sample_weight : array-like of shape (n_samples,), default=None
442
+ Sample weights.
443
+
444
+ Returns
445
+ -------
446
+ score : float
447
+ Mean accuracy of self.predict(X) w.r.t. y.
448
+ """
449
+ if X is None:
450
+ X = np.zeros(shape=(len(y), 1))
451
+ return super().score(X, y, sample_weight)
452
+
453
+
454
+ class DummyRegressor(MultiOutputMixin, RegressorMixin, BaseEstimator):
455
+ """Regressor that makes predictions using simple rules.
456
+
457
+ This regressor is useful as a simple baseline to compare with other
458
+ (real) regressors. Do not use it for real problems.
459
+
460
+ Read more in the :ref:`User Guide <dummy_estimators>`.
461
+
462
+ .. versionadded:: 0.13
463
+
464
+ Parameters
465
+ ----------
466
+ strategy : {"mean", "median", "quantile", "constant"}, default="mean"
467
+ Strategy to use to generate predictions.
468
+
469
+ * "mean": always predicts the mean of the training set
470
+ * "median": always predicts the median of the training set
471
+ * "quantile": always predicts a specified quantile of the training set,
472
+ provided with the quantile parameter.
473
+ * "constant": always predicts a constant value that is provided by
474
+ the user.
475
+
476
+ constant : int or float or array-like of shape (n_outputs,), default=None
477
+ The explicit constant as predicted by the "constant" strategy. This
478
+ parameter is useful only for the "constant" strategy.
479
+
480
+ quantile : float in [0.0, 1.0], default=None
481
+ The quantile to predict using the "quantile" strategy. A quantile of
482
+ 0.5 corresponds to the median, while 0.0 to the minimum and 1.0 to the
483
+ maximum.
484
+
485
+ Attributes
486
+ ----------
487
+ constant_ : ndarray of shape (1, n_outputs)
488
+ Mean or median or quantile of the training targets or constant value
489
+ given by the user.
490
+
491
+ n_outputs_ : int
492
+ Number of outputs.
493
+
494
+ See Also
495
+ --------
496
+ DummyClassifier: Classifier that makes predictions using simple rules.
497
+
498
+ Examples
499
+ --------
500
+ >>> import numpy as np
501
+ >>> from sklearn.dummy import DummyRegressor
502
+ >>> X = np.array([1.0, 2.0, 3.0, 4.0])
503
+ >>> y = np.array([2.0, 3.0, 5.0, 10.0])
504
+ >>> dummy_regr = DummyRegressor(strategy="mean")
505
+ >>> dummy_regr.fit(X, y)
506
+ DummyRegressor()
507
+ >>> dummy_regr.predict(X)
508
+ array([5., 5., 5., 5.])
509
+ >>> dummy_regr.score(X, y)
510
+ 0.0
511
+ """
512
+
513
+ _parameter_constraints: dict = {
514
+ "strategy": [StrOptions({"mean", "median", "quantile", "constant"})],
515
+ "quantile": [Interval(Real, 0.0, 1.0, closed="both"), None],
516
+ "constant": [
517
+ Interval(Real, None, None, closed="neither"),
518
+ "array-like",
519
+ None,
520
+ ],
521
+ }
522
+
523
+ def __init__(self, *, strategy="mean", constant=None, quantile=None):
524
+ self.strategy = strategy
525
+ self.constant = constant
526
+ self.quantile = quantile
527
+
528
+ @_fit_context(prefer_skip_nested_validation=True)
529
+ def fit(self, X, y, sample_weight=None):
530
+ """Fit the random regressor.
531
+
532
+ Parameters
533
+ ----------
534
+ X : array-like of shape (n_samples, n_features)
535
+ Training data.
536
+
537
+ y : array-like of shape (n_samples,) or (n_samples, n_outputs)
538
+ Target values.
539
+
540
+ sample_weight : array-like of shape (n_samples,), default=None
541
+ Sample weights.
542
+
543
+ Returns
544
+ -------
545
+ self : object
546
+ Fitted estimator.
547
+ """
548
+ y = check_array(y, ensure_2d=False, input_name="y")
549
+ if len(y) == 0:
550
+ raise ValueError("y must not be empty.")
551
+
552
+ if y.ndim == 1:
553
+ y = np.reshape(y, (-1, 1))
554
+ self.n_outputs_ = y.shape[1]
555
+
556
+ check_consistent_length(X, y, sample_weight)
557
+
558
+ if sample_weight is not None:
559
+ sample_weight = _check_sample_weight(sample_weight, X)
560
+
561
+ if self.strategy == "mean":
562
+ self.constant_ = np.average(y, axis=0, weights=sample_weight)
563
+
564
+ elif self.strategy == "median":
565
+ if sample_weight is None:
566
+ self.constant_ = np.median(y, axis=0)
567
+ else:
568
+ self.constant_ = [
569
+ _weighted_percentile(y[:, k], sample_weight, percentile=50.0)
570
+ for k in range(self.n_outputs_)
571
+ ]
572
+
573
+ elif self.strategy == "quantile":
574
+ if self.quantile is None:
575
+ raise ValueError(
576
+ "When using `strategy='quantile', you have to specify the desired "
577
+ "quantile in the range [0, 1]."
578
+ )
579
+ percentile = self.quantile * 100.0
580
+ if sample_weight is None:
581
+ self.constant_ = np.percentile(y, axis=0, q=percentile)
582
+ else:
583
+ self.constant_ = [
584
+ _weighted_percentile(y[:, k], sample_weight, percentile=percentile)
585
+ for k in range(self.n_outputs_)
586
+ ]
587
+
588
+ elif self.strategy == "constant":
589
+ if self.constant is None:
590
+ raise TypeError(
591
+ "Constant target value has to be specified "
592
+ "when the constant strategy is used."
593
+ )
594
+
595
+ self.constant_ = check_array(
596
+ self.constant,
597
+ accept_sparse=["csr", "csc", "coo"],
598
+ ensure_2d=False,
599
+ ensure_min_samples=0,
600
+ )
601
+
602
+ if self.n_outputs_ != 1 and self.constant_.shape[0] != y.shape[1]:
603
+ raise ValueError(
604
+ "Constant target value should have shape (%d, 1)." % y.shape[1]
605
+ )
606
+
607
+ self.constant_ = np.reshape(self.constant_, (1, -1))
608
+ return self
609
+
610
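A sketch of the "quantile" strategy handled in fit above (illustrative; assumes
scikit-learn is installed):

    import numpy as np
    from sklearn.dummy import DummyRegressor

    X = np.zeros((4, 1))  # ignored
    y = np.array([1.0, 2.0, 3.0, 10.0])

    reg = DummyRegressor(strategy="quantile", quantile=0.9).fit(X, y)
    print(reg.predict(X))  # constant 90th-percentile prediction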
+ def predict(self, X, return_std=False):
611
+ """Perform classification on test vectors X.
612
+
613
+ Parameters
614
+ ----------
615
+ X : array-like of shape (n_samples, n_features)
616
+ Test data.
617
+
618
+ return_std : bool, default=False
619
+ Whether to return the standard deviation of posterior prediction.
620
+ All zeros in this case.
621
+
622
+ .. versionadded:: 0.20
623
+
624
+ Returns
625
+ -------
626
+ y : array-like of shape (n_samples,) or (n_samples, n_outputs)
627
+ Predicted target values for X.
628
+
629
+ y_std : array-like of shape (n_samples,) or (n_samples, n_outputs)
630
+ Standard deviation of predictive distribution of query points.
631
+ """
632
+ check_is_fitted(self)
633
+ n_samples = _num_samples(X)
634
+
635
+ y = np.full(
636
+ (n_samples, self.n_outputs_),
637
+ self.constant_,
638
+ dtype=np.array(self.constant_).dtype,
639
+ )
640
+ y_std = np.zeros((n_samples, self.n_outputs_))
641
+
642
+ if self.n_outputs_ == 1:
643
+ y = np.ravel(y)
644
+ y_std = np.ravel(y_std)
645
+
646
+ return (y, y_std) if return_std else y
647
+
648
+ def _more_tags(self):
649
+ return {"poor_score": True, "no_validation": True}
650
+
651
+ def score(self, X, y, sample_weight=None):
652
+ """Return the coefficient of determination R^2 of the prediction.
653
+
654
+ The coefficient R^2 is defined as `(1 - u/v)`, where `u` is the
655
+ residual sum of squares `((y_true - y_pred) ** 2).sum()` and `v` is the
656
+ total sum of squares `((y_true - y_true.mean()) ** 2).sum()`. The best
657
+ possible score is 1.0 and it can be negative (because the model can be
658
+ arbitrarily worse). A constant model that always predicts the expected
659
+ value of y, disregarding the input features, would get a R^2 score of
660
+ 0.0.
661
+
662
+ Parameters
663
+ ----------
664
+ X : None or array-like of shape (n_samples, n_features)
665
+ Test samples. Passing None as test samples gives the same result
666
+ as passing real test samples, since `DummyRegressor`
667
+ operates independently of the sampled observations.
668
+
669
+ y : array-like of shape (n_samples,) or (n_samples, n_outputs)
670
+ True values for X.
671
+
672
+ sample_weight : array-like of shape (n_samples,), default=None
673
+ Sample weights.
674
+
675
+ Returns
676
+ -------
677
+ score : float
678
+ R^2 of `self.predict(X)` w.r.t. y.
679
+ """
680
+ if X is None:
681
+ X = np.zeros(shape=(len(y), 1))
682
+ return super().score(X, y, sample_weight)
venv/lib/python3.10/site-packages/sklearn/exceptions.py ADDED
@@ -0,0 +1,191 @@
1
+ """
2
+ The :mod:`sklearn.exceptions` module includes all custom warnings and error
3
+ classes used across scikit-learn.
4
+ """
5
+
6
+ __all__ = [
7
+ "NotFittedError",
8
+ "ConvergenceWarning",
9
+ "DataConversionWarning",
10
+ "DataDimensionalityWarning",
11
+ "EfficiencyWarning",
12
+ "FitFailedWarning",
13
+ "SkipTestWarning",
14
+ "UndefinedMetricWarning",
15
+ "PositiveSpectrumWarning",
16
+ "UnsetMetadataPassedError",
17
+ ]
18
+
19
+
20
+ class UnsetMetadataPassedError(ValueError):
21
+ """Exception class to raise if a metadata is passed which is not explicitly \
22
+ requested (metadata=True) or not requested (metadata=False).
23
+
24
+ .. versionadded:: 1.3
25
+
26
+ Parameters
27
+ ----------
28
+ message : str
29
+ The error message.
30
+
31
+ unrequested_params : dict
32
+ A dictionary of parameters and their values which are provided but not
33
+ requested.
34
+
35
+ routed_params : dict
36
+ A dictionary of routed parameters.
37
+ """
38
+
39
+ def __init__(self, *, message, unrequested_params, routed_params):
40
+ super().__init__(message)
41
+ self.unrequested_params = unrequested_params
42
+ self.routed_params = routed_params
43
+
44
+
45
+ class NotFittedError(ValueError, AttributeError):
46
+ """Exception class to raise if estimator is used before fitting.
47
+
48
+ This class inherits from both ValueError and AttributeError to help with
49
+ exception handling and backward compatibility.
50
+
51
+ Examples
52
+ --------
53
+ >>> from sklearn.svm import LinearSVC
54
+ >>> from sklearn.exceptions import NotFittedError
55
+ >>> try:
56
+ ... LinearSVC().predict([[1, 2], [2, 3], [3, 4]])
57
+ ... except NotFittedError as e:
58
+ ... print(repr(e))
59
+ NotFittedError("This LinearSVC instance is not fitted yet. Call 'fit' with
60
+ appropriate arguments before using this estimator."...)
61
+
62
+ .. versionchanged:: 0.18
63
+ Moved from sklearn.utils.validation.
64
+ """
65
+
66
+
67
+ class ConvergenceWarning(UserWarning):
68
+ """Custom warning to capture convergence problems
69
+
70
+ .. versionchanged:: 0.18
71
+ Moved from sklearn.utils.
72
+ """
73
+
74
+
75
+ class DataConversionWarning(UserWarning):
76
+ """Warning used to notify implicit data conversions happening in the code.
77
+
78
+ This warning occurs when some input data needs to be converted or
79
+ interpreted in a way that may not match the user's expectations.
80
+
81
+ For example, this warning may occur when the user
82
+ - passes an integer array to a function which expects float input and
83
+ will convert the input;
84
+ - requests a non-copying operation, but a copy is required to meet the
85
+ implementation's data-type expectations;
86
+ - passes an input whose shape can be interpreted ambiguously.
87
+
88
+ .. versionchanged:: 0.18
89
+ Moved from sklearn.utils.validation.
90
+ """
91
+
92
+
93
+ class DataDimensionalityWarning(UserWarning):
94
+ """Custom warning to notify potential issues with data dimensionality.
95
+
96
+ For example, in random projection, this warning is raised when the
97
+ number of components, which quantifies the dimensionality of the target
98
+ projection space, is higher than the number of features, which quantifies
99
+ the dimensionality of the original source space, to imply that the
100
+ dimensionality of the problem will not be reduced.
101
+
102
+ .. versionchanged:: 0.18
103
+ Moved from sklearn.utils.
104
+ """
105
+
106
+
107
+ class EfficiencyWarning(UserWarning):
108
+ """Warning used to notify the user of inefficient computation.
109
+
110
+ This warning notifies the user that the efficiency may not be optimal due
111
+ to some reason which may be included as a part of the warning message.
112
+ This may be subclassed into a more specific Warning class.
113
+
114
+ .. versionadded:: 0.18
115
+ """
116
+
117
+
118
+ class FitFailedWarning(RuntimeWarning):
119
+ """Warning class used if there is an error while fitting the estimator.
120
+
121
+ This Warning is used in meta estimators GridSearchCV and RandomizedSearchCV
122
+ and the cross-validation helper function cross_val_score to warn when there
123
+ is an error while fitting the estimator.
124
+
125
+ .. versionchanged:: 0.18
126
+ Moved from sklearn.cross_validation.
127
+ """
128
+
129
+
130
+ class SkipTestWarning(UserWarning):
131
+ """Warning class used to notify the user of a test that was skipped.
132
+
133
+ For example, one of the estimator checks requires a pandas import.
134
+ If the pandas package cannot be imported, the test will be skipped rather
135
+ than register as a failure.
136
+ """
137
+
138
+
139
+ class UndefinedMetricWarning(UserWarning):
140
+ """Warning used when the metric is invalid
141
+
142
+ .. versionchanged:: 0.18
143
+ Moved from sklearn.base.
144
+ """
145
+
146
+
147
+ class PositiveSpectrumWarning(UserWarning):
148
+ """Warning raised when the eigenvalues of a PSD matrix have issues
149
+
150
+ This warning is typically raised by ``_check_psd_eigenvalues`` when the
151
+ eigenvalues of a positive semidefinite (PSD) matrix such as a gram matrix
152
+ (kernel) present significant negative eigenvalues, or bad conditioning i.e.
153
+ very small non-zero eigenvalues compared to the largest eigenvalue.
154
+
155
+ .. versionadded:: 0.22
156
+ """
157
+
158
+
159
+ class InconsistentVersionWarning(UserWarning):
160
+ """Warning raised when an estimator is unpickled with a inconsistent version.
161
+
162
+ Parameters
163
+ ----------
164
+ estimator_name : str
165
+ Estimator name.
166
+
167
+ current_sklearn_version : str
168
+ Current scikit-learn version.
169
+
170
+ original_sklearn_version : str
171
+ Original scikit-learn version.
172
+ """
173
+
174
+ def __init__(
175
+ self, *, estimator_name, current_sklearn_version, original_sklearn_version
176
+ ):
177
+ self.estimator_name = estimator_name
178
+ self.current_sklearn_version = current_sklearn_version
179
+ self.original_sklearn_version = original_sklearn_version
180
+
181
+ def __str__(self):
182
+ return (
183
+ f"Trying to unpickle estimator {self.estimator_name} from version"
184
+ f" {self.original_sklearn_version} when "
185
+ f"using version {self.current_sklearn_version}. This might lead to breaking"
186
+ " code or "
187
+ "invalid results. Use at your own risk. "
188
+ "For more info please refer to:\n"
189
+ "https://scikit-learn.org/stable/model_persistence.html"
190
+ "#security-maintainability-limitations"
191
+ )
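The warning classes defined above plug into Python's standard `warnings` machinery; a minimal sketch of escalating ConvergenceWarning to a hard error (LogisticRegression and the synthetic data are illustrative choices, not part of this file):

import warnings

from sklearn.datasets import make_classification
from sklearn.exceptions import ConvergenceWarning
from sklearn.linear_model import LogisticRegression

X, y = make_classification(n_samples=100, random_state=0)
with warnings.catch_warnings():
    # Escalate convergence problems from warnings to hard errors.
    warnings.simplefilter("error", category=ConvergenceWarning)
    try:
        LogisticRegression(max_iter=1).fit(X, y)
    except ConvergenceWarning as exc:
        print(f"caught: {exc}")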
venv/lib/python3.10/site-packages/sklearn/impute/__init__.py ADDED
@@ -0,0 +1,24 @@
1
+ """Transformers for missing value imputation"""
2
+ import typing
3
+
4
+ from ._base import MissingIndicator, SimpleImputer
5
+ from ._knn import KNNImputer
6
+
7
+ if typing.TYPE_CHECKING:
8
+ # Avoid errors in type checkers (e.g. mypy) for experimental estimators.
9
+ # TODO: remove this check once the estimator is no longer experimental.
10
+ from ._iterative import IterativeImputer # noqa
11
+
12
+ __all__ = ["MissingIndicator", "SimpleImputer", "KNNImputer"]
13
+
14
+
15
+ # TODO: remove this check once the estimator is no longer experimental.
16
+ def __getattr__(name):
17
+ if name == "IterativeImputer":
18
+ raise ImportError(
19
+ f"{name} is experimental and the API might change without any "
20
+ "deprecation cycle. To use it, you need to explicitly import "
21
+ "enable_iterative_imputer:\n"
22
+ "from sklearn.experimental import enable_iterative_imputer"
23
+ )
24
+ raise AttributeError(f"module {__name__} has no attribute {name}")
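As the `__getattr__` guard above enforces, `IterativeImputer` only becomes importable after the experimental flag module has been imported for its side effect; a minimal sketch on synthetic data:

import numpy as np

# Importing enable_iterative_imputer is required for its side effect.
from sklearn.experimental import enable_iterative_imputer  # noqa: F401
from sklearn.impute import IterativeImputer

X = np.array([[1.0, 2.0], [3.0, np.nan], [5.0, 6.0]])
print(IterativeImputer(random_state=0).fit_transform(X))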
venv/lib/python3.10/site-packages/sklearn/impute/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (906 Bytes).
 
venv/lib/python3.10/site-packages/sklearn/impute/__pycache__/_base.cpython-310.pyc ADDED
Binary file (29.4 kB).
 
venv/lib/python3.10/site-packages/sklearn/impute/__pycache__/_iterative.cpython-310.pyc ADDED
Binary file (28.4 kB).
 
venv/lib/python3.10/site-packages/sklearn/impute/__pycache__/_knn.cpython-310.pyc ADDED
Binary file (11.5 kB).
 
venv/lib/python3.10/site-packages/sklearn/impute/_base.py ADDED
@@ -0,0 +1,1075 @@
1
+ # Authors: Nicolas Tresegnie <[email protected]>
2
+ # Sergey Feldman <[email protected]>
3
+ # License: BSD 3 clause
4
+
5
+ import numbers
6
+ import warnings
7
+ from collections import Counter
8
+ from functools import partial
9
+
10
+ import numpy as np
11
+ import numpy.ma as ma
12
+ from scipy import sparse as sp
13
+
14
+ from ..base import BaseEstimator, TransformerMixin, _fit_context
15
+ from ..utils import _is_pandas_na, is_scalar_nan
16
+ from ..utils._mask import _get_mask
17
+ from ..utils._param_validation import MissingValues, StrOptions
18
+ from ..utils.fixes import _mode
19
+ from ..utils.sparsefuncs import _get_median
20
+ from ..utils.validation import FLOAT_DTYPES, _check_feature_names_in, check_is_fitted
21
+
22
+
23
+ def _check_inputs_dtype(X, missing_values):
24
+ if _is_pandas_na(missing_values):
25
+ # Allow using `pd.NA` as missing values to impute numerical arrays.
26
+ return
27
+ if X.dtype.kind in ("f", "i", "u") and not isinstance(missing_values, numbers.Real):
28
+ raise ValueError(
29
+ "'X' and 'missing_values' types are expected to be"
30
+ " both numerical. Got X.dtype={} and "
31
+ " type(missing_values)={}.".format(X.dtype, type(missing_values))
32
+ )
33
+
34
+
35
+ def _most_frequent(array, extra_value, n_repeat):
36
+ """Compute the most frequent value in a 1d array extended with
37
+ [extra_value] * n_repeat, where extra_value is assumed to be not part
38
+ of the array."""
39
+ # Compute the most frequent value in array only
40
+ if array.size > 0:
41
+ if array.dtype == object:
42
+ # scipy.stats.mode is slow with object dtype array.
43
+ # Python Counter is more efficient
44
+ counter = Counter(array)
45
+ most_frequent_count = counter.most_common(1)[0][1]
46
+ # tie breaking similarly to scipy.stats.mode
47
+ most_frequent_value = min(
48
+ value
49
+ for value, count in counter.items()
50
+ if count == most_frequent_count
51
+ )
52
+ else:
53
+ mode = _mode(array)
54
+ most_frequent_value = mode[0][0]
55
+ most_frequent_count = mode[1][0]
56
+ else:
57
+ most_frequent_value = 0
58
+ most_frequent_count = 0
59
+
60
+ # Compare to array + [extra_value] * n_repeat
61
+ if most_frequent_count == 0 and n_repeat == 0:
62
+ return np.nan
63
+ elif most_frequent_count < n_repeat:
64
+ return extra_value
65
+ elif most_frequent_count > n_repeat:
66
+ return most_frequent_value
67
+ elif most_frequent_count == n_repeat:
68
+ # tie breaking similarly to scipy.stats.mode
69
+ return min(most_frequent_value, extra_value)
70
+
71
+
72
+ class _BaseImputer(TransformerMixin, BaseEstimator):
73
+ """Base class for all imputers.
74
+
75
+ It automatically adds support for `add_indicator`.
76
+ """
77
+
78
+ _parameter_constraints: dict = {
79
+ "missing_values": [MissingValues()],
80
+ "add_indicator": ["boolean"],
81
+ "keep_empty_features": ["boolean"],
82
+ }
83
+
84
+ def __init__(
85
+ self, *, missing_values=np.nan, add_indicator=False, keep_empty_features=False
86
+ ):
87
+ self.missing_values = missing_values
88
+ self.add_indicator = add_indicator
89
+ self.keep_empty_features = keep_empty_features
90
+
91
+ def _fit_indicator(self, X):
92
+ """Fit a MissingIndicator."""
93
+ if self.add_indicator:
94
+ self.indicator_ = MissingIndicator(
95
+ missing_values=self.missing_values, error_on_new=False
96
+ )
97
+ self.indicator_._fit(X, precomputed=True)
98
+ else:
99
+ self.indicator_ = None
100
+
101
+ def _transform_indicator(self, X):
102
+ """Compute the indicator mask.'
103
+
104
+ Note that X must be the original data as passed to the imputer before
105
+ any imputation, since imputation may be done in place in some cases.
106
+ """
107
+ if self.add_indicator:
108
+ if not hasattr(self, "indicator_"):
109
+ raise ValueError(
110
+ "Make sure to call _fit_indicator before _transform_indicator"
111
+ )
112
+ return self.indicator_.transform(X)
113
+
114
+ def _concatenate_indicator(self, X_imputed, X_indicator):
115
+ """Concatenate indicator mask with the imputed data."""
116
+ if not self.add_indicator:
117
+ return X_imputed
118
+
119
+ if sp.issparse(X_imputed):
120
+ # sp.hstack may result in different formats between sparse arrays and
121
+ # matrices; specify the format to keep consistent behavior
122
+ hstack = partial(sp.hstack, format=X_imputed.format)
123
+ else:
124
+ hstack = np.hstack
125
+
126
+ if X_indicator is None:
127
+ raise ValueError(
128
+ "Data from the missing indicator are not provided. Call "
129
+ "_fit_indicator and _transform_indicator in the imputer "
130
+ "implementation."
131
+ )
132
+
133
+ return hstack((X_imputed, X_indicator))
134
+
135
+ def _concatenate_indicator_feature_names_out(self, names, input_features):
136
+ if not self.add_indicator:
137
+ return names
138
+
139
+ indicator_names = self.indicator_.get_feature_names_out(input_features)
140
+ return np.concatenate([names, indicator_names])
141
+
142
+ def _more_tags(self):
143
+ return {"allow_nan": is_scalar_nan(self.missing_values)}
144
+
145
+
146
+ class SimpleImputer(_BaseImputer):
147
+ """Univariate imputer for completing missing values with simple strategies.
148
+
149
+ Replace missing values using a descriptive statistic (e.g. mean, median, or
150
+ most frequent) along each column, or using a constant value.
151
+
152
+ Read more in the :ref:`User Guide <impute>`.
153
+
154
+ .. versionadded:: 0.20
155
+ `SimpleImputer` replaces the previous `sklearn.preprocessing.Imputer`
156
+ estimator which is now removed.
157
+
158
+ Parameters
159
+ ----------
160
+ missing_values : int, float, str, np.nan, None or pandas.NA, default=np.nan
161
+ The placeholder for the missing values. All occurrences of
162
+ `missing_values` will be imputed. For pandas' dataframes with
163
+ nullable integer dtypes with missing values, `missing_values`
164
+ can be set to either `np.nan` or `pd.NA`.
165
+
166
+ strategy : str, default='mean'
167
+ The imputation strategy.
168
+
169
+ - If "mean", then replace missing values using the mean along
170
+ each column. Can only be used with numeric data.
171
+ - If "median", then replace missing values using the median along
172
+ each column. Can only be used with numeric data.
173
+ - If "most_frequent", then replace missing using the most frequent
174
+ value along each column. Can be used with strings or numeric data.
175
+ If there is more than one such value, only the smallest is returned.
176
+ - If "constant", then replace missing values with fill_value. Can be
177
+ used with strings or numeric data.
178
+
179
+ .. versionadded:: 0.20
180
+ strategy="constant" for fixed value imputation.
181
+
182
+ fill_value : str or numerical value, default=None
183
+ When strategy == "constant", `fill_value` is used to replace all
184
+ occurrences of missing_values. For string or object data types,
185
+ `fill_value` must be a string.
186
+ If `None`, `fill_value` will be 0 when imputing numerical
187
+ data and "missing_value" for strings or object data types.
188
+
189
+ copy : bool, default=True
190
+ If True, a copy of X will be created. If False, imputation will
191
+ be done in-place whenever possible. Note that, in the following cases,
192
+ a new copy will always be made, even if `copy=False`:
193
+
194
+ - If `X` is not an array of floating values;
195
+ - If `X` is encoded as a CSR matrix;
196
+ - If `add_indicator=True`.
197
+
198
+ add_indicator : bool, default=False
199
+ If True, a :class:`MissingIndicator` transform will stack onto output
200
+ of the imputer's transform. This allows a predictive estimator
201
+ to account for missingness despite imputation. If a feature has no
202
+ missing values at fit/train time, the feature won't appear on
203
+ the missing indicator even if there are missing values at
204
+ transform/test time.
205
+
206
+ keep_empty_features : bool, default=False
207
+ If True, features that consist exclusively of missing values when
208
+ `fit` is called are returned in results when `transform` is called.
209
+ The imputed value is always `0` except when `strategy="constant"`
210
+ in which case `fill_value` will be used instead.
211
+
212
+ .. versionadded:: 1.2
213
+
214
+ Attributes
215
+ ----------
216
+ statistics_ : array of shape (n_features,)
217
+ The imputation fill value for each feature.
218
+ Computing statistics can result in `np.nan` values.
219
+ During :meth:`transform`, features corresponding to `np.nan`
220
+ statistics will be discarded.
221
+
222
+ indicator_ : :class:`~sklearn.impute.MissingIndicator`
223
+ Indicator used to add binary indicators for missing values.
224
+ `None` if `add_indicator=False`.
225
+
226
+ n_features_in_ : int
227
+ Number of features seen during :term:`fit`.
228
+
229
+ .. versionadded:: 0.24
230
+
231
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
232
+ Names of features seen during :term:`fit`. Defined only when `X`
233
+ has feature names that are all strings.
234
+
235
+ .. versionadded:: 1.0
236
+
237
+ See Also
238
+ --------
239
+ IterativeImputer : Multivariate imputer that estimates values to impute for
240
+ each feature with missing values from all the others.
241
+ KNNImputer : Multivariate imputer that estimates missing features using
242
+ nearest samples.
243
+
244
+ Notes
245
+ -----
246
+ Columns which only contained missing values at :meth:`fit` are discarded
247
+ upon :meth:`transform` if strategy is not `"constant"`.
248
+
249
+ In a prediction context, simple imputation usually performs poorly when
250
+ associated with a weak learner. However, with a powerful learner, it can
251
+ lead to as good or better performance than complex imputation such as
252
+ :class:`~sklearn.impute.IterativeImputer` or :class:`~sklearn.impute.KNNImputer`.
253
+
254
+ Examples
255
+ --------
256
+ >>> import numpy as np
257
+ >>> from sklearn.impute import SimpleImputer
258
+ >>> imp_mean = SimpleImputer(missing_values=np.nan, strategy='mean')
259
+ >>> imp_mean.fit([[7, 2, 3], [4, np.nan, 6], [10, 5, 9]])
260
+ SimpleImputer()
261
+ >>> X = [[np.nan, 2, 3], [4, np.nan, 6], [10, np.nan, 9]]
262
+ >>> print(imp_mean.transform(X))
263
+ [[ 7. 2. 3. ]
264
+ [ 4. 3.5 6. ]
265
+ [10. 3.5 9. ]]
266
+
267
+ For a more detailed example see
268
+ :ref:`sphx_glr_auto_examples_impute_plot_missing_values.py`.
269
+ """
270
+
271
+ _parameter_constraints: dict = {
272
+ **_BaseImputer._parameter_constraints,
273
+ "strategy": [StrOptions({"mean", "median", "most_frequent", "constant"})],
274
+ "fill_value": "no_validation", # any object is valid
275
+ "copy": ["boolean"],
276
+ }
277
+
278
+ def __init__(
279
+ self,
280
+ *,
281
+ missing_values=np.nan,
282
+ strategy="mean",
283
+ fill_value=None,
284
+ copy=True,
285
+ add_indicator=False,
286
+ keep_empty_features=False,
287
+ ):
288
+ super().__init__(
289
+ missing_values=missing_values,
290
+ add_indicator=add_indicator,
291
+ keep_empty_features=keep_empty_features,
292
+ )
293
+ self.strategy = strategy
294
+ self.fill_value = fill_value
295
+ self.copy = copy
296
+
297
+ def _validate_input(self, X, in_fit):
298
+ if self.strategy in ("most_frequent", "constant"):
299
+ # If input is a list of strings, dtype = object.
300
+ # Otherwise ValueError is raised in SimpleImputer
301
+ # with strategy='most_frequent' or 'constant'
302
+ # because the list is converted to a Unicode numpy array
303
+ if isinstance(X, list) and any(
304
+ isinstance(elem, str) for row in X for elem in row
305
+ ):
306
+ dtype = object
307
+ else:
308
+ dtype = None
309
+ else:
310
+ dtype = FLOAT_DTYPES
311
+
312
+ if not in_fit and self._fit_dtype.kind == "O":
313
+ # Use object dtype if fitted on object dtypes
314
+ dtype = self._fit_dtype
315
+
316
+ if _is_pandas_na(self.missing_values) or is_scalar_nan(self.missing_values):
317
+ force_all_finite = "allow-nan"
318
+ else:
319
+ force_all_finite = True
320
+
321
+ try:
322
+ X = self._validate_data(
323
+ X,
324
+ reset=in_fit,
325
+ accept_sparse="csc",
326
+ dtype=dtype,
327
+ force_all_finite=force_all_finite,
328
+ copy=self.copy,
329
+ )
330
+ except ValueError as ve:
331
+ if "could not convert" in str(ve):
332
+ new_ve = ValueError(
333
+ "Cannot use {} strategy with non-numeric data:\n{}".format(
334
+ self.strategy, ve
335
+ )
336
+ )
337
+ raise new_ve from None
338
+ else:
339
+ raise ve
340
+
341
+ if in_fit:
342
+ # Use the dtype seen in `fit` for non-`fit` conversion
343
+ self._fit_dtype = X.dtype
344
+
345
+ _check_inputs_dtype(X, self.missing_values)
346
+ if X.dtype.kind not in ("i", "u", "f", "O"):
347
+ raise ValueError(
348
+ "SimpleImputer does not support data with dtype "
349
+ "{0}. Please provide either a numeric array (with"
350
+ " a floating point or integer dtype) or "
351
+ "categorical data represented either as an array "
352
+ "with integer dtype or an array of string values "
353
+ "with an object dtype.".format(X.dtype)
354
+ )
355
+
356
+ if sp.issparse(X) and self.missing_values == 0:
357
+ # missing_values = 0 not allowed with sparse data as it would
358
+ # force densification
359
+ raise ValueError(
360
+ "Imputation not possible when missing_values "
361
+ "== 0 and input is sparse. Provide a dense "
362
+ "array instead."
363
+ )
364
+
365
+ if self.strategy == "constant":
366
+ if in_fit and self.fill_value is not None:
367
+ fill_value_dtype = type(self.fill_value)
368
+ err_msg = (
369
+ f"fill_value={self.fill_value!r} (of type {fill_value_dtype!r}) "
370
+ f"cannot be cast to the input data that is {X.dtype!r}. Make sure "
371
+ "that both dtypes are of the same kind."
372
+ )
373
+ elif not in_fit:
374
+ fill_value_dtype = self.statistics_.dtype
375
+ err_msg = (
376
+ f"The dtype of the filling value (i.e. {fill_value_dtype!r}) "
377
+ f"cannot be cast to the input data that is {X.dtype!r}. Make sure "
378
+ "that the dtypes of the input data is of the same kind between "
379
+ "fit and transform."
380
+ )
381
+ else:
382
+ # By default, fill_value=None, and the replacement is always
383
+ # compatible with the input data
384
+ fill_value_dtype = X.dtype
385
+
386
+ # Make sure we can safely cast fill_value dtype to the input data dtype
387
+ if not np.can_cast(fill_value_dtype, X.dtype, casting="same_kind"):
388
+ raise ValueError(err_msg)
389
+
390
+ return X
391
+
392
+ @_fit_context(prefer_skip_nested_validation=True)
393
+ def fit(self, X, y=None):
394
+ """Fit the imputer on `X`.
395
+
396
+ Parameters
397
+ ----------
398
+ X : {array-like, sparse matrix}, shape (n_samples, n_features)
399
+ Input data, where `n_samples` is the number of samples and
400
+ `n_features` is the number of features.
401
+
402
+ y : Ignored
403
+ Not used, present here for API consistency by convention.
404
+
405
+ Returns
406
+ -------
407
+ self : object
408
+ Fitted estimator.
409
+ """
410
+ X = self._validate_input(X, in_fit=True)
411
+
412
+ # default fill_value is 0 for numerical input and "missing_value"
413
+ # otherwise
414
+ if self.fill_value is None:
415
+ if X.dtype.kind in ("i", "u", "f"):
416
+ fill_value = 0
417
+ else:
418
+ fill_value = "missing_value"
419
+ else:
420
+ fill_value = self.fill_value
421
+
422
+ if sp.issparse(X):
423
+ self.statistics_ = self._sparse_fit(
424
+ X, self.strategy, self.missing_values, fill_value
425
+ )
426
+ else:
427
+ self.statistics_ = self._dense_fit(
428
+ X, self.strategy, self.missing_values, fill_value
429
+ )
430
+
431
+ return self
432
+
433
+ def _sparse_fit(self, X, strategy, missing_values, fill_value):
434
+ """Fit the transformer on sparse data."""
435
+ missing_mask = _get_mask(X, missing_values)
436
+ mask_data = missing_mask.data
437
+ n_implicit_zeros = X.shape[0] - np.diff(X.indptr)
438
+
439
+ statistics = np.empty(X.shape[1])
440
+
441
+ if strategy == "constant":
442
+ # for constant strategy, self.statistics_ is used to store
443
+ # fill_value in each column
444
+ statistics.fill(fill_value)
445
+ else:
446
+ for i in range(X.shape[1]):
447
+ column = X.data[X.indptr[i] : X.indptr[i + 1]]
448
+ mask_column = mask_data[X.indptr[i] : X.indptr[i + 1]]
449
+ column = column[~mask_column]
450
+
451
+ # combine explicit and implicit zeros
452
+ mask_zeros = _get_mask(column, 0)
453
+ column = column[~mask_zeros]
454
+ n_explicit_zeros = mask_zeros.sum()
455
+ n_zeros = n_implicit_zeros[i] + n_explicit_zeros
456
+
457
+ if len(column) == 0 and self.keep_empty_features:
458
+ # in case we want to keep columns with only missing values.
459
+ statistics[i] = 0
460
+ else:
461
+ if strategy == "mean":
462
+ s = column.size + n_zeros
463
+ statistics[i] = np.nan if s == 0 else column.sum() / s
464
+
465
+ elif strategy == "median":
466
+ statistics[i] = _get_median(column, n_zeros)
467
+
468
+ elif strategy == "most_frequent":
469
+ statistics[i] = _most_frequent(column, 0, n_zeros)
470
+
471
+ super()._fit_indicator(missing_mask)
472
+
473
+ return statistics
474
+
475
+ def _dense_fit(self, X, strategy, missing_values, fill_value):
476
+ """Fit the transformer on dense data."""
477
+ missing_mask = _get_mask(X, missing_values)
478
+ masked_X = ma.masked_array(X, mask=missing_mask)
479
+
480
+ super()._fit_indicator(missing_mask)
481
+
482
+ # Mean
483
+ if strategy == "mean":
484
+ mean_masked = np.ma.mean(masked_X, axis=0)
485
+ # Avoid the warning "Warning: converting a masked element to nan."
486
+ mean = np.ma.getdata(mean_masked)
487
+ mean[np.ma.getmask(mean_masked)] = 0 if self.keep_empty_features else np.nan
488
+
489
+ return mean
490
+
491
+ # Median
492
+ elif strategy == "median":
493
+ median_masked = np.ma.median(masked_X, axis=0)
494
+ # Avoid the warning "Warning: converting a masked element to nan."
495
+ median = np.ma.getdata(median_masked)
496
+ median[np.ma.getmaskarray(median_masked)] = (
497
+ 0 if self.keep_empty_features else np.nan
498
+ )
499
+
500
+ return median
501
+
502
+ # Most frequent
503
+ elif strategy == "most_frequent":
504
+ # Avoid use of scipy.stats.mstats.mode due to the required
505
+ # additional overhead and slow benchmarking performance.
506
+ # See Issue 14325 and PR 14399 for full discussion.
507
+
508
+ # To be able access the elements by columns
509
+ X = X.transpose()
510
+ mask = missing_mask.transpose()
511
+
512
+ if X.dtype.kind == "O":
513
+ most_frequent = np.empty(X.shape[0], dtype=object)
514
+ else:
515
+ most_frequent = np.empty(X.shape[0])
516
+
517
+ for i, (row, row_mask) in enumerate(zip(X[:], mask[:])):
518
+ row_mask = np.logical_not(row_mask).astype(bool)
519
+ row = row[row_mask]
520
+ if len(row) == 0 and self.keep_empty_features:
521
+ most_frequent[i] = 0
522
+ else:
523
+ most_frequent[i] = _most_frequent(row, np.nan, 0)
524
+
525
+ return most_frequent
526
+
527
+ # Constant
528
+ elif strategy == "constant":
529
+ # for constant strategy, self.statistcs_ is used to store
530
+ # fill_value in each column
531
+ return np.full(X.shape[1], fill_value, dtype=X.dtype)
532
+
533
+ def transform(self, X):
534
+ """Impute all missing values in `X`.
535
+
536
+ Parameters
537
+ ----------
538
+ X : {array-like, sparse matrix}, shape (n_samples, n_features)
539
+ The input data to complete.
540
+
541
+ Returns
542
+ -------
543
+ X_imputed : {ndarray, sparse matrix} of shape \
544
+ (n_samples, n_features_out)
545
+ `X` with imputed values.
546
+ """
547
+ check_is_fitted(self)
548
+
549
+ X = self._validate_input(X, in_fit=False)
550
+ statistics = self.statistics_
551
+
552
+ if X.shape[1] != statistics.shape[0]:
553
+ raise ValueError(
554
+ "X has %d features per sample, expected %d"
555
+ % (X.shape[1], self.statistics_.shape[0])
556
+ )
557
+
558
+ # compute mask before eliminating invalid features
559
+ missing_mask = _get_mask(X, self.missing_values)
560
+
561
+ # Decide whether to keep missing features
562
+ if self.strategy == "constant" or self.keep_empty_features:
563
+ valid_statistics = statistics
564
+ valid_statistics_indexes = None
565
+ else:
566
+ # same as np.isnan but also works for object dtypes
567
+ invalid_mask = _get_mask(statistics, np.nan)
568
+ valid_mask = np.logical_not(invalid_mask)
569
+ valid_statistics = statistics[valid_mask]
570
+ valid_statistics_indexes = np.flatnonzero(valid_mask)
571
+
572
+ if invalid_mask.any():
573
+ invalid_features = np.arange(X.shape[1])[invalid_mask]
574
+ # use feature names in the warning if they are available
575
+ if hasattr(self, "feature_names_in_"):
576
+ invalid_features = self.feature_names_in_[invalid_features]
577
+ warnings.warn(
578
+ "Skipping features without any observed values:"
579
+ f" {invalid_features}. At least one non-missing value is needed"
580
+ f" for imputation with strategy='{self.strategy}'."
581
+ )
582
+ X = X[:, valid_statistics_indexes]
583
+
584
+ # Do actual imputation
585
+ if sp.issparse(X):
586
+ if self.missing_values == 0:
587
+ raise ValueError(
588
+ "Imputation not possible when missing_values "
589
+ "== 0 and input is sparse. Provide a dense "
590
+ "array instead."
591
+ )
592
+ else:
593
+ # if no invalid statistics are found, use the mask computed
594
+ # before, else recompute mask
595
+ if valid_statistics_indexes is None:
596
+ mask = missing_mask.data
597
+ else:
598
+ mask = _get_mask(X.data, self.missing_values)
599
+ indexes = np.repeat(
600
+ np.arange(len(X.indptr) - 1, dtype=int), np.diff(X.indptr)
601
+ )[mask]
602
+
603
+ X.data[mask] = valid_statistics[indexes].astype(X.dtype, copy=False)
604
+ else:
605
+ # use mask computed before eliminating invalid features
606
+ if valid_statistics_indexes is None:
607
+ mask_valid_features = missing_mask
608
+ else:
609
+ mask_valid_features = missing_mask[:, valid_statistics_indexes]
610
+ n_missing = np.sum(mask_valid_features, axis=0)
611
+ values = np.repeat(valid_statistics, n_missing)
612
+ coordinates = np.where(mask_valid_features.transpose())[::-1]
613
+
614
+ X[coordinates] = values
615
+
616
+ X_indicator = super()._transform_indicator(missing_mask)
617
+
618
+ return super()._concatenate_indicator(X, X_indicator)
619
+
620
+ def inverse_transform(self, X):
621
+ """Convert the data back to the original representation.
622
+
623
+ Inverts the `transform` operation performed on an array.
624
+ This operation can only be performed after :class:`SimpleImputer` is
625
+ instantiated with `add_indicator=True`.
626
+
627
+ Note that `inverse_transform` can only invert the transform in
628
+ features that have binary indicators for missing values. If a feature
629
+ has no missing values at `fit` time, the feature won't have a binary
630
+ indicator, and the imputation done at `transform` time won't be
631
+ inverted.
632
+
633
+ .. versionadded:: 0.24
634
+
635
+ Parameters
636
+ ----------
637
+ X : array-like of shape \
638
+ (n_samples, n_features + n_features_missing_indicator)
639
+ The imputed data to be reverted to original data. It has to be
640
+ an augmented array of imputed data and the missing indicator mask.
641
+
642
+ Returns
643
+ -------
644
+ X_original : ndarray of shape (n_samples, n_features)
645
+ The original `X` with missing values as it was prior
646
+ to imputation.
647
+ """
648
+ check_is_fitted(self)
649
+
650
+ if not self.add_indicator:
651
+ raise ValueError(
652
+ "'inverse_transform' works only when "
653
+ "'SimpleImputer' is instantiated with "
654
+ "'add_indicator=True'. "
655
+ f"Got 'add_indicator={self.add_indicator}' "
656
+ "instead."
657
+ )
658
+
659
+ n_features_missing = len(self.indicator_.features_)
660
+ non_empty_feature_count = X.shape[1] - n_features_missing
661
+ array_imputed = X[:, :non_empty_feature_count].copy()
662
+ missing_mask = X[:, non_empty_feature_count:].astype(bool)
663
+
664
+ n_features_original = len(self.statistics_)
665
+ shape_original = (X.shape[0], n_features_original)
666
+ X_original = np.zeros(shape_original)
667
+ X_original[:, self.indicator_.features_] = missing_mask
668
+ full_mask = X_original.astype(bool)
669
+
670
+ imputed_idx, original_idx = 0, 0
671
+ while imputed_idx < len(array_imputed.T):
672
+ if not np.all(X_original[:, original_idx]):
673
+ X_original[:, original_idx] = array_imputed.T[imputed_idx]
674
+ imputed_idx += 1
675
+ original_idx += 1
676
+ else:
677
+ original_idx += 1
678
+
679
+ X_original[full_mask] = self.missing_values
680
+ return X_original
681
+
682
+ def _more_tags(self):
683
+ return {
684
+ "allow_nan": _is_pandas_na(self.missing_values) or is_scalar_nan(
685
+ self.missing_values
686
+ )
687
+ }
688
+
689
+ def get_feature_names_out(self, input_features=None):
690
+ """Get output feature names for transformation.
691
+
692
+ Parameters
693
+ ----------
694
+ input_features : array-like of str or None, default=None
695
+ Input features.
696
+
697
+ - If `input_features` is `None`, then `feature_names_in_` is
698
+ used as feature names in. If `feature_names_in_` is not defined,
699
+ then the following input feature names are generated:
700
+ `["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
701
+ - If `input_features` is an array-like, then `input_features` must
702
+ match `feature_names_in_` if `feature_names_in_` is defined.
703
+
704
+ Returns
705
+ -------
706
+ feature_names_out : ndarray of str objects
707
+ Transformed feature names.
708
+ """
709
+ check_is_fitted(self, "n_features_in_")
710
+ input_features = _check_feature_names_in(self, input_features)
711
+ non_missing_mask = np.logical_not(_get_mask(self.statistics_, np.nan))
712
+ names = input_features[non_missing_mask]
713
+ return self._concatenate_indicator_feature_names_out(names, input_features)
714
+
715
+
716
+ class MissingIndicator(TransformerMixin, BaseEstimator):
717
+ """Binary indicators for missing values.
718
+
719
+ Note that this component typically should not be used in a vanilla
720
+ :class:`~sklearn.pipeline.Pipeline` consisting of transformers and a
721
+ classifier, but rather could be added using a
722
+ :class:`~sklearn.pipeline.FeatureUnion` or
723
+ :class:`~sklearn.compose.ColumnTransformer`.
724
+
725
+ Read more in the :ref:`User Guide <impute>`.
726
+
727
+ .. versionadded:: 0.20
728
+
729
+ Parameters
730
+ ----------
731
+ missing_values : int, float, str, np.nan or None, default=np.nan
732
+ The placeholder for the missing values. All occurrences of
733
+ `missing_values` will be imputed. For pandas' dataframes with
734
+ nullable integer dtypes with missing values, `missing_values`
735
+ should be set to `np.nan`, since `pd.NA` will be converted to `np.nan`.
736
+
737
+ features : {'missing-only', 'all'}, default='missing-only'
738
+ Whether the imputer mask should represent all or a subset of
739
+ features.
740
+
741
+ - If `'missing-only'` (default), the imputer mask will only represent
742
+ features containing missing values during fit time.
743
+ - If `'all'`, the imputer mask will represent all features.
744
+
745
+ sparse : bool or 'auto', default='auto'
746
+ Whether the imputer mask format should be sparse or dense.
747
+
748
+ - If `'auto'` (default), the imputer mask will be of same type as
749
+ input.
750
+ - If `True`, the imputer mask will be a sparse matrix.
751
+ - If `False`, the imputer mask will be a numpy array.
752
+
753
+ error_on_new : bool, default=True
754
+ If `True`, :meth:`transform` will raise an error when there are
755
+ features with missing values that have no missing values in
756
+ :meth:`fit`. This is applicable only when `features='missing-only'`.
757
+
758
+ Attributes
759
+ ----------
760
+ features_ : ndarray of shape (n_missing_features,) or (n_features,)
761
+ The features indices which will be returned when calling
762
+ :meth:`transform`. They are computed during :meth:`fit`. If
763
+ `features='all'`, `features_` is equal to `range(n_features)`.
764
+
765
+ n_features_in_ : int
766
+ Number of features seen during :term:`fit`.
767
+
768
+ .. versionadded:: 0.24
769
+
770
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
771
+ Names of features seen during :term:`fit`. Defined only when `X`
772
+ has feature names that are all strings.
773
+
774
+ .. versionadded:: 1.0
775
+
776
+ See Also
777
+ --------
778
+ SimpleImputer : Univariate imputation of missing values.
779
+ IterativeImputer : Multivariate imputation of missing values.
780
+
781
+ Examples
782
+ --------
783
+ >>> import numpy as np
784
+ >>> from sklearn.impute import MissingIndicator
785
+ >>> X1 = np.array([[np.nan, 1, 3],
786
+ ... [4, 0, np.nan],
787
+ ... [8, 1, 0]])
788
+ >>> X2 = np.array([[5, 1, np.nan],
789
+ ... [np.nan, 2, 3],
790
+ ... [2, 4, 0]])
791
+ >>> indicator = MissingIndicator()
792
+ >>> indicator.fit(X1)
793
+ MissingIndicator()
794
+ >>> X2_tr = indicator.transform(X2)
795
+ >>> X2_tr
796
+ array([[False, True],
797
+ [ True, False],
798
+ [False, False]])
799
+ """
800
+
801
+ _parameter_constraints: dict = {
802
+ "missing_values": [MissingValues()],
803
+ "features": [StrOptions({"missing-only", "all"})],
804
+ "sparse": ["boolean", StrOptions({"auto"})],
805
+ "error_on_new": ["boolean"],
806
+ }
807
+
808
+ def __init__(
809
+ self,
810
+ *,
811
+ missing_values=np.nan,
812
+ features="missing-only",
813
+ sparse="auto",
814
+ error_on_new=True,
815
+ ):
816
+ self.missing_values = missing_values
817
+ self.features = features
818
+ self.sparse = sparse
819
+ self.error_on_new = error_on_new
820
+
821
+ def _get_missing_features_info(self, X):
822
+ """Compute the imputer mask and the indices of the features
823
+ containing missing values.
824
+
825
+ Parameters
826
+ ----------
827
+ X : {ndarray, sparse matrix} of shape (n_samples, n_features)
828
+ The input data with missing values. Note that `X` has been
829
+ checked in :meth:`fit` and :meth:`transform` before to call this
830
+ function.
831
+
832
+ Returns
833
+ -------
834
+ imputer_mask : {ndarray, sparse matrix} of shape \
835
+ (n_samples, n_features)
836
+ The imputer mask of the original data.
837
+
838
+ features_with_missing : ndarray of shape (n_features_with_missing,)
839
+ The features containing missing values.
840
+ """
841
+ if not self._precomputed:
842
+ imputer_mask = _get_mask(X, self.missing_values)
843
+ else:
844
+ imputer_mask = X
845
+
846
+ if sp.issparse(X):
847
+ imputer_mask.eliminate_zeros()
848
+
849
+ if self.features == "missing-only":
850
+ n_missing = imputer_mask.getnnz(axis=0)
851
+
852
+ if self.sparse is False:
853
+ imputer_mask = imputer_mask.toarray()
854
+ elif imputer_mask.format == "csr":
855
+ imputer_mask = imputer_mask.tocsc()
856
+ else:
857
+ if not self._precomputed:
858
+ imputer_mask = _get_mask(X, self.missing_values)
859
+ else:
860
+ imputer_mask = X
861
+
862
+ if self.features == "missing-only":
863
+ n_missing = imputer_mask.sum(axis=0)
864
+
865
+ if self.sparse is True:
866
+ imputer_mask = sp.csc_matrix(imputer_mask)
867
+
868
+ if self.features == "all":
869
+ features_indices = np.arange(X.shape[1])
870
+ else:
871
+ features_indices = np.flatnonzero(n_missing)
872
+
873
+ return imputer_mask, features_indices
874
+
875
+ def _validate_input(self, X, in_fit):
876
+ if not is_scalar_nan(self.missing_values):
877
+ force_all_finite = True
878
+ else:
879
+ force_all_finite = "allow-nan"
880
+ X = self._validate_data(
881
+ X,
882
+ reset=in_fit,
883
+ accept_sparse=("csc", "csr"),
884
+ dtype=None,
885
+ force_all_finite=force_all_finite,
886
+ )
887
+ _check_inputs_dtype(X, self.missing_values)
888
+ if X.dtype.kind not in ("i", "u", "f", "O"):
889
+ raise ValueError(
890
+ "MissingIndicator does not support data with "
891
+ "dtype {0}. Please provide either a numeric array"
892
+ " (with a floating point or integer dtype) or "
893
+ "categorical data represented either as an array "
894
+ "with integer dtype or an array of string values "
895
+ "with an object dtype.".format(X.dtype)
896
+ )
897
+
898
+ if sp.issparse(X) and self.missing_values == 0:
899
+ # missing_values = 0 not allowed with sparse data as it would
900
+ # force densification
901
+ raise ValueError(
902
+ "Sparse input with missing_values=0 is "
903
+ "not supported. Provide a dense "
904
+ "array instead."
905
+ )
906
+
907
+ return X
908
+
909
+ def _fit(self, X, y=None, precomputed=False):
910
+ """Fit the transformer on `X`.
911
+
912
+ Parameters
913
+ ----------
914
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
915
+ Input data, where `n_samples` is the number of samples and
916
+ `n_features` is the number of features.
917
+ If `precomputed=True`, then `X` is a mask of the input data.
918
+
919
+ precomputed : bool
920
+ Whether the input data is a mask.
921
+
922
+ Returns
923
+ -------
924
+ imputer_mask : {ndarray, sparse matrix} of shape (n_samples, \
925
+ n_features)
926
+ The imputer mask of the original data.
927
+ """
928
+ if precomputed:
929
+ if not (hasattr(X, "dtype") and X.dtype.kind == "b"):
930
+ raise ValueError("precomputed is True but the input data is not a mask")
931
+ self._precomputed = True
932
+ else:
933
+ self._precomputed = False
934
+
935
+ # Need not validate X again as it would have already been validated
936
+ # in the Imputer calling MissingIndicator
937
+ if not self._precomputed:
938
+ X = self._validate_input(X, in_fit=True)
939
+ else:
940
+ # only create `n_features_in_` in the precomputed case
941
+ self._check_n_features(X, reset=True)
942
+
943
+ self._n_features = X.shape[1]
944
+
945
+ missing_features_info = self._get_missing_features_info(X)
946
+ self.features_ = missing_features_info[1]
947
+
948
+ return missing_features_info[0]
949
+
950
+ @_fit_context(prefer_skip_nested_validation=True)
951
+ def fit(self, X, y=None):
952
+ """Fit the transformer on `X`.
953
+
954
+ Parameters
955
+ ----------
956
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
957
+ Input data, where `n_samples` is the number of samples and
958
+ `n_features` is the number of features.
959
+
960
+ y : Ignored
961
+ Not used, present for API consistency by convention.
962
+
963
+ Returns
964
+ -------
965
+ self : object
966
+ Fitted estimator.
967
+ """
968
+ self._fit(X, y)
969
+
970
+ return self
971
+
972
+ def transform(self, X):
973
+ """Generate missing values indicator for `X`.
974
+
975
+ Parameters
976
+ ----------
977
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
978
+ The input data to complete.
979
+
980
+ Returns
981
+ -------
982
+ Xt : {ndarray, sparse matrix} of shape (n_samples, n_features) \
983
+ or (n_samples, n_features_with_missing)
984
+ The missing indicator for input data. The data type of `Xt`
985
+ will be boolean.
986
+ """
987
+ check_is_fitted(self)
988
+
989
+ # Need not validate X again as it would have already been validated
990
+ # in the Imputer calling MissingIndicator
991
+ if not self._precomputed:
992
+ X = self._validate_input(X, in_fit=False)
993
+ else:
994
+ if not (hasattr(X, "dtype") and X.dtype.kind == "b"):
995
+ raise ValueError("precomputed is True but the input data is not a mask")
996
+
997
+ imputer_mask, features = self._get_missing_features_info(X)
998
+
999
+ if self.features == "missing-only":
1000
+ features_diff_fit_trans = np.setdiff1d(features, self.features_)
1001
+ if self.error_on_new and features_diff_fit_trans.size > 0:
1002
+ raise ValueError(
1003
+ "The features {} have missing values "
1004
+ "in transform but have no missing values "
1005
+ "in fit.".format(features_diff_fit_trans)
1006
+ )
1007
+
1008
+ if self.features_.size < self._n_features:
1009
+ imputer_mask = imputer_mask[:, self.features_]
1010
+
1011
+ return imputer_mask
1012
+
1013
+ @_fit_context(prefer_skip_nested_validation=True)
1014
+ def fit_transform(self, X, y=None):
1015
+ """Generate missing values indicator for `X`.
1016
+
1017
+ Parameters
1018
+ ----------
1019
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
1020
+ The input data to complete.
1021
+
1022
+ y : Ignored
1023
+ Not used, present for API consistency by convention.
1024
+
1025
+ Returns
1026
+ -------
1027
+ Xt : {ndarray, sparse matrix} of shape (n_samples, n_features) \
1028
+ or (n_samples, n_features_with_missing)
1029
+ The missing indicator for input data. The data type of `Xt`
1030
+ will be boolean.
1031
+ """
1032
+ imputer_mask = self._fit(X, y)
1033
+
1034
+ if self.features_.size < self._n_features:
1035
+ imputer_mask = imputer_mask[:, self.features_]
1036
+
1037
+ return imputer_mask
1038
+
1039
+ def get_feature_names_out(self, input_features=None):
1040
+ """Get output feature names for transformation.
1041
+
1042
+ Parameters
1043
+ ----------
1044
+ input_features : array-like of str or None, default=None
1045
+ Input features.
1046
+
1047
+ - If `input_features` is `None`, then `feature_names_in_` is
1048
+ used as feature names in. If `feature_names_in_` is not defined,
1049
+ then the following input feature names are generated:
1050
+ `["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
1051
+ - If `input_features` is an array-like, then `input_features` must
1052
+ match `feature_names_in_` if `feature_names_in_` is defined.
1053
+
1054
+ Returns
1055
+ -------
1056
+ feature_names_out : ndarray of str objects
1057
+ Transformed feature names.
1058
+ """
1059
+ check_is_fitted(self, "n_features_in_")
1060
+ input_features = _check_feature_names_in(self, input_features)
1061
+ prefix = self.__class__.__name__.lower()
1062
+ return np.asarray(
1063
+ [
1064
+ f"{prefix}_{feature_name}"
1065
+ for feature_name in input_features[self.features_]
1066
+ ],
1067
+ dtype=object,
1068
+ )
1069
+
1070
+ def _more_tags(self):
1071
+ return {
1072
+ "allow_nan": True,
1073
+ "X_types": ["2darray", "string"],
1074
+ "preserves_dtype": [],
1075
+ }
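Tying together `add_indicator` and `inverse_transform` as documented above, a minimal sketch on synthetic data:

import numpy as np

from sklearn.impute import SimpleImputer

X = np.array([[1.0, np.nan], [2.0, 4.0], [np.nan, 6.0]])
imp = SimpleImputer(strategy="mean", add_indicator=True)
Xt = imp.fit_transform(X)         # imputed columns + boolean indicator columns
print(imp.get_feature_names_out(["a", "b"]))
print(imp.inverse_transform(Xt))  # np.nan restored where the indicator was True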
venv/lib/python3.10/site-packages/sklearn/impute/_iterative.py ADDED
@@ -0,0 +1,906 @@
1
+ import warnings
2
+ from collections import namedtuple
3
+ from numbers import Integral, Real
4
+ from time import time
5
+
6
+ import numpy as np
7
+ from scipy import stats
8
+
9
+ from ..base import _fit_context, clone
10
+ from ..exceptions import ConvergenceWarning
11
+ from ..preprocessing import normalize
12
+ from ..utils import (
13
+ _safe_assign,
14
+ _safe_indexing,
15
+ check_array,
16
+ check_random_state,
17
+ is_scalar_nan,
18
+ )
19
+ from ..utils._mask import _get_mask
20
+ from ..utils._param_validation import HasMethods, Interval, StrOptions
21
+ from ..utils.metadata_routing import _RoutingNotSupportedMixin
22
+ from ..utils.validation import FLOAT_DTYPES, _check_feature_names_in, check_is_fitted
23
+ from ._base import SimpleImputer, _BaseImputer, _check_inputs_dtype
24
+
25
+ _ImputerTriplet = namedtuple(
26
+ "_ImputerTriplet", ["feat_idx", "neighbor_feat_idx", "estimator"]
27
+ )
28
+
29
+
30
+ def _assign_where(X1, X2, cond):
31
+ """Assign X2 to X1 where cond is True.
32
+
33
+ Parameters
34
+ ----------
35
+ X1 : ndarray or dataframe of shape (n_samples, n_features)
36
+ Data.
37
+
38
+ X2 : ndarray of shape (n_samples, n_features)
39
+ Data to be assigned.
40
+
41
+ cond : ndarray of shape (n_samples, n_features)
42
+ Boolean mask to assign data.
43
+ """
44
+ if hasattr(X1, "mask"): # pandas dataframes
45
+ X1.mask(cond=cond, other=X2, inplace=True)
46
+ else: # ndarrays
47
+ X1[cond] = X2[cond]
48
+
49
+
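A small sketch of both branches of `_assign_where` (internal API, subject to change; the pandas branch assumes a DataFrame exposing `mask`):

import numpy as np
import pandas as pd

from sklearn.impute._iterative import _assign_where

cond = np.array([[True, False], [False, True]])
X2 = np.full((2, 2), -1.0)

arr = np.array([[1.0, 2.0], [3.0, 4.0]])
_assign_where(arr, X2, cond)  # ndarray branch: arr[cond] = X2[cond]

df = pd.DataFrame([[1.0, 2.0], [3.0, 4.0]])
_assign_where(df, X2, cond)   # pandas branch: df.mask(cond, other=X2, inplace=True)

print(arr)            # [[-1.  2.] [ 3. -1.]]
print(df.to_numpy())  # same values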
50
+ class IterativeImputer(_RoutingNotSupportedMixin, _BaseImputer):
51
+ """Multivariate imputer that estimates each feature from all the others.
52
+
53
+ A strategy for imputing missing values by modeling each feature with
54
+ missing values as a function of other features in a round-robin fashion.
55
+
56
+ Read more in the :ref:`User Guide <iterative_imputer>`.
57
+
58
+ .. versionadded:: 0.21
59
+
60
+ .. note::
61
+
62
+ This estimator is still **experimental** for now: the predictions
63
+ and the API might change without any deprecation cycle. To use it,
64
+ you need to explicitly import `enable_iterative_imputer`::
65
+
66
+ >>> # explicitly require this experimental feature
67
+ >>> from sklearn.experimental import enable_iterative_imputer # noqa
68
+ >>> # now you can import normally from sklearn.impute
69
+ >>> from sklearn.impute import IterativeImputer
70
+
71
+ Parameters
72
+ ----------
73
+ estimator : estimator object, default=BayesianRidge()
74
+ The estimator to use at each step of the round-robin imputation.
75
+ If `sample_posterior=True`, the estimator must support
76
+ `return_std` in its `predict` method.
77
+
78
+ missing_values : int or np.nan, default=np.nan
79
+ The placeholder for the missing values. All occurrences of
80
+ `missing_values` will be imputed. For pandas' dataframes with
81
+ nullable integer dtypes with missing values, `missing_values`
82
+ should be set to `np.nan`, since `pd.NA` will be converted to `np.nan`.
83
+
84
+ sample_posterior : bool, default=False
85
+ Whether to sample from the (Gaussian) predictive posterior of the
86
+ fitted estimator for each imputation. Estimator must support
87
+ `return_std` in its `predict` method if set to `True`. Set to
88
+ `True` if using `IterativeImputer` for multiple imputations.
89
+
90
+ max_iter : int, default=10
91
+ Maximum number of imputation rounds to perform before returning the
92
+ imputations computed during the final round. A round is a single
93
+ imputation of each feature with missing values. The stopping criterion
94
+ is met once `max(abs(X_t - X_{t-1}))/max(abs(X[known_vals])) < tol`,
95
+ where `X_t` is `X` at iteration `t`. Note that early stopping is only
96
+ applied if `sample_posterior=False`.
97
+
98
+ tol : float, default=1e-3
99
+ Tolerance of the stopping condition.
100
+
101
+ n_nearest_features : int, default=None
102
+ Number of other features to use to estimate the missing values of
103
+ each feature column. Nearness between features is measured using
104
+ the absolute correlation coefficient between each feature pair (after
105
+ initial imputation). To ensure coverage of features throughout the
106
+ imputation process, the neighbor features are not necessarily nearest,
107
+ but are drawn with probability proportional to correlation for each
108
+ imputed target feature. Can provide significant speed-up when the
109
+ number of features is huge. If `None`, all features will be used.
110
+
111
+ initial_strategy : {'mean', 'median', 'most_frequent', 'constant'}, \
112
+ default='mean'
113
+ Which strategy to use to initialize the missing values. Same as the
114
+ `strategy` parameter in :class:`~sklearn.impute.SimpleImputer`.
115
+
116
+ fill_value : str or numerical value, default=None
117
+ When `strategy="constant"`, `fill_value` is used to replace all
118
+ occurrences of missing_values. For string or object data types,
119
+ `fill_value` must be a string.
120
+ If `None`, `fill_value` will be 0 when imputing numerical
121
+ data and "missing_value" for strings or object data types.
122
+
123
+ .. versionadded:: 1.3
124
+
125
+ imputation_order : {'ascending', 'descending', 'roman', 'arabic', \
126
+ 'random'}, default='ascending'
127
+ The order in which the features will be imputed. Possible values:
128
+
129
+ - `'ascending'`: From features with fewest missing values to most.
130
+ - `'descending'`: From features with most missing values to fewest.
131
+ - `'roman'`: Left to right.
132
+ - `'arabic'`: Right to left.
133
+ - `'random'`: A random order for each round.
134
+
135
+ skip_complete : bool, default=False
136
+ If `True` then features with missing values during :meth:`transform`
137
+ which did not have any missing values during :meth:`fit` will be
138
+ imputed with the initial imputation method only. Set to `True` if you
139
+ have many features with no missing values at both :meth:`fit` and
140
+ :meth:`transform` time to save compute.
141
+
142
+ min_value : float or array-like of shape (n_features,), default=-np.inf
143
+ Minimum possible imputed value. Broadcast to shape `(n_features,)` if
144
+ scalar. If array-like, expects shape `(n_features,)`, one min value for
145
+ each feature. The default is `-np.inf`.
146
+
147
+ .. versionchanged:: 0.23
148
+ Added support for array-like.
149
+
150
+ max_value : float or array-like of shape (n_features,), default=np.inf
151
+ Maximum possible imputed value. Broadcast to shape `(n_features,)` if
152
+ scalar. If array-like, expects shape `(n_features,)`, one max value for
153
+ each feature. The default is `np.inf`.
154
+
155
+ .. versionchanged:: 0.23
156
+ Added support for array-like.
157
+
158
+ verbose : int, default=0
159
+ Verbosity flag, controls the debug messages that are issued
160
+ as functions are evaluated. The higher, the more verbose. Can be 0, 1,
161
+ or 2.
162
+
163
+ random_state : int, RandomState instance or None, default=None
164
+ The seed of the pseudo random number generator to use. Randomizes
165
+ selection of estimator features if `n_nearest_features` is not `None`,
166
+ the `imputation_order` if `random`, and the sampling from posterior if
167
+ `sample_posterior=True`. Use an integer for determinism.
168
+ See :term:`the Glossary <random_state>`.
169
+
170
+ add_indicator : bool, default=False
171
+ If `True`, a :class:`MissingIndicator` transform will stack onto output
172
+ of the imputer's transform. This allows a predictive estimator
173
+ to account for missingness despite imputation. If a feature has no
174
+ missing values at fit/train time, the feature won't appear on
175
+ the missing indicator even if there are missing values at
176
+ transform/test time.
177
+
178
+ keep_empty_features : bool, default=False
179
+ If True, features that consist exclusively of missing values when
180
+ `fit` is called are returned in results when `transform` is called.
181
+ The imputed value is always `0` except when
182
+ `initial_strategy="constant"` in which case `fill_value` will be
183
+ used instead.
184
+
185
+ .. versionadded:: 1.2
186
+
187
+ Attributes
188
+ ----------
189
+ initial_imputer_ : object of type :class:`~sklearn.impute.SimpleImputer`
190
+ Imputer used to initialize the missing values.
191
+
192
+ imputation_sequence_ : list of tuples
193
+ Each tuple has `(feat_idx, neighbor_feat_idx, estimator)`, where
194
+ `feat_idx` is the current feature to be imputed,
195
+ `neighbor_feat_idx` is the array of other features used to impute the
196
+ current feature, and `estimator` is the trained estimator used for
197
+ the imputation. Length is `self.n_features_with_missing_ *
198
+ self.n_iter_`.
199
+
200
+ n_iter_ : int
201
+ Number of iteration rounds that occurred. Will be less than
202
+ `self.max_iter` if early stopping criterion was reached.
203
+
204
+ n_features_in_ : int
205
+ Number of features seen during :term:`fit`.
206
+
207
+ .. versionadded:: 0.24
208
+
209
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
210
+ Names of features seen during :term:`fit`. Defined only when `X`
211
+ has feature names that are all strings.
212
+
213
+ .. versionadded:: 1.0
214
+
215
+ n_features_with_missing_ : int
216
+ Number of features with missing values.
217
+
218
+ indicator_ : :class:`~sklearn.impute.MissingIndicator`
219
+ Indicator used to add binary indicators for missing values.
220
+ `None` if `add_indicator=False`.
221
+
222
+ random_state_ : RandomState instance
223
+ RandomState instance that is generated either from a seed, the random
224
+ number generator or by `np.random`.
225
+
226
+ See Also
227
+ --------
228
+ SimpleImputer : Univariate imputer for completing missing values
229
+ with simple strategies.
230
+ KNNImputer : Multivariate imputer that estimates missing features using
231
+ nearest samples.
232
+
233
+ Notes
234
+ -----
235
+ To support imputation in inductive mode we store each feature's estimator
236
+ during the :meth:`fit` phase, and predict without refitting (in order)
237
+ during the :meth:`transform` phase.
238
+
239
+ Features which contain all missing values at :meth:`fit` are discarded upon
240
+ :meth:`transform`.
241
+
242
+ Using defaults, the imputer scales in :math:`\\mathcal{O}(knp^3\\min(n,p))`
243
+ where :math:`k` = `max_iter`, :math:`n` the number of samples and
244
+ :math:`p` the number of features. It thus becomes prohibitively costly when
245
+ the number of features increases. Setting
246
+ `n_nearest_features << n_features`, `skip_complete=True` or increasing `tol`
247
+ can help to reduce its computational cost.
248
+
249
+ Depending on the nature of missing values, simple imputers can be
250
+ preferable in a prediction context.
251
+
252
+ References
253
+ ----------
254
+ .. [1] `Stef van Buuren, Karin Groothuis-Oudshoorn (2011). "mice:
255
+ Multivariate Imputation by Chained Equations in R". Journal of
256
+ Statistical Software 45: 1-67.
257
+ <https://www.jstatsoft.org/article/view/v045i03>`_
258
+
259
+ .. [2] `S. F. Buck, (1960). "A Method of Estimation of Missing Values in
260
+ Multivariate Data Suitable for use with an Electronic Computer".
261
+ Journal of the Royal Statistical Society 22(2): 302-306.
262
+ <https://www.jstor.org/stable/2984099>`_
263
+
264
+ Examples
265
+ --------
266
+ >>> import numpy as np
267
+ >>> from sklearn.experimental import enable_iterative_imputer
268
+ >>> from sklearn.impute import IterativeImputer
269
+ >>> imp_mean = IterativeImputer(random_state=0)
270
+ >>> imp_mean.fit([[7, 2, 3], [4, np.nan, 6], [10, 5, 9]])
271
+ IterativeImputer(random_state=0)
272
+ >>> X = [[np.nan, 2, 3], [4, np.nan, 6], [10, np.nan, 9]]
273
+ >>> imp_mean.transform(X)
274
+ array([[ 6.9584..., 2. , 3. ],
275
+ [ 4. , 2.6000..., 6. ],
276
+ [10. , 4.9999..., 9. ]])
277
+
278
+ For a more detailed example see
279
+ :ref:`sphx_glr_auto_examples_impute_plot_missing_values.py` or
280
+ :ref:`sphx_glr_auto_examples_impute_plot_iterative_imputer_variants_comparison.py`.
281
+ """
282
+
283
+ _parameter_constraints: dict = {
284
+ **_BaseImputer._parameter_constraints,
285
+ "estimator": [None, HasMethods(["fit", "predict"])],
286
+ "sample_posterior": ["boolean"],
287
+ "max_iter": [Interval(Integral, 0, None, closed="left")],
288
+ "tol": [Interval(Real, 0, None, closed="left")],
289
+ "n_nearest_features": [None, Interval(Integral, 1, None, closed="left")],
290
+ "initial_strategy": [
291
+ StrOptions({"mean", "median", "most_frequent", "constant"})
292
+ ],
293
+ "fill_value": "no_validation", # any object is valid
294
+ "imputation_order": [
295
+ StrOptions({"ascending", "descending", "roman", "arabic", "random"})
296
+ ],
297
+ "skip_complete": ["boolean"],
298
+ "min_value": [None, Interval(Real, None, None, closed="both"), "array-like"],
299
+ "max_value": [None, Interval(Real, None, None, closed="both"), "array-like"],
300
+ "verbose": ["verbose"],
301
+ "random_state": ["random_state"],
302
+ }
303
+
304
+ def __init__(
305
+ self,
306
+ estimator=None,
307
+ *,
308
+ missing_values=np.nan,
309
+ sample_posterior=False,
310
+ max_iter=10,
311
+ tol=1e-3,
312
+ n_nearest_features=None,
313
+ initial_strategy="mean",
314
+ fill_value=None,
315
+ imputation_order="ascending",
316
+ skip_complete=False,
317
+ min_value=-np.inf,
318
+ max_value=np.inf,
319
+ verbose=0,
320
+ random_state=None,
321
+ add_indicator=False,
322
+ keep_empty_features=False,
323
+ ):
324
+ super().__init__(
325
+ missing_values=missing_values,
326
+ add_indicator=add_indicator,
327
+ keep_empty_features=keep_empty_features,
328
+ )
329
+
330
+ self.estimator = estimator
331
+ self.sample_posterior = sample_posterior
332
+ self.max_iter = max_iter
333
+ self.tol = tol
334
+ self.n_nearest_features = n_nearest_features
335
+ self.initial_strategy = initial_strategy
336
+ self.fill_value = fill_value
337
+ self.imputation_order = imputation_order
338
+ self.skip_complete = skip_complete
339
+ self.min_value = min_value
340
+ self.max_value = max_value
341
+ self.verbose = verbose
342
+ self.random_state = random_state
343
+
344
+ def _impute_one_feature(
345
+ self,
346
+ X_filled,
347
+ mask_missing_values,
348
+ feat_idx,
349
+ neighbor_feat_idx,
350
+ estimator=None,
351
+ fit_mode=True,
352
+ ):
353
+ """Impute a single feature from the others provided.
354
+
355
+ This function predicts the missing values of one of the features using
356
+ the current estimates of all the other features. The `estimator` must
357
+ support `return_std=True` in its `predict` method for this function
358
+ to work.
359
+
360
+ Parameters
361
+ ----------
362
+ X_filled : ndarray
363
+ Input data with the most recent imputations.
364
+
365
+ mask_missing_values : ndarray
366
+ Input data's missing indicator matrix.
367
+
368
+ feat_idx : int
369
+ Index of the feature currently being imputed.
370
+
371
+ neighbor_feat_idx : ndarray
372
+ Indices of the features to be used in imputing `feat_idx`.
373
+
374
+ estimator : object
375
+ The estimator to use at this step of the round-robin imputation.
376
+ If `sample_posterior=True`, the estimator must support
377
+ `return_std` in its `predict` method.
378
+ If None, it will be cloned from self._estimator.
379
+
380
+ fit_mode : boolean, default=True
381
+ Whether to fit and predict with the estimator or just predict.
382
+
383
+ Returns
384
+ -------
385
+ X_filled : ndarray
386
+ Input data with `X_filled[missing_row_mask, feat_idx]` updated.
387
+
388
+ estimator : estimator with sklearn API
389
+ The fitted estimator used to impute
390
+ `X_filled[missing_row_mask, feat_idx]`.
391
+ """
392
+ if estimator is None and fit_mode is False:
393
+ raise ValueError(
394
+ "If fit_mode is False, then an already-fitted "
395
+ "estimator should be passed in."
396
+ )
397
+
398
+ if estimator is None:
399
+ estimator = clone(self._estimator)
400
+
401
+ missing_row_mask = mask_missing_values[:, feat_idx]
402
+ if fit_mode:
403
+ X_train = _safe_indexing(
404
+ _safe_indexing(X_filled, neighbor_feat_idx, axis=1),
405
+ ~missing_row_mask,
406
+ axis=0,
407
+ )
408
+ y_train = _safe_indexing(
409
+ _safe_indexing(X_filled, feat_idx, axis=1),
410
+ ~missing_row_mask,
411
+ axis=0,
412
+ )
413
+ estimator.fit(X_train, y_train)
414
+
415
+ # if no missing values, don't predict
416
+ if np.sum(missing_row_mask) == 0:
417
+ return X_filled, estimator
418
+
419
+ # get posterior samples if there is at least one missing value
420
+ X_test = _safe_indexing(
421
+ _safe_indexing(X_filled, neighbor_feat_idx, axis=1),
422
+ missing_row_mask,
423
+ axis=0,
424
+ )
425
+ if self.sample_posterior:
426
+ mus, sigmas = estimator.predict(X_test, return_std=True)
427
+ imputed_values = np.zeros(mus.shape, dtype=X_filled.dtype)
428
+ # two types of problems: (1) non-positive sigmas
429
+ # (2) mus outside legal range of min_value and max_value
430
+ # (results in inf sample)
431
+ positive_sigmas = sigmas > 0
432
+ imputed_values[~positive_sigmas] = mus[~positive_sigmas]
433
+ mus_too_low = mus < self._min_value[feat_idx]
434
+ imputed_values[mus_too_low] = self._min_value[feat_idx]
435
+ mus_too_high = mus > self._max_value[feat_idx]
436
+ imputed_values[mus_too_high] = self._max_value[feat_idx]
437
+ # the rest can be sampled without statistical issues
438
+ inrange_mask = positive_sigmas & ~mus_too_low & ~mus_too_high
439
+ mus = mus[inrange_mask]
440
+ sigmas = sigmas[inrange_mask]
441
+ a = (self._min_value[feat_idx] - mus) / sigmas
442
+ b = (self._max_value[feat_idx] - mus) / sigmas
443
+
444
+ truncated_normal = stats.truncnorm(a=a, b=b, loc=mus, scale=sigmas)
445
+ imputed_values[inrange_mask] = truncated_normal.rvs(
446
+ random_state=self.random_state_
447
+ )
448
+ else:
449
+ imputed_values = estimator.predict(X_test)
450
+ imputed_values = np.clip(
451
+ imputed_values, self._min_value[feat_idx], self._max_value[feat_idx]
452
+ )
453
+
454
+ # update the feature
455
+ _safe_assign(
456
+ X_filled,
457
+ imputed_values,
458
+ row_indexer=missing_row_mask,
459
+ column_indexer=feat_idx,
460
+ )
461
+ return X_filled, estimator
462
+
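When `sample_posterior=True`, the branch above draws each imputation from a truncated normal so that samples honor the `min_value`/`max_value` bounds instead of merely clipping the predictive mean. A minimal standalone sketch of that sampling step, not part of the diff, with made-up `mus`/`sigmas` and scalar bounds `lo`/`hi` standing in for `self._min_value[feat_idx]`/`self._max_value[feat_idx]`:

    import numpy as np
    from scipy import stats

    rng = np.random.RandomState(0)
    mus = np.array([0.5, 2.0, -1.5])    # predictive means (hypothetical)
    sigmas = np.array([1.0, 0.5, 2.0])  # predictive std. devs. (hypothetical)
    lo, hi = -1.0, 3.0                  # imputation bounds for this feature

    # Standardize the truncation points, as in _impute_one_feature above.
    a = (lo - mus) / sigmas
    b = (hi - mus) / sigmas
    draws = stats.truncnorm(a=a, b=b, loc=mus, scale=sigmas).rvs(random_state=rng)
    assert np.all((draws >= lo) & (draws <= hi))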
463
+ def _get_neighbor_feat_idx(self, n_features, feat_idx, abs_corr_mat):
464
+ """Get a list of other features to predict `feat_idx`.
465
+
466
+ If `self.n_nearest_features` is not `None` and strictly less than the
467
+ total number of features, then use a probability proportional to the absolute
468
+ correlation between `feat_idx` and each other feature to randomly
469
+ choose a subsample of the other features (without replacement).
470
+
471
+ Parameters
472
+ ----------
473
+ n_features : int
474
+ Number of features in `X`.
475
+
476
+ feat_idx : int
477
+ Index of the feature currently being imputed.
478
+
479
+ abs_corr_mat : ndarray, shape (n_features, n_features)
480
+ Absolute correlation matrix of `X`. The diagonal has been zeroed
481
+ out and each feature has been normalized to sum to 1. Can be None.
482
+
483
+ Returns
484
+ -------
485
+ neighbor_feat_idx : array-like
486
+ The features to use to impute `feat_idx`.
487
+ """
488
+ if self.n_nearest_features is not None and self.n_nearest_features < n_features:
489
+ p = abs_corr_mat[:, feat_idx]
490
+ neighbor_feat_idx = self.random_state_.choice(
491
+ np.arange(n_features), self.n_nearest_features, replace=False, p=p
492
+ )
493
+ else:
494
+ inds_left = np.arange(feat_idx)
495
+ inds_right = np.arange(feat_idx + 1, n_features)
496
+ neighbor_feat_idx = np.concatenate((inds_left, inds_right))
497
+ return neighbor_feat_idx
498
+
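For intuition, the weighted draw above is just a `choice` without replacement over a probability vector; a toy sketch, not part of the diff, where `p` is a made-up column of the normalized absolute-correlation matrix with a zero at the target feature so it never predicts itself:

    import numpy as np

    rng = np.random.RandomState(0)
    p = np.array([0.40, 0.0, 0.25, 0.35])  # stand-in for abs_corr_mat[:, feat_idx]
    n_nearest_features = 2

    # Sample predictors without replacement, favoring correlated features.
    neighbor_feat_idx = rng.choice(np.arange(len(p)), n_nearest_features,
                                   replace=False, p=p)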
499
+ def _get_ordered_idx(self, mask_missing_values):
500
+ """Decide in what order we will update the features.
501
+
502
+ As a homage to the MICE R package, we will have 4 main options of
503
+ how to order the updates, and use a random order if anything else
504
+ is specified.
505
+
506
+ Also, this function skips features which have no missing values.
507
+
508
+ Parameters
509
+ ----------
510
+ mask_missing_values : array-like, shape (n_samples, n_features)
511
+ Input data's missing indicator matrix, where `n_samples` is the
512
+ number of samples and `n_features` is the number of features.
513
+
514
+ Returns
515
+ -------
516
+ ordered_idx : ndarray, shape (n_features,)
517
+ The order in which to impute the features.
518
+ """
519
+ frac_of_missing_values = mask_missing_values.mean(axis=0)
520
+ if self.skip_complete:
521
+ missing_values_idx = np.flatnonzero(frac_of_missing_values)
522
+ else:
523
+ missing_values_idx = np.arange(np.shape(frac_of_missing_values)[0])
524
+ if self.imputation_order == "roman":
525
+ ordered_idx = missing_values_idx
526
+ elif self.imputation_order == "arabic":
527
+ ordered_idx = missing_values_idx[::-1]
528
+ elif self.imputation_order == "ascending":
529
+ n = len(frac_of_missing_values) - len(missing_values_idx)
530
+ ordered_idx = np.argsort(frac_of_missing_values, kind="mergesort")[n:]
531
+ elif self.imputation_order == "descending":
532
+ n = len(frac_of_missing_values) - len(missing_values_idx)
533
+ ordered_idx = np.argsort(frac_of_missing_values, kind="mergesort")[n:][::-1]
534
+ elif self.imputation_order == "random":
535
+ ordered_idx = missing_values_idx
536
+ self.random_state_.shuffle(ordered_idx)
537
+ return ordered_idx
538
+
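The 'ascending' and 'descending' branches rely on a stable argsort of the per-feature missing fraction; with `skip_complete=True` the slice `[n:]` then drops the `n` fully observed features that the stable sort places first (with `skip_complete=False`, `n` is 0). A toy illustration with hypothetical fractions, not part of the diff:

    import numpy as np

    frac = np.array([0.0, 0.3, 0.1, 0.0, 0.5])  # fraction missing per feature
    n = np.sum(frac == 0)                       # fully observed features

    ordered_idx = np.argsort(frac, kind="mergesort")[n:]
    # ordered_idx -> array([2, 1, 4]): fewest missing first, complete dropped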
539
+ def _get_abs_corr_mat(self, X_filled, tolerance=1e-6):
540
+ """Get absolute correlation matrix between features.
541
+
542
+ Parameters
543
+ ----------
544
+ X_filled : ndarray, shape (n_samples, n_features)
545
+ Input data with the most recent imputations.
546
+
547
+ tolerance : float, default=1e-6
548
+ `abs_corr_mat` can have nans, which will be replaced
549
+ with `tolerance`.
550
+
551
+ Returns
552
+ -------
553
+ abs_corr_mat : ndarray, shape (n_features, n_features)
554
+ Absolute correlation matrix of `X` at the beginning of the
555
+ current round. The diagonal has been zeroed out and each feature's
556
+ absolute correlations with all others have been normalized to sum
557
+ to 1.
558
+ """
559
+ n_features = X_filled.shape[1]
560
+ if self.n_nearest_features is None or self.n_nearest_features >= n_features:
561
+ return None
562
+ with np.errstate(invalid="ignore"):
563
+ # if a feature in the neighborhood has only a single value
564
+ # (e.g., categorical feature), the std. dev. will be null and
565
+ # np.corrcoef will raise a warning due to a division by zero
566
+ abs_corr_mat = np.abs(np.corrcoef(X_filled.T))
567
+ # np.corrcoef is not defined for features with zero std
568
+ abs_corr_mat[np.isnan(abs_corr_mat)] = tolerance
569
+ # ensures exploration, i.e. at least some probability of sampling
570
+ np.clip(abs_corr_mat, tolerance, None, out=abs_corr_mat)
571
+ # features are not their own neighbors
572
+ np.fill_diagonal(abs_corr_mat, 0)
573
+ # needs to sum to 1 for np.random.choice sampling
574
+ abs_corr_mat = normalize(abs_corr_mat, norm="l1", axis=0, copy=False)
575
+ return abs_corr_mat
576
+
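Stripped of the early-return plumbing, the sampling weights above are a cleaned-up `np.corrcoef`; a standalone sketch, not part of the diff, with random data standing in for `X_filled`:

    import numpy as np
    from sklearn.preprocessing import normalize

    rng = np.random.RandomState(0)
    X_filled = rng.randn(50, 4)

    abs_corr_mat = np.abs(np.corrcoef(X_filled.T))
    abs_corr_mat[np.isnan(abs_corr_mat)] = 1e-6          # constant features -> nan
    np.clip(abs_corr_mat, 1e-6, None, out=abs_corr_mat)  # keep some exploration
    np.fill_diagonal(abs_corr_mat, 0)                    # no self-neighbors
    abs_corr_mat = normalize(abs_corr_mat, norm="l1", axis=0)
    assert np.allclose(abs_corr_mat.sum(axis=0), 1.0)    # valid for rng.choice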
577
+ def _initial_imputation(self, X, in_fit=False):
578
+ """Perform initial imputation for input `X`.
579
+
580
+ Parameters
581
+ ----------
582
+ X : ndarray of shape (n_samples, n_features)
583
+ Input data, where `n_samples` is the number of samples and
584
+ `n_features` is the number of features.
585
+
586
+ in_fit : bool, default=False
587
+ Whether function is called in :meth:`fit`.
588
+
589
+ Returns
590
+ -------
591
+ Xt : ndarray of shape (n_samples, n_features)
592
+ Input data, where `n_samples` is the number of samples and
593
+ `n_features` is the number of features.
594
+
595
+ X_filled : ndarray of shape (n_samples, n_features)
596
+ Input data with the most recent imputations.
597
+
598
+ mask_missing_values : ndarray of shape (n_samples, n_features)
599
+ Input data's missing indicator matrix, where `n_samples` is the
600
+ number of samples and `n_features` is the number of features,
601
+ masked by non-missing features.
602
+
603
+ X_missing_mask : ndarray, shape (n_samples, n_features)
604
+ Input data's mask matrix indicating missing datapoints, where
605
+ `n_samples` is the number of samples and `n_features` is the
606
+ number of features.
607
+ """
608
+ if is_scalar_nan(self.missing_values):
609
+ force_all_finite = "allow-nan"
610
+ else:
611
+ force_all_finite = True
612
+
613
+ X = self._validate_data(
614
+ X,
615
+ dtype=FLOAT_DTYPES,
616
+ order="F",
617
+ reset=in_fit,
618
+ force_all_finite=force_all_finite,
619
+ )
620
+ _check_inputs_dtype(X, self.missing_values)
621
+
622
+ X_missing_mask = _get_mask(X, self.missing_values)
623
+ mask_missing_values = X_missing_mask.copy()
624
+ if self.initial_imputer_ is None:
625
+ self.initial_imputer_ = SimpleImputer(
626
+ missing_values=self.missing_values,
627
+ strategy=self.initial_strategy,
628
+ fill_value=self.fill_value,
629
+ keep_empty_features=self.keep_empty_features,
630
+ ).set_output(transform="default")
631
+ X_filled = self.initial_imputer_.fit_transform(X)
632
+ else:
633
+ X_filled = self.initial_imputer_.transform(X)
634
+
635
+ valid_mask = np.flatnonzero(
636
+ np.logical_not(np.isnan(self.initial_imputer_.statistics_))
637
+ )
638
+
639
+ if not self.keep_empty_features:
640
+ # drop empty features
641
+ Xt = X[:, valid_mask]
642
+ mask_missing_values = mask_missing_values[:, valid_mask]
643
+ else:
644
+ # mark empty features as not missing and keep the original
645
+ # imputation
646
+ mask_missing_values[:, valid_mask] = True
647
+ Xt = X
648
+
649
+ return Xt, X_filled, mask_missing_values, X_missing_mask
650
+
651
+ @staticmethod
652
+ def _validate_limit(limit, limit_type, n_features):
653
+ """Validate the limits (min/max) of the feature values.
654
+
655
+ Converts scalar min/max limits to vectors of shape `(n_features,)`.
656
+
657
+ Parameters
658
+ ----------
659
+ limit: scalar or array-like
660
+ The user-specified limit (i.e, min_value or max_value).
661
+ limit_type: {'max', 'min'}
662
+ Type of limit to validate.
663
+ n_features: int
664
+ Number of features in the dataset.
665
+
666
+ Returns
667
+ -------
668
+ limit: ndarray, shape(n_features,)
669
+ Array of limits, one for each feature.
670
+ """
671
+ limit_bound = np.inf if limit_type == "max" else -np.inf
672
+ limit = limit_bound if limit is None else limit
673
+ if np.isscalar(limit):
674
+ limit = np.full(n_features, limit)
675
+ limit = check_array(limit, force_all_finite=False, copy=False, ensure_2d=False)
676
+ if not limit.shape[0] == n_features:
677
+ raise ValueError(
678
+ f"'{limit_type}_value' should be of "
679
+ f"shape ({n_features},) when an array-like "
680
+ f"is provided. Got {limit.shape}, instead."
681
+ )
682
+ return limit
683
+
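In effect the static method above only broadcasts a scalar bound to `(n_features,)` and shape-checks an array one; a hypothetical call, not part of the diff:

    import numpy as np

    n_features, limit = 4, None             # user left max_value unset
    limit = np.inf if limit is None else limit
    if np.isscalar(limit):
        limit = np.full(n_features, limit)  # -> array([inf, inf, inf, inf])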
684
+ @_fit_context(
685
+ # IterativeImputer.estimator is not validated yet
686
+ prefer_skip_nested_validation=False
687
+ )
688
+ def fit_transform(self, X, y=None):
689
+ """Fit the imputer on `X` and return the transformed `X`.
690
+
691
+ Parameters
692
+ ----------
693
+ X : array-like, shape (n_samples, n_features)
694
+ Input data, where `n_samples` is the number of samples and
695
+ `n_features` is the number of features.
696
+
697
+ y : Ignored
698
+ Not used, present for API consistency by convention.
699
+
700
+ Returns
701
+ -------
702
+ Xt : array-like, shape (n_samples, n_features)
703
+ The imputed input data.
704
+ """
705
+ self.random_state_ = getattr(
706
+ self, "random_state_", check_random_state(self.random_state)
707
+ )
708
+
709
+ if self.estimator is None:
710
+ from ..linear_model import BayesianRidge
711
+
712
+ self._estimator = BayesianRidge()
713
+ else:
714
+ self._estimator = clone(self.estimator)
715
+
716
+ self.imputation_sequence_ = []
717
+
718
+ self.initial_imputer_ = None
719
+
720
+ X, Xt, mask_missing_values, complete_mask = self._initial_imputation(
721
+ X, in_fit=True
722
+ )
723
+
724
+ super()._fit_indicator(complete_mask)
725
+ X_indicator = super()._transform_indicator(complete_mask)
726
+
727
+ if self.max_iter == 0 or np.all(mask_missing_values):
728
+ self.n_iter_ = 0
729
+ return super()._concatenate_indicator(Xt, X_indicator)
730
+
731
+ # Edge case: a single feature. We return the initial ...
732
+ if Xt.shape[1] == 1:
733
+ self.n_iter_ = 0
734
+ return super()._concatenate_indicator(Xt, X_indicator)
735
+
736
+ self._min_value = self._validate_limit(self.min_value, "min", X.shape[1])
737
+ self._max_value = self._validate_limit(self.max_value, "max", X.shape[1])
738
+
739
+ if not np.all(np.greater(self._max_value, self._min_value)):
740
+ raise ValueError("One (or more) features have min_value >= max_value.")
741
+
742
+ # order in which to impute
743
+ # note this is probably too slow for large feature data (d > 100000)
744
+ # and a better way would be good.
745
+ # see: https://goo.gl/KyCNwj and subsequent comments
746
+ ordered_idx = self._get_ordered_idx(mask_missing_values)
747
+ self.n_features_with_missing_ = len(ordered_idx)
748
+
749
+ abs_corr_mat = self._get_abs_corr_mat(Xt)
750
+
751
+ n_samples, n_features = Xt.shape
752
+ if self.verbose > 0:
753
+ print("[IterativeImputer] Completing matrix with shape %s" % (X.shape,))
754
+ start_t = time()
755
+ if not self.sample_posterior:
756
+ Xt_previous = Xt.copy()
757
+ normalized_tol = self.tol * np.max(np.abs(X[~mask_missing_values]))
758
+ for self.n_iter_ in range(1, self.max_iter + 1):
759
+ if self.imputation_order == "random":
760
+ ordered_idx = self._get_ordered_idx(mask_missing_values)
761
+
762
+ for feat_idx in ordered_idx:
763
+ neighbor_feat_idx = self._get_neighbor_feat_idx(
764
+ n_features, feat_idx, abs_corr_mat
765
+ )
766
+ Xt, estimator = self._impute_one_feature(
767
+ Xt,
768
+ mask_missing_values,
769
+ feat_idx,
770
+ neighbor_feat_idx,
771
+ estimator=None,
772
+ fit_mode=True,
773
+ )
774
+ estimator_triplet = _ImputerTriplet(
775
+ feat_idx, neighbor_feat_idx, estimator
776
+ )
777
+ self.imputation_sequence_.append(estimator_triplet)
778
+
779
+ if self.verbose > 1:
780
+ print(
781
+ "[IterativeImputer] Ending imputation round "
782
+ "%d/%d, elapsed time %0.2f"
783
+ % (self.n_iter_, self.max_iter, time() - start_t)
784
+ )
785
+
786
+ if not self.sample_posterior:
787
+ inf_norm = np.linalg.norm(Xt - Xt_previous, ord=np.inf, axis=None)
788
+ if self.verbose > 0:
789
+ print(
790
+ "[IterativeImputer] Change: {}, scaled tolerance: {} ".format(
791
+ inf_norm, normalized_tol
792
+ )
793
+ )
794
+ if inf_norm < normalized_tol:
795
+ if self.verbose > 0:
796
+ print("[IterativeImputer] Early stopping criterion reached.")
797
+ break
798
+ Xt_previous = Xt.copy()
799
+ else:
800
+ if not self.sample_posterior:
801
+ warnings.warn(
802
+ "[IterativeImputer] Early stopping criterion not reached.",
803
+ ConvergenceWarning,
804
+ )
805
+ _assign_where(Xt, X, cond=~mask_missing_values)
806
+
807
+ return super()._concatenate_indicator(Xt, X_indicator)
808
+
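The early-stopping test in `fit_transform` compares the infinity norm of the change between rounds against `tol` scaled by the largest observed magnitude. In isolation, with hypothetical numbers, not part of the diff:

    import numpy as np

    tol, obs_max = 1e-3, 50.0          # obs_max = np.max(np.abs(X[~mask]))
    normalized_tol = tol * obs_max     # -> 0.05

    Xt_previous = np.array([[1.0, 2.0], [3.0, 4.0]])
    Xt = np.array([[1.01, 2.0], [3.0, 4.0]])

    # ord=np.inf with axis=None is the max absolute row sum: 0.01 here.
    inf_norm = np.linalg.norm(Xt - Xt_previous, ord=np.inf, axis=None)
    assert inf_norm < normalized_tol   # 0.01 < 0.05 -> stop early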
809
+ def transform(self, X):
810
+ """Impute all missing values in `X`.
811
+
812
+ Note that this is stochastic: if `random_state` is not fixed, results
813
+ will differ across repeated calls or for permuted input.
814
+
815
+ Parameters
816
+ ----------
817
+ X : array-like of shape (n_samples, n_features)
818
+ The input data to complete.
819
+
820
+ Returns
821
+ -------
822
+ Xt : array-like, shape (n_samples, n_features)
823
+ The imputed input data.
824
+ """
825
+ check_is_fitted(self)
826
+
827
+ X, Xt, mask_missing_values, complete_mask = self._initial_imputation(
828
+ X, in_fit=False
829
+ )
830
+
831
+ X_indicator = super()._transform_indicator(complete_mask)
832
+
833
+ if self.n_iter_ == 0 or np.all(mask_missing_values):
834
+ return super()._concatenate_indicator(Xt, X_indicator)
835
+
836
+ imputations_per_round = len(self.imputation_sequence_) // self.n_iter_
837
+ i_rnd = 0
838
+ if self.verbose > 0:
839
+ print("[IterativeImputer] Completing matrix with shape %s" % (X.shape,))
840
+ start_t = time()
841
+ for it, estimator_triplet in enumerate(self.imputation_sequence_):
842
+ Xt, _ = self._impute_one_feature(
843
+ Xt,
844
+ mask_missing_values,
845
+ estimator_triplet.feat_idx,
846
+ estimator_triplet.neighbor_feat_idx,
847
+ estimator=estimator_triplet.estimator,
848
+ fit_mode=False,
849
+ )
850
+ if not (it + 1) % imputations_per_round:
851
+ if self.verbose > 1:
852
+ print(
853
+ "[IterativeImputer] Ending imputation round "
854
+ "%d/%d, elapsed time %0.2f"
855
+ % (i_rnd + 1, self.n_iter_, time() - start_t)
856
+ )
857
+ i_rnd += 1
858
+
859
+ _assign_where(Xt, X, cond=~mask_missing_values)
860
+
861
+ return super()._concatenate_indicator(Xt, X_indicator)
862
+
863
+ def fit(self, X, y=None):
864
+ """Fit the imputer on `X` and return self.
865
+
866
+ Parameters
867
+ ----------
868
+ X : array-like, shape (n_samples, n_features)
869
+ Input data, where `n_samples` is the number of samples and
870
+ `n_features` is the number of features.
871
+
872
+ y : Ignored
873
+ Not used, present for API consistency by convention.
874
+
875
+ Returns
876
+ -------
877
+ self : object
878
+ Fitted estimator.
879
+ """
880
+ self.fit_transform(X)
881
+ return self
882
+
883
+ def get_feature_names_out(self, input_features=None):
884
+ """Get output feature names for transformation.
885
+
886
+ Parameters
887
+ ----------
888
+ input_features : array-like of str or None, default=None
889
+ Input features.
890
+
891
+ - If `input_features` is `None`, then `feature_names_in_` is
892
+ used as feature names in. If `feature_names_in_` is not defined,
893
+ then the following input feature names are generated:
894
+ `["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
895
+ - If `input_features` is an array-like, then `input_features` must
896
+ match `feature_names_in_` if `feature_names_in_` is defined.
897
+
898
+ Returns
899
+ -------
900
+ feature_names_out : ndarray of str objects
901
+ Transformed feature names.
902
+ """
903
+ check_is_fitted(self, "n_features_in_")
904
+ input_features = _check_feature_names_in(self, input_features)
905
+ names = self.initial_imputer_.get_feature_names_out(input_features)
906
+ return self._concatenate_indicator_feature_names_out(names, input_features)
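Before the next file, a hedged end-to-end sketch of driving the class defined above, using the scalability knobs called out in its Notes section; the estimator choice and data are illustrative only and not part of the diff:

    import numpy as np
    from sklearn.experimental import enable_iterative_imputer  # noqa: F401
    from sklearn.ensemble import ExtraTreesRegressor
    from sklearn.impute import IterativeImputer

    rng = np.random.RandomState(0)
    X = rng.randn(200, 30)
    X[rng.rand(*X.shape) < 0.1] = np.nan  # ~10% missing completely at random

    imputer = IterativeImputer(
        estimator=ExtraTreesRegressor(n_estimators=10, random_state=0),
        n_nearest_features=10,  # sample 10 predictors per imputed feature
        skip_complete=True,     # skip features complete at fit time
        max_iter=5,
        random_state=0,
    )
    X_imputed = imputer.fit_transform(X)
    assert not np.isnan(X_imputed).any()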
venv/lib/python3.10/site-packages/sklearn/impute/_knn.py ADDED
@@ -0,0 +1,401 @@
1
+ # Authors: Ashim Bhattarai <[email protected]>
2
+ # Thomas J Fan <[email protected]>
3
+ # License: BSD 3 clause
4
+
5
+ from numbers import Integral
6
+
7
+ import numpy as np
8
+
9
+ from ..base import _fit_context
10
+ from ..metrics import pairwise_distances_chunked
11
+ from ..metrics.pairwise import _NAN_METRICS
12
+ from ..neighbors._base import _get_weights
13
+ from ..utils import is_scalar_nan
14
+ from ..utils._mask import _get_mask
15
+ from ..utils._param_validation import Hidden, Interval, StrOptions
16
+ from ..utils.validation import FLOAT_DTYPES, _check_feature_names_in, check_is_fitted
17
+ from ._base import _BaseImputer
18
+
19
+
20
+ class KNNImputer(_BaseImputer):
21
+ """Imputation for completing missing values using k-Nearest Neighbors.
22
+
23
+ Each sample's missing values are imputed using the mean value from
24
+ `n_neighbors` nearest neighbors found in the training set. Two samples are
25
+ close if the features that neither is missing are close.
26
+
27
+ Read more in the :ref:`User Guide <knnimpute>`.
28
+
29
+ .. versionadded:: 0.22
30
+
31
+ Parameters
32
+ ----------
33
+ missing_values : int, float, str, np.nan or None, default=np.nan
34
+ The placeholder for the missing values. All occurrences of
35
+ `missing_values` will be imputed. For pandas' dataframes with
36
+ nullable integer dtypes with missing values, `missing_values`
37
+ should be set to np.nan, since `pd.NA` will be converted to np.nan.
38
+
39
+ n_neighbors : int, default=5
40
+ Number of neighboring samples to use for imputation.
41
+
42
+ weights : {'uniform', 'distance'} or callable, default='uniform'
43
+ Weight function used in prediction. Possible values:
44
+
45
+ - 'uniform' : uniform weights. All points in each neighborhood are
46
+ weighted equally.
47
+ - 'distance' : weight points by the inverse of their distance.
48
+ In this case, closer neighbors of a query point will have a
49
+ greater influence than neighbors which are further away.
50
+ - callable : a user-defined function which accepts an
51
+ array of distances, and returns an array of the same shape
52
+ containing the weights.
53
+
54
+ metric : {'nan_euclidean'} or callable, default='nan_euclidean'
55
+ Distance metric for searching neighbors. Possible values:
56
+
57
+ - 'nan_euclidean'
58
+ - callable : a user-defined function which conforms to the definition
59
+ of ``_pairwise_callable(X, Y, metric, **kwds)``. The function
60
+ accepts two arrays, X and Y, and a `missing_values` keyword in
61
+ `kwds` and returns a scalar distance value.
62
+
63
+ copy : bool, default=True
64
+ If True, a copy of X will be created. If False, imputation will
65
+ be done in-place whenever possible.
66
+
67
+ add_indicator : bool, default=False
68
+ If True, a :class:`MissingIndicator` transform will stack onto the
69
+ output of the imputer's transform. This allows a predictive estimator
70
+ to account for missingness despite imputation. If a feature has no
71
+ missing values at fit/train time, the feature won't appear on the
72
+ missing indicator even if there are missing values at transform/test
73
+ time.
74
+
75
+ keep_empty_features : bool, default=False
76
+ If True, features that consist exclusively of missing values when
77
+ `fit` is called are returned in results when `transform` is called.
78
+ The imputed value is always `0`.
79
+
80
+ .. versionadded:: 1.2
81
+
82
+ Attributes
83
+ ----------
84
+ indicator_ : :class:`~sklearn.impute.MissingIndicator`
85
+ Indicator used to add binary indicators for missing values.
86
+ ``None`` if add_indicator is False.
87
+
88
+ n_features_in_ : int
89
+ Number of features seen during :term:`fit`.
90
+
91
+ .. versionadded:: 0.24
92
+
93
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
94
+ Names of features seen during :term:`fit`. Defined only when `X`
95
+ has feature names that are all strings.
96
+
97
+ .. versionadded:: 1.0
98
+
99
+ See Also
100
+ --------
101
+ SimpleImputer : Univariate imputer for completing missing values
102
+ with simple strategies.
103
+ IterativeImputer : Multivariate imputer that estimates values to impute for
104
+ each feature with missing values from all the others.
105
+
106
+ References
107
+ ----------
108
+ * `Olga Troyanskaya, Michael Cantor, Gavin Sherlock, Pat Brown, Trevor
109
+ Hastie, Robert Tibshirani, David Botstein and Russ B. Altman, Missing
110
+ value estimation methods for DNA microarrays, BIOINFORMATICS Vol. 17
111
+ no. 6, 2001 Pages 520-525.
112
+ <https://academic.oup.com/bioinformatics/article/17/6/520/272365>`_
113
+
114
+ Examples
115
+ --------
116
+ >>> import numpy as np
117
+ >>> from sklearn.impute import KNNImputer
118
+ >>> X = [[1, 2, np.nan], [3, 4, 3], [np.nan, 6, 5], [8, 8, 7]]
119
+ >>> imputer = KNNImputer(n_neighbors=2)
120
+ >>> imputer.fit_transform(X)
121
+ array([[1. , 2. , 4. ],
122
+ [3. , 4. , 3. ],
123
+ [5.5, 6. , 5. ],
124
+ [8. , 8. , 7. ]])
125
+
126
+ For a more detailed example see
127
+ :ref:`sphx_glr_auto_examples_impute_plot_missing_values.py`.
128
+ """
129
+
130
+ _parameter_constraints: dict = {
131
+ **_BaseImputer._parameter_constraints,
132
+ "n_neighbors": [Interval(Integral, 1, None, closed="left")],
133
+ "weights": [StrOptions({"uniform", "distance"}), callable, Hidden(None)],
134
+ "metric": [StrOptions(set(_NAN_METRICS)), callable],
135
+ "copy": ["boolean"],
136
+ }
137
+
138
+ def __init__(
139
+ self,
140
+ *,
141
+ missing_values=np.nan,
142
+ n_neighbors=5,
143
+ weights="uniform",
144
+ metric="nan_euclidean",
145
+ copy=True,
146
+ add_indicator=False,
147
+ keep_empty_features=False,
148
+ ):
149
+ super().__init__(
150
+ missing_values=missing_values,
151
+ add_indicator=add_indicator,
152
+ keep_empty_features=keep_empty_features,
153
+ )
154
+ self.n_neighbors = n_neighbors
155
+ self.weights = weights
156
+ self.metric = metric
157
+ self.copy = copy
158
+
159
+ def _calc_impute(self, dist_pot_donors, n_neighbors, fit_X_col, mask_fit_X_col):
160
+ """Helper function to impute a single column.
161
+
162
+ Parameters
163
+ ----------
164
+ dist_pot_donors : ndarray of shape (n_receivers, n_potential_donors)
165
+ Distance matrix between the receivers and potential donors from
166
+ training set. There must be at least one non-nan distance between
167
+ a receiver and a potential donor.
168
+
169
+ n_neighbors : int
170
+ Number of neighbors to consider.
171
+
172
+ fit_X_col : ndarray of shape (n_potential_donors,)
173
+ Column of potential donors from training set.
174
+
175
+ mask_fit_X_col : ndarray of shape (n_potential_donors,)
176
+ Missing mask for fit_X_col.
177
+
178
+ Returns
179
+ -------
180
+ imputed_values: ndarray of shape (n_receivers,)
181
+ Imputed values for receiver.
182
+ """
183
+ # Get donors
184
+ donors_idx = np.argpartition(dist_pot_donors, n_neighbors - 1, axis=1)[
185
+ :, :n_neighbors
186
+ ]
187
+
188
+ # Get weight matrix from distance matrix
189
+ donors_dist = dist_pot_donors[
190
+ np.arange(donors_idx.shape[0])[:, None], donors_idx
191
+ ]
192
+
193
+ weight_matrix = _get_weights(donors_dist, self.weights)
194
+
195
+ # fill nans with zeros
196
+ if weight_matrix is not None:
197
+ weight_matrix[np.isnan(weight_matrix)] = 0.0
198
+
199
+ # Retrieve donor values and calculate kNN average
200
+ donors = fit_X_col.take(donors_idx)
201
+ donors_mask = mask_fit_X_col.take(donors_idx)
202
+ donors = np.ma.array(donors, mask=donors_mask)
203
+
204
+ return np.ma.average(donors, axis=1, weights=weight_matrix).data
205
+
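The masked average above is what collapses donor rows into one imputed value, ignoring donors that are themselves missing in the target column; a standalone sketch with made-up values, not part of the diff:

    import numpy as np

    donors = np.array([3.0, 5.0, np.nan])  # donor values for one column
    weights = np.array([1.0, 0.5, 0.25])   # e.g. inverse-distance weights

    masked = np.ma.array(donors, mask=np.isnan(donors))
    value = np.ma.average(masked, weights=weights)
    # (3.0*1.0 + 5.0*0.5) / (1.0 + 0.5) = 3.666...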
206
+ @_fit_context(prefer_skip_nested_validation=True)
207
+ def fit(self, X, y=None):
208
+ """Fit the imputer on X.
209
+
210
+ Parameters
211
+ ----------
212
+ X : array-like shape of (n_samples, n_features)
213
+ Input data, where `n_samples` is the number of samples and
214
+ `n_features` is the number of features.
215
+
216
+ y : Ignored
217
+ Not used, present here for API consistency by convention.
218
+
219
+ Returns
220
+ -------
221
+ self : object
222
+ The fitted `KNNImputer` class instance.
223
+ """
224
+ # Check data integrity and calling arguments
225
+ if not is_scalar_nan(self.missing_values):
226
+ force_all_finite = True
227
+ else:
228
+ force_all_finite = "allow-nan"
229
+
230
+ X = self._validate_data(
231
+ X,
232
+ accept_sparse=False,
233
+ dtype=FLOAT_DTYPES,
234
+ force_all_finite=force_all_finite,
235
+ copy=self.copy,
236
+ )
237
+
238
+ self._fit_X = X
239
+ self._mask_fit_X = _get_mask(self._fit_X, self.missing_values)
240
+ self._valid_mask = ~np.all(self._mask_fit_X, axis=0)
241
+
242
+ super()._fit_indicator(self._mask_fit_X)
243
+
244
+ return self
245
+
246
+ def transform(self, X):
247
+ """Impute all missing values in X.
248
+
249
+ Parameters
250
+ ----------
251
+ X : array-like of shape (n_samples, n_features)
252
+ The input data to complete.
253
+
254
+ Returns
255
+ -------
256
+ X : array-like of shape (n_samples, n_output_features)
257
+ The imputed dataset. `n_output_features` is the number of features
258
+ that is not always missing during `fit`.
259
+ """
260
+
261
+ check_is_fitted(self)
262
+ if not is_scalar_nan(self.missing_values):
263
+ force_all_finite = True
264
+ else:
265
+ force_all_finite = "allow-nan"
266
+ X = self._validate_data(
267
+ X,
268
+ accept_sparse=False,
269
+ dtype=FLOAT_DTYPES,
270
+ force_all_finite=force_all_finite,
271
+ copy=self.copy,
272
+ reset=False,
273
+ )
274
+
275
+ mask = _get_mask(X, self.missing_values)
276
+ mask_fit_X = self._mask_fit_X
277
+ valid_mask = self._valid_mask
278
+
279
+ X_indicator = super()._transform_indicator(mask)
280
+
281
+ # Removes columns where the training data is all nan
282
+ if not np.any(mask):
283
+ # No missing values in X
284
+ if self.keep_empty_features:
285
+ Xc = X
286
+ Xc[:, ~valid_mask] = 0
287
+ else:
288
+ Xc = X[:, valid_mask]
289
+
290
+ # Even if there are no missing values in X, we still concatenate Xc
291
+ # with the missing value indicator matrix, X_indicator.
292
+ # This is to ensure that the output maintains consistency in terms
293
+ # of columns, regardless of whether missing values exist in X or not.
294
+ return super()._concatenate_indicator(Xc, X_indicator)
295
+
296
+ row_missing_idx = np.flatnonzero(mask.any(axis=1))
297
+
298
+ non_missing_fix_X = np.logical_not(mask_fit_X)
299
+
300
+ # Maps from indices from X to indices in dist matrix
301
+ dist_idx_map = np.zeros(X.shape[0], dtype=int)
302
+ dist_idx_map[row_missing_idx] = np.arange(row_missing_idx.shape[0])
303
+
304
+ def process_chunk(dist_chunk, start):
305
+ row_missing_chunk = row_missing_idx[start : start + len(dist_chunk)]
306
+
307
+ # Find and impute missing by column
308
+ for col in range(X.shape[1]):
309
+ if not valid_mask[col]:
310
+ # column was all missing during training
311
+ continue
312
+
313
+ col_mask = mask[row_missing_chunk, col]
314
+ if not np.any(col_mask):
315
+ # column has no missing values
316
+ continue
317
+
318
+ (potential_donors_idx,) = np.nonzero(non_missing_fix_X[:, col])
319
+
320
+ # receivers_idx are indices in X
321
+ receivers_idx = row_missing_chunk[np.flatnonzero(col_mask)]
322
+
323
+ # distances for samples that needed imputation for column
324
+ dist_subset = dist_chunk[dist_idx_map[receivers_idx] - start][
325
+ :, potential_donors_idx
326
+ ]
327
+
328
+ # receivers with all nan distances impute with mean
329
+ all_nan_dist_mask = np.isnan(dist_subset).all(axis=1)
330
+ all_nan_receivers_idx = receivers_idx[all_nan_dist_mask]
331
+
332
+ if all_nan_receivers_idx.size:
333
+ col_mean = np.ma.array(
334
+ self._fit_X[:, col], mask=mask_fit_X[:, col]
335
+ ).mean()
336
+ X[all_nan_receivers_idx, col] = col_mean
337
+
338
+ if len(all_nan_receivers_idx) == len(receivers_idx):
339
+ # all receivers imputed with mean
340
+ continue
341
+
342
+ # receivers with at least one defined distance
343
+ receivers_idx = receivers_idx[~all_nan_dist_mask]
344
+ dist_subset = dist_chunk[dist_idx_map[receivers_idx] - start][
345
+ :, potential_donors_idx
346
+ ]
347
+
348
+ n_neighbors = min(self.n_neighbors, len(potential_donors_idx))
349
+ value = self._calc_impute(
350
+ dist_subset,
351
+ n_neighbors,
352
+ self._fit_X[potential_donors_idx, col],
353
+ mask_fit_X[potential_donors_idx, col],
354
+ )
355
+ X[receivers_idx, col] = value
356
+
357
+ # process in fixed-memory chunks
358
+ gen = pairwise_distances_chunked(
359
+ X[row_missing_idx, :],
360
+ self._fit_X,
361
+ metric=self.metric,
362
+ missing_values=self.missing_values,
363
+ force_all_finite=force_all_finite,
364
+ reduce_func=process_chunk,
365
+ )
366
+ for chunk in gen:
367
+ # process_chunk modifies X in place. No return value.
368
+ pass
369
+
370
+ if self.keep_empty_features:
371
+ Xc = X
372
+ Xc[:, ~valid_mask] = 0
373
+ else:
374
+ Xc = X[:, valid_mask]
375
+
376
+ return super()._concatenate_indicator(Xc, X_indicator)
377
+
378
+ def get_feature_names_out(self, input_features=None):
379
+ """Get output feature names for transformation.
380
+
381
+ Parameters
382
+ ----------
383
+ input_features : array-like of str or None, default=None
384
+ Input features.
385
+
386
+ - If `input_features` is `None`, then `feature_names_in_` is
387
+ used as feature names in. If `feature_names_in_` is not defined,
388
+ then the following input feature names are generated:
389
+ `["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
390
+ - If `input_features` is an array-like, then `input_features` must
391
+ match `feature_names_in_` if `feature_names_in_` is defined.
392
+
393
+ Returns
394
+ -------
395
+ feature_names_out : ndarray of str objects
396
+ Transformed feature names.
397
+ """
398
+ check_is_fitted(self, "n_features_in_")
399
+ input_features = _check_feature_names_in(self, input_features)
400
+ names = input_features[self._valid_mask]
401
+ return self._concatenate_indicator_feature_names_out(names, input_features)
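A hedged usage sketch for the class above, not part of the diff. `nan_euclidean_distances` is the public helper behind the default metric: coordinates missing in either row are skipped and the result is rescaled by the fraction of usable coordinates:

    import numpy as np
    from sklearn.impute import KNNImputer
    from sklearn.metrics.pairwise import nan_euclidean_distances

    X = np.array([[1.0, 2.0, np.nan],
                  [3.0, 4.0, 3.0],
                  [np.nan, 6.0, 5.0],
                  [8.0, 8.0, 7.0]])

    D = nan_euclidean_distances(X, X)  # distances used to rank donors

    imputer = KNNImputer(n_neighbors=2, weights="distance")
    X_imputed = imputer.fit_transform(X)
    assert not np.isnan(X_imputed).any()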
venv/lib/python3.10/site-packages/sklearn/impute/tests/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/sklearn/impute/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (188 Bytes)
venv/lib/python3.10/site-packages/sklearn/impute/tests/__pycache__/test_base.cpython-310.pyc ADDED
Binary file (4.46 kB)
venv/lib/python3.10/site-packages/sklearn/impute/tests/__pycache__/test_common.cpython-310.pyc ADDED
Binary file (5.78 kB)
venv/lib/python3.10/site-packages/sklearn/impute/tests/__pycache__/test_impute.cpython-310.pyc ADDED
Binary file (43.1 kB)
venv/lib/python3.10/site-packages/sklearn/impute/tests/__pycache__/test_knn.cpython-310.pyc ADDED
Binary file (12 kB)
venv/lib/python3.10/site-packages/sklearn/impute/tests/test_base.py ADDED
@@ -0,0 +1,107 @@
1
+ import numpy as np
2
+ import pytest
3
+
4
+ from sklearn.impute._base import _BaseImputer
5
+ from sklearn.impute._iterative import _assign_where
6
+ from sklearn.utils._mask import _get_mask
7
+ from sklearn.utils._testing import _convert_container, assert_allclose
8
+
9
+
10
+ @pytest.fixture
11
+ def data():
12
+ X = np.random.randn(10, 2)
13
+ X[::2] = np.nan
14
+ return X
15
+
16
+
17
+ class NoFitIndicatorImputer(_BaseImputer):
18
+ def fit(self, X, y=None):
19
+ return self
20
+
21
+ def transform(self, X, y=None):
22
+ return self._concatenate_indicator(X, self._transform_indicator(X))
23
+
24
+
25
+ class NoTransformIndicatorImputer(_BaseImputer):
26
+ def fit(self, X, y=None):
27
+ mask = _get_mask(X, value_to_mask=np.nan)
28
+ super()._fit_indicator(mask)
29
+ return self
30
+
31
+ def transform(self, X, y=None):
32
+ return self._concatenate_indicator(X, None)
33
+
34
+
35
+ class NoPrecomputedMaskFit(_BaseImputer):
36
+ def fit(self, X, y=None):
37
+ self._fit_indicator(X)
38
+ return self
39
+
40
+ def transform(self, X):
41
+ return self._concatenate_indicator(X, self._transform_indicator(X))
42
+
43
+
44
+ class NoPrecomputedMaskTransform(_BaseImputer):
45
+ def fit(self, X, y=None):
46
+ mask = _get_mask(X, value_to_mask=np.nan)
47
+ self._fit_indicator(mask)
48
+ return self
49
+
50
+ def transform(self, X):
51
+ return self._concatenate_indicator(X, self._transform_indicator(X))
52
+
53
+
54
+ def test_base_imputer_not_fit(data):
55
+ imputer = NoFitIndicatorImputer(add_indicator=True)
56
+ err_msg = "Make sure to call _fit_indicator before _transform_indicator"
57
+ with pytest.raises(ValueError, match=err_msg):
58
+ imputer.fit(data).transform(data)
59
+ with pytest.raises(ValueError, match=err_msg):
60
+ imputer.fit_transform(data)
61
+
62
+
63
+ def test_base_imputer_not_transform(data):
64
+ imputer = NoTransformIndicatorImputer(add_indicator=True)
65
+ err_msg = (
66
+ "Call _fit_indicator and _transform_indicator in the imputer implementation"
67
+ )
68
+ with pytest.raises(ValueError, match=err_msg):
69
+ imputer.fit(data).transform(data)
70
+ with pytest.raises(ValueError, match=err_msg):
71
+ imputer.fit_transform(data)
72
+
73
+
74
+ def test_base_no_precomputed_mask_fit(data):
75
+ imputer = NoPrecomputedMaskFit(add_indicator=True)
76
+ err_msg = "precomputed is True but the input data is not a mask"
77
+ with pytest.raises(ValueError, match=err_msg):
78
+ imputer.fit(data)
79
+ with pytest.raises(ValueError, match=err_msg):
80
+ imputer.fit_transform(data)
81
+
82
+
83
+ def test_base_no_precomputed_mask_transform(data):
84
+ imputer = NoPrecomputedMaskTransform(add_indicator=True)
85
+ err_msg = "precomputed is True but the input data is not a mask"
86
+ imputer.fit(data)
87
+ with pytest.raises(ValueError, match=err_msg):
88
+ imputer.transform(data)
89
+ with pytest.raises(ValueError, match=err_msg):
90
+ imputer.fit_transform(data)
91
+
92
+
93
+ @pytest.mark.parametrize("X1_type", ["array", "dataframe"])
94
+ def test_assign_where(X1_type):
95
+ """Check the behaviour of the private helpers `_assign_where`."""
96
+ rng = np.random.RandomState(0)
97
+
98
+ n_samples, n_features = 10, 5
99
+ X1 = _convert_container(rng.randn(n_samples, n_features), constructor_name=X1_type)
100
+ X2 = rng.randn(n_samples, n_features)
101
+ mask = rng.randint(0, 2, size=(n_samples, n_features)).astype(bool)
102
+
103
+ _assign_where(X1, X2, mask)
104
+
105
+ if X1_type == "dataframe":
106
+ X1 = X1.to_numpy()
107
+ assert_allclose(X1[mask], X2[mask])
venv/lib/python3.10/site-packages/sklearn/impute/tests/test_common.py ADDED
@@ -0,0 +1,220 @@
1
+ import numpy as np
2
+ import pytest
3
+
4
+ from sklearn.experimental import enable_iterative_imputer # noqa
5
+ from sklearn.impute import IterativeImputer, KNNImputer, SimpleImputer
6
+ from sklearn.utils._testing import (
7
+ assert_allclose,
8
+ assert_allclose_dense_sparse,
9
+ assert_array_equal,
10
+ )
11
+ from sklearn.utils.fixes import CSR_CONTAINERS
12
+
13
+
14
+ def imputers():
15
+ return [IterativeImputer(tol=0.1), KNNImputer(), SimpleImputer()]
16
+
17
+
18
+ def sparse_imputers():
19
+ return [SimpleImputer()]
20
+
21
+
22
+ # ConvergenceWarning will be raised by the IterativeImputer
23
+ @pytest.mark.filterwarnings("ignore::sklearn.exceptions.ConvergenceWarning")
24
+ @pytest.mark.parametrize("imputer", imputers(), ids=lambda x: x.__class__.__name__)
25
+ def test_imputation_missing_value_in_test_array(imputer):
26
+ # [Non Regression Test for issue #13968] Missing value in test set should
27
+ # not raise an error and should return a finite dataset
28
+ train = [[1], [2]]
29
+ test = [[3], [np.nan]]
30
+ imputer.set_params(add_indicator=True)
31
+ imputer.fit(train).transform(test)
32
+
33
+
34
+ # ConvergenceWarning will be raised by the IterativeImputer
35
+ @pytest.mark.filterwarnings("ignore::sklearn.exceptions.ConvergenceWarning")
36
+ @pytest.mark.parametrize("marker", [np.nan, -1, 0])
37
+ @pytest.mark.parametrize("imputer", imputers(), ids=lambda x: x.__class__.__name__)
38
+ def test_imputers_add_indicator(marker, imputer):
39
+ X = np.array(
40
+ [
41
+ [marker, 1, 5, marker, 1],
42
+ [2, marker, 1, marker, 2],
43
+ [6, 3, marker, marker, 3],
44
+ [1, 2, 9, marker, 4],
45
+ ]
46
+ )
47
+ X_true_indicator = np.array(
48
+ [
49
+ [1.0, 0.0, 0.0, 1.0],
50
+ [0.0, 1.0, 0.0, 1.0],
51
+ [0.0, 0.0, 1.0, 1.0],
52
+ [0.0, 0.0, 0.0, 1.0],
53
+ ]
54
+ )
55
+ imputer.set_params(missing_values=marker, add_indicator=True)
56
+
57
+ X_trans = imputer.fit_transform(X)
58
+ assert_allclose(X_trans[:, -4:], X_true_indicator)
59
+ assert_array_equal(imputer.indicator_.features_, np.array([0, 1, 2, 3]))
60
+
61
+ imputer.set_params(add_indicator=False)
62
+ X_trans_no_indicator = imputer.fit_transform(X)
63
+ assert_allclose(X_trans[:, :-4], X_trans_no_indicator)
64
+
65
+
66
+ # ConvergenceWarning will be raised by the IterativeImputer
67
+ @pytest.mark.filterwarnings("ignore::sklearn.exceptions.ConvergenceWarning")
68
+ @pytest.mark.parametrize("marker", [np.nan, -1])
69
+ @pytest.mark.parametrize(
70
+ "imputer", sparse_imputers(), ids=lambda x: x.__class__.__name__
71
+ )
72
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
73
+ def test_imputers_add_indicator_sparse(imputer, marker, csr_container):
74
+ X = csr_container(
75
+ [
76
+ [marker, 1, 5, marker, 1],
77
+ [2, marker, 1, marker, 2],
78
+ [6, 3, marker, marker, 3],
79
+ [1, 2, 9, marker, 4],
80
+ ]
81
+ )
82
+ X_true_indicator = csr_container(
83
+ [
84
+ [1.0, 0.0, 0.0, 1.0],
85
+ [0.0, 1.0, 0.0, 1.0],
86
+ [0.0, 0.0, 1.0, 1.0],
87
+ [0.0, 0.0, 0.0, 1.0],
88
+ ]
89
+ )
90
+ imputer.set_params(missing_values=marker, add_indicator=True)
91
+
92
+ X_trans = imputer.fit_transform(X)
93
+ assert_allclose_dense_sparse(X_trans[:, -4:], X_true_indicator)
94
+ assert_array_equal(imputer.indicator_.features_, np.array([0, 1, 2, 3]))
95
+
96
+ imputer.set_params(add_indicator=False)
97
+ X_trans_no_indicator = imputer.fit_transform(X)
98
+ assert_allclose_dense_sparse(X_trans[:, :-4], X_trans_no_indicator)
99
+
100
+
101
+ # ConvergenceWarning will be raised by the IterativeImputer
102
+ @pytest.mark.filterwarnings("ignore::sklearn.exceptions.ConvergenceWarning")
103
+ @pytest.mark.parametrize("imputer", imputers(), ids=lambda x: x.__class__.__name__)
104
+ @pytest.mark.parametrize("add_indicator", [True, False])
105
+ def test_imputers_pandas_na_integer_array_support(imputer, add_indicator):
106
+ # Test pandas IntegerArray with pd.NA
107
+ pd = pytest.importorskip("pandas")
108
+ marker = np.nan
109
+ imputer = imputer.set_params(add_indicator=add_indicator, missing_values=marker)
110
+
111
+ X = np.array(
112
+ [
113
+ [marker, 1, 5, marker, 1],
114
+ [2, marker, 1, marker, 2],
115
+ [6, 3, marker, marker, 3],
116
+ [1, 2, 9, marker, 4],
117
+ ]
118
+ )
119
+ # fit on numpy array
120
+ X_trans_expected = imputer.fit_transform(X)
121
+
122
+ # Creates dataframe with IntegerArrays with pd.NA
123
+ X_df = pd.DataFrame(X, dtype="Int16", columns=["a", "b", "c", "d", "e"])
124
+
125
+ # fit on pandas dataframe with IntegerArrays
126
+ X_trans = imputer.fit_transform(X_df)
127
+
128
+ assert_allclose(X_trans_expected, X_trans)
129
+
130
+
131
+ @pytest.mark.parametrize("imputer", imputers(), ids=lambda x: x.__class__.__name__)
132
+ @pytest.mark.parametrize("add_indicator", [True, False])
133
+ def test_imputers_feature_names_out_pandas(imputer, add_indicator):
134
+ """Check feature names out for imputers."""
135
+ pd = pytest.importorskip("pandas")
136
+ marker = np.nan
137
+ imputer = imputer.set_params(add_indicator=add_indicator, missing_values=marker)
138
+
139
+ X = np.array(
140
+ [
141
+ [marker, 1, 5, 3, marker, 1],
142
+ [2, marker, 1, 4, marker, 2],
143
+ [6, 3, 7, marker, marker, 3],
144
+ [1, 2, 9, 8, marker, 4],
145
+ ]
146
+ )
147
+ X_df = pd.DataFrame(X, columns=["a", "b", "c", "d", "e", "f"])
148
+ imputer.fit(X_df)
149
+
150
+ names = imputer.get_feature_names_out()
151
+
152
+ if add_indicator:
153
+ expected_names = [
154
+ "a",
155
+ "b",
156
+ "c",
157
+ "d",
158
+ "f",
159
+ "missingindicator_a",
160
+ "missingindicator_b",
161
+ "missingindicator_d",
162
+ "missingindicator_e",
163
+ ]
164
+ assert_array_equal(expected_names, names)
165
+ else:
166
+ expected_names = ["a", "b", "c", "d", "f"]
167
+ assert_array_equal(expected_names, names)
168
+
169
+
170
+ @pytest.mark.parametrize("keep_empty_features", [True, False])
171
+ @pytest.mark.parametrize("imputer", imputers(), ids=lambda x: x.__class__.__name__)
172
+ def test_keep_empty_features(imputer, keep_empty_features):
173
+ """Check that the imputer keeps features with only missing values."""
174
+ X = np.array([[np.nan, 1], [np.nan, 2], [np.nan, 3]])
175
+ imputer = imputer.set_params(
176
+ add_indicator=False, keep_empty_features=keep_empty_features
177
+ )
178
+
179
+ for method in ["fit_transform", "transform"]:
180
+ X_imputed = getattr(imputer, method)(X)
181
+ if keep_empty_features:
182
+ assert X_imputed.shape == X.shape
183
+ else:
184
+ assert X_imputed.shape == (X.shape[0], X.shape[1] - 1)
185
+
186
+
187
+ @pytest.mark.parametrize("imputer", imputers(), ids=lambda x: x.__class__.__name__)
188
+ @pytest.mark.parametrize("missing_value_test", [np.nan, 1])
189
+ def test_imputation_adds_missing_indicator_if_add_indicator_is_true(
190
+ imputer, missing_value_test
191
+ ):
192
+ """Check that missing indicator always exists when add_indicator=True.
193
+
194
+ Non-regression test for gh-26590.
195
+ """
196
+ X_train = np.array([[0, np.nan], [1, 2]])
197
+
198
+ # Test data where missing_value_test variable can be set to np.nan or 1.
199
+ X_test = np.array([[0, missing_value_test], [1, 2]])
200
+
201
+ imputer.set_params(add_indicator=True)
202
+ imputer.fit(X_train)
203
+
204
+ X_test_imputed_with_indicator = imputer.transform(X_test)
205
+ assert X_test_imputed_with_indicator.shape == (2, 3)
206
+
207
+ imputer.set_params(add_indicator=False)
208
+ imputer.fit(X_train)
209
+ X_test_imputed_without_indicator = imputer.transform(X_test)
210
+ assert X_test_imputed_without_indicator.shape == (2, 2)
211
+
212
+ assert_allclose(
213
+ X_test_imputed_with_indicator[:, :-1], X_test_imputed_without_indicator
214
+ )
215
+ if np.isnan(missing_value_test):
216
+ expected_missing_indicator = [1, 0]
217
+ else:
218
+ expected_missing_indicator = [0, 0]
219
+
220
+ assert_allclose(X_test_imputed_with_indicator[:, -1], expected_missing_indicator)
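The `add_indicator` contract these tests exercise boils down to column stacking: imputed features first, then one binary column per feature that had missing values at fit time. A minimal sketch with toy data, not part of the diff:

    import numpy as np
    from sklearn.impute import SimpleImputer

    X_train = np.array([[0.0, np.nan], [1.0, 2.0]])
    X_test = np.array([[0.0, np.nan], [1.0, 2.0]])

    imp = SimpleImputer(add_indicator=True).fit(X_train)
    Xt = imp.transform(X_test)
    assert Xt.shape == (2, 3)  # 2 imputed columns + 1 indicator column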
venv/lib/python3.10/site-packages/sklearn/impute/tests/test_impute.py ADDED
@@ -0,0 +1,1754 @@
+ import io
+ import re
+ import warnings
+ from itertools import product
+
+ import numpy as np
+ import pytest
+ from scipy import sparse
+ from scipy.stats import kstest
+
+ from sklearn import tree
+ from sklearn.datasets import load_diabetes
+ from sklearn.dummy import DummyRegressor
+ from sklearn.exceptions import ConvergenceWarning
+
+ # make IterativeImputer available
+ from sklearn.experimental import enable_iterative_imputer  # noqa
+ from sklearn.impute import IterativeImputer, KNNImputer, MissingIndicator, SimpleImputer
+ from sklearn.impute._base import _most_frequent
+ from sklearn.linear_model import ARDRegression, BayesianRidge, RidgeCV
+ from sklearn.model_selection import GridSearchCV
+ from sklearn.pipeline import Pipeline, make_union
+ from sklearn.random_projection import _sparse_random_matrix
+ from sklearn.utils._testing import (
+     _convert_container,
+     assert_allclose,
+     assert_allclose_dense_sparse,
+     assert_array_almost_equal,
+     assert_array_equal,
+ )
+ from sklearn.utils.fixes import (
+     BSR_CONTAINERS,
+     COO_CONTAINERS,
+     CSC_CONTAINERS,
+     CSR_CONTAINERS,
+     LIL_CONTAINERS,
+ )
+
+
+ def _assert_array_equal_and_same_dtype(x, y):
+     assert_array_equal(x, y)
+     assert x.dtype == y.dtype
+
+
+ def _assert_allclose_and_same_dtype(x, y):
+     assert_allclose(x, y)
+     assert x.dtype == y.dtype
+
+
+ def _check_statistics(
+     X, X_true, strategy, statistics, missing_values, sparse_container
+ ):
+     """Utility function for testing imputation for a given strategy.
+
+     Test with dense and sparse arrays
+
+     Check that:
+         - the statistics (mean, median, mode) are correct
+         - the missing values are imputed correctly"""
+
+     err_msg = "Parameters: strategy = %s, missing_values = %s, sparse = {0}" % (
+         strategy,
+         missing_values,
+     )
+
+     assert_ae = assert_array_equal
+
+     if X.dtype.kind == "f" or X_true.dtype.kind == "f":
+         assert_ae = assert_array_almost_equal
+
+     # Normal matrix
+     imputer = SimpleImputer(missing_values=missing_values, strategy=strategy)
+     X_trans = imputer.fit(X).transform(X.copy())
+     assert_ae(imputer.statistics_, statistics, err_msg=err_msg.format(False))
+     assert_ae(X_trans, X_true, err_msg=err_msg.format(False))
+
+     # Sparse matrix
+     imputer = SimpleImputer(missing_values=missing_values, strategy=strategy)
+     imputer.fit(sparse_container(X))
+     X_trans = imputer.transform(sparse_container(X.copy()))
+
+     if sparse.issparse(X_trans):
+         X_trans = X_trans.toarray()
+
+     assert_ae(imputer.statistics_, statistics, err_msg=err_msg.format(True))
+     assert_ae(X_trans, X_true, err_msg=err_msg.format(True))
+
+
+ @pytest.mark.parametrize("strategy", ["mean", "median", "most_frequent", "constant"])
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
+ def test_imputation_shape(strategy, csr_container):
+     # Verify the shapes of the imputed matrix for different strategies.
+     X = np.random.randn(10, 2)
+     X[::2] = np.nan
+
+     imputer = SimpleImputer(strategy=strategy)
+     X_imputed = imputer.fit_transform(csr_container(X))
+     assert X_imputed.shape == (10, 2)
+     X_imputed = imputer.fit_transform(X)
+     assert X_imputed.shape == (10, 2)
+
+     iterative_imputer = IterativeImputer(initial_strategy=strategy)
+     X_imputed = iterative_imputer.fit_transform(X)
+     assert X_imputed.shape == (10, 2)
+
+
+ @pytest.mark.parametrize("strategy", ["mean", "median", "most_frequent"])
+ def test_imputation_deletion_warning(strategy):
+     X = np.ones((3, 5))
+     X[:, 0] = np.nan
+     imputer = SimpleImputer(strategy=strategy).fit(X)
+
+     with pytest.warns(UserWarning, match="Skipping"):
+         imputer.transform(X)
+
+
+ @pytest.mark.parametrize("strategy", ["mean", "median", "most_frequent"])
+ def test_imputation_deletion_warning_feature_names(strategy):
+     pd = pytest.importorskip("pandas")
+
+     missing_values = np.nan
+     feature_names = np.array(["a", "b", "c", "d"], dtype=object)
+     X = pd.DataFrame(
+         [
+             [missing_values, missing_values, 1, missing_values],
+             [4, missing_values, 2, 10],
+         ],
+         columns=feature_names,
+     )
+
+     imputer = SimpleImputer(strategy=strategy).fit(X)
+
+     # check SimpleImputer returning feature name attribute correctly
+     assert_array_equal(imputer.feature_names_in_, feature_names)
+
+     # ensure that skipped feature warning includes feature name
+     with pytest.warns(
+         UserWarning, match=r"Skipping features without any observed values: \['b'\]"
+     ):
+         imputer.transform(X)
+
+
+ @pytest.mark.parametrize("strategy", ["mean", "median", "most_frequent", "constant"])
+ @pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
+ def test_imputation_error_sparse_0(strategy, csc_container):
+     # check that errors are raised when missing_values = 0 and input is sparse
+     X = np.ones((3, 5))
+     X[0] = 0
+     X = csc_container(X)
+
+     imputer = SimpleImputer(strategy=strategy, missing_values=0)
+     with pytest.raises(ValueError, match="Provide a dense array"):
+         imputer.fit(X)
+
+     imputer.fit(X.toarray())
+     with pytest.raises(ValueError, match="Provide a dense array"):
+         imputer.transform(X)
+
+
+ def safe_median(arr, *args, **kwargs):
+     # np.median([]) raises a TypeError for numpy >= 1.10.1
+     length = arr.size if hasattr(arr, "size") else len(arr)
+     return np.nan if length == 0 else np.median(arr, *args, **kwargs)
+
+
+ def safe_mean(arr, *args, **kwargs):
+     # np.mean([]) raises a RuntimeWarning for numpy >= 1.10.1
+     length = arr.size if hasattr(arr, "size") else len(arr)
+     return np.nan if length == 0 else np.mean(arr, *args, **kwargs)
+
+
+ @pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
+ def test_imputation_mean_median(csc_container):
+     # Test imputation using the mean and median strategies, when
+     # missing_values != 0.
+     rng = np.random.RandomState(0)
+
+     dim = 10
+     dec = 10
+     shape = (dim * dim, dim + dec)
+
+     zeros = np.zeros(shape[0])
+     values = np.arange(1, shape[0] + 1)
+     values[4::2] = -values[4::2]
+
+     tests = [
+         ("mean", np.nan, lambda z, v, p: safe_mean(np.hstack((z, v)))),
+         ("median", np.nan, lambda z, v, p: safe_median(np.hstack((z, v)))),
+     ]
+
+     for strategy, test_missing_values, true_value_fun in tests:
+         X = np.empty(shape)
+         X_true = np.empty(shape)
+         true_statistics = np.empty(shape[1])
+
+         # Create a matrix X with columns
+         #    - with only zeros,
+         #    - with only missing values
+         #    - with zeros, missing values and values
+         # And a matrix X_true containing all true values
+         for j in range(shape[1]):
+             nb_zeros = (j - dec + 1 > 0) * (j - dec + 1) * (j - dec + 1)
+             nb_missing_values = max(shape[0] + dec * dec - (j + dec) * (j + dec), 0)
+             nb_values = shape[0] - nb_zeros - nb_missing_values
+
+             z = zeros[:nb_zeros]
+             p = np.repeat(test_missing_values, nb_missing_values)
+             v = values[rng.permutation(len(values))[:nb_values]]
+
+             true_statistics[j] = true_value_fun(z, v, p)
+
+             # Create the columns
+             X[:, j] = np.hstack((v, z, p))
+
+             if 0 == test_missing_values:
+                 # XXX unreached code as of v0.22
+                 X_true[:, j] = np.hstack(
+                     (v, np.repeat(true_statistics[j], nb_missing_values + nb_zeros))
+                 )
+             else:
+                 X_true[:, j] = np.hstack(
+                     (v, z, np.repeat(true_statistics[j], nb_missing_values))
+                 )
+
+             # Shuffle them the same way
+             np.random.RandomState(j).shuffle(X[:, j])
+             np.random.RandomState(j).shuffle(X_true[:, j])
+
+         # Mean doesn't support columns containing NaNs, median does
+         if strategy == "median":
+             cols_to_keep = ~np.isnan(X_true).any(axis=0)
+         else:
+             cols_to_keep = ~np.isnan(X_true).all(axis=0)
+
+         X_true = X_true[:, cols_to_keep]
+
+         _check_statistics(
+             X, X_true, strategy, true_statistics, test_missing_values, csc_container
+         )
+
+
+ @pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
+ def test_imputation_median_special_cases(csc_container):
+     # Test median imputation with sparse boundary cases
+     X = np.array(
+         [
+             [0, np.nan, np.nan],  # odd: implicit zero
+             [5, np.nan, np.nan],  # odd: explicit nonzero
+             [0, 0, np.nan],  # even: average two zeros
+             [-5, 0, np.nan],  # even: avg zero and neg
+             [0, 5, np.nan],  # even: avg zero and pos
+             [4, 5, np.nan],  # even: avg nonzeros
+             [-4, -5, np.nan],  # even: avg negatives
+             [-1, 2, np.nan],  # even: crossing neg and pos
+         ]
+     ).transpose()
+
+     X_imputed_median = np.array(
+         [
+             [0, 0, 0],
+             [5, 5, 5],
+             [0, 0, 0],
+             [-5, 0, -2.5],
+             [0, 5, 2.5],
+             [4, 5, 4.5],
+             [-4, -5, -4.5],
+             [-1, 2, 0.5],
+         ]
+     ).transpose()
+     statistics_median = [0, 5, 0, -2.5, 2.5, 4.5, -4.5, 0.5]
+
+     _check_statistics(
+         X, X_imputed_median, "median", statistics_median, np.nan, csc_container
+     )
+
+
+ @pytest.mark.parametrize("strategy", ["mean", "median"])
+ @pytest.mark.parametrize("dtype", [None, object, str])
+ def test_imputation_mean_median_error_invalid_type(strategy, dtype):
+     X = np.array([["a", "b", 3], [4, "e", 6], ["g", "h", 9]], dtype=dtype)
+     msg = "non-numeric data:\ncould not convert string to float:"
+     with pytest.raises(ValueError, match=msg):
+         imputer = SimpleImputer(strategy=strategy)
+         imputer.fit_transform(X)
+
+
+ @pytest.mark.parametrize("strategy", ["mean", "median"])
+ @pytest.mark.parametrize("type", ["list", "dataframe"])
+ def test_imputation_mean_median_error_invalid_type_list_pandas(strategy, type):
+     X = [["a", "b", 3], [4, "e", 6], ["g", "h", 9]]
+     if type == "dataframe":
+         pd = pytest.importorskip("pandas")
+         X = pd.DataFrame(X)
+     msg = "non-numeric data:\ncould not convert string to float:"
+     with pytest.raises(ValueError, match=msg):
+         imputer = SimpleImputer(strategy=strategy)
+         imputer.fit_transform(X)
+
+
+ @pytest.mark.parametrize("strategy", ["constant", "most_frequent"])
+ @pytest.mark.parametrize("dtype", [str, np.dtype("U"), np.dtype("S")])
+ def test_imputation_const_mostf_error_invalid_types(strategy, dtype):
+     # Test imputation on non-numeric data using "most_frequent" and "constant"
+     # strategy
+     X = np.array(
+         [
+             [np.nan, np.nan, "a", "f"],
+             [np.nan, "c", np.nan, "d"],
+             [np.nan, "b", "d", np.nan],
+             [np.nan, "c", "d", "h"],
+         ],
+         dtype=dtype,
+     )
+
+     err_msg = "SimpleImputer does not support data"
+     with pytest.raises(ValueError, match=err_msg):
+         imputer = SimpleImputer(strategy=strategy)
+         imputer.fit(X).transform(X)
+
+
+ @pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
+ def test_imputation_most_frequent(csc_container):
+     # Test imputation using the most-frequent strategy.
+     X = np.array(
+         [
+             [-1, -1, 0, 5],
+             [-1, 2, -1, 3],
+             [-1, 1, 3, -1],
+             [-1, 2, 3, 7],
+         ]
+     )
+
+     X_true = np.array(
+         [
+             [2, 0, 5],
+             [2, 3, 3],
+             [1, 3, 3],
+             [2, 3, 7],
+         ]
+     )
+
+     # scipy.stats.mode, used in SimpleImputer, doesn't return the first most
+     # frequent as promised in the doc but the lowest most frequent. If this
+     # test fails after an update of scipy, SimpleImputer will need to be
+     # updated to be consistent with the new (correct) behaviour
+     _check_statistics(X, X_true, "most_frequent", [np.nan, 2, 3, 3], -1, csc_container)
+
+
+ @pytest.mark.parametrize("marker", [None, np.nan, "NAN", "", 0])
+ def test_imputation_most_frequent_objects(marker):
+     # Test imputation using the most-frequent strategy.
+     X = np.array(
+         [
+             [marker, marker, "a", "f"],
+             [marker, "c", marker, "d"],
+             [marker, "b", "d", marker],
+             [marker, "c", "d", "h"],
+         ],
+         dtype=object,
+     )
+
+     X_true = np.array(
+         [
+             ["c", "a", "f"],
+             ["c", "d", "d"],
+             ["b", "d", "d"],
+             ["c", "d", "h"],
+         ],
+         dtype=object,
+     )
+
+     imputer = SimpleImputer(missing_values=marker, strategy="most_frequent")
+     X_trans = imputer.fit(X).transform(X)
+
+     assert_array_equal(X_trans, X_true)
+
+
+ @pytest.mark.parametrize("dtype", [object, "category"])
+ def test_imputation_most_frequent_pandas(dtype):
+     # Test imputation using the most frequent strategy on pandas df
+     pd = pytest.importorskip("pandas")
+
+     f = io.StringIO("Cat1,Cat2,Cat3,Cat4\n,i,x,\na,,y,\na,j,,\nb,j,x,")
+
+     df = pd.read_csv(f, dtype=dtype)
+
+     X_true = np.array(
+         [["a", "i", "x"], ["a", "j", "y"], ["a", "j", "x"], ["b", "j", "x"]],
+         dtype=object,
+     )
+
+     imputer = SimpleImputer(strategy="most_frequent")
+     X_trans = imputer.fit_transform(df)
+
+     assert_array_equal(X_trans, X_true)
+
+
+ @pytest.mark.parametrize("X_data, missing_value", [(1, 0), (1.0, np.nan)])
+ def test_imputation_constant_error_invalid_type(X_data, missing_value):
+     # Verify that exceptions are raised on invalid fill_value type
+     X = np.full((3, 5), X_data, dtype=float)
+     X[0, 0] = missing_value
+
+     fill_value = "x"
+     err_msg = f"fill_value={fill_value!r} (of type {type(fill_value)!r}) cannot be cast"
+     with pytest.raises(ValueError, match=re.escape(err_msg)):
+         imputer = SimpleImputer(
+             missing_values=missing_value, strategy="constant", fill_value=fill_value
+         )
+         imputer.fit_transform(X)
+
+
+ def test_imputation_constant_integer():
+     # Test imputation using the constant strategy on integers
+     X = np.array([[-1, 2, 3, -1], [4, -1, 5, -1], [6, 7, -1, -1], [8, 9, 0, -1]])
+
+     X_true = np.array([[0, 2, 3, 0], [4, 0, 5, 0], [6, 7, 0, 0], [8, 9, 0, 0]])
+
+     imputer = SimpleImputer(missing_values=-1, strategy="constant", fill_value=0)
+     X_trans = imputer.fit_transform(X)
+
+     assert_array_equal(X_trans, X_true)
+
+
+ @pytest.mark.parametrize("array_constructor", CSR_CONTAINERS + [np.asarray])
+ def test_imputation_constant_float(array_constructor):
+     # Test imputation using the constant strategy on floats
+     X = np.array(
+         [
+             [np.nan, 1.1, 0, np.nan],
+             [1.2, np.nan, 1.3, np.nan],
+             [0, 0, np.nan, np.nan],
+             [1.4, 1.5, 0, np.nan],
+         ]
+     )
+
+     X_true = np.array(
+         [[-1, 1.1, 0, -1], [1.2, -1, 1.3, -1], [0, 0, -1, -1], [1.4, 1.5, 0, -1]]
+     )
+
+     X = array_constructor(X)
+
+     X_true = array_constructor(X_true)
+
+     imputer = SimpleImputer(strategy="constant", fill_value=-1)
+     X_trans = imputer.fit_transform(X)
+
+     assert_allclose_dense_sparse(X_trans, X_true)
+
+
+ @pytest.mark.parametrize("marker", [None, np.nan, "NAN", "", 0])
+ def test_imputation_constant_object(marker):
+     # Test imputation using the constant strategy on objects
+     X = np.array(
+         [
+             [marker, "a", "b", marker],
+             ["c", marker, "d", marker],
+             ["e", "f", marker, marker],
+             ["g", "h", "i", marker],
+         ],
+         dtype=object,
+     )
+
+     X_true = np.array(
+         [
+             ["missing", "a", "b", "missing"],
+             ["c", "missing", "d", "missing"],
+             ["e", "f", "missing", "missing"],
+             ["g", "h", "i", "missing"],
+         ],
+         dtype=object,
+     )
+
+     imputer = SimpleImputer(
+         missing_values=marker, strategy="constant", fill_value="missing"
+     )
+     X_trans = imputer.fit_transform(X)
+
+     assert_array_equal(X_trans, X_true)
+
+
+ @pytest.mark.parametrize("dtype", [object, "category"])
+ def test_imputation_constant_pandas(dtype):
+     # Test imputation using the constant strategy on pandas df
+     pd = pytest.importorskip("pandas")
+
+     f = io.StringIO("Cat1,Cat2,Cat3,Cat4\n,i,x,\na,,y,\na,j,,\nb,j,x,")
+
+     df = pd.read_csv(f, dtype=dtype)
+
+     X_true = np.array(
+         [
+             ["missing_value", "i", "x", "missing_value"],
+             ["a", "missing_value", "y", "missing_value"],
+             ["a", "j", "missing_value", "missing_value"],
+             ["b", "j", "x", "missing_value"],
+         ],
+         dtype=object,
+     )
+
+     imputer = SimpleImputer(strategy="constant")
+     X_trans = imputer.fit_transform(df)
+
+     assert_array_equal(X_trans, X_true)
+
+
+ @pytest.mark.parametrize("X", [[[1], [2]], [[1], [np.nan]]])
+ def test_iterative_imputer_one_feature(X):
+     # check we exit early when there is a single feature
+     imputer = IterativeImputer().fit(X)
+     assert imputer.n_iter_ == 0
+     imputer = IterativeImputer()
+     imputer.fit([[1], [2]])
+     assert imputer.n_iter_ == 0
+     imputer.fit([[1], [np.nan]])
+     assert imputer.n_iter_ == 0
+
+
+ def test_imputation_pipeline_grid_search():
+     # Test imputation within a pipeline + gridsearch.
+     X = _sparse_random_matrix(100, 100, density=0.10)
+     missing_values = X.data[0]
+
+     pipeline = Pipeline(
+         [
+             ("imputer", SimpleImputer(missing_values=missing_values)),
+             ("tree", tree.DecisionTreeRegressor(random_state=0)),
+         ]
+     )
+
+     parameters = {"imputer__strategy": ["mean", "median", "most_frequent"]}
+
+     Y = _sparse_random_matrix(100, 1, density=0.10).toarray()
+     gs = GridSearchCV(pipeline, parameters)
+     gs.fit(X, Y)
+
+
+ def test_imputation_copy():
+     # Test imputation with copy
+     X_orig = _sparse_random_matrix(5, 5, density=0.75, random_state=0)
+
+     # copy=True, dense => copy
+     X = X_orig.copy().toarray()
+     imputer = SimpleImputer(missing_values=0, strategy="mean", copy=True)
+     Xt = imputer.fit(X).transform(X)
+     Xt[0, 0] = -1
+     assert not np.all(X == Xt)
+
+     # copy=True, sparse csr => copy
+     X = X_orig.copy()
+     imputer = SimpleImputer(missing_values=X.data[0], strategy="mean", copy=True)
+     Xt = imputer.fit(X).transform(X)
+     Xt.data[0] = -1
+     assert not np.all(X.data == Xt.data)
+
+     # copy=False, dense => no copy
+     X = X_orig.copy().toarray()
+     imputer = SimpleImputer(missing_values=0, strategy="mean", copy=False)
+     Xt = imputer.fit(X).transform(X)
+     Xt[0, 0] = -1
+     assert_array_almost_equal(X, Xt)
+
+     # copy=False, sparse csc => no copy
+     X = X_orig.copy().tocsc()
+     imputer = SimpleImputer(missing_values=X.data[0], strategy="mean", copy=False)
+     Xt = imputer.fit(X).transform(X)
+     Xt.data[0] = -1
+     assert_array_almost_equal(X.data, Xt.data)
+
+     # copy=False, sparse csr => copy
+     X = X_orig.copy()
+     imputer = SimpleImputer(missing_values=X.data[0], strategy="mean", copy=False)
+     Xt = imputer.fit(X).transform(X)
+     Xt.data[0] = -1
+     assert not np.all(X.data == Xt.data)
+
+     # Note: If X is sparse and if missing_values=0, then a (dense) copy of X is
+     # made, even if copy=False.
+
+
+ def test_iterative_imputer_zero_iters():
+     rng = np.random.RandomState(0)
+
+     n = 100
+     d = 10
+     X = _sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray()
+     missing_flag = X == 0
+     X[missing_flag] = np.nan
+
+     imputer = IterativeImputer(max_iter=0)
+     X_imputed = imputer.fit_transform(X)
+     # with max_iter=0, only initial imputation is performed
+     assert_allclose(X_imputed, imputer.initial_imputer_.transform(X))
+
+     # repeat but force n_iter_ to 0
+     imputer = IterativeImputer(max_iter=5).fit(X)
+     # transformed should not be equal to initial imputation
+     assert not np.all(imputer.transform(X) == imputer.initial_imputer_.transform(X))
+
+     imputer.n_iter_ = 0
+     # now they should be equal as only initial imputation is done
+     assert_allclose(imputer.transform(X), imputer.initial_imputer_.transform(X))
+
+
+ def test_iterative_imputer_verbose():
+     rng = np.random.RandomState(0)
+
+     n = 100
+     d = 3
+     X = _sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray()
+     imputer = IterativeImputer(missing_values=0, max_iter=1, verbose=1)
+     imputer.fit(X)
+     imputer.transform(X)
+     imputer = IterativeImputer(missing_values=0, max_iter=1, verbose=2)
+     imputer.fit(X)
+     imputer.transform(X)
+
+
+ def test_iterative_imputer_all_missing():
+     n = 100
+     d = 3
+     X = np.zeros((n, d))
+     imputer = IterativeImputer(missing_values=0, max_iter=1)
+     X_imputed = imputer.fit_transform(X)
+     assert_allclose(X_imputed, imputer.initial_imputer_.transform(X))
+
+
+ @pytest.mark.parametrize(
+     "imputation_order", ["random", "roman", "ascending", "descending", "arabic"]
+ )
+ def test_iterative_imputer_imputation_order(imputation_order):
+     rng = np.random.RandomState(0)
+     n = 100
+     d = 10
+     max_iter = 2
+     X = _sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray()
+     X[:, 0] = 1  # this column should not be discarded by IterativeImputer
+
+     imputer = IterativeImputer(
+         missing_values=0,
+         max_iter=max_iter,
+         n_nearest_features=5,
+         sample_posterior=False,
+         skip_complete=True,
+         min_value=0,
+         max_value=1,
+         verbose=1,
+         imputation_order=imputation_order,
+         random_state=rng,
+     )
+     imputer.fit_transform(X)
+     ordered_idx = [i.feat_idx for i in imputer.imputation_sequence_]
+
+     assert len(ordered_idx) // imputer.n_iter_ == imputer.n_features_with_missing_
+
+     if imputation_order == "roman":
+         assert np.all(ordered_idx[: d - 1] == np.arange(1, d))
+     elif imputation_order == "arabic":
+         assert np.all(ordered_idx[: d - 1] == np.arange(d - 1, 0, -1))
+     elif imputation_order == "random":
+         ordered_idx_round_1 = ordered_idx[: d - 1]
+         ordered_idx_round_2 = ordered_idx[d - 1 :]
+         assert ordered_idx_round_1 != ordered_idx_round_2
+     elif "ending" in imputation_order:
+         assert len(ordered_idx) == max_iter * (d - 1)
+
+
+ @pytest.mark.parametrize(
+     "estimator", [None, DummyRegressor(), BayesianRidge(), ARDRegression(), RidgeCV()]
+ )
+ def test_iterative_imputer_estimators(estimator):
+     rng = np.random.RandomState(0)
+
+     n = 100
+     d = 10
+     X = _sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray()
+
+     imputer = IterativeImputer(
+         missing_values=0, max_iter=1, estimator=estimator, random_state=rng
+     )
+     imputer.fit_transform(X)
+
+     # check that types are correct for estimators
+     hashes = []
+     for triplet in imputer.imputation_sequence_:
+         expected_type = (
+             type(estimator) if estimator is not None else type(BayesianRidge())
+         )
+         assert isinstance(triplet.estimator, expected_type)
+         hashes.append(id(triplet.estimator))
+
+     # check that each estimator is unique
+     assert len(set(hashes)) == len(hashes)
+
+
+ def test_iterative_imputer_clip():
+     rng = np.random.RandomState(0)
+     n = 100
+     d = 10
+     X = _sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray()
+
+     imputer = IterativeImputer(
+         missing_values=0, max_iter=1, min_value=0.1, max_value=0.2, random_state=rng
+     )
+
+     Xt = imputer.fit_transform(X)
+     assert_allclose(np.min(Xt[X == 0]), 0.1)
+     assert_allclose(np.max(Xt[X == 0]), 0.2)
+     assert_allclose(Xt[X != 0], X[X != 0])
+
+
+ def test_iterative_imputer_clip_truncnorm():
+     rng = np.random.RandomState(0)
+     n = 100
+     d = 10
+     X = _sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray()
+     X[:, 0] = 1
+
+     imputer = IterativeImputer(
+         missing_values=0,
+         max_iter=2,
+         n_nearest_features=5,
+         sample_posterior=True,
+         min_value=0.1,
+         max_value=0.2,
+         verbose=1,
+         imputation_order="random",
+         random_state=rng,
+     )
+     Xt = imputer.fit_transform(X)
+     assert_allclose(np.min(Xt[X == 0]), 0.1)
+     assert_allclose(np.max(Xt[X == 0]), 0.2)
+     assert_allclose(Xt[X != 0], X[X != 0])
+
+
+ def test_iterative_imputer_truncated_normal_posterior():
+     # test that the values that are imputed using `sample_posterior=True`
+     # with boundaries (`min_value` and `max_value` are not None) are drawn
+     # from a distribution that looks gaussian via the Kolmogorov Smirnov test.
+     # note that starting from the wrong random seed will make this test fail
+     # because random sampling doesn't occur at all when the imputation
+     # is outside of the (min_value, max_value) range
+     rng = np.random.RandomState(42)
+
+     X = rng.normal(size=(5, 5))
+     X[0][0] = np.nan
+
+     imputer = IterativeImputer(
+         min_value=0, max_value=0.5, sample_posterior=True, random_state=rng
+     )
+
+     imputer.fit_transform(X)
+     # generate multiple imputations for the single missing value
+     imputations = np.array([imputer.transform(X)[0][0] for _ in range(100)])
+
+     assert all(imputations >= 0)
+     assert all(imputations <= 0.5)
+
+     mu, sigma = imputations.mean(), imputations.std()
+     ks_statistic, p_value = kstest((imputations - mu) / sigma, "norm")
+     if sigma == 0:
+         sigma += 1e-12
+         ks_statistic, p_value = kstest((imputations - mu) / sigma, "norm")
+     # we want to fail to reject null hypothesis
+     # null hypothesis: distributions are the same
+     assert ks_statistic < 0.2 or p_value > 0.1, "The posterior does not appear to be normal"
+
+
+ @pytest.mark.parametrize("strategy", ["mean", "median", "most_frequent"])
+ def test_iterative_imputer_missing_at_transform(strategy):
+     rng = np.random.RandomState(0)
+     n = 100
+     d = 10
+     X_train = rng.randint(low=0, high=3, size=(n, d))
+     X_test = rng.randint(low=0, high=3, size=(n, d))
+
+     X_train[:, 0] = 1  # definitely no missing values in 0th column
+     X_test[0, 0] = 0  # definitely missing value in 0th column
+
+     imputer = IterativeImputer(
+         missing_values=0, max_iter=1, initial_strategy=strategy, random_state=rng
+     ).fit(X_train)
+     initial_imputer = SimpleImputer(missing_values=0, strategy=strategy).fit(X_train)
+
+     # if there were no missing values at time of fit, then imputer will
+     # only use the initial imputer for that feature at transform
+     assert_allclose(
+         imputer.transform(X_test)[:, 0], initial_imputer.transform(X_test)[:, 0]
+     )
+
+
+ def test_iterative_imputer_transform_stochasticity():
+     rng1 = np.random.RandomState(0)
+     rng2 = np.random.RandomState(1)
+     n = 100
+     d = 10
+     X = _sparse_random_matrix(n, d, density=0.10, random_state=rng1).toarray()
+
+     # when sample_posterior=True, two transforms shouldn't be equal
+     imputer = IterativeImputer(
+         missing_values=0, max_iter=1, sample_posterior=True, random_state=rng1
+     )
+     imputer.fit(X)
+
+     X_fitted_1 = imputer.transform(X)
+     X_fitted_2 = imputer.transform(X)
+
+     # sufficient to assert that the means are not the same
+     assert np.mean(X_fitted_1) != pytest.approx(np.mean(X_fitted_2))
+
+     # when sample_posterior=False, and n_nearest_features=None
+     # and imputation_order is not random
+     # the two transforms should be identical even if the rngs are different
+     imputer1 = IterativeImputer(
+         missing_values=0,
+         max_iter=1,
+         sample_posterior=False,
+         n_nearest_features=None,
+         imputation_order="ascending",
+         random_state=rng1,
+     )
+
+     imputer2 = IterativeImputer(
+         missing_values=0,
+         max_iter=1,
+         sample_posterior=False,
+         n_nearest_features=None,
+         imputation_order="ascending",
+         random_state=rng2,
+     )
+     imputer1.fit(X)
+     imputer2.fit(X)
+
+     X_fitted_1a = imputer1.transform(X)
+     X_fitted_1b = imputer1.transform(X)
+     X_fitted_2 = imputer2.transform(X)
+
+     assert_allclose(X_fitted_1a, X_fitted_1b)
+     assert_allclose(X_fitted_1a, X_fitted_2)
+
+
+ def test_iterative_imputer_no_missing():
+     rng = np.random.RandomState(0)
+     X = rng.rand(100, 100)
+     X[:, 0] = np.nan
+     m1 = IterativeImputer(max_iter=10, random_state=rng)
+     m2 = IterativeImputer(max_iter=10, random_state=rng)
+     pred1 = m1.fit(X).transform(X)
+     pred2 = m2.fit_transform(X)
+     # should exclude the first column entirely
+     assert_allclose(X[:, 1:], pred1)
+     # fit and fit_transform should both be identical
+     assert_allclose(pred1, pred2)
+
+
+ def test_iterative_imputer_rank_one():
+     rng = np.random.RandomState(0)
+     d = 50
+     A = rng.rand(d, 1)
+     B = rng.rand(1, d)
+     X = np.dot(A, B)
+     nan_mask = rng.rand(d, d) < 0.5
+     X_missing = X.copy()
+     X_missing[nan_mask] = np.nan
+
+     imputer = IterativeImputer(max_iter=5, verbose=1, random_state=rng)
+     X_filled = imputer.fit_transform(X_missing)
+     assert_allclose(X_filled, X, atol=0.02)
+
+
+ @pytest.mark.parametrize("rank", [3, 5])
+ def test_iterative_imputer_transform_recovery(rank):
+     rng = np.random.RandomState(0)
+     n = 70
+     d = 70
+     A = rng.rand(n, rank)
+     B = rng.rand(rank, d)
+     X_filled = np.dot(A, B)
+     nan_mask = rng.rand(n, d) < 0.5
+     X_missing = X_filled.copy()
+     X_missing[nan_mask] = np.nan
+
+     # split up data in half
+     n = n // 2
+     X_train = X_missing[:n]
+     X_test_filled = X_filled[n:]
+     X_test = X_missing[n:]
+
+     imputer = IterativeImputer(
+         max_iter=5, imputation_order="descending", verbose=1, random_state=rng
+     ).fit(X_train)
+     X_test_est = imputer.transform(X_test)
+     assert_allclose(X_test_filled, X_test_est, atol=0.1)
+
+
+ def test_iterative_imputer_additive_matrix():
+     rng = np.random.RandomState(0)
+     n = 100
+     d = 10
+     A = rng.randn(n, d)
+     B = rng.randn(n, d)
+     X_filled = np.zeros(A.shape)
+     for i in range(d):
+         for j in range(d):
+             X_filled[:, (i + j) % d] += (A[:, i] + B[:, j]) / 2
+     # a quarter is randomly missing
+     nan_mask = rng.rand(n, d) < 0.25
+     X_missing = X_filled.copy()
+     X_missing[nan_mask] = np.nan
+
+     # split up data
+     n = n // 2
+     X_train = X_missing[:n]
+     X_test_filled = X_filled[n:]
+     X_test = X_missing[n:]
+
+     imputer = IterativeImputer(max_iter=10, verbose=1, random_state=rng).fit(X_train)
+     X_test_est = imputer.transform(X_test)
+     assert_allclose(X_test_filled, X_test_est, rtol=1e-3, atol=0.01)
+
+
+ def test_iterative_imputer_early_stopping():
+     rng = np.random.RandomState(0)
+     n = 50
+     d = 5
+     A = rng.rand(n, 1)
+     B = rng.rand(1, d)
+     X = np.dot(A, B)
+     nan_mask = rng.rand(n, d) < 0.5
+     X_missing = X.copy()
+     X_missing[nan_mask] = np.nan
+
+     imputer = IterativeImputer(
+         max_iter=100, tol=1e-2, sample_posterior=False, verbose=1, random_state=rng
+     )
+     X_filled_100 = imputer.fit_transform(X_missing)
+     assert len(imputer.imputation_sequence_) == d * imputer.n_iter_
+
+     imputer = IterativeImputer(
+         max_iter=imputer.n_iter_, sample_posterior=False, verbose=1, random_state=rng
+     )
+     X_filled_early = imputer.fit_transform(X_missing)
+     assert_allclose(X_filled_100, X_filled_early, atol=1e-7)
+
+     imputer = IterativeImputer(
+         max_iter=100, tol=0, sample_posterior=False, verbose=1, random_state=rng
+     )
+     imputer.fit(X_missing)
+     assert imputer.n_iter_ == imputer.max_iter
+
+
+ def test_iterative_imputer_catch_warning():
+     # check that we catch a RuntimeWarning due to a division by zero when a
+     # feature is constant in the dataset
+     X, y = load_diabetes(return_X_y=True)
+     n_samples, n_features = X.shape
+
+     # simulate that a feature only contains one category during fit
+     X[:, 3] = 1
+
+     # add some missing values
+     rng = np.random.RandomState(0)
+     missing_rate = 0.15
+     for feat in range(n_features):
+         sample_idx = rng.choice(
+             np.arange(n_samples), size=int(n_samples * missing_rate), replace=False
+         )
+         X[sample_idx, feat] = np.nan
+
+     imputer = IterativeImputer(n_nearest_features=5, sample_posterior=True)
+     with warnings.catch_warnings():
+         warnings.simplefilter("error", RuntimeWarning)
+         X_fill = imputer.fit_transform(X, y)
+     assert not np.any(np.isnan(X_fill))
+
+
+ @pytest.mark.parametrize(
+     "min_value, max_value, correct_output",
+     [
+         (0, 100, np.array([[0] * 3, [100] * 3])),
+         (None, None, np.array([[-np.inf] * 3, [np.inf] * 3])),
+         (-np.inf, np.inf, np.array([[-np.inf] * 3, [np.inf] * 3])),
+         ([-5, 5, 10], [100, 200, 300], np.array([[-5, 5, 10], [100, 200, 300]])),
+         (
+             [-5, -np.inf, 10],
+             [100, 200, np.inf],
+             np.array([[-5, -np.inf, 10], [100, 200, np.inf]]),
+         ),
+     ],
+     ids=["scalars", "None-default", "inf", "lists", "lists-with-inf"],
+ )
+ def test_iterative_imputer_min_max_array_like(min_value, max_value, correct_output):
+     # check that passing scalar or array-like
+     # for min_value and max_value in IterativeImputer works
+     X = np.random.RandomState(0).randn(10, 3)
+     imputer = IterativeImputer(min_value=min_value, max_value=max_value)
+     imputer.fit(X)
+
+     assert isinstance(imputer._min_value, np.ndarray) and isinstance(
+         imputer._max_value, np.ndarray
+     )
+     assert (imputer._min_value.shape[0] == X.shape[1]) and (
+         imputer._max_value.shape[0] == X.shape[1]
+     )
+
+     assert_allclose(correct_output[0, :], imputer._min_value)
+     assert_allclose(correct_output[1, :], imputer._max_value)
+
+
+ @pytest.mark.parametrize(
+     "min_value, max_value, err_msg",
+     [
+         (100, 0, "min_value >= max_value."),
+         (np.inf, -np.inf, "min_value >= max_value."),
+         ([-5, 5], [100, 200, 0], "_value' should be of shape"),
+     ],
+ )
+ def test_iterative_imputer_catch_min_max_error(min_value, max_value, err_msg):
+     # check that an error is raised when invalid scalar or array-like
+     # values are passed for min_value and max_value in IterativeImputer
+     X = np.random.random((10, 3))
+     imputer = IterativeImputer(min_value=min_value, max_value=max_value)
+     with pytest.raises(ValueError, match=err_msg):
+         imputer.fit(X)
+
+
+ @pytest.mark.parametrize(
+     "min_max_1, min_max_2",
+     [([None, None], [-np.inf, np.inf]), ([-10, 10], [[-10] * 4, [10] * 4])],
+     ids=["None-vs-inf", "Scalar-vs-vector"],
+ )
+ def test_iterative_imputer_min_max_array_like_imputation(min_max_1, min_max_2):
+     # Test that None/inf and scalar/vector give the same imputation
+     X_train = np.array(
+         [
+             [np.nan, 2, 2, 1],
+             [10, np.nan, np.nan, 7],
+             [3, 1, np.nan, 1],
+             [np.nan, 4, 2, np.nan],
+         ]
+     )
+     X_test = np.array(
+         [[np.nan, 2, np.nan, 5], [2, 4, np.nan, np.nan], [np.nan, 1, 10, 1]]
+     )
+     imputer1 = IterativeImputer(
+         min_value=min_max_1[0], max_value=min_max_1[1], random_state=0
+     )
+     imputer2 = IterativeImputer(
+         min_value=min_max_2[0], max_value=min_max_2[1], random_state=0
+     )
+     X_test_imputed1 = imputer1.fit(X_train).transform(X_test)
+     X_test_imputed2 = imputer2.fit(X_train).transform(X_test)
+     assert_allclose(X_test_imputed1[:, 0], X_test_imputed2[:, 0])
+
+
+ @pytest.mark.parametrize("skip_complete", [True, False])
+ def test_iterative_imputer_skip_non_missing(skip_complete):
+     # check the imputing strategy when missing data are present in the
+     # testing set only.
+     # taken from: https://github.com/scikit-learn/scikit-learn/issues/14383
+     rng = np.random.RandomState(0)
+     X_train = np.array([[5, 2, 2, 1], [10, 1, 2, 7], [3, 1, 1, 1], [8, 4, 2, 2]])
+     X_test = np.array([[np.nan, 2, 4, 5], [np.nan, 4, 1, 2], [np.nan, 1, 10, 1]])
+     imputer = IterativeImputer(
+         initial_strategy="mean", skip_complete=skip_complete, random_state=rng
+     )
+     X_test_est = imputer.fit(X_train).transform(X_test)
+     if skip_complete:
+         # impute with the initial strategy: 'mean'
+         assert_allclose(X_test_est[:, 0], np.mean(X_train[:, 0]))
+     else:
+         assert_allclose(X_test_est[:, 0], [11, 7, 12], rtol=1e-4)
+
+
+ @pytest.mark.parametrize("rs_imputer", [None, 1, np.random.RandomState(seed=1)])
+ @pytest.mark.parametrize("rs_estimator", [None, 1, np.random.RandomState(seed=1)])
+ def test_iterative_imputer_dont_set_random_state(rs_imputer, rs_estimator):
+     class ZeroEstimator:
+         def __init__(self, random_state):
+             self.random_state = random_state
+
+         def fit(self, *args, **kwargs):
+             return self
+
+         def predict(self, X):
+             return np.zeros(X.shape[0])
+
+     estimator = ZeroEstimator(random_state=rs_estimator)
+     imputer = IterativeImputer(random_state=rs_imputer)
+     X_train = np.zeros((10, 3))
+     imputer.fit(X_train)
+     assert estimator.random_state == rs_estimator
+
+
+ @pytest.mark.parametrize(
+     "X_fit, X_trans, params, msg_err",
+     [
+         (
+             np.array([[-1, 1], [1, 2]]),
+             np.array([[-1, 1], [1, -1]]),
+             {"features": "missing-only", "sparse": "auto"},
+             "have missing values in transform but have no missing values in fit",
+         ),
+         (
+             np.array([["a", "b"], ["c", "a"]], dtype=str),
+             np.array([["a", "b"], ["c", "a"]], dtype=str),
+             {},
+             "MissingIndicator does not support data with dtype",
+         ),
+     ],
+ )
+ def test_missing_indicator_error(X_fit, X_trans, params, msg_err):
+     indicator = MissingIndicator(missing_values=-1)
+     indicator.set_params(**params)
+     with pytest.raises(ValueError, match=msg_err):
+         indicator.fit(X_fit).transform(X_trans)
+
+
+ def _generate_missing_indicator_cases():
+     missing_values_dtypes = [(0, np.int32), (np.nan, np.float64), (-1, np.int32)]
+     arr_types = (
+         [np.array]
+         + CSC_CONTAINERS
+         + CSR_CONTAINERS
+         + COO_CONTAINERS
+         + LIL_CONTAINERS
+         + BSR_CONTAINERS
+     )
+     return [
+         (arr_type, missing_values, dtype)
+         for arr_type, (missing_values, dtype) in product(
+             arr_types, missing_values_dtypes
+         )
+         if not (missing_values == 0 and arr_type is not np.array)
+     ]
+
+
+ @pytest.mark.parametrize(
+     "arr_type, missing_values, dtype", _generate_missing_indicator_cases()
+ )
+ @pytest.mark.parametrize(
+     "param_features, n_features, features_indices",
+     [("missing-only", 3, np.array([0, 1, 2])), ("all", 3, np.array([0, 1, 2]))],
+ )
+ def test_missing_indicator_new(
+     missing_values, arr_type, dtype, param_features, n_features, features_indices
+ ):
+     X_fit = np.array([[missing_values, missing_values, 1], [4, 2, missing_values]])
+     X_trans = np.array([[missing_values, missing_values, 1], [4, 12, 10]])
+     X_fit_expected = np.array([[1, 1, 0], [0, 0, 1]])
+     X_trans_expected = np.array([[1, 1, 0], [0, 0, 0]])
+
+     # convert the input to the right array format and right dtype
+     X_fit = arr_type(X_fit).astype(dtype)
+     X_trans = arr_type(X_trans).astype(dtype)
+     X_fit_expected = X_fit_expected.astype(dtype)
+     X_trans_expected = X_trans_expected.astype(dtype)
+
+     indicator = MissingIndicator(
+         missing_values=missing_values, features=param_features, sparse=False
+     )
+     X_fit_mask = indicator.fit_transform(X_fit)
+     X_trans_mask = indicator.transform(X_trans)
+
+     assert X_fit_mask.shape[1] == n_features
+     assert X_trans_mask.shape[1] == n_features
+
+     assert_array_equal(indicator.features_, features_indices)
+     assert_allclose(X_fit_mask, X_fit_expected[:, features_indices])
+     assert_allclose(X_trans_mask, X_trans_expected[:, features_indices])
+
+     assert X_fit_mask.dtype == bool
+     assert X_trans_mask.dtype == bool
+     assert isinstance(X_fit_mask, np.ndarray)
+     assert isinstance(X_trans_mask, np.ndarray)
+
+     indicator.set_params(sparse=True)
+     X_fit_mask_sparse = indicator.fit_transform(X_fit)
+     X_trans_mask_sparse = indicator.transform(X_trans)
+
+     assert X_fit_mask_sparse.dtype == bool
+     assert X_trans_mask_sparse.dtype == bool
+     assert X_fit_mask_sparse.format == "csc"
+     assert X_trans_mask_sparse.format == "csc"
+     assert_allclose(X_fit_mask_sparse.toarray(), X_fit_mask)
+     assert_allclose(X_trans_mask_sparse.toarray(), X_trans_mask)
+
+
+ @pytest.mark.parametrize(
+     "arr_type",
+     CSC_CONTAINERS + CSR_CONTAINERS + COO_CONTAINERS + LIL_CONTAINERS + BSR_CONTAINERS,
+ )
+ def test_missing_indicator_raise_on_sparse_with_missing_0(arr_type):
+     # test for sparse input and missing_value == 0
+
+     missing_values = 0
+     X_fit = np.array([[missing_values, missing_values, 1], [4, missing_values, 2]])
+     X_trans = np.array([[missing_values, missing_values, 1], [4, 12, 10]])
+
+     # convert the input to the right array format
+     X_fit_sparse = arr_type(X_fit)
+     X_trans_sparse = arr_type(X_trans)
+
+     indicator = MissingIndicator(missing_values=missing_values)
+
+     with pytest.raises(ValueError, match="Sparse input with missing_values=0"):
+         indicator.fit_transform(X_fit_sparse)
+
+     indicator.fit_transform(X_fit)
+     with pytest.raises(ValueError, match="Sparse input with missing_values=0"):
+         indicator.transform(X_trans_sparse)
+
+
+ @pytest.mark.parametrize("param_sparse", [True, False, "auto"])
+ @pytest.mark.parametrize(
+     "arr_type, missing_values",
+     [(np.array, 0)]
+     + list(
+         product(
+             CSC_CONTAINERS
+             + CSR_CONTAINERS
+             + COO_CONTAINERS
+             + LIL_CONTAINERS
+             + BSR_CONTAINERS,
+             [np.nan],
+         )
+     ),
+ )
+ def test_missing_indicator_sparse_param(arr_type, missing_values, param_sparse):
+     # check the format of the output with different sparse parameter
+     X_fit = np.array([[missing_values, missing_values, 1], [4, missing_values, 2]])
+     X_trans = np.array([[missing_values, missing_values, 1], [4, 12, 10]])
+     X_fit = arr_type(X_fit).astype(np.float64)
+     X_trans = arr_type(X_trans).astype(np.float64)
+
+     indicator = MissingIndicator(missing_values=missing_values, sparse=param_sparse)
+     X_fit_mask = indicator.fit_transform(X_fit)
+     X_trans_mask = indicator.transform(X_trans)
+
+     if param_sparse is True:
+         assert X_fit_mask.format == "csc"
+         assert X_trans_mask.format == "csc"
+     elif param_sparse == "auto" and missing_values == 0:
+         assert isinstance(X_fit_mask, np.ndarray)
+         assert isinstance(X_trans_mask, np.ndarray)
+     elif param_sparse is False:
+         assert isinstance(X_fit_mask, np.ndarray)
+         assert isinstance(X_trans_mask, np.ndarray)
+     else:
+         if sparse.issparse(X_fit):
+             assert X_fit_mask.format == "csc"
+             assert X_trans_mask.format == "csc"
+         else:
+             assert isinstance(X_fit_mask, np.ndarray)
+             assert isinstance(X_trans_mask, np.ndarray)
+
+
+ def test_missing_indicator_string():
+     X = np.array([["a", "b", "c"], ["b", "c", "a"]], dtype=object)
+     indicator = MissingIndicator(missing_values="a", features="all")
+     X_trans = indicator.fit_transform(X)
+     assert_array_equal(X_trans, np.array([[True, False, False], [False, False, True]]))
+
+
+ @pytest.mark.parametrize(
+     "X, missing_values, X_trans_exp",
+     [
+         (
+             np.array([["a", "b"], ["b", "a"]], dtype=object),
+             "a",
+             np.array([["b", "b", True, False], ["b", "b", False, True]], dtype=object),
+         ),
+         (
+             np.array([[np.nan, 1.0], [1.0, np.nan]]),
+             np.nan,
+             np.array([[1.0, 1.0, True, False], [1.0, 1.0, False, True]]),
+         ),
+         (
+             np.array([[np.nan, "b"], ["b", np.nan]], dtype=object),
+             np.nan,
+             np.array([["b", "b", True, False], ["b", "b", False, True]], dtype=object),
+         ),
+         (
+             np.array([[None, "b"], ["b", None]], dtype=object),
+             None,
+             np.array([["b", "b", True, False], ["b", "b", False, True]], dtype=object),
+         ),
+     ],
+ )
+ def test_missing_indicator_with_imputer(X, missing_values, X_trans_exp):
+     trans = make_union(
+         SimpleImputer(missing_values=missing_values, strategy="most_frequent"),
+         MissingIndicator(missing_values=missing_values),
+     )
+     X_trans = trans.fit_transform(X)
+     assert_array_equal(X_trans, X_trans_exp)
+
+
+ @pytest.mark.parametrize("imputer_constructor", [SimpleImputer, IterativeImputer])
+ @pytest.mark.parametrize(
+     "imputer_missing_values, missing_value, err_msg",
+     [
+         ("NaN", np.nan, "Input X contains NaN"),
+         ("-1", -1, "types are expected to be both numerical."),
+     ],
+ )
+ def test_inconsistent_dtype_X_missing_values(
+     imputer_constructor, imputer_missing_values, missing_value, err_msg
+ ):
+     # regression test for issue #11390. Comparison between incoherent dtype
+     # for X and missing_values was not raising a proper error.
+     rng = np.random.RandomState(42)
+     X = rng.randn(10, 10)
+     X[0, 0] = missing_value
+
+     imputer = imputer_constructor(missing_values=imputer_missing_values)
+
+     with pytest.raises(ValueError, match=err_msg):
+         imputer.fit_transform(X)
+
+
+ def test_missing_indicator_no_missing():
+     # check that all features are dropped if there are no missing values when
+     # features='missing-only' (#13491)
+     X = np.array([[1, 1], [1, 1]])
+
+     mi = MissingIndicator(features="missing-only", missing_values=-1)
+     Xt = mi.fit_transform(X)
+
+     assert Xt.shape[1] == 0
+
+
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
+ def test_missing_indicator_sparse_no_explicit_zeros(csr_container):
+     # Check that non missing values don't become explicit zeros in the mask
+     # generated by missing indicator when X is sparse. (#13491)
+     X = csr_container([[0, 1, 2], [1, 2, 0], [2, 0, 1]])
+
+     mi = MissingIndicator(features="all", missing_values=1)
+     Xt = mi.fit_transform(X)
+
+     assert Xt.getnnz() == Xt.sum()
+
+
+ @pytest.mark.parametrize("imputer_constructor", [SimpleImputer, IterativeImputer])
+ def test_imputer_without_indicator(imputer_constructor):
+     X = np.array([[1, 1], [1, 1]])
+     imputer = imputer_constructor()
+     imputer.fit(X)
+
+     assert imputer.indicator_ is None
+
+
+ @pytest.mark.parametrize(
+     "arr_type",
+     CSC_CONTAINERS + CSR_CONTAINERS + COO_CONTAINERS + LIL_CONTAINERS + BSR_CONTAINERS,
+ )
+ def test_simple_imputation_add_indicator_sparse_matrix(arr_type):
+     X_sparse = arr_type([[np.nan, 1, 5], [2, np.nan, 1], [6, 3, np.nan], [1, 2, 9]])
+     X_true = np.array(
+         [
+             [3.0, 1.0, 5.0, 1.0, 0.0, 0.0],
+             [2.0, 2.0, 1.0, 0.0, 1.0, 0.0],
+             [6.0, 3.0, 5.0, 0.0, 0.0, 1.0],
+             [1.0, 2.0, 9.0, 0.0, 0.0, 0.0],
+         ]
+     )
+
+     imputer = SimpleImputer(missing_values=np.nan, add_indicator=True)
+     X_trans = imputer.fit_transform(X_sparse)
+
+     assert sparse.issparse(X_trans)
+     assert X_trans.shape == X_true.shape
+     assert_allclose(X_trans.toarray(), X_true)
+
+
+ @pytest.mark.parametrize(
+     "strategy, expected", [("most_frequent", "b"), ("constant", "missing_value")]
+ )
+ def test_simple_imputation_string_list(strategy, expected):
+     X = [["a", "b"], ["c", np.nan]]
+
+     X_true = np.array([["a", "b"], ["c", expected]], dtype=object)
+
+     imputer = SimpleImputer(strategy=strategy)
+     X_trans = imputer.fit_transform(X)
+
+     assert_array_equal(X_trans, X_true)
+
+
1391
+ @pytest.mark.parametrize(
1392
+ "order, idx_order",
1393
+ [("ascending", [3, 4, 2, 0, 1]), ("descending", [1, 0, 2, 4, 3])],
1394
+ )
1395
+ def test_imputation_order(order, idx_order):
1396
+ # regression test for #15393
1397
+ rng = np.random.RandomState(42)
1398
+ X = rng.rand(100, 5)
1399
+ X[:50, 1] = np.nan
1400
+ X[:30, 0] = np.nan
1401
+ X[:20, 2] = np.nan
1402
+ X[:10, 4] = np.nan
1403
+
1404
+ with pytest.warns(ConvergenceWarning):
1405
+ trs = IterativeImputer(max_iter=1, imputation_order=order, random_state=0).fit(
1406
+ X
1407
+ )
1408
+ idx = [x.feat_idx for x in trs.imputation_sequence_]
1409
+ assert idx == idx_order
1410
+
1411
+
1412
+ @pytest.mark.parametrize("missing_value", [-1, np.nan])
1413
+ def test_simple_imputation_inverse_transform(missing_value):
1414
+ # Test inverse_transform feature for np.nan
1415
+ X_1 = np.array(
1416
+ [
1417
+ [9, missing_value, 3, -1],
1418
+ [4, -1, 5, 4],
1419
+ [6, 7, missing_value, -1],
1420
+ [8, 9, 0, missing_value],
1421
+ ]
1422
+ )
1423
+
1424
+ X_2 = np.array(
1425
+ [
1426
+ [5, 4, 2, 1],
1427
+ [2, 1, missing_value, 3],
1428
+ [9, missing_value, 7, 1],
1429
+ [6, 4, 2, missing_value],
1430
+ ]
1431
+ )
1432
+
1433
+ X_3 = np.array(
1434
+ [
1435
+ [1, missing_value, 5, 9],
1436
+ [missing_value, 4, missing_value, missing_value],
1437
+ [2, missing_value, 7, missing_value],
1438
+ [missing_value, 3, missing_value, 8],
1439
+ ]
1440
+ )
1441
+
1442
+ X_4 = np.array(
1443
+ [
1444
+ [1, 1, 1, 3],
1445
+ [missing_value, 2, missing_value, 1],
1446
+ [2, 3, 3, 4],
1447
+ [missing_value, 4, missing_value, 2],
1448
+ ]
1449
+ )
1450
+
1451
+ imputer = SimpleImputer(
1452
+ missing_values=missing_value, strategy="mean", add_indicator=True
1453
+ )
1454
+
1455
+ X_1_trans = imputer.fit_transform(X_1)
1456
+ X_1_inv_trans = imputer.inverse_transform(X_1_trans)
1457
+
1458
+ X_2_trans = imputer.transform(X_2) # test on new data
1459
+ X_2_inv_trans = imputer.inverse_transform(X_2_trans)
1460
+
1461
+ assert_array_equal(X_1_inv_trans, X_1)
1462
+ assert_array_equal(X_2_inv_trans, X_2)
1463
+
1464
+ for X in [X_3, X_4]:
1465
+ X_trans = imputer.fit_transform(X)
1466
+ X_inv_trans = imputer.inverse_transform(X_trans)
1467
+ assert_array_equal(X_inv_trans, X)
1468
+
1469
+
1470
+ @pytest.mark.parametrize("missing_value", [-1, np.nan])
1471
+ def test_simple_imputation_inverse_transform_exceptions(missing_value):
1472
+ X_1 = np.array(
1473
+ [
1474
+ [9, missing_value, 3, -1],
1475
+ [4, -1, 5, 4],
1476
+ [6, 7, missing_value, -1],
1477
+ [8, 9, 0, missing_value],
1478
+ ]
1479
+ )
1480
+
1481
+ imputer = SimpleImputer(missing_values=missing_value, strategy="mean")
1482
+ X_1_trans = imputer.fit_transform(X_1)
1483
+ with pytest.raises(
1484
+ ValueError, match=f"Got 'add_indicator={imputer.add_indicator}'"
1485
+ ):
1486
+ imputer.inverse_transform(X_1_trans)
1487
+
1488
+
1489
+ @pytest.mark.parametrize(
1490
+ "expected,array,dtype,extra_value,n_repeat",
1491
+ [
1492
+ # array of object dtype
1493
+ ("extra_value", ["a", "b", "c"], object, "extra_value", 2),
1494
+ (
1495
+ "most_frequent_value",
1496
+ ["most_frequent_value", "most_frequent_value", "value"],
1497
+ object,
1498
+ "extra_value",
1499
+ 1,
1500
+ ),
1501
+ ("a", ["min_value", "min_valuevalue"], object, "a", 2),
1502
+ ("min_value", ["min_value", "min_value", "value"], object, "z", 2),
1503
+ # array of numeric dtype
1504
+ (10, [1, 2, 3], int, 10, 2),
1505
+ (1, [1, 1, 2], int, 10, 1),
1506
+ (10, [20, 20, 1], int, 10, 2),
1507
+ (1, [1, 1, 20], int, 10, 2),
1508
+ ],
1509
+ )
1510
+ def test_most_frequent(expected, array, dtype, extra_value, n_repeat):
1511
+ assert expected == _most_frequent(
1512
+ np.array(array, dtype=dtype), extra_value, n_repeat
1513
+ )
1514
+
1515
+
1516
+ @pytest.mark.parametrize(
1517
+ "initial_strategy", ["mean", "median", "most_frequent", "constant"]
1518
+ )
1519
+ def test_iterative_imputer_keep_empty_features(initial_strategy):
1520
+ """Check the behaviour of the iterative imputer with different initial strategy
1521
+ and keeping empty features (i.e. features containing only missing values).
1522
+ """
1523
+ X = np.array([[1, np.nan, 2], [3, np.nan, np.nan]])
1524
+
1525
+ imputer = IterativeImputer(
1526
+ initial_strategy=initial_strategy, keep_empty_features=True
1527
+ )
1528
+ X_imputed = imputer.fit_transform(X)
1529
+ assert_allclose(X_imputed[:, 1], 0)
1530
+ X_imputed = imputer.transform(X)
1531
+ assert_allclose(X_imputed[:, 1], 0)
1532
+
1533
+
1534
+ def test_iterative_imputer_constant_fill_value():
1535
+ """Check that we propagate properly the parameter `fill_value`."""
1536
+ X = np.array([[-1, 2, 3, -1], [4, -1, 5, -1], [6, 7, -1, -1], [8, 9, 0, -1]])
1537
+
1538
+ fill_value = 100
1539
+ imputer = IterativeImputer(
1540
+ missing_values=-1,
1541
+ initial_strategy="constant",
1542
+ fill_value=fill_value,
1543
+ max_iter=0,
1544
+ )
1545
+ imputer.fit_transform(X)
1546
+ assert_array_equal(imputer.initial_imputer_.statistics_, fill_value)
1547
+
1548
+
1549
+ @pytest.mark.parametrize("keep_empty_features", [True, False])
1550
+ def test_knn_imputer_keep_empty_features(keep_empty_features):
1551
+ """Check the behaviour of `keep_empty_features` for `KNNImputer`."""
1552
+ X = np.array([[1, np.nan, 2], [3, np.nan, np.nan]])
1553
+
1554
+ imputer = KNNImputer(keep_empty_features=keep_empty_features)
1555
+
1556
+ for method in ["fit_transform", "transform"]:
1557
+ X_imputed = getattr(imputer, method)(X)
1558
+ if keep_empty_features:
1559
+ assert X_imputed.shape == X.shape
1560
+ assert_array_equal(X_imputed[:, 1], 0)
1561
+ else:
1562
+ assert X_imputed.shape == (X.shape[0], X.shape[1] - 1)
1563
+
1564
+
1565
+ def test_simple_impute_pd_na():
1566
+ pd = pytest.importorskip("pandas")
1567
+
1568
+ # Impute pandas array of string types.
1569
+ df = pd.DataFrame({"feature": pd.Series(["abc", None, "de"], dtype="string")})
1570
+ imputer = SimpleImputer(missing_values=pd.NA, strategy="constant", fill_value="na")
1571
+ _assert_array_equal_and_same_dtype(
1572
+ imputer.fit_transform(df), np.array([["abc"], ["na"], ["de"]], dtype=object)
1573
+ )
1574
+
1575
+ # Impute pandas array of string types without any missing values.
1576
+ df = pd.DataFrame({"feature": pd.Series(["abc", "de", "fgh"], dtype="string")})
1577
+ imputer = SimpleImputer(fill_value="ok", strategy="constant")
1578
+ _assert_array_equal_and_same_dtype(
1579
+ imputer.fit_transform(df), np.array([["abc"], ["de"], ["fgh"]], dtype=object)
1580
+ )
1581
+
1582
+ # Impute pandas array of integer types.
1583
+ df = pd.DataFrame({"feature": pd.Series([1, None, 3], dtype="Int64")})
1584
+ imputer = SimpleImputer(missing_values=pd.NA, strategy="constant", fill_value=-1)
1585
+ _assert_allclose_and_same_dtype(
1586
+ imputer.fit_transform(df), np.array([[1], [-1], [3]], dtype="float64")
1587
+ )
1588
+
1589
+ # Use `np.nan` also works.
1590
+ imputer = SimpleImputer(missing_values=np.nan, strategy="constant", fill_value=-1)
1591
+ _assert_allclose_and_same_dtype(
1592
+ imputer.fit_transform(df), np.array([[1], [-1], [3]], dtype="float64")
1593
+ )
1594
+
1595
+ # Impute pandas array of integer types with 'median' strategy.
1596
+ df = pd.DataFrame({"feature": pd.Series([1, None, 2, 3], dtype="Int64")})
1597
+ imputer = SimpleImputer(missing_values=pd.NA, strategy="median")
1598
+ _assert_allclose_and_same_dtype(
1599
+ imputer.fit_transform(df), np.array([[1], [2], [2], [3]], dtype="float64")
1600
+ )
1601
+
1602
+ # Impute pandas array of integer types with 'mean' strategy.
1603
+ df = pd.DataFrame({"feature": pd.Series([1, None, 2], dtype="Int64")})
1604
+ imputer = SimpleImputer(missing_values=pd.NA, strategy="mean")
1605
+ _assert_allclose_and_same_dtype(
1606
+ imputer.fit_transform(df), np.array([[1], [1.5], [2]], dtype="float64")
1607
+ )
1608
+
1609
+ # Impute pandas array of float types.
1610
+ df = pd.DataFrame({"feature": pd.Series([1.0, None, 3.0], dtype="float64")})
1611
+ imputer = SimpleImputer(missing_values=pd.NA, strategy="constant", fill_value=-2.0)
1612
+ _assert_allclose_and_same_dtype(
1613
+ imputer.fit_transform(df), np.array([[1.0], [-2.0], [3.0]], dtype="float64")
1614
+ )
1615
+
1616
+ # Impute pandas array of float types with 'median' strategy.
1617
+ df = pd.DataFrame({"feature": pd.Series([1.0, None, 2.0, 3.0], dtype="float64")})
1618
+ imputer = SimpleImputer(missing_values=pd.NA, strategy="median")
1619
+ _assert_allclose_and_same_dtype(
1620
+ imputer.fit_transform(df),
1621
+ np.array([[1.0], [2.0], [2.0], [3.0]], dtype="float64"),
1622
+ )
1623
+
1624
+
1625
+ def test_missing_indicator_feature_names_out():
1626
+ """Check that missing indicator return the feature names with a prefix."""
1627
+ pd = pytest.importorskip("pandas")
1628
+
1629
+ missing_values = np.nan
1630
+ X = pd.DataFrame(
1631
+ [
1632
+ [missing_values, missing_values, 1, missing_values],
1633
+ [4, missing_values, 2, 10],
1634
+ ],
1635
+ columns=["a", "b", "c", "d"],
1636
+ )
1637
+
1638
+ indicator = MissingIndicator(missing_values=missing_values).fit(X)
1639
+ feature_names = indicator.get_feature_names_out()
1640
+ expected_names = ["missingindicator_a", "missingindicator_b", "missingindicator_d"]
1641
+ assert_array_equal(expected_names, feature_names)
1642
+
1643
+
1644
+ def test_imputer_lists_fit_transform():
1645
+ """Check transform uses object dtype when fitted on an object dtype.
1646
+
1647
+ Non-regression test for #19572.
1648
+ """
1649
+
1650
+ X = [["a", "b"], ["c", "b"], ["a", "a"]]
1651
+ imp_frequent = SimpleImputer(strategy="most_frequent").fit(X)
1652
+ X_trans = imp_frequent.transform([[np.nan, np.nan]])
1653
+ assert X_trans.dtype == object
1654
+ assert_array_equal(X_trans, [["a", "b"]])
1655
+
1656
+
1657
+ @pytest.mark.parametrize("dtype_test", [np.float32, np.float64])
1658
+ def test_imputer_transform_preserves_numeric_dtype(dtype_test):
1659
+ """Check transform preserves numeric dtype independent of fit dtype."""
1660
+ X = np.asarray(
1661
+ [[1.2, 3.4, np.nan], [np.nan, 1.2, 1.3], [4.2, 2, 1]], dtype=np.float64
1662
+ )
1663
+ imp = SimpleImputer().fit(X)
1664
+
1665
+ X_test = np.asarray([[np.nan, np.nan, np.nan]], dtype=dtype_test)
1666
+ X_trans = imp.transform(X_test)
1667
+ assert X_trans.dtype == dtype_test
1668
+
1669
+
1670
+ @pytest.mark.parametrize("array_type", ["array", "sparse"])
1671
+ @pytest.mark.parametrize("keep_empty_features", [True, False])
1672
+ def test_simple_imputer_constant_keep_empty_features(array_type, keep_empty_features):
1673
+ """Check the behaviour of `keep_empty_features` with `strategy='constant'.
1674
+ For backward compatibility, a column full of missing values will always be
1675
+ filled and never dropped.
1676
+ """
1677
+ X = np.array([[np.nan, 2], [np.nan, 3], [np.nan, 6]])
1678
+ X = _convert_container(X, array_type)
1679
+ fill_value = 10
1680
+ imputer = SimpleImputer(
1681
+ strategy="constant",
1682
+ fill_value=fill_value,
1683
+ keep_empty_features=keep_empty_features,
1684
+ )
1685
+
1686
+ for method in ["fit_transform", "transform"]:
1687
+ X_imputed = getattr(imputer, method)(X)
1688
+ assert X_imputed.shape == X.shape
1689
+ constant_feature = (
1690
+ X_imputed[:, 0].toarray() if array_type == "sparse" else X_imputed[:, 0]
1691
+ )
1692
+ assert_array_equal(constant_feature, fill_value)
1693
+
1694
+
1695
+ @pytest.mark.parametrize("array_type", ["array", "sparse"])
1696
+ @pytest.mark.parametrize("strategy", ["mean", "median", "most_frequent"])
1697
+ @pytest.mark.parametrize("keep_empty_features", [True, False])
1698
+ def test_simple_imputer_keep_empty_features(strategy, array_type, keep_empty_features):
1699
+ """Check the behaviour of `keep_empty_features` with all strategies but
1700
+ 'constant'.
1701
+ """
1702
+ X = np.array([[np.nan, 2], [np.nan, 3], [np.nan, 6]])
1703
+ X = _convert_container(X, array_type)
1704
+ imputer = SimpleImputer(strategy=strategy, keep_empty_features=keep_empty_features)
1705
+
1706
+ for method in ["fit_transform", "transform"]:
1707
+ X_imputed = getattr(imputer, method)(X)
1708
+ if keep_empty_features:
1709
+ assert X_imputed.shape == X.shape
1710
+ constant_feature = (
1711
+ X_imputed[:, 0].toarray() if array_type == "sparse" else X_imputed[:, 0]
1712
+ )
1713
+ assert_array_equal(constant_feature, 0)
1714
+ else:
1715
+ assert X_imputed.shape == (X.shape[0], X.shape[1] - 1)
1716
+
1717
+
1718
+ def test_simple_imputer_constant_fill_value_casting():
1719
+ """Check that we raise a proper error message when we cannot cast the fill value
1720
+ to the input data type. Otherwise, check that the casting is done properly.
1721
+
1722
+ Non-regression test for:
1723
+ https://github.com/scikit-learn/scikit-learn/issues/28309
1724
+ """
1725
+ # cannot cast fill_value at fit
1726
+ fill_value = 1.5
1727
+ X_int64 = np.array([[1, 2, 3], [2, 3, 4]], dtype=np.int64)
1728
+ imputer = SimpleImputer(
1729
+ strategy="constant", fill_value=fill_value, missing_values=2
1730
+ )
1731
+ err_msg = f"fill_value={fill_value!r} (of type {type(fill_value)!r}) cannot be cast"
1732
+ with pytest.raises(ValueError, match=re.escape(err_msg)):
1733
+ imputer.fit(X_int64)
1734
+
1735
+ # cannot cast fill_value at transform
1736
+ X_float64 = np.array([[1, 2, 3], [2, 3, 4]], dtype=np.float64)
1737
+ imputer.fit(X_float64)
1738
+ err_msg = (
1739
+ f"The dtype of the filling value (i.e. {imputer.statistics_.dtype!r}) "
1740
+ "cannot be cast"
1741
+ )
1742
+ with pytest.raises(ValueError, match=re.escape(err_msg)):
1743
+ imputer.transform(X_int64)
1744
+
1745
+ # check that no error is raised when having the same kind of dtype
1746
+ fill_value_list = [np.float64(1.5), 1.5, 1]
1747
+ X_float32 = X_float64.astype(np.float32)
1748
+
1749
+ for fill_value in fill_value_list:
1750
+ imputer = SimpleImputer(
1751
+ strategy="constant", fill_value=fill_value, missing_values=2
1752
+ )
1753
+ X_trans = imputer.fit_transform(X_float32)
1754
+ assert X_trans.dtype == X_float32.dtype
venv/lib/python3.10/site-packages/sklearn/impute/tests/test_knn.py ADDED
@@ -0,0 +1,547 @@
1
+ import numpy as np
2
+ import pytest
3
+
4
+ from sklearn import config_context
5
+ from sklearn.impute import KNNImputer
6
+ from sklearn.metrics.pairwise import nan_euclidean_distances, pairwise_distances
7
+ from sklearn.neighbors import KNeighborsRegressor
8
+ from sklearn.utils._testing import assert_allclose
9
+
10
+
11
+ @pytest.mark.parametrize("weights", ["uniform", "distance"])
12
+ @pytest.mark.parametrize("n_neighbors", range(1, 6))
13
+ def test_knn_imputer_shape(weights, n_neighbors):
14
+ # Verify the shapes of the imputed matrix for different weights and
15
+ # number of neighbors.
16
+ n_rows = 10
17
+ n_cols = 2
18
+ X = np.random.rand(n_rows, n_cols)
19
+ X[0, 0] = np.nan
20
+
21
+ imputer = KNNImputer(n_neighbors=n_neighbors, weights=weights)
22
+ X_imputed = imputer.fit_transform(X)
23
+ assert X_imputed.shape == (n_rows, n_cols)
24
+
25
+
26
+ @pytest.mark.parametrize("na", [np.nan, -1])
27
+ def test_knn_imputer_default_with_invalid_input(na):
28
+ # Test imputation with default values and invalid input
29
+
30
+ # Test with inf present
31
+ X = np.array(
32
+ [
33
+ [np.inf, 1, 1, 2, na],
34
+ [2, 1, 2, 2, 3],
35
+ [3, 2, 3, 3, 8],
36
+ [na, 6, 0, 5, 13],
37
+ [na, 7, 0, 7, 8],
38
+ [6, 6, 2, 5, 7],
39
+ ]
40
+ )
41
+ with pytest.raises(ValueError, match="Input X contains (infinity|NaN)"):
42
+ KNNImputer(missing_values=na).fit(X)
43
+
44
+ # Test with inf present in matrix passed in transform()
45
+ X = np.array(
46
+ [
47
+ [np.inf, 1, 1, 2, na],
48
+ [2, 1, 2, 2, 3],
49
+ [3, 2, 3, 3, 8],
50
+ [na, 6, 0, 5, 13],
51
+ [na, 7, 0, 7, 8],
52
+ [6, 6, 2, 5, 7],
53
+ ]
54
+ )
55
+
56
+ X_fit = np.array(
57
+ [
58
+ [0, 1, 1, 2, na],
59
+ [2, 1, 2, 2, 3],
60
+ [3, 2, 3, 3, 8],
61
+ [na, 6, 0, 5, 13],
62
+ [na, 7, 0, 7, 8],
63
+ [6, 6, 2, 5, 7],
64
+ ]
65
+ )
66
+ imputer = KNNImputer(missing_values=na).fit(X_fit)
67
+ with pytest.raises(ValueError, match="Input X contains (infinity|NaN)"):
68
+ imputer.transform(X)
69
+
70
+ # Test with missing_values=0 when NaN present
71
+ imputer = KNNImputer(missing_values=0, n_neighbors=2, weights="uniform")
72
+ X = np.array(
73
+ [
74
+ [np.nan, 0, 0, 0, 5],
75
+ [np.nan, 1, 0, np.nan, 3],
76
+ [np.nan, 2, 0, 0, 0],
77
+ [np.nan, 6, 0, 5, 13],
78
+ ]
79
+ )
80
+ msg = "Input X contains NaN"
81
+ with pytest.raises(ValueError, match=msg):
82
+ imputer.fit(X)
83
+
84
+ X = np.array(
85
+ [
86
+ [0, 0],
87
+ [np.nan, 2],
88
+ ]
89
+ )
90
+
91
+
92
+ @pytest.mark.parametrize("na", [np.nan, -1])
93
+ def test_knn_imputer_removes_all_na_features(na):
94
+ X = np.array(
95
+ [
96
+ [1, 1, na, 1, 1, 1.0],
97
+ [2, 3, na, 2, 2, 2],
98
+ [3, 4, na, 3, 3, na],
99
+ [6, 4, na, na, 6, 6],
100
+ ]
101
+ )
102
+ knn = KNNImputer(missing_values=na, n_neighbors=2).fit(X)
103
+
104
+ X_transform = knn.transform(X)
105
+ assert not np.isnan(X_transform).any()
106
+ assert X_transform.shape == (4, 5)
107
+
108
+ X_test = np.arange(0, 12).reshape(2, 6)
109
+ X_transform = knn.transform(X_test)
110
+ assert_allclose(X_test[:, [0, 1, 3, 4, 5]], X_transform)
111
+
112
+
113
+ @pytest.mark.parametrize("na", [np.nan, -1])
114
+ def test_knn_imputer_zero_nan_imputes_the_same(na):
115
+ # Test with an imputable matrix and compare with different missing_values
116
+ X_zero = np.array(
117
+ [
118
+ [1, 0, 1, 1, 1.0],
119
+ [2, 2, 2, 2, 2],
120
+ [3, 3, 3, 3, 0],
121
+ [6, 6, 0, 6, 6],
122
+ ]
123
+ )
124
+
125
+ X_nan = np.array(
126
+ [
127
+ [1, na, 1, 1, 1.0],
128
+ [2, 2, 2, 2, 2],
129
+ [3, 3, 3, 3, na],
130
+ [6, 6, na, 6, 6],
131
+ ]
132
+ )
133
+
134
+ X_imputed = np.array(
135
+ [
136
+ [1, 2.5, 1, 1, 1.0],
137
+ [2, 2, 2, 2, 2],
138
+ [3, 3, 3, 3, 1.5],
139
+ [6, 6, 2.5, 6, 6],
140
+ ]
141
+ )
142
+
143
+ imputer_zero = KNNImputer(missing_values=0, n_neighbors=2, weights="uniform")
144
+
145
+ imputer_nan = KNNImputer(missing_values=na, n_neighbors=2, weights="uniform")
146
+
147
+ assert_allclose(imputer_zero.fit_transform(X_zero), X_imputed)
148
+ assert_allclose(
149
+ imputer_zero.fit_transform(X_zero), imputer_nan.fit_transform(X_nan)
150
+ )
151
+
152
+
153
+ @pytest.mark.parametrize("na", [np.nan, -1])
154
+ def test_knn_imputer_verify(na):
155
+ # Test with an imputable matrix
156
+ X = np.array(
157
+ [
158
+ [1, 0, 0, 1],
159
+ [2, 1, 2, na],
160
+ [3, 2, 3, na],
161
+ [na, 4, 5, 5],
162
+ [6, na, 6, 7],
163
+ [8, 8, 8, 8],
164
+ [16, 15, 18, 19],
165
+ ]
166
+ )
167
+
168
+ X_imputed = np.array(
169
+ [
170
+ [1, 0, 0, 1],
171
+ [2, 1, 2, 8],
172
+ [3, 2, 3, 8],
173
+ [4, 4, 5, 5],
174
+ [6, 3, 6, 7],
175
+ [8, 8, 8, 8],
176
+ [16, 15, 18, 19],
177
+ ]
178
+ )
179
+
180
+ imputer = KNNImputer(missing_values=na)
181
+ assert_allclose(imputer.fit_transform(X), X_imputed)
182
+
183
+ # Test when there are not enough neighbors
184
+ X = np.array(
185
+ [
186
+ [1, 0, 0, na],
187
+ [2, 1, 2, na],
188
+ [3, 2, 3, na],
189
+ [4, 4, 5, na],
190
+ [6, 7, 6, na],
191
+ [8, 8, 8, na],
192
+ [20, 20, 20, 20],
193
+ [22, 22, 22, 22],
194
+ ]
195
+ )
196
+
197
+ # Not enough neighbors, use column mean from training
198
+ X_impute_value = (20 + 22) / 2
199
+ X_imputed = np.array(
200
+ [
201
+ [1, 0, 0, X_impute_value],
202
+ [2, 1, 2, X_impute_value],
203
+ [3, 2, 3, X_impute_value],
204
+ [4, 4, 5, X_impute_value],
205
+ [6, 7, 6, X_impute_value],
206
+ [8, 8, 8, X_impute_value],
207
+ [20, 20, 20, 20],
208
+ [22, 22, 22, 22],
209
+ ]
210
+ )
211
+
212
+ imputer = KNNImputer(missing_values=na)
213
+ assert_allclose(imputer.fit_transform(X), X_imputed)
214
+
215
+ # Test when data in fit() and transform() are different
216
+ X = np.array([[0, 0], [na, 2], [4, 3], [5, 6], [7, 7], [9, 8], [11, 16]])
217
+
218
+ X1 = np.array([[1, 0], [3, 2], [4, na]])
219
+
220
+ X_2_1 = (0 + 3 + 6 + 7 + 8) / 5
221
+ X1_imputed = np.array([[1, 0], [3, 2], [4, X_2_1]])
222
+
223
+ imputer = KNNImputer(missing_values=na)
224
+ assert_allclose(imputer.fit(X).transform(X1), X1_imputed)
225
+
226
+
227
+ @pytest.mark.parametrize("na", [np.nan, -1])
228
+ def test_knn_imputer_one_n_neighbors(na):
229
+ X = np.array([[0, 0], [na, 2], [4, 3], [5, na], [7, 7], [na, 8], [14, 13]])
230
+
231
+ X_imputed = np.array([[0, 0], [4, 2], [4, 3], [5, 3], [7, 7], [7, 8], [14, 13]])
232
+
233
+ imputer = KNNImputer(n_neighbors=1, missing_values=na)
234
+
235
+ assert_allclose(imputer.fit_transform(X), X_imputed)
236
+
237
+
238
+ @pytest.mark.parametrize("na", [np.nan, -1])
239
+ def test_knn_imputer_all_samples_are_neighbors(na):
240
+ X = np.array([[0, 0], [na, 2], [4, 3], [5, na], [7, 7], [na, 8], [14, 13]])
241
+
242
+ X_imputed = np.array([[0, 0], [6, 2], [4, 3], [5, 5.5], [7, 7], [6, 8], [14, 13]])
243
+
244
+ n_neighbors = X.shape[0] - 1
245
+ imputer = KNNImputer(n_neighbors=n_neighbors, missing_values=na)
246
+
247
+ assert_allclose(imputer.fit_transform(X), X_imputed)
248
+
249
+ n_neighbors = X.shape[0]
250
+ imputer_plus1 = KNNImputer(n_neighbors=n_neighbors, missing_values=na)
251
+ assert_allclose(imputer_plus1.fit_transform(X), X_imputed)
252
+
253
+
254
+ @pytest.mark.parametrize("na", [np.nan, -1])
255
+ def test_knn_imputer_weight_uniform(na):
256
+ X = np.array([[0, 0], [na, 2], [4, 3], [5, 6], [7, 7], [9, 8], [11, 10]])
257
+
258
+ # Test with "uniform" weight (or unweighted)
259
+ X_imputed_uniform = np.array(
260
+ [[0, 0], [5, 2], [4, 3], [5, 6], [7, 7], [9, 8], [11, 10]]
261
+ )
262
+
263
+ imputer = KNNImputer(weights="uniform", missing_values=na)
264
+ assert_allclose(imputer.fit_transform(X), X_imputed_uniform)
265
+
266
+ # Test with "callable" weight
267
+ def no_weight(dist):
268
+ return None
269
+
270
+ imputer = KNNImputer(weights=no_weight, missing_values=na)
271
+ assert_allclose(imputer.fit_transform(X), X_imputed_uniform)
272
+
273
+ # Test with "callable" uniform weight
274
+ def uniform_weight(dist):
275
+ return np.ones_like(dist)
276
+
277
+ imputer = KNNImputer(weights=uniform_weight, missing_values=na)
278
+ assert_allclose(imputer.fit_transform(X), X_imputed_uniform)
279
+
280
+
281
+ @pytest.mark.parametrize("na", [np.nan, -1])
282
+ def test_knn_imputer_weight_distance(na):
283
+ X = np.array([[0, 0], [na, 2], [4, 3], [5, 6], [7, 7], [9, 8], [11, 10]])
284
+
285
+ # Test with "distance" weight
286
+ nn = KNeighborsRegressor(metric="euclidean", weights="distance")
287
+ X_rows_idx = [0, 2, 3, 4, 5, 6]
288
+ nn.fit(X[X_rows_idx, 1:], X[X_rows_idx, 0])
289
+ knn_imputed_value = nn.predict(X[1:2, 1:])[0]
290
+
291
+ # Manual calculation
292
+ X_neighbors_idx = [0, 2, 3, 4, 5]
293
+ dist = nan_euclidean_distances(X[1:2, :], X, missing_values=na)
294
+ weights = 1 / dist[:, X_neighbors_idx].ravel()
295
+ manual_imputed_value = np.average(X[X_neighbors_idx, 0], weights=weights)
296
+
297
+ X_imputed_distance1 = np.array(
298
+ [[0, 0], [manual_imputed_value, 2], [4, 3], [5, 6], [7, 7], [9, 8], [11, 10]]
299
+ )
300
+
301
+ # NearestNeighbor calculation
302
+ X_imputed_distance2 = np.array(
303
+ [[0, 0], [knn_imputed_value, 2], [4, 3], [5, 6], [7, 7], [9, 8], [11, 10]]
304
+ )
305
+
306
+ imputer = KNNImputer(weights="distance", missing_values=na)
307
+ assert_allclose(imputer.fit_transform(X), X_imputed_distance1)
308
+ assert_allclose(imputer.fit_transform(X), X_imputed_distance2)
309
+
310
+ # Test with weights = "distance" and n_neighbors=2
311
+ X = np.array(
312
+ [
313
+ [na, 0, 0],
314
+ [2, 1, 2],
315
+ [3, 2, 3],
316
+ [4, 5, 5],
317
+ ]
318
+ )
319
+
320
+ # neighbors are rows 1 and 2; the nan_euclidean_distances are:
321
+ dist_0_1 = np.sqrt((3 / 2) * ((1 - 0) ** 2 + (2 - 0) ** 2))
322
+ dist_0_2 = np.sqrt((3 / 2) * ((2 - 0) ** 2 + (3 - 0) ** 2))
323
+ imputed_value = np.average([2, 3], weights=[1 / dist_0_1, 1 / dist_0_2])
324
+
325
+ X_imputed = np.array(
326
+ [
327
+ [imputed_value, 0, 0],
328
+ [2, 1, 2],
329
+ [3, 2, 3],
330
+ [4, 5, 5],
331
+ ]
332
+ )
333
+
334
+ imputer = KNNImputer(n_neighbors=2, weights="distance", missing_values=na)
335
+ assert_allclose(imputer.fit_transform(X), X_imputed)
336
+
337
+ # Test with varying missingness patterns
338
+ X = np.array(
339
+ [
340
+ [1, 0, 0, 1],
341
+ [0, na, 1, na],
342
+ [1, 1, 1, na],
343
+ [0, 1, 0, 0],
344
+ [0, 0, 0, 0],
345
+ [1, 0, 1, 1],
346
+ [10, 10, 10, 10],
347
+ ]
348
+ )
349
+
350
+ # Get weights of donor neighbors
351
+ dist = nan_euclidean_distances(X, missing_values=na)
352
+ r1c1_nbor_dists = dist[1, [0, 2, 3, 4, 5]]
353
+ r1c3_nbor_dists = dist[1, [0, 3, 4, 5, 6]]
354
+ r1c1_nbor_wt = 1 / r1c1_nbor_dists
355
+ r1c3_nbor_wt = 1 / r1c3_nbor_dists
356
+
357
+ r2c3_nbor_dists = dist[2, [0, 3, 4, 5, 6]]
358
+ r2c3_nbor_wt = 1 / r2c3_nbor_dists
359
+
360
+ # Collect donor values
361
+ col1_donor_values = np.ma.masked_invalid(X[[0, 2, 3, 4, 5], 1]).copy()
362
+ col3_donor_values = np.ma.masked_invalid(X[[0, 3, 4, 5, 6], 3]).copy()
363
+
364
+ # Final imputed values
365
+ r1c1_imp = np.ma.average(col1_donor_values, weights=r1c1_nbor_wt)
366
+ r1c3_imp = np.ma.average(col3_donor_values, weights=r1c3_nbor_wt)
367
+ r2c3_imp = np.ma.average(col3_donor_values, weights=r2c3_nbor_wt)
368
+
369
+ X_imputed = np.array(
370
+ [
371
+ [1, 0, 0, 1],
372
+ [0, r1c1_imp, 1, r1c3_imp],
373
+ [1, 1, 1, r2c3_imp],
374
+ [0, 1, 0, 0],
375
+ [0, 0, 0, 0],
376
+ [1, 0, 1, 1],
377
+ [10, 10, 10, 10],
378
+ ]
379
+ )
380
+
381
+ imputer = KNNImputer(weights="distance", missing_values=na)
382
+ assert_allclose(imputer.fit_transform(X), X_imputed)
383
+
384
+ X = np.array(
385
+ [
386
+ [0, 0, 0, na],
387
+ [1, 1, 1, na],
388
+ [2, 2, na, 2],
389
+ [3, 3, 3, 3],
390
+ [4, 4, 4, 4],
391
+ [5, 5, 5, 5],
392
+ [6, 6, 6, 6],
393
+ [na, 7, 7, 7],
394
+ ]
395
+ )
396
+
397
+ dist = pairwise_distances(
398
+ X, metric="nan_euclidean", squared=False, missing_values=na
399
+ )
400
+
401
+ # Calculate weights
402
+ r0c3_w = 1.0 / dist[0, 2:-1]
403
+ r1c3_w = 1.0 / dist[1, 2:-1]
404
+ r2c2_w = 1.0 / dist[2, (0, 1, 3, 4, 5)]
405
+ r7c0_w = 1.0 / dist[7, 2:7]
406
+
407
+ # Calculate weighted averages
408
+ r0c3 = np.average(X[2:-1, -1], weights=r0c3_w)
409
+ r1c3 = np.average(X[2:-1, -1], weights=r1c3_w)
410
+ r2c2 = np.average(X[(0, 1, 3, 4, 5), 2], weights=r2c2_w)
411
+ r7c0 = np.average(X[2:7, 0], weights=r7c0_w)
412
+
413
+ X_imputed = np.array(
414
+ [
415
+ [0, 0, 0, r0c3],
416
+ [1, 1, 1, r1c3],
417
+ [2, 2, r2c2, 2],
418
+ [3, 3, 3, 3],
419
+ [4, 4, 4, 4],
420
+ [5, 5, 5, 5],
421
+ [6, 6, 6, 6],
422
+ [r7c0, 7, 7, 7],
423
+ ]
424
+ )
425
+
426
+ imputer_comp_wt = KNNImputer(missing_values=na, weights="distance")
427
+ assert_allclose(imputer_comp_wt.fit_transform(X), X_imputed)
428
+
429
+
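The distance-weighted tests above all lean on the nan-Euclidean convention: the squared distance is computed over the coordinates observed in both rows and rescaled by n_total_coordinates / n_present_coordinates. A minimal sketch (illustration only, not part of the diff) reproducing `dist_0_1` from the n_neighbors=2 case above:

```python
import numpy as np
from sklearn.metrics.pairwise import nan_euclidean_distances

x = np.array([[np.nan, 0.0, 0.0]])  # the row with the missing entry
y = np.array([[2.0, 1.0, 2.0]])     # a donor candidate

# Only the last two coordinates are observed in both rows, so the squared
# distance over them is rescaled by 3 total coords / 2 present coords.
expected = np.sqrt((3 / 2) * ((1 - 0) ** 2 + (2 - 0) ** 2))
assert np.isclose(nan_euclidean_distances(x, y)[0, 0], expected)
```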
430
+ def test_knn_imputer_callable_metric():
431
+ # Define callable metric that returns the l1 norm:
432
+ def custom_callable(x, y, missing_values=np.nan, squared=False):
433
+ x = np.ma.array(x, mask=np.isnan(x))
434
+ y = np.ma.array(y, mask=np.isnan(y))
435
+ dist = np.nansum(np.abs(x - y))
436
+ return dist
437
+
438
+ X = np.array([[4, 3, 3, np.nan], [6, 9, 6, 9], [4, 8, 6, 9], [np.nan, 9, 11, 10.0]])
439
+
440
+ X_0_3 = (9 + 9) / 2
441
+ X_3_0 = (6 + 4) / 2
442
+ X_imputed = np.array(
443
+ [[4, 3, 3, X_0_3], [6, 9, 6, 9], [4, 8, 6, 9], [X_3_0, 9, 11, 10.0]]
444
+ )
445
+
446
+ imputer = KNNImputer(n_neighbors=2, metric=custom_callable)
447
+ assert_allclose(imputer.fit_transform(X), X_imputed)
448
+
449
+
450
+ @pytest.mark.parametrize("working_memory", [None, 0])
451
+ @pytest.mark.parametrize("na", [-1, np.nan])
452
+ # Note that we use working_memory=0 to ensure that chunking is tested, even
453
+ # for a small dataset. However, it should raise a UserWarning that we ignore.
454
+ @pytest.mark.filterwarnings("ignore:adhere to working_memory")
455
+ def test_knn_imputer_with_simple_example(na, working_memory):
456
+ X = np.array(
457
+ [
458
+ [0, na, 0, na],
459
+ [1, 1, 1, na],
460
+ [2, 2, na, 2],
461
+ [3, 3, 3, 3],
462
+ [4, 4, 4, 4],
463
+ [5, 5, 5, 5],
464
+ [6, 6, 6, 6],
465
+ [na, 7, 7, 7],
466
+ ]
467
+ )
468
+
469
+ r0c1 = np.mean(X[1:6, 1])
470
+ r0c3 = np.mean(X[2:-1, -1])
471
+ r1c3 = np.mean(X[2:-1, -1])
472
+ r2c2 = np.mean(X[[0, 1, 3, 4, 5], 2])
473
+ r7c0 = np.mean(X[2:-1, 0])
474
+
475
+ X_imputed = np.array(
476
+ [
477
+ [0, r0c1, 0, r0c3],
478
+ [1, 1, 1, r1c3],
479
+ [2, 2, r2c2, 2],
480
+ [3, 3, 3, 3],
481
+ [4, 4, 4, 4],
482
+ [5, 5, 5, 5],
483
+ [6, 6, 6, 6],
484
+ [r7c0, 7, 7, 7],
485
+ ]
486
+ )
487
+
488
+ with config_context(working_memory=working_memory):
489
+ imputer_comp = KNNImputer(missing_values=na)
490
+ assert_allclose(imputer_comp.fit_transform(X), X_imputed)
491
+
492
+
493
+ @pytest.mark.parametrize("na", [-1, np.nan])
494
+ @pytest.mark.parametrize("weights", ["uniform", "distance"])
495
+ def test_knn_imputer_not_enough_valid_distances(na, weights):
496
+ # Samples with needed feature has nan distance
497
+ X1 = np.array([[na, 11], [na, 1], [3, na]])
498
+ X1_imputed = np.array([[3, 11], [3, 1], [3, 6]])
499
+
500
+ knn = KNNImputer(missing_values=na, n_neighbors=1, weights=weights)
501
+ assert_allclose(knn.fit_transform(X1), X1_imputed)
502
+
503
+ X2 = np.array([[4, na]])
504
+ X2_imputed = np.array([[4, 6]])
505
+ assert_allclose(knn.transform(X2), X2_imputed)
506
+
507
+
508
+ @pytest.mark.parametrize("na", [-1, np.nan])
509
+ def test_knn_imputer_drops_all_nan_features(na):
510
+ X1 = np.array([[na, 1], [na, 2]])
511
+ knn = KNNImputer(missing_values=na, n_neighbors=1)
512
+ X1_expected = np.array([[1], [2]])
513
+ assert_allclose(knn.fit_transform(X1), X1_expected)
514
+
515
+ X2 = np.array([[1, 2], [3, na]])
516
+ X2_expected = np.array([[2], [1.5]])
517
+ assert_allclose(knn.transform(X2), X2_expected)
518
+
519
+
520
+ @pytest.mark.parametrize("working_memory", [None, 0])
521
+ @pytest.mark.parametrize("na", [-1, np.nan])
522
+ def test_knn_imputer_distance_weighted_not_enough_neighbors(na, working_memory):
523
+ X = np.array([[3, na], [2, na], [na, 4], [5, 6], [6, 8], [na, 5]])
524
+
525
+ dist = pairwise_distances(
526
+ X, metric="nan_euclidean", squared=False, missing_values=na
527
+ )
528
+
529
+ X_01 = np.average(X[3:5, 1], weights=1 / dist[0, 3:5])
530
+ X_11 = np.average(X[3:5, 1], weights=1 / dist[1, 3:5])
531
+ X_20 = np.average(X[3:5, 0], weights=1 / dist[2, 3:5])
532
+ X_50 = np.average(X[3:5, 0], weights=1 / dist[5, 3:5])
533
+
534
+ X_expected = np.array([[3, X_01], [2, X_11], [X_20, 4], [5, 6], [6, 8], [X_50, 5]])
535
+
536
+ with config_context(working_memory=working_memory):
537
+ knn_3 = KNNImputer(missing_values=na, n_neighbors=3, weights="distance")
538
+ assert_allclose(knn_3.fit_transform(X), X_expected)
539
+
540
+ knn_4 = KNNImputer(missing_values=na, n_neighbors=4, weights="distance")
541
+ assert_allclose(knn_4.fit_transform(X), X_expected)
542
+
543
+
544
+ @pytest.mark.parametrize("na, allow_nan", [(-1, False), (np.nan, True)])
545
+ def test_knn_tags(na, allow_nan):
546
+ knn = KNNImputer(missing_values=na)
547
+ assert knn._get_tags()["allow_nan"] == allow_nan
venv/lib/python3.10/site-packages/sklearn/isotonic.py ADDED
@@ -0,0 +1,498 @@
1
+ # Authors: Fabian Pedregosa <[email protected]>
2
+ # Alexandre Gramfort <[email protected]>
3
+ # Nelle Varoquaux <[email protected]>
4
+ # License: BSD 3 clause
5
+
6
+ import math
7
+ import warnings
8
+ from numbers import Real
9
+
10
+ import numpy as np
11
+ from scipy import interpolate
12
+ from scipy.stats import spearmanr
13
+
14
+ from ._isotonic import _inplace_contiguous_isotonic_regression, _make_unique
15
+ from .base import BaseEstimator, RegressorMixin, TransformerMixin, _fit_context
16
+ from .utils import check_array, check_consistent_length
17
+ from .utils._param_validation import Interval, StrOptions, validate_params
18
+ from .utils.validation import _check_sample_weight, check_is_fitted
19
+
20
+ __all__ = ["check_increasing", "isotonic_regression", "IsotonicRegression"]
21
+
22
+
23
+ @validate_params(
24
+ {
25
+ "x": ["array-like"],
26
+ "y": ["array-like"],
27
+ },
28
+ prefer_skip_nested_validation=True,
29
+ )
30
+ def check_increasing(x, y):
31
+ """Determine whether y is monotonically correlated with x.
32
+
33
+ y is found increasing or decreasing with respect to x based on a Spearman
34
+ correlation test.
35
+
36
+ Parameters
37
+ ----------
38
+ x : array-like of shape (n_samples,)
39
+ Training data.
40
+
41
+ y : array-like of shape (n_samples,)
42
+ Training target.
43
+
44
+ Returns
45
+ -------
46
+ increasing_bool : boolean
47
+ Whether the relationship is increasing or decreasing.
48
+
49
+ Notes
50
+ -----
51
+ The Spearman correlation coefficient is estimated from the data, and the
52
+ sign of the resulting estimate is used as the result.
53
+
54
+ In the event that the 95% confidence interval based on Fisher transform
55
+ spans zero, a warning is raised.
56
+
57
+ References
58
+ ----------
59
+ Fisher transformation. Wikipedia.
60
+ https://en.wikipedia.org/wiki/Fisher_transformation
61
+
62
+ Examples
63
+ --------
64
+ >>> from sklearn.isotonic import check_increasing
65
+ >>> x, y = [1, 2, 3, 4, 5], [2, 4, 6, 8, 10]
66
+ >>> check_increasing(x, y)
67
+ True
68
+ >>> y = [10, 8, 6, 4, 2]
69
+ >>> check_increasing(x, y)
70
+ False
71
+ """
72
+
73
+ # Calculate Spearman rho estimate and set return accordingly.
74
+ rho, _ = spearmanr(x, y)
75
+ increasing_bool = rho >= 0
76
+
77
+ # Run Fisher transform to get the rho CI, but handle rho=+/-1
78
+ if rho not in [-1.0, 1.0] and len(x) > 3:
79
+ F = 0.5 * math.log((1.0 + rho) / (1.0 - rho))
80
+ F_se = 1 / math.sqrt(len(x) - 3)
81
+
82
+ # Use a 95% CI, i.e., +/-1.96 S.E.
83
+ # https://en.wikipedia.org/wiki/Fisher_transformation
84
+ rho_0 = math.tanh(F - 1.96 * F_se)
85
+ rho_1 = math.tanh(F + 1.96 * F_se)
86
+
87
+ # Warn if the CI spans zero.
88
+ if np.sign(rho_0) != np.sign(rho_1):
89
+ warnings.warn(
90
+ "Confidence interval of the Spearman "
91
+ "correlation coefficient spans zero. "
92
+ "Determination of ``increasing`` may be "
93
+ "suspect."
94
+ )
95
+
96
+ return increasing_bool
97
+
98
+
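As a worked illustration of the interval check above (a minimal sketch, not part of the diff, assuming scipy is available): the Spearman rho is mapped to z-space with the Fisher transform, a 95% interval is formed with standard error 1/sqrt(n - 3), and tanh maps the bounds back to correlation space.

```python
import math

from scipy.stats import spearmanr

x, y = [1, 2, 3, 4, 5, 6], [2, 4, 5, 4, 6, 7]
rho, _ = spearmanr(x, y)
F = 0.5 * math.log((1.0 + rho) / (1.0 - rho))  # Fisher z = artanh(rho)
F_se = 1 / math.sqrt(len(x) - 3)               # standard error in z-space
rho_0 = math.tanh(F - 1.96 * F_se)             # lower bound of the 95% CI
rho_1 = math.tanh(F + 1.96 * F_se)             # upper bound of the 95% CI
# check_increasing warns only when [rho_0, rho_1] spans zero.
print(rho_0, rho_1)
```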
99
+ @validate_params(
100
+ {
101
+ "y": ["array-like"],
102
+ "sample_weight": ["array-like", None],
103
+ "y_min": [Interval(Real, None, None, closed="both"), None],
104
+ "y_max": [Interval(Real, None, None, closed="both"), None],
105
+ "increasing": ["boolean"],
106
+ },
107
+ prefer_skip_nested_validation=True,
108
+ )
109
+ def isotonic_regression(
110
+ y, *, sample_weight=None, y_min=None, y_max=None, increasing=True
111
+ ):
112
+ """Solve the isotonic regression model.
113
+
114
+ Read more in the :ref:`User Guide <isotonic>`.
115
+
116
+ Parameters
117
+ ----------
118
+ y : array-like of shape (n_samples,)
119
+ The data.
120
+
121
+ sample_weight : array-like of shape (n_samples,), default=None
122
+ Weights on each point of the regression.
123
+ If None, weight is set to 1 (equal weights).
124
+
125
+ y_min : float, default=None
126
+ Lower bound on the lowest predicted value (the minimum value may
127
+ still be higher). If not set, defaults to -inf.
128
+
129
+ y_max : float, default=None
130
+ Upper bound on the highest predicted value (the maximum may still be
131
+ lower). If not set, defaults to +inf.
132
+
133
+ increasing : bool, default=True
134
+ Whether to compute ``y_`` as increasing (if set to True) or decreasing
135
+ (if set to False).
136
+
137
+ Returns
138
+ -------
139
+ y_ : ndarray of shape (n_samples,)
140
+ Isotonic fit of y.
141
+
142
+ References
143
+ ----------
144
+ "Active set algorithms for isotonic regression; A unifying framework"
145
+ by Michael J. Best and Nilotpal Chakravarti, section 3.
146
+
147
+ Examples
148
+ --------
149
+ >>> from sklearn.isotonic import isotonic_regression
150
+ >>> isotonic_regression([5, 3, 1, 2, 8, 10, 7, 9, 6, 4])
151
+ array([2.75 , 2.75 , 2.75 , 2.75 , 7.33...,
152
+ 7.33..., 7.33..., 7.33..., 7.33..., 7.33...])
153
+ """
154
+ order = np.s_[:] if increasing else np.s_[::-1]
155
+ y = check_array(y, ensure_2d=False, input_name="y", dtype=[np.float64, np.float32])
156
+ y = np.array(y[order], dtype=y.dtype)
157
+ sample_weight = _check_sample_weight(sample_weight, y, dtype=y.dtype, copy=True)
158
+ sample_weight = np.ascontiguousarray(sample_weight[order])
159
+
160
+ _inplace_contiguous_isotonic_regression(y, sample_weight)
161
+ if y_min is not None or y_max is not None:
162
+ # Older versions of np.clip don't accept None as a bound, so use np.inf
163
+ if y_min is None:
164
+ y_min = -np.inf
165
+ if y_max is None:
166
+ y_max = np.inf
167
+ np.clip(y, y_min, y_max, y)
168
+ return y[order]
169
+
170
+
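A short usage sketch (illustration only, not part of the diff) of the clipping path above: `y_min` / `y_max` are applied with `np.clip` after the pool-adjacent-violators fit, so the monotone solution is computed first and bounded second.

```python
from sklearn.isotonic import isotonic_regression

y = [5, 3, 1, 2, 8, 10, 7, 9, 6, 4]
print(isotonic_regression(y))                    # unconstrained monotone fit
print(isotonic_regression(y, y_min=3, y_max=7))  # same fit, clipped to [3, 7]
```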
171
+ class IsotonicRegression(RegressorMixin, TransformerMixin, BaseEstimator):
172
+ """Isotonic regression model.
173
+
174
+ Read more in the :ref:`User Guide <isotonic>`.
175
+
176
+ .. versionadded:: 0.13
177
+
178
+ Parameters
179
+ ----------
180
+ y_min : float, default=None
181
+ Lower bound on the lowest predicted value (the minimum value may
182
+ still be higher). If not set, defaults to -inf.
183
+
184
+ y_max : float, default=None
185
+ Upper bound on the highest predicted value (the maximum may still be
186
+ lower). If not set, defaults to +inf.
187
+
188
+ increasing : bool or 'auto', default=True
189
+ Determines whether the predictions should be constrained to increase
190
+ or decrease with `X`. 'auto' will decide based on the Spearman
191
+ correlation estimate's sign.
192
+
193
+ out_of_bounds : {'nan', 'clip', 'raise'}, default='nan'
194
+ Determines how `X` values outside of the training domain are handled
195
+ during prediction.
196
+
197
+ - 'nan', predictions will be NaN.
198
+ - 'clip', predictions will be set to the value corresponding to
199
+ the nearest train interval endpoint.
200
+ - 'raise', a `ValueError` is raised.
201
+
202
+ Attributes
203
+ ----------
204
+ X_min_ : float
205
+ Minimum value of input array `X_` for left bound.
206
+
207
+ X_max_ : float
208
+ Maximum value of input array `X_` for right bound.
209
+
210
+ X_thresholds_ : ndarray of shape (n_thresholds,)
211
+ Unique ascending `X` values used to interpolate
212
+ the y = f(X) monotonic function.
213
+
214
+ .. versionadded:: 0.24
215
+
216
+ y_thresholds_ : ndarray of shape (n_thresholds,)
217
+ De-duplicated `y` values suitable to interpolate the y = f(X)
218
+ monotonic function.
219
+
220
+ .. versionadded:: 0.24
221
+
222
+ f_ : function
223
+ The stepwise interpolating function that covers the input domain ``X``.
224
+
225
+ increasing_ : bool
226
+ Inferred value for ``increasing``.
227
+
228
+ See Also
229
+ --------
230
+ sklearn.linear_model.LinearRegression : Ordinary least squares Linear
231
+ Regression.
232
+ sklearn.ensemble.HistGradientBoostingRegressor : Gradient boosting that
233
+ is a non-parametric model accepting monotonicity constraints.
234
+ isotonic_regression : Function to solve the isotonic regression model.
235
+
236
+ Notes
237
+ -----
238
+ Ties are broken using the secondary method from de Leeuw, 1977.
239
+
240
+ References
241
+ ----------
242
+ Isotonic Median Regression: A Linear Programming Approach
243
+ Nilotpal Chakravarti
244
+ Mathematics of Operations Research
245
+ Vol. 14, No. 2 (May, 1989), pp. 303-308
246
+
247
+ Isotone Optimization in R : Pool-Adjacent-Violators
248
+ Algorithm (PAVA) and Active Set Methods
249
+ de Leeuw, Hornik, Mair
250
+ Journal of Statistical Software 2009
251
+
252
+ Correctness of Kruskal's algorithms for monotone regression with ties
253
+ de Leeuw, Psychometrica, 1977
254
+
255
+ Examples
256
+ --------
257
+ >>> from sklearn.datasets import make_regression
258
+ >>> from sklearn.isotonic import IsotonicRegression
259
+ >>> X, y = make_regression(n_samples=10, n_features=1, random_state=41)
260
+ >>> iso_reg = IsotonicRegression().fit(X, y)
261
+ >>> iso_reg.predict([.1, .2])
262
+ array([1.8628..., 3.7256...])
263
+ """
264
+
265
+ _parameter_constraints: dict = {
266
+ "y_min": [Interval(Real, None, None, closed="both"), None],
267
+ "y_max": [Interval(Real, None, None, closed="both"), None],
268
+ "increasing": ["boolean", StrOptions({"auto"})],
269
+ "out_of_bounds": [StrOptions({"nan", "clip", "raise"})],
270
+ }
271
+
272
+ def __init__(self, *, y_min=None, y_max=None, increasing=True, out_of_bounds="nan"):
273
+ self.y_min = y_min
274
+ self.y_max = y_max
275
+ self.increasing = increasing
276
+ self.out_of_bounds = out_of_bounds
277
+
278
+ def _check_input_data_shape(self, X):
279
+ if not (X.ndim == 1 or (X.ndim == 2 and X.shape[1] == 1)):
280
+ msg = (
281
+ "Isotonic regression input X should be a 1d array or "
282
+ "2d array with 1 feature"
283
+ )
284
+ raise ValueError(msg)
285
+
286
+ def _build_f(self, X, y):
287
+ """Build the f_ interp1d function."""
288
+
289
+ bounds_error = self.out_of_bounds == "raise"
290
+ if len(y) == 1:
291
+ # single y, constant prediction
292
+ self.f_ = lambda x: y.repeat(x.shape)
293
+ else:
294
+ self.f_ = interpolate.interp1d(
295
+ X, y, kind="linear", bounds_error=bounds_error
296
+ )
297
+
298
+ def _build_y(self, X, y, sample_weight, trim_duplicates=True):
299
+ """Build the y_ IsotonicRegression."""
300
+ self._check_input_data_shape(X)
301
+ X = X.reshape(-1) # use 1d view
302
+
303
+ # Determine increasing if auto-determination requested
304
+ if self.increasing == "auto":
305
+ self.increasing_ = check_increasing(X, y)
306
+ else:
307
+ self.increasing_ = self.increasing
308
+
309
+ # If sample_weight is passed, remove zero-weight values and clean
310
+ # up the order
311
+ sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
312
+ mask = sample_weight > 0
313
+ X, y, sample_weight = X[mask], y[mask], sample_weight[mask]
314
+
315
+ order = np.lexsort((y, X))
316
+ X, y, sample_weight = [array[order] for array in [X, y, sample_weight]]
317
+ unique_X, unique_y, unique_sample_weight = _make_unique(X, y, sample_weight)
318
+
319
+ X = unique_X
320
+ y = isotonic_regression(
321
+ unique_y,
322
+ sample_weight=unique_sample_weight,
323
+ y_min=self.y_min,
324
+ y_max=self.y_max,
325
+ increasing=self.increasing_,
326
+ )
327
+
328
+ # Handle the left and right bounds on X
329
+ self.X_min_, self.X_max_ = np.min(X), np.max(X)
330
+
331
+ if trim_duplicates:
332
+ # Remove unnecessary points for faster prediction
333
+ keep_data = np.ones((len(y),), dtype=bool)
334
+ # Aside from the 1st and last point, remove points whose y values
335
+ # are equal to both the point before and the point after it.
336
+ keep_data[1:-1] = np.logical_or(
337
+ np.not_equal(y[1:-1], y[:-2]), np.not_equal(y[1:-1], y[2:])
338
+ )
339
+ return X[keep_data], y[keep_data]
340
+ else:
341
+ # The ability to turn off trim_duplicates is only used to make it
342
+ # easier to unit test that removing duplicates in y does not have
343
+ # any impact on the resulting interpolation function (besides
344
+ # prediction speed).
345
+ return X, y
346
+
347
+ @_fit_context(prefer_skip_nested_validation=True)
348
+ def fit(self, X, y, sample_weight=None):
349
+ """Fit the model using X, y as training data.
350
+
351
+ Parameters
352
+ ----------
353
+ X : array-like of shape (n_samples,) or (n_samples, 1)
354
+ Training data.
355
+
356
+ .. versionchanged:: 0.24
357
+ Also accepts 2d array with 1 feature.
358
+
359
+ y : array-like of shape (n_samples,)
360
+ Training target.
361
+
362
+ sample_weight : array-like of shape (n_samples,), default=None
363
+ Weights. If set to None, all weights will be set to 1 (equal
364
+ weights).
365
+
366
+ Returns
367
+ -------
368
+ self : object
369
+ Returns an instance of self.
370
+
371
+ Notes
372
+ -----
373
+ X is stored for future use, as :meth:`transform` needs X to interpolate
374
+ new input data.
375
+ """
376
+ check_params = dict(accept_sparse=False, ensure_2d=False)
377
+ X = check_array(
378
+ X, input_name="X", dtype=[np.float64, np.float32], **check_params
379
+ )
380
+ y = check_array(y, input_name="y", dtype=X.dtype, **check_params)
381
+ check_consistent_length(X, y, sample_weight)
382
+
383
+ # Transform y by running the isotonic regression algorithm and
384
+ # transform X accordingly.
385
+ X, y = self._build_y(X, y, sample_weight)
386
+
387
+ # It is necessary to store the non-redundant part of the training set
388
+ # on the model to make it possible to support model persistence via
389
+ # the pickle module as the object built by scipy.interp1d is not
390
+ # picklable directly.
391
+ self.X_thresholds_, self.y_thresholds_ = X, y
392
+
393
+ # Build the interpolation function
394
+ self._build_f(X, y)
395
+ return self
396
+
397
+ def _transform(self, T):
398
+ """`_transform` is called by both `transform` and `predict` methods.
399
+
400
+ Since `transform` is wrapped to output arrays of specific types (e.g.
401
+ NumPy arrays, pandas DataFrame), we cannot make `predict` call `transform`
402
+ directly.
403
+
404
+ The above behaviour could be changed in the future, if we decide to output
405
+ other type of arrays when calling `predict`.
406
+ """
407
+ if hasattr(self, "X_thresholds_"):
408
+ dtype = self.X_thresholds_.dtype
409
+ else:
410
+ dtype = np.float64
411
+
412
+ T = check_array(T, dtype=dtype, ensure_2d=False)
413
+
414
+ self._check_input_data_shape(T)
415
+ T = T.reshape(-1) # use 1d view
416
+
417
+ if self.out_of_bounds == "clip":
418
+ T = np.clip(T, self.X_min_, self.X_max_)
419
+
420
+ res = self.f_(T)
421
+
422
+ # on scipy 0.17, interp1d up-casts to float64, so we cast back
423
+ res = res.astype(T.dtype)
424
+
425
+ return res
426
+
427
+ def transform(self, T):
428
+ """Transform new data by linear interpolation.
429
+
430
+ Parameters
431
+ ----------
432
+ T : array-like of shape (n_samples,) or (n_samples, 1)
433
+ Data to transform.
434
+
435
+ .. versionchanged:: 0.24
436
+ Also accepts 2d array with 1 feature.
437
+
438
+ Returns
439
+ -------
440
+ y_pred : ndarray of shape (n_samples,)
441
+ The transformed data.
442
+ """
443
+ return self._transform(T)
444
+
445
+ def predict(self, T):
446
+ """Predict new data by linear interpolation.
447
+
448
+ Parameters
449
+ ----------
450
+ T : array-like of shape (n_samples,) or (n_samples, 1)
451
+ Data to transform.
452
+
453
+ Returns
454
+ -------
455
+ y_pred : ndarray of shape (n_samples,)
456
+ Transformed data.
457
+ """
458
+ return self._transform(T)
459
+
460
+ # We implement get_feature_names_out here instead of using
461
+ # `ClassNamePrefixFeaturesOutMixin` because `input_features` are ignored.
462
+ # `input_features` are ignored because `IsotonicRegression` accepts 1d
463
+ # arrays and the semantics of `feature_names_in_` are not clear for 1d arrays.
464
+ def get_feature_names_out(self, input_features=None):
465
+ """Get output feature names for transformation.
466
+
467
+ Parameters
468
+ ----------
469
+ input_features : array-like of str or None, default=None
470
+ Ignored.
471
+
472
+ Returns
473
+ -------
474
+ feature_names_out : ndarray of str objects
475
+ An ndarray with one string i.e. ["isotonicregression0"].
476
+ """
477
+ check_is_fitted(self, "f_")
478
+ class_name = self.__class__.__name__.lower()
479
+ return np.asarray([f"{class_name}0"], dtype=object)
480
+
481
+ def __getstate__(self):
482
+ """Pickle-protocol - return state of the estimator."""
483
+ state = super().__getstate__()
484
+ # remove interpolation method
485
+ state.pop("f_", None)
486
+ return state
487
+
488
+ def __setstate__(self, state):
489
+ """Pickle-protocol - set state of the estimator.
490
+
491
+ We need to rebuild the interpolation function.
492
+ """
493
+ super().__setstate__(state)
494
+ if hasattr(self, "X_thresholds_") and hasattr(self, "y_thresholds_"):
495
+ self._build_f(self.X_thresholds_, self.y_thresholds_)
496
+
497
+ def _more_tags(self):
498
+ return {"X_types": ["1darray"]}
venv/lib/python3.10/site-packages/sklearn/kernel_approximation.py ADDED
@@ -0,0 +1,1137 @@
1
+ """
2
+ The :mod:`sklearn.kernel_approximation` module implements several
3
+ approximate kernel feature maps based on Fourier transforms and Count Sketches.
4
+ """
5
+
6
+ # Author: Andreas Mueller <[email protected]>
7
+ # Daniel Lopez-Sanchez (TensorSketch) <[email protected]>
8
+
9
+ # License: BSD 3 clause
10
+
11
+ import warnings
12
+ from numbers import Integral, Real
13
+
14
+ import numpy as np
15
+ import scipy.sparse as sp
16
+ from scipy.linalg import svd
17
+
18
+ try:
19
+ from scipy.fft import fft, ifft
20
+ except ImportError: # scipy < 1.4
21
+ from scipy.fftpack import fft, ifft
22
+
23
+ from .base import (
24
+ BaseEstimator,
25
+ ClassNamePrefixFeaturesOutMixin,
26
+ TransformerMixin,
27
+ _fit_context,
28
+ )
29
+ from .metrics.pairwise import KERNEL_PARAMS, PAIRWISE_KERNEL_FUNCTIONS, pairwise_kernels
30
+ from .utils import check_random_state, deprecated
31
+ from .utils._param_validation import Interval, StrOptions
32
+ from .utils.extmath import safe_sparse_dot
33
+ from .utils.validation import (
34
+ _check_feature_names_in,
35
+ check_is_fitted,
36
+ check_non_negative,
37
+ )
38
+
39
+
40
+ class PolynomialCountSketch(
41
+ ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator
42
+ ):
43
+ """Polynomial kernel approximation via Tensor Sketch.
44
+
45
+ Implements Tensor Sketch, which approximates the feature map
46
+ of the polynomial kernel::
47
+
48
+ K(X, Y) = (gamma * <X, Y> + coef0)^degree
49
+
50
+ by efficiently computing a Count Sketch of the outer product of a
51
+ vector with itself using Fast Fourier Transforms (FFT). Read more in the
52
+ :ref:`User Guide <polynomial_kernel_approx>`.
53
+
54
+ .. versionadded:: 0.24
55
+
56
+ Parameters
57
+ ----------
58
+ gamma : float, default=1.0
59
+ Parameter of the polynomial kernel whose feature map
60
+ will be approximated.
61
+
62
+ degree : int, default=2
63
+ Degree of the polynomial kernel whose feature map
64
+ will be approximated.
65
+
66
+ coef0 : int, default=0
67
+ Constant term of the polynomial kernel whose feature map
68
+ will be approximated.
69
+
70
+ n_components : int, default=100
71
+ Dimensionality of the output feature space. Usually, `n_components`
72
+ should be greater than the number of features in input samples in
73
+ order to achieve good performance. The optimal score / run time
74
+ balance is typically achieved around `n_components` = 10 * `n_features`,
75
+ but this depends on the specific dataset being used.
76
+
77
+ random_state : int, RandomState instance, default=None
78
+ Determines random number generation for indexHash and bitHash
79
+ initialization. Pass an int for reproducible results across multiple
80
+ function calls. See :term:`Glossary <random_state>`.
81
+
82
+ Attributes
83
+ ----------
84
+ indexHash_ : ndarray of shape (degree, n_features), dtype=int64
85
+ Array of indexes in range [0, n_components) used to represent
86
+ the 2-wise independent hash functions for Count Sketch computation.
87
+
88
+ bitHash_ : ndarray of shape (degree, n_features), dtype=float32
89
+ Array with random entries in {+1, -1}, used to represent
90
+ the 2-wise independent hash functions for Count Sketch computation.
91
+
92
+ n_features_in_ : int
93
+ Number of features seen during :term:`fit`.
94
+
95
+ .. versionadded:: 0.24
96
+
97
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
98
+ Names of features seen during :term:`fit`. Defined only when `X`
99
+ has feature names that are all strings.
100
+
101
+ .. versionadded:: 1.0
102
+
103
+ See Also
104
+ --------
105
+ AdditiveChi2Sampler : Approximate feature map for additive chi2 kernel.
106
+ Nystroem : Approximate a kernel map using a subset of the training data.
107
+ RBFSampler : Approximate a RBF kernel feature map using random Fourier
108
+ features.
109
+ SkewedChi2Sampler : Approximate feature map for "skewed chi-squared" kernel.
110
+ sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels.
111
+
112
+ Examples
113
+ --------
114
+ >>> from sklearn.kernel_approximation import PolynomialCountSketch
115
+ >>> from sklearn.linear_model import SGDClassifier
116
+ >>> X = [[0, 0], [1, 1], [1, 0], [0, 1]]
117
+ >>> y = [0, 0, 1, 1]
118
+ >>> ps = PolynomialCountSketch(degree=3, random_state=1)
119
+ >>> X_features = ps.fit_transform(X)
120
+ >>> clf = SGDClassifier(max_iter=10, tol=1e-3)
121
+ >>> clf.fit(X_features, y)
122
+ SGDClassifier(max_iter=10)
123
+ >>> clf.score(X_features, y)
124
+ 1.0
125
+
126
+ For a more detailed example of usage, see
127
+ :ref:`sphx_glr_auto_examples_kernel_approximation_plot_scalable_poly_kernels.py`
128
+ """
129
+
130
+ _parameter_constraints: dict = {
131
+ "gamma": [Interval(Real, 0, None, closed="left")],
132
+ "degree": [Interval(Integral, 1, None, closed="left")],
133
+ "coef0": [Interval(Real, None, None, closed="neither")],
134
+ "n_components": [Interval(Integral, 1, None, closed="left")],
135
+ "random_state": ["random_state"],
136
+ }
137
+
138
+ def __init__(
139
+ self, *, gamma=1.0, degree=2, coef0=0, n_components=100, random_state=None
140
+ ):
141
+ self.gamma = gamma
142
+ self.degree = degree
143
+ self.coef0 = coef0
144
+ self.n_components = n_components
145
+ self.random_state = random_state
146
+
147
+ @_fit_context(prefer_skip_nested_validation=True)
148
+ def fit(self, X, y=None):
149
+ """Fit the model with X.
150
+
151
+ Initializes the internal variables. The method needs no information
152
+ about the distribution of data, so we only care about n_features in X.
153
+
154
+ Parameters
155
+ ----------
156
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
157
+ Training data, where `n_samples` is the number of samples
158
+ and `n_features` is the number of features.
159
+
160
+ y : array-like of shape (n_samples,) or (n_samples, n_outputs), \
161
+ default=None
162
+ Target values (None for unsupervised transformations).
163
+
164
+ Returns
165
+ -------
166
+ self : object
167
+ Returns the instance itself.
168
+ """
169
+ X = self._validate_data(X, accept_sparse="csc")
170
+ random_state = check_random_state(self.random_state)
171
+
172
+ n_features = X.shape[1]
173
+ if self.coef0 != 0:
174
+ n_features += 1
175
+
176
+ self.indexHash_ = random_state.randint(
177
+ 0, high=self.n_components, size=(self.degree, n_features)
178
+ )
179
+
180
+ self.bitHash_ = random_state.choice(a=[-1, 1], size=(self.degree, n_features))
181
+ self._n_features_out = self.n_components
182
+ return self
183
+
184
+ def transform(self, X):
185
+ """Generate the feature map approximation for X.
186
+
187
+ Parameters
188
+ ----------
189
+ X : {array-like}, shape (n_samples, n_features)
190
+ New data, where `n_samples` is the number of samples
191
+ and `n_features` is the number of features.
192
+
193
+ Returns
194
+ -------
195
+ X_new : array-like, shape (n_samples, n_components)
196
+ The transformed data.
197
+ """
198
+
199
+ check_is_fitted(self)
200
+ X = self._validate_data(X, accept_sparse="csc", reset=False)
201
+
202
+ X_gamma = np.sqrt(self.gamma) * X
203
+
204
+ if sp.issparse(X_gamma) and self.coef0 != 0:
205
+ X_gamma = sp.hstack(
206
+ [X_gamma, np.sqrt(self.coef0) * np.ones((X_gamma.shape[0], 1))],
207
+ format="csc",
208
+ )
209
+
210
+ elif not sp.issparse(X_gamma) and self.coef0 != 0:
211
+ X_gamma = np.hstack(
212
+ [X_gamma, np.sqrt(self.coef0) * np.ones((X_gamma.shape[0], 1))]
213
+ )
214
+
215
+ if X_gamma.shape[1] != self.indexHash_.shape[1]:
216
+ raise ValueError(
217
+ "Number of features of test samples does not"
218
+ " match that of training samples."
219
+ )
220
+
221
+ count_sketches = np.zeros((X_gamma.shape[0], self.degree, self.n_components))
222
+
223
+ if sp.issparse(X_gamma):
224
+ for j in range(X_gamma.shape[1]):
225
+ for d in range(self.degree):
226
+ iHashIndex = self.indexHash_[d, j]
227
+ iHashBit = self.bitHash_[d, j]
228
+ count_sketches[:, d, iHashIndex] += (
229
+ (iHashBit * X_gamma[:, [j]]).toarray().ravel()
230
+ )
231
+
232
+ else:
233
+ for j in range(X_gamma.shape[1]):
234
+ for d in range(self.degree):
235
+ iHashIndex = self.indexHash_[d, j]
236
+ iHashBit = self.bitHash_[d, j]
237
+ count_sketches[:, d, iHashIndex] += iHashBit * X_gamma[:, j]
238
+
239
+ # For each sample, compute a count sketch of phi(x) using the polynomial
240
+ # multiplication (via FFT) of p count sketches of x.
241
+ count_sketches_fft = fft(count_sketches, axis=2, overwrite_x=True)
242
+ count_sketches_fft_prod = np.prod(count_sketches_fft, axis=1)
243
+ data_sketch = np.real(ifft(count_sketches_fft_prod, overwrite_x=True))
244
+
245
+ return data_sketch
246
+
247
+
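A quick sanity sketch (illustration only, not part of the diff) of the approximation implemented above: inner products of the sketched features should approach the exact polynomial kernel (gamma * <x, y> + coef0) ** degree as n_components grows.

```python
import numpy as np
from sklearn.kernel_approximation import PolynomialCountSketch
from sklearn.metrics.pairwise import polynomial_kernel

rng = np.random.RandomState(0)
X = rng.rand(20, 5)

exact = polynomial_kernel(X, degree=2, gamma=1.0, coef0=0)
ps = PolynomialCountSketch(degree=2, gamma=1.0, coef0=0,
                           n_components=5000, random_state=0)
X_feat = ps.fit_transform(X)

# The Gram matrix of the sketched features approximates the exact kernel.
print(np.max(np.abs(exact - X_feat @ X_feat.T)))
```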
248
+ class RBFSampler(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator):
249
+ """Approximate a RBF kernel feature map using random Fourier features.
250
+
251
+ It implements a variant of Random Kitchen Sinks [1].
252
+
253
+ Read more in the :ref:`User Guide <rbf_kernel_approx>`.
254
+
255
+ Parameters
256
+ ----------
257
+ gamma : 'scale' or float, default=1.0
258
+ Parameter of RBF kernel: exp(-gamma * x^2).
259
+ If ``gamma='scale'`` is passed then it uses
260
+ 1 / (n_features * X.var()) as value of gamma.
261
+
262
+ .. versionadded:: 1.2
263
+ The option `"scale"` was added in 1.2.
264
+
265
+ n_components : int, default=100
266
+ Number of Monte Carlo samples per original feature.
267
+ Equals the dimensionality of the computed feature space.
268
+
269
+ random_state : int, RandomState instance or None, default=None
270
+ Pseudo-random number generator to control the generation of the random
271
+ weights and random offset when fitting the training data.
272
+ Pass an int for reproducible output across multiple function calls.
273
+ See :term:`Glossary <random_state>`.
274
+
275
+ Attributes
276
+ ----------
277
+ random_offset_ : ndarray of shape (n_components,), dtype={np.float64, np.float32}
278
+ Random offset used to compute the projection in the `n_components`
279
+ dimensions of the feature space.
280
+
281
+ random_weights_ : ndarray of shape (n_features, n_components),\
282
+ dtype={np.float64, np.float32}
283
+ Random projection directions drawn from the Fourier transform
284
+ of the RBF kernel.
285
+
286
+ n_features_in_ : int
287
+ Number of features seen during :term:`fit`.
288
+
289
+ .. versionadded:: 0.24
290
+
291
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
292
+ Names of features seen during :term:`fit`. Defined only when `X`
293
+ has feature names that are all strings.
294
+
295
+ .. versionadded:: 1.0
296
+
297
+ See Also
298
+ --------
299
+ AdditiveChi2Sampler : Approximate feature map for additive chi2 kernel.
300
+ Nystroem : Approximate a kernel map using a subset of the training data.
301
+ PolynomialCountSketch : Polynomial kernel approximation via Tensor Sketch.
302
+ SkewedChi2Sampler : Approximate feature map for
303
+ "skewed chi-squared" kernel.
304
+ sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels.
305
+
306
+ Notes
307
+ -----
308
+ See "Random Features for Large-Scale Kernel Machines" by A. Rahimi and
309
+ Benjamin Recht.
310
+
311
+ [1] "Weighted Sums of Random Kitchen Sinks: Replacing
312
+ minimization with randomization in learning" by A. Rahimi and
313
+ Benjamin Recht.
314
+ (https://people.eecs.berkeley.edu/~brecht/papers/08.rah.rec.nips.pdf)
315
+
316
+ Examples
317
+ --------
318
+ >>> from sklearn.kernel_approximation import RBFSampler
319
+ >>> from sklearn.linear_model import SGDClassifier
320
+ >>> X = [[0, 0], [1, 1], [1, 0], [0, 1]]
321
+ >>> y = [0, 0, 1, 1]
322
+ >>> rbf_feature = RBFSampler(gamma=1, random_state=1)
323
+ >>> X_features = rbf_feature.fit_transform(X)
324
+ >>> clf = SGDClassifier(max_iter=5, tol=1e-3)
325
+ >>> clf.fit(X_features, y)
326
+ SGDClassifier(max_iter=5)
327
+ >>> clf.score(X_features, y)
328
+ 1.0
329
+ """
330
+
331
+ _parameter_constraints: dict = {
332
+ "gamma": [
333
+ StrOptions({"scale"}),
334
+ Interval(Real, 0.0, None, closed="left"),
335
+ ],
336
+ "n_components": [Interval(Integral, 1, None, closed="left")],
337
+ "random_state": ["random_state"],
338
+ }
339
+
340
+ def __init__(self, *, gamma=1.0, n_components=100, random_state=None):
341
+ self.gamma = gamma
342
+ self.n_components = n_components
343
+ self.random_state = random_state
344
+
345
+ @_fit_context(prefer_skip_nested_validation=True)
346
+ def fit(self, X, y=None):
347
+ """Fit the model with X.
348
+
349
+ Samples random projection according to n_features.
350
+
351
+ Parameters
352
+ ----------
353
+ X : {array-like, sparse matrix}, shape (n_samples, n_features)
354
+ Training data, where `n_samples` is the number of samples
355
+ and `n_features` is the number of features.
356
+
357
+ y : array-like, shape (n_samples,) or (n_samples, n_outputs), \
358
+ default=None
359
+ Target values (None for unsupervised transformations).
360
+
361
+ Returns
362
+ -------
363
+ self : object
364
+ Returns the instance itself.
365
+ """
366
+ X = self._validate_data(X, accept_sparse="csr")
367
+ random_state = check_random_state(self.random_state)
368
+ n_features = X.shape[1]
369
+ sparse = sp.issparse(X)
370
+ if self.gamma == "scale":
371
+ # var = E[X^2] - E[X]^2 if sparse
372
+ X_var = (X.multiply(X)).mean() - (X.mean()) ** 2 if sparse else X.var()
373
+ self._gamma = 1.0 / (n_features * X_var) if X_var != 0 else 1.0
374
+ else:
375
+ self._gamma = self.gamma
376
+ self.random_weights_ = (2.0 * self._gamma) ** 0.5 * random_state.normal(
377
+ size=(n_features, self.n_components)
378
+ )
379
+
380
+ self.random_offset_ = random_state.uniform(0, 2 * np.pi, size=self.n_components)
381
+
382
+ if X.dtype == np.float32:
383
+ # Setting the data type of the fitted attribute will ensure the
384
+ # output data type during `transform`.
385
+ self.random_weights_ = self.random_weights_.astype(X.dtype, copy=False)
386
+ self.random_offset_ = self.random_offset_.astype(X.dtype, copy=False)
387
+
388
+ self._n_features_out = self.n_components
389
+ return self
390
+
391
+ def transform(self, X):
392
+ """Apply the approximate feature map to X.
393
+
394
+ Parameters
395
+ ----------
396
+ X : {array-like, sparse matrix}, shape (n_samples, n_features)
397
+ New data, where `n_samples` is the number of samples
398
+ and `n_features` is the number of features.
399
+
400
+ Returns
401
+ -------
402
+ X_new : array-like, shape (n_samples, n_components)
403
+ Returns the instance itself.
404
+ """
405
+ check_is_fitted(self)
406
+
407
+ X = self._validate_data(X, accept_sparse="csr", reset=False)
408
+ projection = safe_sparse_dot(X, self.random_weights_)
409
+ projection += self.random_offset_
410
+ np.cos(projection, projection)
411
+ projection *= (2.0 / self.n_components) ** 0.5
412
+ return projection
413
+
414
+ def _more_tags(self):
415
+ return {"preserves_dtype": [np.float64, np.float32]}
416
+
417
+
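As a quick sanity check on the construction above, inner products of the cos(Wx + b) features should converge to the exact RBF kernel as n_components grows. A minimal sketch, assuming only scikit-learn and NumPy (the sizes and tolerance are illustrative):

    import numpy as np
    from sklearn.kernel_approximation import RBFSampler
    from sklearn.metrics.pairwise import rbf_kernel

    rng = np.random.RandomState(0)
    X = rng.normal(size=(20, 5))

    # Monte Carlo quality improves with n_components.
    Z = RBFSampler(gamma=0.5, n_components=5000, random_state=0).fit_transform(X)

    approx = Z @ Z.T                  # approximate kernel matrix
    exact = rbf_kernel(X, gamma=0.5)  # exact RBF kernel
    print(np.max(np.abs(approx - exact)))  # small, shrinking as n_components grows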
418
+ class SkewedChi2Sampler(
+     ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator
+ ):
+     """Approximate feature map for the "skewed chi-squared" kernel.
+ 
+     Read more in the :ref:`User Guide <skewed_chi_kernel_approx>`.
+ 
+     Parameters
+     ----------
+     skewedness : float, default=1.0
+         "skewedness" parameter of the kernel. Needs to be cross-validated.
+ 
+     n_components : int, default=100
+         Number of Monte Carlo samples per original feature.
+         Equals the dimensionality of the computed feature space.
+ 
+     random_state : int, RandomState instance or None, default=None
+         Pseudo-random number generator to control the generation of the random
+         weights and random offset when fitting the training data.
+         Pass an int for reproducible output across multiple function calls.
+         See :term:`Glossary <random_state>`.
+ 
+     Attributes
+     ----------
+     random_weights_ : ndarray of shape (n_features, n_components)
+         Weight array, sampled from a secant hyperbolic distribution, which will
+         be used to linearly transform the log of the data.
+ 
+     random_offset_ : ndarray of shape (n_features, n_components)
+         Bias term, which will be added to the data. It is uniformly distributed
+         between 0 and 2*pi.
+ 
+     n_features_in_ : int
+         Number of features seen during :term:`fit`.
+ 
+         .. versionadded:: 0.24
+ 
+     feature_names_in_ : ndarray of shape (`n_features_in_`,)
+         Names of features seen during :term:`fit`. Defined only when `X`
+         has feature names that are all strings.
+ 
+         .. versionadded:: 1.0
+ 
+     See Also
+     --------
+     AdditiveChi2Sampler : Approximate feature map for additive chi2 kernel.
+     Nystroem : Approximate a kernel map using a subset of the training data.
+     RBFSampler : Approximate an RBF kernel feature map using random Fourier
+         features.
+     sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
+     sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels.
+ 
+     References
+     ----------
+     See "Random Fourier Approximations for Skewed Multiplicative Histogram
+     Kernels" by Fuxin Li, Catalin Ionescu and Cristian Sminchisescu.
+ 
+     Examples
+     --------
+     >>> from sklearn.kernel_approximation import SkewedChi2Sampler
+     >>> from sklearn.linear_model import SGDClassifier
+     >>> X = [[0, 0], [1, 1], [1, 0], [0, 1]]
+     >>> y = [0, 0, 1, 1]
+     >>> chi2_feature = SkewedChi2Sampler(skewedness=.01,
+     ...                                  n_components=10,
+     ...                                  random_state=0)
+     >>> X_features = chi2_feature.fit_transform(X, y)
+     >>> clf = SGDClassifier(max_iter=10, tol=1e-3)
+     >>> clf.fit(X_features, y)
+     SGDClassifier(max_iter=10)
+     >>> clf.score(X_features, y)
+     1.0
+     """
+ 
+     _parameter_constraints: dict = {
+         "skewedness": [Interval(Real, None, None, closed="neither")],
+         "n_components": [Interval(Integral, 1, None, closed="left")],
+         "random_state": ["random_state"],
+     }
+ 
+     def __init__(self, *, skewedness=1.0, n_components=100, random_state=None):
+         self.skewedness = skewedness
+         self.n_components = n_components
+         self.random_state = random_state
+ 
+     @_fit_context(prefer_skip_nested_validation=True)
+     def fit(self, X, y=None):
+         """Fit the model with X.
+ 
+         Samples random projection according to n_features.
+ 
+         Parameters
+         ----------
+         X : array-like, shape (n_samples, n_features)
+             Training data, where `n_samples` is the number of samples
+             and `n_features` is the number of features.
+ 
+         y : array-like, shape (n_samples,) or (n_samples, n_outputs), \
+                 default=None
+             Target values (None for unsupervised transformations).
+ 
+         Returns
+         -------
+         self : object
+             Returns the instance itself.
+         """
+         X = self._validate_data(X)
+         random_state = check_random_state(self.random_state)
+         n_features = X.shape[1]
+         uniform = random_state.uniform(size=(n_features, self.n_components))
+         # transform by inverse CDF of sech
+         self.random_weights_ = 1.0 / np.pi * np.log(np.tan(np.pi / 2.0 * uniform))
+         self.random_offset_ = random_state.uniform(0, 2 * np.pi, size=self.n_components)
+ 
+         if X.dtype == np.float32:
+             # Setting the data type of the fitted attribute will ensure the
+             # output data type during `transform`.
+             self.random_weights_ = self.random_weights_.astype(X.dtype, copy=False)
+             self.random_offset_ = self.random_offset_.astype(X.dtype, copy=False)
+ 
+         self._n_features_out = self.n_components
+         return self
+ 
+     def transform(self, X):
+         """Apply the approximate feature map to X.
+ 
+         Parameters
+         ----------
+         X : array-like, shape (n_samples, n_features)
+             New data, where `n_samples` is the number of samples
+             and `n_features` is the number of features. All values of X must be
+             strictly greater than "-skewedness".
+ 
+         Returns
+         -------
+         X_new : array-like, shape (n_samples, n_components)
+             The transformed data.
+         """
+         check_is_fitted(self)
+         X = self._validate_data(
+             X, copy=True, dtype=[np.float64, np.float32], reset=False
+         )
+         if (X <= -self.skewedness).any():
+             raise ValueError("X may not contain entries smaller than -skewedness.")
+ 
+         X += self.skewedness
+         np.log(X, X)
+         projection = safe_sparse_dot(X, self.random_weights_)
+         projection += self.random_offset_
+         np.cos(projection, projection)
+         projection *= np.sqrt(2.0) / np.sqrt(self.n_components)
+         return projection
+ 
+     def _more_tags(self):
+         return {"preserves_dtype": [np.float64, np.float32]}
+ 
+ 
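The `fit` above draws weights through the inverse CDF of a hyperbolic-secant distribution: if U is uniform on (0, 1), then (1/pi) * log(tan(pi/2 * U)) has density sech(pi * x), whose CDF is (2/pi) * arctan(exp(pi * x)). A minimal sketch checking that identity empirically (illustrative only):

    import numpy as np

    rng = np.random.RandomState(0)
    u = rng.uniform(size=100_000)
    samples = 1.0 / np.pi * np.log(np.tan(np.pi / 2.0 * u))

    # The empirical CDF at x should match the analytic sech CDF.
    x = 0.3
    empirical = np.mean(samples <= x)
    analytic = 2.0 / np.pi * np.arctan(np.exp(np.pi * x))
    assert abs(empirical - analytic) < 1e-2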
576
+ class AdditiveChi2Sampler(TransformerMixin, BaseEstimator):
+     """Approximate feature map for the additive chi2 kernel.
+ 
+     Samples the Fourier transform of the kernel characteristic function
+     at regular intervals.
+ 
+     Since the kernel that is to be approximated is additive, the components of
+     the input vectors can be treated separately. Each entry in the original
+     space is transformed into 2*sample_steps-1 features, where sample_steps is
+     a parameter of the method. Typical values of sample_steps include 1, 2 and
+     3.
+ 
+     Optimal choices for the sampling interval for certain data ranges can be
+     computed (see the reference). The default values should be reasonable.
+ 
+     Read more in the :ref:`User Guide <additive_chi_kernel_approx>`.
+ 
+     Parameters
+     ----------
+     sample_steps : int, default=2
+         Gives the number of (complex) sampling points.
+ 
+     sample_interval : float, default=None
+         Sampling interval. Must be specified when sample_steps is not in {1,2,3}.
+ 
+     Attributes
+     ----------
+     sample_interval_ : float
+         Stored sampling interval. Specified as a parameter if `sample_steps`
+         is not in {1,2,3}.
+ 
+         .. deprecated:: 1.3
+             `sample_interval_` serves internal purposes only and will be removed in 1.5.
+ 
+     n_features_in_ : int
+         Number of features seen during :term:`fit`.
+ 
+         .. versionadded:: 0.24
+ 
+     feature_names_in_ : ndarray of shape (`n_features_in_`,)
+         Names of features seen during :term:`fit`. Defined only when `X`
+         has feature names that are all strings.
+ 
+         .. versionadded:: 1.0
+ 
+     See Also
+     --------
+     SkewedChi2Sampler : A Fourier-approximation to a non-additive variant of
+         the chi squared kernel.
+ 
+     sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
+ 
+     sklearn.metrics.pairwise.additive_chi2_kernel : The exact additive chi
+         squared kernel.
+ 
+     Notes
+     -----
+     This estimator approximates a slightly different version of the additive
+     chi squared kernel than ``metric.additive_chi2`` computes.
+ 
+     This estimator is stateless and does not need to be fitted. However, we
+     recommend calling :meth:`fit_transform` instead of :meth:`transform`, as
+     parameter validation is only performed in :meth:`fit`.
+ 
+     References
+     ----------
+     See `"Efficient additive kernels via explicit feature maps"
+     <http://www.robots.ox.ac.uk/~vedaldi/assets/pubs/vedaldi11efficient.pdf>`_
+     A. Vedaldi and A. Zisserman, Pattern Analysis and Machine Intelligence,
+     2011
+ 
+     Examples
+     --------
+     >>> from sklearn.datasets import load_digits
+     >>> from sklearn.linear_model import SGDClassifier
+     >>> from sklearn.kernel_approximation import AdditiveChi2Sampler
+     >>> X, y = load_digits(return_X_y=True)
+     >>> chi2sampler = AdditiveChi2Sampler(sample_steps=2)
+     >>> X_transformed = chi2sampler.fit_transform(X, y)
+     >>> clf = SGDClassifier(max_iter=5, random_state=0, tol=1e-3)
+     >>> clf.fit(X_transformed, y)
+     SGDClassifier(max_iter=5, random_state=0)
+     >>> clf.score(X_transformed, y)
+     0.9499...
+     """
+ 
+     _parameter_constraints: dict = {
+         "sample_steps": [Interval(Integral, 1, None, closed="left")],
+         "sample_interval": [Interval(Real, 0, None, closed="left"), None],
+     }
+ 
+     def __init__(self, *, sample_steps=2, sample_interval=None):
+         self.sample_steps = sample_steps
+         self.sample_interval = sample_interval
+ 
+     @_fit_context(prefer_skip_nested_validation=True)
+     def fit(self, X, y=None):
+         """Only validates estimator's parameters.
+ 
+         This method allows to: (i) validate the estimator's parameters and
+         (ii) be consistent with the scikit-learn transformer API.
+ 
+         Parameters
+         ----------
+         X : array-like, shape (n_samples, n_features)
+             Training data, where `n_samples` is the number of samples
+             and `n_features` is the number of features.
+ 
+         y : array-like, shape (n_samples,) or (n_samples, n_outputs), \
+                 default=None
+             Target values (None for unsupervised transformations).
+ 
+         Returns
+         -------
+         self : object
+             Returns the transformer.
+         """
+         X = self._validate_data(X, accept_sparse="csr")
+         check_non_negative(X, "X in AdditiveChi2Sampler.fit")
+ 
+         # TODO(1.5): remove the setting of _sample_interval from fit
+         if self.sample_interval is None:
+             # See figure 2 c) of "Efficient additive kernels via explicit feature maps"
+             # <http://www.robots.ox.ac.uk/~vedaldi/assets/pubs/vedaldi11efficient.pdf>
+             # A. Vedaldi and A. Zisserman, Pattern Analysis and Machine Intelligence,
+             # 2011
+             if self.sample_steps == 1:
+                 self._sample_interval = 0.8
+             elif self.sample_steps == 2:
+                 self._sample_interval = 0.5
+             elif self.sample_steps == 3:
+                 self._sample_interval = 0.4
+             else:
+                 raise ValueError(
+                     "If sample_steps is not in [1, 2, 3],"
+                     " you need to provide sample_interval"
+                 )
+         else:
+             self._sample_interval = self.sample_interval
+ 
+         return self
+ 
+     # TODO(1.5): remove
+     @deprecated(  # type: ignore
+         "The ``sample_interval_`` attribute was deprecated in version 1.3 and "
+         "will be removed in 1.5."
+     )
+     @property
+     def sample_interval_(self):
+         return self._sample_interval
+ 
+     def transform(self, X):
+         """Apply approximate feature map to X.
+ 
+         Parameters
+         ----------
+         X : {array-like, sparse matrix}, shape (n_samples, n_features)
+             Training data, where `n_samples` is the number of samples
+             and `n_features` is the number of features.
+ 
+         Returns
+         -------
+         X_new : {ndarray, sparse matrix}, \
+                shape = (n_samples, n_features * (2*sample_steps - 1))
+             Whether the return value is an array or sparse matrix depends on
+             the type of the input X.
+         """
+         X = self._validate_data(X, accept_sparse="csr", reset=False)
+         check_non_negative(X, "X in AdditiveChi2Sampler.transform")
+         sparse = sp.issparse(X)
+ 
+         if hasattr(self, "_sample_interval"):
+             # TODO(1.5): remove this branch
+             sample_interval = self._sample_interval
+ 
+         else:
+             if self.sample_interval is None:
+                 # See figure 2 c) of "Efficient additive kernels via explicit feature maps"  # noqa
+                 # <http://www.robots.ox.ac.uk/~vedaldi/assets/pubs/vedaldi11efficient.pdf>
+                 # A. Vedaldi and A. Zisserman, Pattern Analysis and Machine Intelligence,  # noqa
+                 # 2011
+                 if self.sample_steps == 1:
+                     sample_interval = 0.8
+                 elif self.sample_steps == 2:
+                     sample_interval = 0.5
+                 elif self.sample_steps == 3:
+                     sample_interval = 0.4
+                 else:
+                     raise ValueError(
+                         "If sample_steps is not in [1, 2, 3],"
+                         " you need to provide sample_interval"
+                     )
+             else:
+                 sample_interval = self.sample_interval
+ 
+         # zeroth component
+         # 1/cosh = sech
+         # cosh(0) = 1.0
+         transf = self._transform_sparse if sparse else self._transform_dense
+         return transf(X, self.sample_steps, sample_interval)
+ 
+     def get_feature_names_out(self, input_features=None):
+         """Get output feature names for transformation.
+ 
+         Parameters
+         ----------
+         input_features : array-like of str or None, default=None
+             Only used to validate feature names with the names seen in :meth:`fit`.
+ 
+         Returns
+         -------
+         feature_names_out : ndarray of str objects
+             Transformed feature names.
+         """
+         check_is_fitted(self, "n_features_in_")
+         input_features = _check_feature_names_in(
+             self, input_features, generate_names=True
+         )
+         est_name = self.__class__.__name__.lower()
+ 
+         names_list = [f"{est_name}_{name}_sqrt" for name in input_features]
+ 
+         for j in range(1, self.sample_steps):
+             cos_names = [f"{est_name}_{name}_cos{j}" for name in input_features]
+             sin_names = [f"{est_name}_{name}_sin{j}" for name in input_features]
+             names_list.extend(cos_names + sin_names)
+ 
+         return np.asarray(names_list, dtype=object)
+ 
+     @staticmethod
+     def _transform_dense(X, sample_steps, sample_interval):
+         non_zero = X != 0.0
+         X_nz = X[non_zero]
+ 
+         X_step = np.zeros_like(X)
+         X_step[non_zero] = np.sqrt(X_nz * sample_interval)
+ 
+         X_new = [X_step]
+ 
+         log_step_nz = sample_interval * np.log(X_nz)
+         step_nz = 2 * X_nz * sample_interval
+ 
+         for j in range(1, sample_steps):
+             factor_nz = np.sqrt(step_nz / np.cosh(np.pi * j * sample_interval))
+ 
+             X_step = np.zeros_like(X)
+             X_step[non_zero] = factor_nz * np.cos(j * log_step_nz)
+             X_new.append(X_step)
+ 
+             X_step = np.zeros_like(X)
+             X_step[non_zero] = factor_nz * np.sin(j * log_step_nz)
+             X_new.append(X_step)
+ 
+         return np.hstack(X_new)
+ 
+     @staticmethod
+     def _transform_sparse(X, sample_steps, sample_interval):
+         indices = X.indices.copy()
+         indptr = X.indptr.copy()
+ 
+         data_step = np.sqrt(X.data * sample_interval)
+         X_step = sp.csr_matrix(
+             (data_step, indices, indptr), shape=X.shape, dtype=X.dtype, copy=False
+         )
+         X_new = [X_step]
+ 
+         log_step_nz = sample_interval * np.log(X.data)
+         step_nz = 2 * X.data * sample_interval
+ 
+         for j in range(1, sample_steps):
+             factor_nz = np.sqrt(step_nz / np.cosh(np.pi * j * sample_interval))
+ 
+             data_step = factor_nz * np.cos(j * log_step_nz)
+             X_step = sp.csr_matrix(
+                 (data_step, indices, indptr), shape=X.shape, dtype=X.dtype, copy=False
+             )
+             X_new.append(X_step)
+ 
+             data_step = factor_nz * np.sin(j * log_step_nz)
+             X_step = sp.csr_matrix(
+                 (data_step, indices, indptr), shape=X.shape, dtype=X.dtype, copy=False
+             )
+             X_new.append(X_step)
+ 
+         return sp.hstack(X_new)
+ 
+     def _more_tags(self):
+         return {"stateless": True, "requires_positive_X": True}
+ 
+ 
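The docstring above states that each input feature expands into 2*sample_steps - 1 output features (one sqrt term plus one cos/sin pair per additional sampling step, exactly as the two transform helpers build them). A minimal sketch confirming that arithmetic, assuming only scikit-learn and NumPy:

    import numpy as np
    from sklearn.kernel_approximation import AdditiveChi2Sampler

    # The additive chi2 map requires non-negative input.
    X = np.random.RandomState(0).rand(4, 6)

    for sample_steps in (1, 2, 3):
        X_new = AdditiveChi2Sampler(sample_steps=sample_steps).fit_transform(X)
        assert X_new.shape == (4, 6 * (2 * sample_steps - 1))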
866
+ class Nystroem(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator):
+     """Approximate a kernel map using a subset of the training data.
+ 
+     Constructs an approximate feature map for an arbitrary kernel
+     using a subset of the data as basis.
+ 
+     Read more in the :ref:`User Guide <nystroem_kernel_approx>`.
+ 
+     .. versionadded:: 0.13
+ 
+     Parameters
+     ----------
+     kernel : str or callable, default='rbf'
+         Kernel map to be approximated. A callable should accept two arguments
+         and the keyword arguments passed to this object as `kernel_params`, and
+         should return a floating point number.
+ 
+     gamma : float, default=None
+         Gamma parameter for the RBF, laplacian, polynomial, exponential chi2
+         and sigmoid kernels. Interpretation of the default value is left to
+         the kernel; see the documentation for sklearn.metrics.pairwise.
+         Ignored by other kernels.
+ 
+     coef0 : float, default=None
+         Zero coefficient for polynomial and sigmoid kernels.
+         Ignored by other kernels.
+ 
+     degree : float, default=None
+         Degree of the polynomial kernel. Ignored by other kernels.
+ 
+     kernel_params : dict, default=None
+         Additional parameters (keyword arguments) for kernel function passed
+         as callable object.
+ 
+     n_components : int, default=100
+         Number of features to construct.
+         How many data points will be used to construct the mapping.
+ 
+     random_state : int, RandomState instance or None, default=None
+         Pseudo-random number generator to control the uniform sampling without
+         replacement of `n_components` of the training data to construct the
+         basis kernel.
+         Pass an int for reproducible output across multiple function calls.
+         See :term:`Glossary <random_state>`.
+ 
+     n_jobs : int, default=None
+         The number of jobs to use for the computation. This works by breaking
+         down the kernel matrix into `n_jobs` even slices and computing them in
+         parallel.
+ 
+         ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
+         ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
+         for more details.
+ 
+         .. versionadded:: 0.24
+ 
+     Attributes
+     ----------
+     components_ : ndarray of shape (n_components, n_features)
+         Subset of training points used to construct the feature map.
+ 
+     component_indices_ : ndarray of shape (n_components)
+         Indices of ``components_`` in the training set.
+ 
+     normalization_ : ndarray of shape (n_components, n_components)
+         Normalization matrix needed for embedding.
+         Square root of the kernel matrix on ``components_``.
+ 
+     n_features_in_ : int
+         Number of features seen during :term:`fit`.
+ 
+         .. versionadded:: 0.24
+ 
+     feature_names_in_ : ndarray of shape (`n_features_in_`,)
+         Names of features seen during :term:`fit`. Defined only when `X`
+         has feature names that are all strings.
+ 
+         .. versionadded:: 1.0
+ 
+     See Also
+     --------
+     AdditiveChi2Sampler : Approximate feature map for additive chi2 kernel.
+     PolynomialCountSketch : Polynomial kernel approximation via Tensor Sketch.
+     RBFSampler : Approximate an RBF kernel feature map using random Fourier
+         features.
+     SkewedChi2Sampler : Approximate feature map for "skewed chi-squared" kernel.
+     sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels.
+ 
+     References
+     ----------
+     * Williams, C.K.I. and Seeger, M.
+       "Using the Nystroem method to speed up kernel machines",
+       Advances in neural information processing systems 2001
+ 
+     * T. Yang, Y. Li, M. Mahdavi, R. Jin and Z. Zhou
+       "Nystroem Method vs Random Fourier Features: A Theoretical and Empirical
+       Comparison",
+       Advances in Neural Information Processing Systems 2012
+ 
+     Examples
+     --------
+     >>> from sklearn import datasets, svm
+     >>> from sklearn.kernel_approximation import Nystroem
+     >>> X, y = datasets.load_digits(n_class=9, return_X_y=True)
+     >>> data = X / 16.
+     >>> clf = svm.LinearSVC(dual="auto")
+     >>> feature_map_nystroem = Nystroem(gamma=.2,
+     ...                                 random_state=1,
+     ...                                 n_components=300)
+     >>> data_transformed = feature_map_nystroem.fit_transform(data)
+     >>> clf.fit(data_transformed, y)
+     LinearSVC(dual='auto')
+     >>> clf.score(data_transformed, y)
+     0.9987...
+     """
+ 
+     _parameter_constraints: dict = {
+         "kernel": [
+             StrOptions(set(PAIRWISE_KERNEL_FUNCTIONS.keys()) | {"precomputed"}),
+             callable,
+         ],
+         "gamma": [Interval(Real, 0, None, closed="left"), None],
+         "coef0": [Interval(Real, None, None, closed="neither"), None],
+         "degree": [Interval(Real, 1, None, closed="left"), None],
+         "kernel_params": [dict, None],
+         "n_components": [Interval(Integral, 1, None, closed="left")],
+         "random_state": ["random_state"],
+         "n_jobs": [Integral, None],
+     }
+ 
+     def __init__(
+         self,
+         kernel="rbf",
+         *,
+         gamma=None,
+         coef0=None,
+         degree=None,
+         kernel_params=None,
+         n_components=100,
+         random_state=None,
+         n_jobs=None,
+     ):
+         self.kernel = kernel
+         self.gamma = gamma
+         self.coef0 = coef0
+         self.degree = degree
+         self.kernel_params = kernel_params
+         self.n_components = n_components
+         self.random_state = random_state
+         self.n_jobs = n_jobs
+ 
+     @_fit_context(prefer_skip_nested_validation=True)
+     def fit(self, X, y=None):
+         """Fit estimator to data.
+ 
+         Samples a subset of training points, computes kernel
+         on these and computes normalization matrix.
+ 
+         Parameters
+         ----------
+         X : array-like, shape (n_samples, n_features)
+             Training data, where `n_samples` is the number of samples
+             and `n_features` is the number of features.
+ 
+         y : array-like, shape (n_samples,) or (n_samples, n_outputs), \
+                 default=None
+             Target values (None for unsupervised transformations).
+ 
+         Returns
+         -------
+         self : object
+             Returns the instance itself.
+         """
+         X = self._validate_data(X, accept_sparse="csr")
+         rnd = check_random_state(self.random_state)
+         n_samples = X.shape[0]
+ 
+         # get basis vectors
+         if self.n_components > n_samples:
+             # XXX should we just bail?
+             n_components = n_samples
+             warnings.warn(
+                 "n_components > n_samples. This is not possible.\n"
+                 "n_components was set to n_samples, which results"
+                 " in inefficient evaluation of the full kernel."
+             )
+ 
+         else:
+             n_components = self.n_components
+         n_components = min(n_samples, n_components)
+         inds = rnd.permutation(n_samples)
+         basis_inds = inds[:n_components]
+         basis = X[basis_inds]
+ 
+         basis_kernel = pairwise_kernels(
+             basis,
+             metric=self.kernel,
+             filter_params=True,
+             n_jobs=self.n_jobs,
+             **self._get_kernel_params(),
+         )
+ 
+         # sqrt of kernel matrix on basis vectors
+         U, S, V = svd(basis_kernel)
+         S = np.maximum(S, 1e-12)
+         self.normalization_ = np.dot(U / np.sqrt(S), V)
+         self.components_ = basis
+         self.component_indices_ = basis_inds
+         self._n_features_out = n_components
+         return self
+ 
+     def transform(self, X):
+         """Apply feature map to X.
+ 
+         Computes an approximate feature map using the kernel
+         between some training points and X.
+ 
+         Parameters
+         ----------
+         X : array-like of shape (n_samples, n_features)
+             Data to transform.
+ 
+         Returns
+         -------
+         X_transformed : ndarray of shape (n_samples, n_components)
+             Transformed data.
+         """
+         check_is_fitted(self)
+         X = self._validate_data(X, accept_sparse="csr", reset=False)
+ 
+         kernel_params = self._get_kernel_params()
+         embedded = pairwise_kernels(
+             X,
+             self.components_,
+             metric=self.kernel,
+             filter_params=True,
+             n_jobs=self.n_jobs,
+             **kernel_params,
+         )
+         return np.dot(embedded, self.normalization_.T)
+ 
+     def _get_kernel_params(self):
+         params = self.kernel_params
+         if params is None:
+             params = {}
+         if not callable(self.kernel) and self.kernel != "precomputed":
+             for param in KERNEL_PARAMS[self.kernel]:
+                 if getattr(self, param) is not None:
+                     params[param] = getattr(self, param)
+         else:
+             if (
+                 self.gamma is not None
+                 or self.coef0 is not None
+                 or self.degree is not None
+             ):
+                 raise ValueError(
+                     "Don't pass gamma, coef0 or degree to "
+                     "Nystroem if using a callable "
+                     "or precomputed kernel"
+                 )
+ 
+         return params
+ 
+     def _more_tags(self):
+         return {
+             "_xfail_checks": {
+                 "check_transformer_preserve_dtypes": (
+                     "dtypes are preserved but not at a close enough precision"
+                 )
+             },
+             "preserves_dtype": [np.float64, np.float32],
+         }
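For reference on the `fit` above: with the basis kernel factored as K_mm = U diag(S) V by SVD, `normalization_` is K_mm^(-1/2), so the embeddings Z satisfy Z Z^T = K_nm K_mm^(-1) K_mn. When n_components equals n_samples this reproduces the kernel exactly, up to numerical error. A minimal sketch of that limiting case (sizes and tolerance are illustrative):

    import numpy as np
    from sklearn.kernel_approximation import Nystroem
    from sklearn.metrics.pairwise import rbf_kernel

    rng = np.random.RandomState(0)
    X = rng.normal(size=(15, 4))

    # With n_components == n_samples the Nystroem map reproduces the kernel.
    nystroem = Nystroem(kernel="rbf", gamma=0.3, n_components=15, random_state=0)
    Z = nystroem.fit_transform(X)

    assert np.allclose(Z @ Z.T, rbf_kernel(X, gamma=0.3), atol=1e-6)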
venv/lib/python3.10/site-packages/sklearn/kernel_ridge.py ADDED
@@ -0,0 +1,237 @@
+ """Module :mod:`sklearn.kernel_ridge` implements kernel ridge regression."""
+ 
+ # Authors: Mathieu Blondel <[email protected]>
+ #          Jan Hendrik Metzen <[email protected]>
+ # License: BSD 3 clause
+ from numbers import Real
+ 
+ import numpy as np
+ 
+ from .base import BaseEstimator, MultiOutputMixin, RegressorMixin, _fit_context
+ from .linear_model._ridge import _solve_cholesky_kernel
+ from .metrics.pairwise import PAIRWISE_KERNEL_FUNCTIONS, pairwise_kernels
+ from .utils._param_validation import Interval, StrOptions
+ from .utils.validation import _check_sample_weight, check_is_fitted
+ 
+ 
+ class KernelRidge(MultiOutputMixin, RegressorMixin, BaseEstimator):
+     """Kernel ridge regression.
+ 
+     Kernel ridge regression (KRR) combines ridge regression (linear least
+     squares with l2-norm regularization) with the kernel trick. It thus
+     learns a linear function in the space induced by the respective kernel and
+     the data. For non-linear kernels, this corresponds to a non-linear
+     function in the original space.
+ 
+     The form of the model learned by KRR is identical to support vector
+     regression (SVR). However, different loss functions are used: KRR uses
+     squared error loss while support vector regression uses epsilon-insensitive
+     loss, both combined with l2 regularization. In contrast to SVR, fitting a
+     KRR model can be done in closed form and is typically faster for
+     medium-sized datasets. On the other hand, the learned model is non-sparse
+     and thus slower at prediction time than SVR, which learns a sparse model
+     for epsilon > 0.
+ 
+     This estimator has built-in support for multi-variate regression
+     (i.e., when y is a 2d-array of shape [n_samples, n_targets]).
+ 
+     Read more in the :ref:`User Guide <kernel_ridge>`.
+ 
+     Parameters
+     ----------
+     alpha : float or array-like of shape (n_targets,), default=1.0
+         Regularization strength; must be a positive float. Regularization
+         improves the conditioning of the problem and reduces the variance of
+         the estimates. Larger values specify stronger regularization.
+         Alpha corresponds to ``1 / (2C)`` in other linear models such as
+         :class:`~sklearn.linear_model.LogisticRegression` or
+         :class:`~sklearn.svm.LinearSVC`. If an array is passed, penalties are
+         assumed to be specific to the targets. Hence they must correspond in
+         number. See :ref:`ridge_regression` for the formula.
+ 
+     kernel : str or callable, default="linear"
+         Kernel mapping used internally. This parameter is directly passed to
+         :class:`~sklearn.metrics.pairwise.pairwise_kernels`.
+         If `kernel` is a string, it must be one of the metrics
+         in `pairwise.PAIRWISE_KERNEL_FUNCTIONS` or "precomputed".
+         If `kernel` is "precomputed", X is assumed to be a kernel matrix.
+         Alternatively, if `kernel` is a callable function, it is called on
+         each pair of instances (rows) and the resulting value recorded. The
+         callable should take two rows from X as input and return the
+         corresponding kernel value as a single number. This means that
+         callables from :mod:`sklearn.metrics.pairwise` are not allowed, as
+         they operate on matrices, not single samples. Use the string
+         identifying the kernel instead.
+ 
+     gamma : float, default=None
+         Gamma parameter for the RBF, laplacian, polynomial, exponential chi2
+         and sigmoid kernels. Interpretation of the default value is left to
+         the kernel; see the documentation for sklearn.metrics.pairwise.
+         Ignored by other kernels.
+ 
+     degree : float, default=3
+         Degree of the polynomial kernel. Ignored by other kernels.
+ 
+     coef0 : float, default=1
+         Zero coefficient for polynomial and sigmoid kernels.
+         Ignored by other kernels.
+ 
+     kernel_params : dict, default=None
+         Additional parameters (keyword arguments) for kernel function passed
+         as callable object.
+ 
+     Attributes
+     ----------
+     dual_coef_ : ndarray of shape (n_samples,) or (n_samples, n_targets)
+         Representation of weight vector(s) in kernel space.
+ 
+     X_fit_ : {ndarray, sparse matrix} of shape (n_samples, n_features)
+         Training data, which is also required for prediction. If
+         kernel == "precomputed" this is instead the precomputed
+         training matrix, of shape (n_samples, n_samples).
+ 
+     n_features_in_ : int
+         Number of features seen during :term:`fit`.
+ 
+         .. versionadded:: 0.24
+ 
+     feature_names_in_ : ndarray of shape (`n_features_in_`,)
+         Names of features seen during :term:`fit`. Defined only when `X`
+         has feature names that are all strings.
+ 
+         .. versionadded:: 1.0
+ 
+     See Also
+     --------
+     sklearn.gaussian_process.GaussianProcessRegressor : Gaussian
+         Process regressor providing automatic kernel hyperparameters
+         tuning and predictions uncertainty.
+     sklearn.linear_model.Ridge : Linear ridge regression.
+     sklearn.linear_model.RidgeCV : Ridge regression with built-in
+         cross-validation.
+     sklearn.svm.SVR : Support Vector Regression accepting a large variety
+         of kernels.
+ 
+     References
+     ----------
+     * Kevin P. Murphy
+       "Machine Learning: A Probabilistic Perspective", The MIT Press
+       chapter 14.4.3, pp. 492-493
+ 
+     Examples
+     --------
+     >>> from sklearn.kernel_ridge import KernelRidge
+     >>> import numpy as np
+     >>> n_samples, n_features = 10, 5
+     >>> rng = np.random.RandomState(0)
+     >>> y = rng.randn(n_samples)
+     >>> X = rng.randn(n_samples, n_features)
+     >>> krr = KernelRidge(alpha=1.0)
+     >>> krr.fit(X, y)
+     KernelRidge(alpha=1.0)
+     """
+ 
+     _parameter_constraints: dict = {
+         "alpha": [Interval(Real, 0, None, closed="left"), "array-like"],
+         "kernel": [
+             StrOptions(set(PAIRWISE_KERNEL_FUNCTIONS.keys()) | {"precomputed"}),
+             callable,
+         ],
+         "gamma": [Interval(Real, 0, None, closed="left"), None],
+         "degree": [Interval(Real, 0, None, closed="left")],
+         "coef0": [Interval(Real, None, None, closed="neither")],
+         "kernel_params": [dict, None],
+     }
+ 
+     def __init__(
+         self,
+         alpha=1,
+         *,
+         kernel="linear",
+         gamma=None,
+         degree=3,
+         coef0=1,
+         kernel_params=None,
+     ):
+         self.alpha = alpha
+         self.kernel = kernel
+         self.gamma = gamma
+         self.degree = degree
+         self.coef0 = coef0
+         self.kernel_params = kernel_params
+ 
+     def _get_kernel(self, X, Y=None):
+         if callable(self.kernel):
+             params = self.kernel_params or {}
+         else:
+             params = {"gamma": self.gamma, "degree": self.degree, "coef0": self.coef0}
+         return pairwise_kernels(X, Y, metric=self.kernel, filter_params=True, **params)
+ 
+     def _more_tags(self):
+         return {"pairwise": self.kernel == "precomputed"}
+ 
+     @_fit_context(prefer_skip_nested_validation=True)
+     def fit(self, X, y, sample_weight=None):
+         """Fit Kernel Ridge regression model.
+ 
+         Parameters
+         ----------
+         X : {array-like, sparse matrix} of shape (n_samples, n_features)
+             Training data. If kernel == "precomputed" this is instead
+             a precomputed kernel matrix, of shape (n_samples, n_samples).
+ 
+         y : array-like of shape (n_samples,) or (n_samples, n_targets)
+             Target values.
+ 
+         sample_weight : float or array-like of shape (n_samples,), default=None
+             Individual weights for each sample, ignored if None is passed.
+ 
+         Returns
+         -------
+         self : object
+             Returns the instance itself.
+         """
+         # Convert data
+         X, y = self._validate_data(
+             X, y, accept_sparse=("csr", "csc"), multi_output=True, y_numeric=True
+         )
+         if sample_weight is not None and not isinstance(sample_weight, float):
+             sample_weight = _check_sample_weight(sample_weight, X)
+ 
+         K = self._get_kernel(X)
+         alpha = np.atleast_1d(self.alpha)
+ 
+         ravel = False
+         if len(y.shape) == 1:
+             y = y.reshape(-1, 1)
+             ravel = True
+ 
+         copy = self.kernel == "precomputed"
+         self.dual_coef_ = _solve_cholesky_kernel(K, y, alpha, sample_weight, copy)
+         if ravel:
+             self.dual_coef_ = self.dual_coef_.ravel()
+ 
+         self.X_fit_ = X
+ 
+         return self
+ 
+     def predict(self, X):
+         """Predict using the kernel ridge model.
+ 
+         Parameters
+         ----------
+         X : {array-like, sparse matrix} of shape (n_samples, n_features)
+             Samples. If kernel == "precomputed" this is instead a
+             precomputed kernel matrix, shape = [n_samples,
+             n_samples_fitted], where n_samples_fitted is the number of
+             samples used in the fitting for this estimator.
+ 
+         Returns
+         -------
+         C : ndarray of shape (n_samples,) or (n_samples, n_targets)
+             Returns predicted values.
+         """
+         check_is_fitted(self)
+         X = self._validate_data(X, accept_sparse=("csr", "csc"), reset=False)
+         K = self._get_kernel(X, self.X_fit_)
+         return np.dot(K, self.dual_coef_)
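The `fit`/`predict` pair above implements the closed-form dual solution: `dual_coef_` solves (K + alpha * I) dual = y, and predictions are k(X, X_fit_) @ dual_coef_. A minimal sketch verifying this against a hand-rolled solve for the linear kernel (illustrative only):

    import numpy as np
    from sklearn.kernel_ridge import KernelRidge

    rng = np.random.RandomState(0)
    X = rng.randn(10, 3)
    y = rng.randn(10)

    model = KernelRidge(alpha=1.0, kernel="linear").fit(X, y)

    # Hand-rolled dual solution for the linear kernel K = X X^T.
    K = X @ X.T
    dual = np.linalg.solve(K + 1.0 * np.eye(10), y)

    assert np.allclose(model.dual_coef_, dual)
    assert np.allclose(model.predict(X), K @ dual)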
venv/lib/python3.10/site-packages/sklearn/linear_model/__init__.py ADDED
@@ -0,0 +1,100 @@
+ """
+ The :mod:`sklearn.linear_model` module implements a variety of linear models.
+ """
+ 
+ # See http://scikit-learn.sourceforge.net/modules/sgd.html and
+ # http://scikit-learn.sourceforge.net/modules/linear_model.html for
+ # complete documentation.
+ 
+ from ._base import LinearRegression
+ from ._bayes import ARDRegression, BayesianRidge
+ from ._coordinate_descent import (
+     ElasticNet,
+     ElasticNetCV,
+     Lasso,
+     LassoCV,
+     MultiTaskElasticNet,
+     MultiTaskElasticNetCV,
+     MultiTaskLasso,
+     MultiTaskLassoCV,
+     enet_path,
+     lasso_path,
+ )
+ from ._glm import GammaRegressor, PoissonRegressor, TweedieRegressor
+ from ._huber import HuberRegressor
+ from ._least_angle import (
+     Lars,
+     LarsCV,
+     LassoLars,
+     LassoLarsCV,
+     LassoLarsIC,
+     lars_path,
+     lars_path_gram,
+ )
+ from ._logistic import LogisticRegression, LogisticRegressionCV
+ from ._omp import (
+     OrthogonalMatchingPursuit,
+     OrthogonalMatchingPursuitCV,
+     orthogonal_mp,
+     orthogonal_mp_gram,
+ )
+ from ._passive_aggressive import PassiveAggressiveClassifier, PassiveAggressiveRegressor
+ from ._perceptron import Perceptron
+ from ._quantile import QuantileRegressor
+ from ._ransac import RANSACRegressor
+ from ._ridge import Ridge, RidgeClassifier, RidgeClassifierCV, RidgeCV, ridge_regression
+ from ._sgd_fast import Hinge, Huber, Log, ModifiedHuber, SquaredLoss
+ from ._stochastic_gradient import SGDClassifier, SGDOneClassSVM, SGDRegressor
+ from ._theil_sen import TheilSenRegressor
+ 
+ __all__ = [
+     "ARDRegression",
+     "BayesianRidge",
+     "ElasticNet",
+     "ElasticNetCV",
+     "Hinge",
+     "Huber",
+     "HuberRegressor",
+     "Lars",
+     "LarsCV",
+     "Lasso",
+     "LassoCV",
+     "LassoLars",
+     "LassoLarsCV",
+     "LassoLarsIC",
+     "LinearRegression",
+     "Log",
+     "LogisticRegression",
+     "LogisticRegressionCV",
+     "ModifiedHuber",
+     "MultiTaskElasticNet",
+     "MultiTaskElasticNetCV",
+     "MultiTaskLasso",
+     "MultiTaskLassoCV",
+     "OrthogonalMatchingPursuit",
+     "OrthogonalMatchingPursuitCV",
+     "PassiveAggressiveClassifier",
+     "PassiveAggressiveRegressor",
+     "Perceptron",
+     "QuantileRegressor",
+     "Ridge",
+     "RidgeCV",
+     "RidgeClassifier",
+     "RidgeClassifierCV",
+     "SGDClassifier",
+     "SGDRegressor",
+     "SGDOneClassSVM",
+     "SquaredLoss",
+     "TheilSenRegressor",
+     "enet_path",
+     "lars_path",
+     "lars_path_gram",
+     "lasso_path",
+     "orthogonal_mp",
+     "orthogonal_mp_gram",
+     "ridge_regression",
+     "RANSACRegressor",
+     "PoissonRegressor",
+     "GammaRegressor",
+     "TweedieRegressor",
+ ]