applied-ai-018 committed on
Commit f55eed3 · verified · 1 Parent(s): 2792309

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. venv/lib/python3.10/site-packages/sklearn/cluster/_birch.py +741 -0
  2. venv/lib/python3.10/site-packages/sklearn/cluster/_bisect_k_means.py +529 -0
  3. venv/lib/python3.10/site-packages/sklearn/cluster/_dbscan.py +476 -0
  4. venv/lib/python3.10/site-packages/sklearn/cluster/_feature_agglomeration.py +104 -0
  5. venv/lib/python3.10/site-packages/sklearn/cluster/_k_means_common.cpython-310-x86_64-linux-gnu.so +0 -0
  6. venv/lib/python3.10/site-packages/sklearn/cluster/_k_means_common.pxd +48 -0
  7. venv/lib/python3.10/site-packages/sklearn/cluster/_k_means_elkan.cpython-310-x86_64-linux-gnu.so +0 -0
  8. venv/lib/python3.10/site-packages/sklearn/cluster/_k_means_lloyd.cpython-310-x86_64-linux-gnu.so +0 -0
  9. venv/lib/python3.10/site-packages/sklearn/cluster/_k_means_minibatch.cpython-310-x86_64-linux-gnu.so +0 -0
  10. venv/lib/python3.10/site-packages/sklearn/cluster/_mean_shift.py +575 -0
  11. venv/lib/python3.10/site-packages/sklearn/cluster/_optics.py +1199 -0
  12. venv/lib/python3.10/site-packages/sklearn/cluster/_spectral.py +799 -0
  13. venv/lib/python3.10/site-packages/sklearn/compose/__pycache__/__init__.cpython-310.pyc +0 -0
  14. venv/lib/python3.10/site-packages/sklearn/compose/__pycache__/_column_transformer.cpython-310.pyc +0 -0
  15. venv/lib/python3.10/site-packages/sklearn/compose/__pycache__/_target.cpython-310.pyc +0 -0
  16. venv/lib/python3.10/site-packages/sklearn/compose/tests/__init__.py +0 -0
  17. venv/lib/python3.10/site-packages/sklearn/compose/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  18. venv/lib/python3.10/site-packages/sklearn/compose/tests/__pycache__/test_column_transformer.cpython-310.pyc +0 -0
  19. venv/lib/python3.10/site-packages/sklearn/compose/tests/__pycache__/test_target.cpython-310.pyc +0 -0
  20. venv/lib/python3.10/site-packages/sklearn/compose/tests/test_column_transformer.py +2582 -0
  21. venv/lib/python3.10/site-packages/sklearn/compose/tests/test_target.py +387 -0
  22. venv/lib/python3.10/site-packages/sklearn/tests/__init__.py +0 -0
  23. venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  24. venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/metadata_routing_common.cpython-310.pyc +0 -0
  25. venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/random_seed.cpython-310.pyc +0 -0
  26. venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_base.cpython-310.pyc +0 -0
  27. venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_build.cpython-310.pyc +0 -0
  28. venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_calibration.cpython-310.pyc +0 -0
  29. venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_check_build.cpython-310.pyc +0 -0
  30. venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_common.cpython-310.pyc +0 -0
  31. venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_config.cpython-310.pyc +0 -0
  32. venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_discriminant_analysis.cpython-310.pyc +0 -0
  33. venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_docstring_parameters.cpython-310.pyc +0 -0
  34. venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_docstrings.cpython-310.pyc +0 -0
  35. venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_dummy.cpython-310.pyc +0 -0
  36. venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_init.cpython-310.pyc +0 -0
  37. venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_isotonic.cpython-310.pyc +0 -0
  38. venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_kernel_approximation.cpython-310.pyc +0 -0
  39. venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_kernel_ridge.cpython-310.pyc +0 -0
  40. venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_metadata_routing.cpython-310.pyc +0 -0
  41. venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_metaestimators.cpython-310.pyc +0 -0
  42. venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_metaestimators_metadata_routing.cpython-310.pyc +0 -0
  43. venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_min_dependencies_readme.cpython-310.pyc +0 -0
  44. venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_multiclass.cpython-310.pyc +0 -0
  45. venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_multioutput.cpython-310.pyc +0 -0
  46. venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_naive_bayes.cpython-310.pyc +0 -0
  47. venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_pipeline.cpython-310.pyc +0 -0
  48. venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_public_functions.cpython-310.pyc +0 -0
  49. venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_random_projection.cpython-310.pyc +0 -0
  50. venv/lib/python3.10/site-packages/sklearn/tests/metadata_routing_common.py +456 -0
venv/lib/python3.10/site-packages/sklearn/cluster/_birch.py ADDED
@@ -0,0 +1,741 @@
1
+ # Authors: Manoj Kumar <[email protected]>
2
+ # Alexandre Gramfort <[email protected]>
3
+ # Joel Nothman <[email protected]>
4
+ # License: BSD 3 clause
5
+
6
+ import warnings
7
+ from math import sqrt
8
+ from numbers import Integral, Real
9
+
10
+ import numpy as np
11
+ from scipy import sparse
12
+
13
+ from .._config import config_context
14
+ from ..base import (
15
+ BaseEstimator,
16
+ ClassNamePrefixFeaturesOutMixin,
17
+ ClusterMixin,
18
+ TransformerMixin,
19
+ _fit_context,
20
+ )
21
+ from ..exceptions import ConvergenceWarning
22
+ from ..metrics import pairwise_distances_argmin
23
+ from ..metrics.pairwise import euclidean_distances
24
+ from ..utils._param_validation import Interval
25
+ from ..utils.extmath import row_norms
26
+ from ..utils.validation import check_is_fitted
27
+ from . import AgglomerativeClustering
28
+
29
+
30
+ def _iterate_sparse_X(X):
31
+ """This little hack returns a densified row when iterating over a sparse
32
+ matrix, instead of constructing a sparse matrix for every row, which is
33
+ expensive.
34
+ """
35
+ n_samples = X.shape[0]
36
+ X_indices = X.indices
37
+ X_data = X.data
38
+ X_indptr = X.indptr
39
+
40
+ for i in range(n_samples):
41
+ row = np.zeros(X.shape[1])
42
+ startptr, endptr = X_indptr[i], X_indptr[i + 1]
43
+ nonzero_indices = X_indices[startptr:endptr]
44
+ row[nonzero_indices] = X_data[startptr:endptr]
45
+ yield row
46
+
47
+
48
+ def _split_node(node, threshold, branching_factor):
49
+ """The node has to be split if there is no place for a new subcluster
50
+ in the node.
51
+ 1. Two empty nodes and two empty subclusters are initialized.
52
+ 2. The pair of distant subclusters is found.
53
+ 3. The properties of the empty subclusters and nodes are updated
54
+ according to the nearest distance from the subclusters to the
55
+ pair of distant subclusters.
56
+ 4. The two nodes are set as children to the two subclusters.
57
+ """
58
+ new_subcluster1 = _CFSubcluster()
59
+ new_subcluster2 = _CFSubcluster()
60
+ new_node1 = _CFNode(
61
+ threshold=threshold,
62
+ branching_factor=branching_factor,
63
+ is_leaf=node.is_leaf,
64
+ n_features=node.n_features,
65
+ dtype=node.init_centroids_.dtype,
66
+ )
67
+ new_node2 = _CFNode(
68
+ threshold=threshold,
69
+ branching_factor=branching_factor,
70
+ is_leaf=node.is_leaf,
71
+ n_features=node.n_features,
72
+ dtype=node.init_centroids_.dtype,
73
+ )
74
+ new_subcluster1.child_ = new_node1
75
+ new_subcluster2.child_ = new_node2
76
+
77
+ if node.is_leaf:
78
+ if node.prev_leaf_ is not None:
79
+ node.prev_leaf_.next_leaf_ = new_node1
80
+ new_node1.prev_leaf_ = node.prev_leaf_
81
+ new_node1.next_leaf_ = new_node2
82
+ new_node2.prev_leaf_ = new_node1
83
+ new_node2.next_leaf_ = node.next_leaf_
84
+ if node.next_leaf_ is not None:
85
+ node.next_leaf_.prev_leaf_ = new_node2
86
+
87
+ dist = euclidean_distances(
88
+ node.centroids_, Y_norm_squared=node.squared_norm_, squared=True
89
+ )
90
+ n_clusters = dist.shape[0]
91
+
92
+ farthest_idx = np.unravel_index(dist.argmax(), (n_clusters, n_clusters))
93
+ node1_dist, node2_dist = dist[(farthest_idx,)]
94
+
95
+ node1_closer = node1_dist < node2_dist
96
+ # make sure node1 is closest to itself even if all distances are equal.
97
+ # This can only happen when all node.centroids_ are duplicates leading to all
98
+ # distances between centroids being zero.
99
+ node1_closer[farthest_idx[0]] = True
100
+
101
+ for idx, subcluster in enumerate(node.subclusters_):
102
+ if node1_closer[idx]:
103
+ new_node1.append_subcluster(subcluster)
104
+ new_subcluster1.update(subcluster)
105
+ else:
106
+ new_node2.append_subcluster(subcluster)
107
+ new_subcluster2.update(subcluster)
108
+ return new_subcluster1, new_subcluster2
109
+
110
+
111
+ class _CFNode:
112
+ """Each node in a CFTree is called a CFNode.
113
+
114
+ The CFNode can have a maximum of branching_factor
115
+ number of CFSubclusters.
116
+
117
+ Parameters
118
+ ----------
119
+ threshold : float
120
+ Threshold needed for a new subcluster to enter a CFSubcluster.
121
+
122
+ branching_factor : int
123
+ Maximum number of CF subclusters in each node.
124
+
125
+ is_leaf : bool
126
+ We need to know if the CFNode is a leaf or not, in order to
127
+ retrieve the final subclusters.
128
+
129
+ n_features : int
130
+ The number of features.
131
+
132
+ Attributes
133
+ ----------
134
+ subclusters_ : list
135
+ List of subclusters for a particular CFNode.
136
+
137
+ prev_leaf_ : _CFNode
138
+ Useful only if is_leaf is True.
139
+
140
+ next_leaf_ : _CFNode
141
+ Next leaf pointer. Useful only if is_leaf is True, in order to retrieve
142
+ the final subclusters.
143
+
144
+ init_centroids_ : ndarray of shape (branching_factor + 1, n_features)
145
+ Manipulate ``init_centroids_`` throughout rather than centroids_ since
146
+ the centroids are just a view of the ``init_centroids_`` .
147
+
148
+ init_sq_norm_ : ndarray of shape (branching_factor + 1,)
149
+ Manipulate ``init_sq_norm_`` throughout. Similar to ``init_centroids_``.
150
+
151
+ centroids_ : ndarray of shape (branching_factor + 1, n_features)
152
+ View of ``init_centroids_``.
153
+
154
+ squared_norm_ : ndarray of shape (branching_factor + 1,)
155
+ View of ``init_sq_norm_``.
156
+
157
+ """
158
+
159
+ def __init__(self, *, threshold, branching_factor, is_leaf, n_features, dtype):
160
+ self.threshold = threshold
161
+ self.branching_factor = branching_factor
162
+ self.is_leaf = is_leaf
163
+ self.n_features = n_features
164
+
165
+ # The list of subclusters, centroids and squared norms
166
+ # to manipulate throughout.
167
+ self.subclusters_ = []
168
+ self.init_centroids_ = np.zeros((branching_factor + 1, n_features), dtype=dtype)
169
+ self.init_sq_norm_ = np.zeros((branching_factor + 1), dtype)
170
+ self.squared_norm_ = []
171
+ self.prev_leaf_ = None
172
+ self.next_leaf_ = None
173
+
174
+ def append_subcluster(self, subcluster):
175
+ n_samples = len(self.subclusters_)
176
+ self.subclusters_.append(subcluster)
177
+ self.init_centroids_[n_samples] = subcluster.centroid_
178
+ self.init_sq_norm_[n_samples] = subcluster.sq_norm_
179
+
180
+ # Keep centroids and squared norm as views. In this way
181
+ # if we change init_centroids and init_sq_norm_, it is
182
+ # sufficient.
183
+ self.centroids_ = self.init_centroids_[: n_samples + 1, :]
184
+ self.squared_norm_ = self.init_sq_norm_[: n_samples + 1]
185
+
186
+ def update_split_subclusters(self, subcluster, new_subcluster1, new_subcluster2):
187
+ """Remove a subcluster from a node and update it with the
188
+ split subclusters.
189
+ """
190
+ ind = self.subclusters_.index(subcluster)
191
+ self.subclusters_[ind] = new_subcluster1
192
+ self.init_centroids_[ind] = new_subcluster1.centroid_
193
+ self.init_sq_norm_[ind] = new_subcluster1.sq_norm_
194
+ self.append_subcluster(new_subcluster2)
195
+
196
+ def insert_cf_subcluster(self, subcluster):
197
+ """Insert a new subcluster into the node."""
198
+ if not self.subclusters_:
199
+ self.append_subcluster(subcluster)
200
+ return False
201
+
202
+ threshold = self.threshold
203
+ branching_factor = self.branching_factor
204
+ # We need to find the closest subcluster among all the
205
+ # subclusters so that we can insert our new subcluster.
206
+ dist_matrix = np.dot(self.centroids_, subcluster.centroid_)
207
+ dist_matrix *= -2.0
208
+ dist_matrix += self.squared_norm_
209
+ closest_index = np.argmin(dist_matrix)
210
+ closest_subcluster = self.subclusters_[closest_index]
211
+
212
+ # If the subcluster has a child, we need a recursive strategy.
213
+ if closest_subcluster.child_ is not None:
214
+ split_child = closest_subcluster.child_.insert_cf_subcluster(subcluster)
215
+
216
+ if not split_child:
217
+ # If it is determined that the child need not be split, we
218
+ # can just update the closest_subcluster
219
+ closest_subcluster.update(subcluster)
220
+ self.init_centroids_[closest_index] = self.subclusters_[
221
+ closest_index
222
+ ].centroid_
223
+ self.init_sq_norm_[closest_index] = self.subclusters_[
224
+ closest_index
225
+ ].sq_norm_
226
+ return False
227
+
228
+ # Things are not too good: we need to redistribute the subclusters in
229
+ # our child node, and add a new subcluster in the parent
230
+ # subcluster to accommodate the new child.
231
+ else:
232
+ new_subcluster1, new_subcluster2 = _split_node(
233
+ closest_subcluster.child_,
234
+ threshold,
235
+ branching_factor,
236
+ )
237
+ self.update_split_subclusters(
238
+ closest_subcluster, new_subcluster1, new_subcluster2
239
+ )
240
+
241
+ if len(self.subclusters_) > self.branching_factor:
242
+ return True
243
+ return False
244
+
245
+ # good to go!
246
+ else:
247
+ merged = closest_subcluster.merge_subcluster(subcluster, self.threshold)
248
+ if merged:
249
+ self.init_centroids_[closest_index] = closest_subcluster.centroid_
250
+ self.init_sq_norm_[closest_index] = closest_subcluster.sq_norm_
251
+ return False
252
+
253
+ # not close to any other subclusters, and we still
254
+ # have space, so add.
255
+ elif len(self.subclusters_) < self.branching_factor:
256
+ self.append_subcluster(subcluster)
257
+ return False
258
+
259
+ # We do not have enough space nor is it closer to an
260
+ # other subcluster. We need to split.
261
+ else:
262
+ self.append_subcluster(subcluster)
263
+ return True
264
+
265
+
266
+ class _CFSubcluster:
267
+ """Each subcluster in a CFNode is called a CFSubcluster.
268
+
269
+ A CFSubcluster can have a CFNode as its child.
270
+
271
+ Parameters
272
+ ----------
273
+ linear_sum : ndarray of shape (n_features,), default=None
274
+ Sample. This is kept optional to allow initialization of empty
275
+ subclusters.
276
+
277
+ Attributes
278
+ ----------
279
+ n_samples_ : int
280
+ Number of samples that belong to each subcluster.
281
+
282
+ linear_sum_ : ndarray
283
+ Linear sum of all the samples in a subcluster. Prevents holding
284
+ all sample data in memory.
285
+
286
+ squared_sum_ : float
287
+ Sum of the squared l2 norms of all samples belonging to a subcluster.
288
+
289
+ centroid_ : ndarray of shape (branching_factor + 1, n_features)
290
+ Centroid of the subcluster. Prevent recomputing of centroids when
291
+ ``CFNode.centroids_`` is called.
292
+
293
+ child_ : _CFNode
294
+ Child Node of the subcluster. Once a given _CFNode is set as the child
295
+ of the _CFSubcluster, it is set to ``self.child_``.
296
+
297
+ sq_norm_ : ndarray of shape (branching_factor + 1,)
298
+ Squared norm of the subcluster. Used to prevent recomputing when
299
+ pairwise minimum distances are computed.
300
+ """
301
+
302
+ def __init__(self, *, linear_sum=None):
303
+ if linear_sum is None:
304
+ self.n_samples_ = 0
305
+ self.squared_sum_ = 0.0
306
+ self.centroid_ = self.linear_sum_ = 0
307
+ else:
308
+ self.n_samples_ = 1
309
+ self.centroid_ = self.linear_sum_ = linear_sum
310
+ self.squared_sum_ = self.sq_norm_ = np.dot(
311
+ self.linear_sum_, self.linear_sum_
312
+ )
313
+ self.child_ = None
314
+
315
+ def update(self, subcluster):
316
+ self.n_samples_ += subcluster.n_samples_
317
+ self.linear_sum_ += subcluster.linear_sum_
318
+ self.squared_sum_ += subcluster.squared_sum_
319
+ self.centroid_ = self.linear_sum_ / self.n_samples_
320
+ self.sq_norm_ = np.dot(self.centroid_, self.centroid_)
321
+
322
+ def merge_subcluster(self, nominee_cluster, threshold):
323
+ """Check if a cluster is worthy enough to be merged. If
324
+ yes then merge.
325
+ """
326
+ new_ss = self.squared_sum_ + nominee_cluster.squared_sum_
327
+ new_ls = self.linear_sum_ + nominee_cluster.linear_sum_
328
+ new_n = self.n_samples_ + nominee_cluster.n_samples_
329
+ new_centroid = (1 / new_n) * new_ls
330
+ new_sq_norm = np.dot(new_centroid, new_centroid)
331
+
332
+ # The squared radius of the cluster is defined:
333
+ # r^2 = sum_i ||x_i - c||^2 / n
334
+ # with x_i the n points assigned to the cluster and c its centroid:
335
+ # c = sum_i x_i / n
336
+ # This can be expanded to:
337
+ # r^2 = sum_i ||x_i||^2 / n - 2 < sum_i x_i / n, c> + n ||c||^2 / n
338
+ # and therefore simplifies to:
339
+ # r^2 = sum_i ||x_i||^2 / n - ||c||^2
340
+ sq_radius = new_ss / new_n - new_sq_norm
341
+
342
+ if sq_radius <= threshold**2:
343
+ (
344
+ self.n_samples_,
345
+ self.linear_sum_,
346
+ self.squared_sum_,
347
+ self.centroid_,
348
+ self.sq_norm_,
349
+ ) = (new_n, new_ls, new_ss, new_centroid, new_sq_norm)
350
+ return True
351
+ return False
352
+
353
+ @property
354
+ def radius(self):
355
+ """Return radius of the subcluster"""
356
+ # Because of numerical issues, this could become negative
357
+ sq_radius = self.squared_sum_ / self.n_samples_ - self.sq_norm_
358
+ return sqrt(max(0, sq_radius))
359
+
360
+
361
+ class Birch(
362
+ ClassNamePrefixFeaturesOutMixin, ClusterMixin, TransformerMixin, BaseEstimator
363
+ ):
364
+ """Implements the BIRCH clustering algorithm.
365
+
366
+ It is a memory-efficient, online-learning algorithm provided as an
367
+ alternative to :class:`MiniBatchKMeans`. It constructs a tree
368
+ data structure with the cluster centroids being read off the leaf.
369
+ These can be either the final cluster centroids or can be provided as input
370
+ to another clustering algorithm such as :class:`AgglomerativeClustering`.
371
+
372
+ Read more in the :ref:`User Guide <birch>`.
373
+
374
+ .. versionadded:: 0.16
375
+
376
+ Parameters
377
+ ----------
378
+ threshold : float, default=0.5
379
+ The radius of the subcluster obtained by merging a new sample and the
380
+ closest subcluster should be less than the threshold. Otherwise a new
381
+ subcluster is started. Setting this value to be very low promotes
382
+ splitting and vice-versa.
383
+
384
+ branching_factor : int, default=50
385
+ Maximum number of CF subclusters in each node. If a new sample enters
386
+ such that the number of subclusters exceeds the branching_factor then
387
+ that node is split into two nodes with the subclusters redistributed
388
+ in each. The parent subcluster of that node is removed and two new
389
+ subclusters are added as parents of the 2 split nodes.
390
+
391
+ n_clusters : int, instance of sklearn.cluster model or None, default=3
392
+ Number of clusters after the final clustering step, which treats the
393
+ subclusters from the leaves as new samples.
394
+
395
+ - `None` : the final clustering step is not performed and the
396
+ subclusters are returned as they are.
397
+
398
+ - :mod:`sklearn.cluster` Estimator : If a model is provided, the model
399
+ is fit treating the subclusters as new samples and the initial data
400
+ is mapped to the label of the closest subcluster.
401
+
402
+ - `int` : the model fit is :class:`AgglomerativeClustering` with
403
+ `n_clusters` set to be equal to the int.
404
+
405
+ compute_labels : bool, default=True
406
+ Whether or not to compute labels for each fit.
407
+
408
+ copy : bool, default=True
409
+ Whether or not to make a copy of the given data. If set to False,
410
+ the initial data will be overwritten.
411
+
412
+ Attributes
413
+ ----------
414
+ root_ : _CFNode
415
+ Root of the CFTree.
416
+
417
+ dummy_leaf_ : _CFNode
418
+ Start pointer to all the leaves.
419
+
420
+ subcluster_centers_ : ndarray
421
+ Centroids of all subclusters read directly from the leaves.
422
+
423
+ subcluster_labels_ : ndarray
424
+ Labels assigned to the centroids of the subclusters after
425
+ they are clustered globally.
426
+
427
+ labels_ : ndarray of shape (n_samples,)
428
+ Array of labels assigned to the input data.
429
+ if partial_fit is used instead of fit, they are assigned to the
430
+ last batch of data.
431
+
432
+ n_features_in_ : int
433
+ Number of features seen during :term:`fit`.
434
+
435
+ .. versionadded:: 0.24
436
+
437
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
438
+ Names of features seen during :term:`fit`. Defined only when `X`
439
+ has feature names that are all strings.
440
+
441
+ .. versionadded:: 1.0
442
+
443
+ See Also
444
+ --------
445
+ MiniBatchKMeans : Alternative implementation that does incremental updates
446
+ of the centers' positions using mini-batches.
447
+
448
+ Notes
449
+ -----
450
+ The tree data structure consists of nodes with each node consisting of
451
+ a number of subclusters. The maximum number of subclusters in a node
452
+ is determined by the branching factor. Each subcluster maintains a
453
+ linear sum, squared sum and the number of samples in that subcluster.
454
+ In addition, each subcluster can also have a node as its child, if the
455
+ subcluster is not a member of a leaf node.
456
+
457
+ For a new point entering the root, it is merged with the subcluster closest
458
+ to it and the linear sum, squared sum and the number of samples of that
459
+ subcluster are updated. This is done recursively till the properties of
460
+ the leaf node are updated.
461
+
462
+ References
463
+ ----------
464
+ * Tian Zhang, Raghu Ramakrishnan, Miron Livny
465
+ BIRCH: An efficient data clustering method for large databases.
466
+ https://www.cs.sfu.ca/CourseCentral/459/han/papers/zhang96.pdf
467
+
468
+ * Roberto Perdisci
469
+ JBirch - Java implementation of BIRCH clustering algorithm
470
+ https://code.google.com/archive/p/jbirch
471
+
472
+ Examples
473
+ --------
474
+ >>> from sklearn.cluster import Birch
475
+ >>> X = [[0, 1], [0.3, 1], [-0.3, 1], [0, -1], [0.3, -1], [-0.3, -1]]
476
+ >>> brc = Birch(n_clusters=None)
477
+ >>> brc.fit(X)
478
+ Birch(n_clusters=None)
479
+ >>> brc.predict(X)
480
+ array([0, 0, 0, 1, 1, 1])
481
+ """
482
+
483
+ _parameter_constraints: dict = {
484
+ "threshold": [Interval(Real, 0.0, None, closed="neither")],
485
+ "branching_factor": [Interval(Integral, 1, None, closed="neither")],
486
+ "n_clusters": [None, ClusterMixin, Interval(Integral, 1, None, closed="left")],
487
+ "compute_labels": ["boolean"],
488
+ "copy": ["boolean"],
489
+ }
490
+
491
+ def __init__(
492
+ self,
493
+ *,
494
+ threshold=0.5,
495
+ branching_factor=50,
496
+ n_clusters=3,
497
+ compute_labels=True,
498
+ copy=True,
499
+ ):
500
+ self.threshold = threshold
501
+ self.branching_factor = branching_factor
502
+ self.n_clusters = n_clusters
503
+ self.compute_labels = compute_labels
504
+ self.copy = copy
505
+
506
+ @_fit_context(prefer_skip_nested_validation=True)
507
+ def fit(self, X, y=None):
508
+ """
509
+ Build a CF Tree for the input data.
510
+
511
+ Parameters
512
+ ----------
513
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
514
+ Input data.
515
+
516
+ y : Ignored
517
+ Not used, present here for API consistency by convention.
518
+
519
+ Returns
520
+ -------
521
+ self
522
+ Fitted estimator.
523
+ """
524
+ return self._fit(X, partial=False)
525
+
526
+ def _fit(self, X, partial):
527
+ has_root = getattr(self, "root_", None)
528
+ first_call = not (partial and has_root)
529
+
530
+ X = self._validate_data(
531
+ X,
532
+ accept_sparse="csr",
533
+ copy=self.copy,
534
+ reset=first_call,
535
+ dtype=[np.float64, np.float32],
536
+ )
537
+ threshold = self.threshold
538
+ branching_factor = self.branching_factor
539
+
540
+ n_samples, n_features = X.shape
541
+
542
+ # If partial_fit is called for the first time or fit is called, we
543
+ # start a new tree.
544
+ if first_call:
545
+ # The first root is the leaf. Manipulate this object throughout.
546
+ self.root_ = _CFNode(
547
+ threshold=threshold,
548
+ branching_factor=branching_factor,
549
+ is_leaf=True,
550
+ n_features=n_features,
551
+ dtype=X.dtype,
552
+ )
553
+
554
+ # To enable getting back subclusters.
555
+ self.dummy_leaf_ = _CFNode(
556
+ threshold=threshold,
557
+ branching_factor=branching_factor,
558
+ is_leaf=True,
559
+ n_features=n_features,
560
+ dtype=X.dtype,
561
+ )
562
+ self.dummy_leaf_.next_leaf_ = self.root_
563
+ self.root_.prev_leaf_ = self.dummy_leaf_
564
+
565
+ # Cannot vectorize this loop; reason enough to use Cython.
566
+ if not sparse.issparse(X):
567
+ iter_func = iter
568
+ else:
569
+ iter_func = _iterate_sparse_X
570
+
571
+ for sample in iter_func(X):
572
+ subcluster = _CFSubcluster(linear_sum=sample)
573
+ split = self.root_.insert_cf_subcluster(subcluster)
574
+
575
+ if split:
576
+ new_subcluster1, new_subcluster2 = _split_node(
577
+ self.root_, threshold, branching_factor
578
+ )
579
+ del self.root_
580
+ self.root_ = _CFNode(
581
+ threshold=threshold,
582
+ branching_factor=branching_factor,
583
+ is_leaf=False,
584
+ n_features=n_features,
585
+ dtype=X.dtype,
586
+ )
587
+ self.root_.append_subcluster(new_subcluster1)
588
+ self.root_.append_subcluster(new_subcluster2)
589
+
590
+ centroids = np.concatenate([leaf.centroids_ for leaf in self._get_leaves()])
591
+ self.subcluster_centers_ = centroids
592
+ self._n_features_out = self.subcluster_centers_.shape[0]
593
+
594
+ self._global_clustering(X)
595
+ return self
596
+
597
+ def _get_leaves(self):
598
+ """
599
+ Retrieve the leaves of the CF Node.
600
+
601
+ Returns
602
+ -------
603
+ leaves : list of shape (n_leaves,)
604
+ List of the leaf nodes.
605
+ """
606
+ leaf_ptr = self.dummy_leaf_.next_leaf_
607
+ leaves = []
608
+ while leaf_ptr is not None:
609
+ leaves.append(leaf_ptr)
610
+ leaf_ptr = leaf_ptr.next_leaf_
611
+ return leaves
612
+
613
+ @_fit_context(prefer_skip_nested_validation=True)
614
+ def partial_fit(self, X=None, y=None):
615
+ """
616
+ Online learning. Prevents rebuilding of CFTree from scratch.
617
+
618
+ Parameters
619
+ ----------
620
+ X : {array-like, sparse matrix} of shape (n_samples, n_features), \
621
+ default=None
622
+ Input data. If X is not provided, only the global clustering
623
+ step is done.
624
+
625
+ y : Ignored
626
+ Not used, present here for API consistency by convention.
627
+
628
+ Returns
629
+ -------
630
+ self
631
+ Fitted estimator.
632
+ """
633
+ if X is None:
634
+ # Perform just the final global clustering step.
635
+ self._global_clustering()
636
+ return self
637
+ else:
638
+ return self._fit(X, partial=True)
639
+
640
+ def _check_fit(self, X):
641
+ check_is_fitted(self)
642
+
643
+ if (
644
+ hasattr(self, "subcluster_centers_")
645
+ and X.shape[1] != self.subcluster_centers_.shape[1]
646
+ ):
647
+ raise ValueError(
648
+ "Training data and predicted data do not have same number of features."
649
+ )
650
+
651
+ def predict(self, X):
652
+ """
653
+ Predict data using the ``centroids_`` of subclusters.
654
+
655
+ Avoid computation of the row norms of X.
656
+
657
+ Parameters
658
+ ----------
659
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
660
+ Input data.
661
+
662
+ Returns
663
+ -------
664
+ labels : ndarray of shape(n_samples,)
665
+ Labelled data.
666
+ """
667
+ check_is_fitted(self)
668
+ X = self._validate_data(X, accept_sparse="csr", reset=False)
669
+ return self._predict(X)
670
+
671
+ def _predict(self, X):
672
+ """Predict data using the ``centroids_`` of subclusters."""
673
+ kwargs = {"Y_norm_squared": self._subcluster_norms}
674
+
675
+ with config_context(assume_finite=True):
676
+ argmin = pairwise_distances_argmin(
677
+ X, self.subcluster_centers_, metric_kwargs=kwargs
678
+ )
679
+ return self.subcluster_labels_[argmin]
680
+
681
+ def transform(self, X):
682
+ """
683
+ Transform X into subcluster centroids dimension.
684
+
685
+ Each dimension represents the distance from the sample point to each
686
+ cluster centroid.
687
+
688
+ Parameters
689
+ ----------
690
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
691
+ Input data.
692
+
693
+ Returns
694
+ -------
695
+ X_trans : {array-like, sparse matrix} of shape (n_samples, n_clusters)
696
+ Transformed data.
697
+ """
698
+ check_is_fitted(self)
699
+ X = self._validate_data(X, accept_sparse="csr", reset=False)
700
+ with config_context(assume_finite=True):
701
+ return euclidean_distances(X, self.subcluster_centers_)
702
+
703
+ def _global_clustering(self, X=None):
704
+ """
705
+ Global clustering for the subclusters obtained after fitting
706
+ """
707
+ clusterer = self.n_clusters
708
+ centroids = self.subcluster_centers_
709
+ compute_labels = (X is not None) and self.compute_labels
710
+
711
+ # Preprocessing for the global clustering.
712
+ not_enough_centroids = False
713
+ if isinstance(clusterer, Integral):
714
+ clusterer = AgglomerativeClustering(n_clusters=self.n_clusters)
715
+ # There is no need to perform the global clustering step.
716
+ if len(centroids) < self.n_clusters:
717
+ not_enough_centroids = True
718
+
719
+ # To use in predict to avoid recalculation.
720
+ self._subcluster_norms = row_norms(self.subcluster_centers_, squared=True)
721
+
722
+ if clusterer is None or not_enough_centroids:
723
+ self.subcluster_labels_ = np.arange(len(centroids))
724
+ if not_enough_centroids:
725
+ warnings.warn(
726
+ "Number of subclusters found (%d) by BIRCH is less "
727
+ "than (%d). Decrease the threshold."
728
+ % (len(centroids), self.n_clusters),
729
+ ConvergenceWarning,
730
+ )
731
+ else:
732
+ # The global clustering step that clusters the subclusters of
733
+ # the leaves. It assumes the centroids of the subclusters as
734
+ # samples and finds the final centroids.
735
+ self.subcluster_labels_ = clusterer.fit_predict(self.subcluster_centers_)
736
+
737
+ if compute_labels:
738
+ self.labels_ = self._predict(X)
739
+
740
+ def _more_tags(self):
741
+ return {"preserves_dtype": [np.float64, np.float32]}
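For reference, here is a minimal usage sketch of the Birch estimator defined in the file above, assembled from the Examples section of its own docstring; the extra partial_fit call is an illustrative assumption showing the online-learning path the class documents.

    from sklearn.cluster import Birch

    # Toy data from the class docstring: two groups of three points.
    X = [[0, 1], [0.3, 1], [-0.3, 1], [0, -1], [0.3, -1], [-0.3, -1]]

    # n_clusters=None skips the final global clustering step, so the
    # subcluster centroids read from the CF-tree leaves act as clusters.
    brc = Birch(n_clusters=None)
    brc.fit(X)
    print(brc.predict(X))  # -> [0 0 0 1 1 1]

    # Online update without rebuilding the CF-tree (illustrative data).
    brc.partial_fit([[0.1, 1.1], [-0.2, -0.9]])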
venv/lib/python3.10/site-packages/sklearn/cluster/_bisect_k_means.py ADDED
@@ -0,0 +1,529 @@
1
+ """Bisecting K-means clustering."""
2
+ # Author: Michal Krawczyk <[email protected]>
3
+
4
+ import warnings
5
+
6
+ import numpy as np
7
+ import scipy.sparse as sp
8
+
9
+ from ..base import _fit_context
10
+ from ..utils._openmp_helpers import _openmp_effective_n_threads
11
+ from ..utils._param_validation import Integral, Interval, StrOptions
12
+ from ..utils.extmath import row_norms
13
+ from ..utils.validation import _check_sample_weight, check_is_fitted, check_random_state
14
+ from ._k_means_common import _inertia_dense, _inertia_sparse
15
+ from ._kmeans import (
16
+ _BaseKMeans,
17
+ _kmeans_single_elkan,
18
+ _kmeans_single_lloyd,
19
+ _labels_inertia_threadpool_limit,
20
+ )
21
+
22
+
23
+ class _BisectingTree:
24
+ """Tree structure representing the hierarchical clusters of BisectingKMeans."""
25
+
26
+ def __init__(self, center, indices, score):
27
+ """Create a new cluster node in the tree.
28
+
29
+ The node holds the center of this cluster and the indices of the data points
30
+ that belong to it.
31
+ """
32
+ self.center = center
33
+ self.indices = indices
34
+ self.score = score
35
+
36
+ self.left = None
37
+ self.right = None
38
+
39
+ def split(self, labels, centers, scores):
40
+ """Split the cluster node into two subclusters."""
41
+ self.left = _BisectingTree(
42
+ indices=self.indices[labels == 0], center=centers[0], score=scores[0]
43
+ )
44
+ self.right = _BisectingTree(
45
+ indices=self.indices[labels == 1], center=centers[1], score=scores[1]
46
+ )
47
+
48
+ # reset the indices attribute to save memory
49
+ self.indices = None
50
+
51
+ def get_cluster_to_bisect(self):
52
+ """Return the cluster node to bisect next.
53
+
54
+ It's based on the score of the cluster, which can be either the number of
55
+ data points assigned to that cluster or the inertia of that cluster
56
+ (see `bisecting_strategy` for details).
57
+ """
58
+ max_score = None
59
+
60
+ for cluster_leaf in self.iter_leaves():
61
+ if max_score is None or cluster_leaf.score > max_score:
62
+ max_score = cluster_leaf.score
63
+ best_cluster_leaf = cluster_leaf
64
+
65
+ return best_cluster_leaf
66
+
67
+ def iter_leaves(self):
68
+ """Iterate over all the cluster leaves in the tree."""
69
+ if self.left is None:
70
+ yield self
71
+ else:
72
+ yield from self.left.iter_leaves()
73
+ yield from self.right.iter_leaves()
74
+
75
+
76
+ class BisectingKMeans(_BaseKMeans):
77
+ """Bisecting K-Means clustering.
78
+
79
+ Read more in the :ref:`User Guide <bisect_k_means>`.
80
+
81
+ .. versionadded:: 1.1
82
+
83
+ Parameters
84
+ ----------
85
+ n_clusters : int, default=8
86
+ The number of clusters to form as well as the number of
87
+ centroids to generate.
88
+
89
+ init : {'k-means++', 'random'} or callable, default='random'
90
+ Method for initialization:
91
+
92
+ 'k-means++' : selects initial cluster centers for k-mean
93
+ clustering in a smart way to speed up convergence. See section
94
+ Notes in k_init for more details.
95
+
96
+ 'random': choose `n_clusters` observations (rows) at random from data
97
+ for the initial centroids.
98
+
99
+ If a callable is passed, it should take arguments X, n_clusters and a
100
+ random state and return an initialization.
101
+
102
+ n_init : int, default=1
103
+ Number of times the inner k-means algorithm will be run with different
104
+ centroid seeds in each bisection.
105
+ For each bisection, this produces the best output of n_init
106
+ consecutive runs in terms of inertia.
107
+
108
+ random_state : int, RandomState instance or None, default=None
109
+ Determines random number generation for centroid initialization
110
+ in inner K-Means. Use an int to make the randomness deterministic.
111
+ See :term:`Glossary <random_state>`.
112
+
113
+ max_iter : int, default=300
114
+ Maximum number of iterations of the inner k-means algorithm at each
115
+ bisection.
116
+
117
+ verbose : int, default=0
118
+ Verbosity mode.
119
+
120
+ tol : float, default=1e-4
121
+ Relative tolerance with regards to Frobenius norm of the difference
122
+ in the cluster centers of two consecutive iterations to declare
123
+ convergence. Used in inner k-means algorithm at each bisection to pick
124
+ best possible clusters.
125
+
126
+ copy_x : bool, default=True
127
+ When pre-computing distances it is more numerically accurate to center
128
+ the data first. If copy_x is True (default), then the original data is
129
+ not modified. If False, the original data is modified, and put back
130
+ before the function returns, but small numerical differences may be
131
+ introduced by subtracting and then adding the data mean. Note that if
132
+ the original data is not C-contiguous, a copy will be made even if
133
+ copy_x is False. If the original data is sparse, but not in CSR format,
134
+ a copy will be made even if copy_x is False.
135
+
136
+ algorithm : {"lloyd", "elkan"}, default="lloyd"
137
+ Inner K-means algorithm used in bisection.
138
+ The classical EM-style algorithm is `"lloyd"`.
139
+ The `"elkan"` variation can be more efficient on some datasets with
140
+ well-defined clusters, by using the triangle inequality. However it's
141
+ more memory intensive due to the allocation of an extra array of shape
142
+ `(n_samples, n_clusters)`.
143
+
144
+ bisecting_strategy : {"biggest_inertia", "largest_cluster"},\
145
+ default="biggest_inertia"
146
+ Defines how bisection should be performed:
147
+
148
+ - "biggest_inertia" means that BisectingKMeans will always check
149
+ all calculated clusters for the cluster with the biggest SSE
150
+ (Sum of squared errors) and bisect it. This approach concentrates on
151
+ precision, but may be costly in terms of execution time (especially for
152
+ larger amounts of data points).
153
+
154
+ - "largest_cluster" - BisectingKMeans will always split cluster with
155
+ the largest number of points assigned to it among all clusters
156
+ previously calculated. That should work faster than picking by SSE
157
+ ('biggest_inertia') and may produce similar results in most cases.
158
+
159
+ Attributes
160
+ ----------
161
+ cluster_centers_ : ndarray of shape (n_clusters, n_features)
162
+ Coordinates of cluster centers. If the algorithm stops before fully
163
+ converging (see ``tol`` and ``max_iter``), these will not be
164
+ consistent with ``labels_``.
165
+
166
+ labels_ : ndarray of shape (n_samples,)
167
+ Labels of each point.
168
+
169
+ inertia_ : float
170
+ Sum of squared distances of samples to their closest cluster center,
171
+ weighted by the sample weights if provided.
172
+
173
+ n_features_in_ : int
174
+ Number of features seen during :term:`fit`.
175
+
176
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
177
+ Names of features seen during :term:`fit`. Defined only when `X`
178
+ has feature names that are all strings.
179
+
180
+ See Also
181
+ --------
182
+ KMeans : Original implementation of K-Means algorithm.
183
+
184
+ Notes
185
+ -----
186
+ It might be inefficient when n_clusters is less than 3, due to unnecessary
187
+ calculations for that case.
188
+
189
+ Examples
190
+ --------
191
+ >>> from sklearn.cluster import BisectingKMeans
192
+ >>> import numpy as np
193
+ >>> X = np.array([[1, 1], [10, 1], [3, 1],
194
+ ... [10, 0], [2, 1], [10, 2],
195
+ ... [10, 8], [10, 9], [10, 10]])
196
+ >>> bisect_means = BisectingKMeans(n_clusters=3, random_state=0).fit(X)
197
+ >>> bisect_means.labels_
198
+ array([0, 2, 0, 2, 0, 2, 1, 1, 1], dtype=int32)
199
+ >>> bisect_means.predict([[0, 0], [12, 3]])
200
+ array([0, 2], dtype=int32)
201
+ >>> bisect_means.cluster_centers_
202
+ array([[ 2., 1.],
203
+ [10., 9.],
204
+ [10., 1.]])
205
+ """
206
+
207
+ _parameter_constraints: dict = {
208
+ **_BaseKMeans._parameter_constraints,
209
+ "init": [StrOptions({"k-means++", "random"}), callable],
210
+ "n_init": [Interval(Integral, 1, None, closed="left")],
211
+ "copy_x": ["boolean"],
212
+ "algorithm": [StrOptions({"lloyd", "elkan"})],
213
+ "bisecting_strategy": [StrOptions({"biggest_inertia", "largest_cluster"})],
214
+ }
215
+
216
+ def __init__(
217
+ self,
218
+ n_clusters=8,
219
+ *,
220
+ init="random",
221
+ n_init=1,
222
+ random_state=None,
223
+ max_iter=300,
224
+ verbose=0,
225
+ tol=1e-4,
226
+ copy_x=True,
227
+ algorithm="lloyd",
228
+ bisecting_strategy="biggest_inertia",
229
+ ):
230
+ super().__init__(
231
+ n_clusters=n_clusters,
232
+ init=init,
233
+ max_iter=max_iter,
234
+ verbose=verbose,
235
+ random_state=random_state,
236
+ tol=tol,
237
+ n_init=n_init,
238
+ )
239
+
240
+ self.copy_x = copy_x
241
+ self.algorithm = algorithm
242
+ self.bisecting_strategy = bisecting_strategy
243
+
244
+ def _warn_mkl_vcomp(self, n_active_threads):
245
+ """Warn when vcomp and mkl are both present"""
246
+ warnings.warn(
247
+ "BisectingKMeans is known to have a memory leak on Windows "
248
+ "with MKL, when there are less chunks than available "
249
+ "threads. You can avoid it by setting the environment"
250
+ f" variable OMP_NUM_THREADS={n_active_threads}."
251
+ )
252
+
253
+ def _inertia_per_cluster(self, X, centers, labels, sample_weight):
254
+ """Calculate the sum of squared errors (inertia) per cluster.
255
+
256
+ Parameters
257
+ ----------
258
+ X : {ndarray, csr_matrix} of shape (n_samples, n_features)
259
+ The input samples.
260
+
261
+ centers : ndarray of shape (n_clusters=2, n_features)
262
+ The cluster centers.
263
+
264
+ labels : ndarray of shape (n_samples,)
265
+ Index of the cluster each sample belongs to.
266
+
267
+ sample_weight : ndarray of shape (n_samples,)
268
+ The weights for each observation in X.
269
+
270
+ Returns
271
+ -------
272
+ inertia_per_cluster : ndarray of shape (n_clusters=2,)
273
+ Sum of squared errors (inertia) for each cluster.
274
+ """
275
+ n_clusters = centers.shape[0] # = 2 since centers comes from a bisection
276
+ _inertia = _inertia_sparse if sp.issparse(X) else _inertia_dense
277
+
278
+ inertia_per_cluster = np.empty(n_clusters)
279
+ for label in range(n_clusters):
280
+ inertia_per_cluster[label] = _inertia(
281
+ X, sample_weight, centers, labels, self._n_threads, single_label=label
282
+ )
283
+
284
+ return inertia_per_cluster
285
+
286
+ def _bisect(self, X, x_squared_norms, sample_weight, cluster_to_bisect):
287
+ """Split a cluster into 2 subsclusters.
288
+
289
+ Parameters
290
+ ----------
291
+ X : {ndarray, csr_matrix} of shape (n_samples, n_features)
292
+ Training instances to cluster.
293
+
294
+ x_squared_norms : ndarray of shape (n_samples,)
295
+ Squared euclidean norm of each data point.
296
+
297
+ sample_weight : ndarray of shape (n_samples,)
298
+ The weights for each observation in X.
299
+
300
+ cluster_to_bisect : _BisectingTree node object
301
+ The cluster node to split.
302
+ """
303
+ X = X[cluster_to_bisect.indices]
304
+ x_squared_norms = x_squared_norms[cluster_to_bisect.indices]
305
+ sample_weight = sample_weight[cluster_to_bisect.indices]
306
+
307
+ best_inertia = None
308
+
309
+ # Split samples in X into 2 clusters.
310
+ # Repeating `n_init` times to obtain best clusters
311
+ for _ in range(self.n_init):
312
+ centers_init = self._init_centroids(
313
+ X,
314
+ x_squared_norms=x_squared_norms,
315
+ init=self.init,
316
+ random_state=self._random_state,
317
+ n_centroids=2,
318
+ sample_weight=sample_weight,
319
+ )
320
+
321
+ labels, inertia, centers, _ = self._kmeans_single(
322
+ X,
323
+ sample_weight,
324
+ centers_init,
325
+ max_iter=self.max_iter,
326
+ verbose=self.verbose,
327
+ tol=self.tol,
328
+ n_threads=self._n_threads,
329
+ )
330
+
331
+ # allow small tolerance on the inertia to accommodate for
332
+ # non-deterministic rounding errors due to parallel computation
333
+ if best_inertia is None or inertia < best_inertia * (1 - 1e-6):
334
+ best_labels = labels
335
+ best_centers = centers
336
+ best_inertia = inertia
337
+
338
+ if self.verbose:
339
+ print(f"New centroids from bisection: {best_centers}")
340
+
341
+ if self.bisecting_strategy == "biggest_inertia":
342
+ scores = self._inertia_per_cluster(
343
+ X, best_centers, best_labels, sample_weight
344
+ )
345
+ else: # bisecting_strategy == "largest_cluster"
346
+ # Using minlength to make sure that we have the counts for both labels even
347
+ # if all samples are labelled 0.
348
+ scores = np.bincount(best_labels, minlength=2)
349
+
350
+ cluster_to_bisect.split(best_labels, best_centers, scores)
351
+
352
+ @_fit_context(prefer_skip_nested_validation=True)
353
+ def fit(self, X, y=None, sample_weight=None):
354
+ """Compute bisecting k-means clustering.
355
+
356
+ Parameters
357
+ ----------
358
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
359
+
360
+ Training instances to cluster.
361
+
362
+ .. note:: The data will be converted to C ordering,
363
+ which will cause a memory copy
364
+ if the given data is not C-contiguous.
365
+
366
+ y : Ignored
367
+ Not used, present here for API consistency by convention.
368
+
369
+ sample_weight : array-like of shape (n_samples,), default=None
370
+ The weights for each observation in X. If None, all observations
371
+ are assigned equal weight. `sample_weight` is not used during
372
+ initialization if `init` is a callable.
373
+
374
+ Returns
375
+ -------
376
+ self
377
+ Fitted estimator.
378
+ """
379
+ X = self._validate_data(
380
+ X,
381
+ accept_sparse="csr",
382
+ dtype=[np.float64, np.float32],
383
+ order="C",
384
+ copy=self.copy_x,
385
+ accept_large_sparse=False,
386
+ )
387
+
388
+ self._check_params_vs_input(X)
389
+
390
+ self._random_state = check_random_state(self.random_state)
391
+ sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
392
+ self._n_threads = _openmp_effective_n_threads()
393
+
394
+ if self.algorithm == "lloyd" or self.n_clusters == 1:
395
+ self._kmeans_single = _kmeans_single_lloyd
396
+ self._check_mkl_vcomp(X, X.shape[0])
397
+ else:
398
+ self._kmeans_single = _kmeans_single_elkan
399
+
400
+ # Subtract the mean of X for more accurate distance computations
401
+ if not sp.issparse(X):
402
+ self._X_mean = X.mean(axis=0)
403
+ X -= self._X_mean
404
+
405
+ # Initialize the hierarchical clusters tree
406
+ self._bisecting_tree = _BisectingTree(
407
+ indices=np.arange(X.shape[0]),
408
+ center=X.mean(axis=0),
409
+ score=0,
410
+ )
411
+
412
+ x_squared_norms = row_norms(X, squared=True)
413
+
414
+ for _ in range(self.n_clusters - 1):
415
+ # Choose the cluster to bisect
416
+ cluster_to_bisect = self._bisecting_tree.get_cluster_to_bisect()
417
+
418
+ # Split this cluster into 2 subclusters
419
+ self._bisect(X, x_squared_norms, sample_weight, cluster_to_bisect)
420
+
421
+ # Aggregate final labels and centers from the bisecting tree
422
+ self.labels_ = np.full(X.shape[0], -1, dtype=np.int32)
423
+ self.cluster_centers_ = np.empty((self.n_clusters, X.shape[1]), dtype=X.dtype)
424
+
425
+ for i, cluster_node in enumerate(self._bisecting_tree.iter_leaves()):
426
+ self.labels_[cluster_node.indices] = i
427
+ self.cluster_centers_[i] = cluster_node.center
428
+ cluster_node.label = i # label final clusters for future prediction
429
+ cluster_node.indices = None # release memory
430
+
431
+ # Restore original data
432
+ if not sp.issparse(X):
433
+ X += self._X_mean
434
+ self.cluster_centers_ += self._X_mean
435
+
436
+ _inertia = _inertia_sparse if sp.issparse(X) else _inertia_dense
437
+ self.inertia_ = _inertia(
438
+ X, sample_weight, self.cluster_centers_, self.labels_, self._n_threads
439
+ )
440
+
441
+ self._n_features_out = self.cluster_centers_.shape[0]
442
+
443
+ return self
444
+
445
+ def predict(self, X):
446
+ """Predict which cluster each sample in X belongs to.
447
+
448
+ Prediction is made by going down the hierarchical tree
449
+ searching for the closest leaf cluster.
450
+
451
+ In the vector quantization literature, `cluster_centers_` is called
452
+ the code book and each value returned by `predict` is the index of
453
+ the closest code in the code book.
454
+
455
+ Parameters
456
+ ----------
457
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
458
+ New data to predict.
459
+
460
+ Returns
461
+ -------
462
+ labels : ndarray of shape (n_samples,)
463
+ Index of the cluster each sample belongs to.
464
+ """
465
+ check_is_fitted(self)
466
+
467
+ X = self._check_test_data(X)
468
+ x_squared_norms = row_norms(X, squared=True)
469
+
470
+ # sample weights are unused but necessary in cython helpers
471
+ sample_weight = np.ones_like(x_squared_norms)
472
+
473
+ labels = self._predict_recursive(X, sample_weight, self._bisecting_tree)
474
+
475
+ return labels
476
+
477
+ def _predict_recursive(self, X, sample_weight, cluster_node):
478
+ """Predict recursively by going down the hierarchical tree.
479
+
480
+ Parameters
481
+ ----------
482
+ X : {ndarray, csr_matrix} of shape (n_samples, n_features)
483
+ The data points, currently assigned to `cluster_node`, to predict between
484
+ the subclusters of this node.
485
+
486
+ sample_weight : ndarray of shape (n_samples,)
487
+ The weights for each observation in X.
488
+
489
+ cluster_node : _BisectingTree node object
490
+ The cluster node of the hierarchical tree.
491
+
492
+ Returns
493
+ -------
494
+ labels : ndarray of shape (n_samples,)
495
+ Index of the cluster each sample belongs to.
496
+ """
497
+ if cluster_node.left is None:
498
+ # This cluster has no subcluster. Labels are just the label of the cluster.
499
+ return np.full(X.shape[0], cluster_node.label, dtype=np.int32)
500
+
501
+ # Determine if data points belong to the left or right subcluster
502
+ centers = np.vstack((cluster_node.left.center, cluster_node.right.center))
503
+ if hasattr(self, "_X_mean"):
504
+ centers += self._X_mean
505
+
506
+ cluster_labels = _labels_inertia_threadpool_limit(
507
+ X,
508
+ sample_weight,
509
+ centers,
510
+ self._n_threads,
511
+ return_inertia=False,
512
+ )
513
+ mask = cluster_labels == 0
514
+
515
+ # Compute the labels for each subset of the data points.
516
+ labels = np.full(X.shape[0], -1, dtype=np.int32)
517
+
518
+ labels[mask] = self._predict_recursive(
519
+ X[mask], sample_weight[mask], cluster_node.left
520
+ )
521
+
522
+ labels[~mask] = self._predict_recursive(
523
+ X[~mask], sample_weight[~mask], cluster_node.right
524
+ )
525
+
526
+ return labels
527
+
528
+ def _more_tags(self):
529
+ return {"preserves_dtype": [np.float64, np.float32]}
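Similarly, a short usage sketch for the BisectingKMeans estimator above, taken from the Examples section of its docstring (data values are illustrative).

    import numpy as np
    from sklearn.cluster import BisectingKMeans

    X = np.array([[1, 1], [10, 1], [3, 1],
                  [10, 0], [2, 1], [10, 2],
                  [10, 8], [10, 9], [10, 10]])

    # Each of the n_clusters - 1 bisections runs the inner k-means on the
    # leaf chosen by `bisecting_strategy` (biggest SSE by default).
    bisect_means = BisectingKMeans(n_clusters=3, random_state=0).fit(X)
    print(bisect_means.labels_)                     # -> [0 2 0 2 0 2 1 1 1]
    print(bisect_means.predict([[0, 0], [12, 3]]))  # -> [0 2]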
venv/lib/python3.10/site-packages/sklearn/cluster/_dbscan.py ADDED
@@ -0,0 +1,476 @@
1
+ """
2
+ DBSCAN: Density-Based Spatial Clustering of Applications with Noise
3
+ """
4
+
5
+ # Author: Robert Layton <[email protected]>
6
+ # Joel Nothman <[email protected]>
7
+ # Lars Buitinck
8
+ #
9
+ # License: BSD 3 clause
10
+
11
+ import warnings
12
+ from numbers import Integral, Real
13
+
14
+ import numpy as np
15
+ from scipy import sparse
16
+
17
+ from ..base import BaseEstimator, ClusterMixin, _fit_context
18
+ from ..metrics.pairwise import _VALID_METRICS
19
+ from ..neighbors import NearestNeighbors
20
+ from ..utils._param_validation import Interval, StrOptions, validate_params
21
+ from ..utils.validation import _check_sample_weight
22
+ from ._dbscan_inner import dbscan_inner
23
+
24
+
25
+ @validate_params(
26
+ {
27
+ "X": ["array-like", "sparse matrix"],
28
+ "sample_weight": ["array-like", None],
29
+ },
30
+ prefer_skip_nested_validation=False,
31
+ )
32
+ def dbscan(
33
+ X,
34
+ eps=0.5,
35
+ *,
36
+ min_samples=5,
37
+ metric="minkowski",
38
+ metric_params=None,
39
+ algorithm="auto",
40
+ leaf_size=30,
41
+ p=2,
42
+ sample_weight=None,
43
+ n_jobs=None,
44
+ ):
45
+ """Perform DBSCAN clustering from vector array or distance matrix.
46
+
47
+ Read more in the :ref:`User Guide <dbscan>`.
48
+
49
+ Parameters
50
+ ----------
51
+ X : {array-like, sparse (CSR) matrix} of shape (n_samples, n_features) or \
52
+ (n_samples, n_samples)
53
+ A feature array, or array of distances between samples if
54
+ ``metric='precomputed'``.
55
+
56
+ eps : float, default=0.5
57
+ The maximum distance between two samples for one to be considered
58
+ as in the neighborhood of the other. This is not a maximum bound
59
+ on the distances of points within a cluster. This is the most
60
+ important DBSCAN parameter to choose appropriately for your data set
61
+ and distance function.
62
+
63
+ min_samples : int, default=5
64
+ The number of samples (or total weight) in a neighborhood for a point
65
+ to be considered as a core point. This includes the point itself.
66
+
67
+ metric : str or callable, default='minkowski'
68
+ The metric to use when calculating distance between instances in a
69
+ feature array. If metric is a string or callable, it must be one of
70
+ the options allowed by :func:`sklearn.metrics.pairwise_distances` for
71
+ its metric parameter.
72
+ If metric is "precomputed", X is assumed to be a distance matrix and
73
+ must be square during fit.
74
+ X may be a :term:`sparse graph <sparse graph>`,
75
+ in which case only "nonzero" elements may be considered neighbors.
76
+
77
+ metric_params : dict, default=None
78
+ Additional keyword arguments for the metric function.
79
+
80
+ .. versionadded:: 0.19
81
+
82
+ algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
83
+ The algorithm to be used by the NearestNeighbors module
84
+ to compute pointwise distances and find nearest neighbors.
85
+ See NearestNeighbors module documentation for details.
86
+
87
+ leaf_size : int, default=30
88
+ Leaf size passed to BallTree or cKDTree. This can affect the speed
89
+ of the construction and query, as well as the memory required
90
+ to store the tree. The optimal value depends
91
+ on the nature of the problem.
92
+
93
+ p : float, default=2
94
+ The power of the Minkowski metric to be used to calculate distance
95
+ between points.
96
+
97
+ sample_weight : array-like of shape (n_samples,), default=None
98
+ Weight of each sample, such that a sample with a weight of at least
99
+ ``min_samples`` is by itself a core sample; a sample with negative
100
+ weight may inhibit its eps-neighbor from being core.
101
+ Note that weights are absolute, and default to 1.
102
+
103
+ n_jobs : int, default=None
104
+ The number of parallel jobs to run for neighbors search. ``None`` means
105
+ 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means
106
+ using all processors. See :term:`Glossary <n_jobs>` for more details.
107
+ If precomputed distance are used, parallel execution is not available
108
+ and thus n_jobs will have no effect.
109
+
110
+ Returns
111
+ -------
112
+ core_samples : ndarray of shape (n_core_samples,)
113
+ Indices of core samples.
114
+
115
+ labels : ndarray of shape (n_samples,)
116
+ Cluster labels for each point. Noisy samples are given the label -1.
117
+
118
+ See Also
119
+ --------
120
+ DBSCAN : An estimator interface for this clustering algorithm.
121
+ OPTICS : A similar estimator interface clustering at multiple values of
122
+ eps. Our implementation is optimized for memory usage.
123
+
124
+ Notes
125
+ -----
126
+ For an example, see :ref:`examples/cluster/plot_dbscan.py
127
+ <sphx_glr_auto_examples_cluster_plot_dbscan.py>`.
128
+
129
+ This implementation bulk-computes all neighborhood queries, which increases
130
+ the memory complexity to O(n.d) where d is the average number of neighbors,
131
+ while original DBSCAN had memory complexity O(n). It may attract a higher
132
+ memory complexity when querying these nearest neighborhoods, depending
133
+ on the ``algorithm``.
134
+
135
+ One way to avoid the query complexity is to pre-compute sparse
136
+ neighborhoods in chunks using
137
+ :func:`NearestNeighbors.radius_neighbors_graph
138
+ <sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>` with
139
+ ``mode='distance'``, then using ``metric='precomputed'`` here.
140
+
141
+ Another way to reduce memory and computation time is to remove
142
+ (near-)duplicate points and use ``sample_weight`` instead.
143
+
144
+ :class:`~sklearn.cluster.OPTICS` provides a similar clustering with lower
145
+ memory usage.
146
+
147
+ References
148
+ ----------
149
+ Ester, M., H. P. Kriegel, J. Sander, and X. Xu, `"A Density-Based
150
+ Algorithm for Discovering Clusters in Large Spatial Databases with Noise"
151
+ <https://www.dbs.ifi.lmu.de/Publikationen/Papers/KDD-96.final.frame.pdf>`_.
152
+ In: Proceedings of the 2nd International Conference on Knowledge Discovery
153
+ and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
154
+
155
+ Schubert, E., Sander, J., Ester, M., Kriegel, H. P., & Xu, X. (2017).
156
+ :doi:`"DBSCAN revisited, revisited: why and how you should (still) use DBSCAN."
157
+ <10.1145/3068335>`
158
+ ACM Transactions on Database Systems (TODS), 42(3), 19.
159
+
160
+ Examples
161
+ --------
162
+ >>> from sklearn.cluster import dbscan
163
+ >>> X = [[1, 2], [2, 2], [2, 3], [8, 7], [8, 8], [25, 80]]
164
+ >>> core_samples, labels = dbscan(X, eps=3, min_samples=2)
165
+ >>> core_samples
166
+ array([0, 1, 2, 3, 4])
167
+ >>> labels
168
+ array([ 0, 0, 0, 1, 1, -1])
169
+ """
170
+
171
+ est = DBSCAN(
172
+ eps=eps,
173
+ min_samples=min_samples,
174
+ metric=metric,
175
+ metric_params=metric_params,
176
+ algorithm=algorithm,
177
+ leaf_size=leaf_size,
178
+ p=p,
179
+ n_jobs=n_jobs,
180
+ )
181
+ est.fit(X, sample_weight=sample_weight)
182
+ return est.core_sample_indices_, est.labels_
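A minimal sketch of the memory-saving strategy described in the Notes above: precompute a sparse radius-neighborhood graph with mode='distance' and pass it to dbscan with metric='precomputed' (illustrative data and parameters; not part of the file being added):

import numpy as np
from sklearn.cluster import dbscan
from sklearn.neighbors import NearestNeighbors

X = np.array([[1, 2], [2, 2], [2, 3], [8, 7], [8, 8], [25, 80]], dtype=float)
eps = 3.0
# mode="distance" stores the actual distances so that eps can still be applied.
D = NearestNeighbors(radius=eps).fit(X).radius_neighbors_graph(X, mode="distance")
core_samples, labels = dbscan(D, eps=eps, min_samples=2, metric="precomputed")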
183
+
184
+
185
+ class DBSCAN(ClusterMixin, BaseEstimator):
186
+ """Perform DBSCAN clustering from vector array or distance matrix.
187
+
188
+ DBSCAN - Density-Based Spatial Clustering of Applications with Noise.
189
+ Finds core samples of high density and expands clusters from them.
190
+ Good for data which contains clusters of similar density.
191
+
192
+ The worst case memory complexity of DBSCAN is :math:`O({n}^2)`, which can
193
+ occur when the `eps` param is large and `min_samples` is low.
194
+
195
+ Read more in the :ref:`User Guide <dbscan>`.
196
+
197
+ Parameters
198
+ ----------
199
+ eps : float, default=0.5
200
+ The maximum distance between two samples for one to be considered
201
+ as in the neighborhood of the other. This is not a maximum bound
202
+ on the distances of points within a cluster. This is the most
203
+ important DBSCAN parameter to choose appropriately for your data set
204
+ and distance function.
205
+
206
+ min_samples : int, default=5
207
+ The number of samples (or total weight) in a neighborhood for a point to
208
+ be considered as a core point. This includes the point itself. If
209
+ `min_samples` is set to a higher value, DBSCAN will find denser clusters,
210
+ whereas if it is set to a lower value, the found clusters will be more
211
+ sparse.
212
+
213
+ metric : str, or callable, default='euclidean'
214
+ The metric to use when calculating distance between instances in a
215
+ feature array. If metric is a string or callable, it must be one of
216
+ the options allowed by :func:`sklearn.metrics.pairwise_distances` for
217
+ its metric parameter.
218
+ If metric is "precomputed", X is assumed to be a distance matrix and
219
+ must be square. X may be a :term:`sparse graph`, in which
220
+ case only "nonzero" elements may be considered neighbors for DBSCAN.
221
+
222
+ .. versionadded:: 0.17
223
+ metric *precomputed* to accept precomputed sparse matrix.
224
+
225
+ metric_params : dict, default=None
226
+ Additional keyword arguments for the metric function.
227
+
228
+ .. versionadded:: 0.19
229
+
230
+ algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
231
+ The algorithm to be used by the NearestNeighbors module
232
+ to compute pointwise distances and find nearest neighbors.
233
+ See NearestNeighbors module documentation for details.
234
+
235
+ leaf_size : int, default=30
236
+ Leaf size passed to BallTree or cKDTree. This can affect the speed
237
+ of the construction and query, as well as the memory required
238
+ to store the tree. The optimal value depends
239
+ on the nature of the problem.
240
+
241
+ p : float, default=None
242
+ The power of the Minkowski metric to be used to calculate distance
243
+ between points. If None, then ``p=2`` (equivalent to the Euclidean
244
+ distance).
245
+
246
+ n_jobs : int, default=None
247
+ The number of parallel jobs to run.
248
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
249
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
250
+ for more details.
251
+
252
+ Attributes
253
+ ----------
254
+ core_sample_indices_ : ndarray of shape (n_core_samples,)
255
+ Indices of core samples.
256
+
257
+ components_ : ndarray of shape (n_core_samples, n_features)
258
+ Copy of each core sample found by training.
259
+
260
+ labels_ : ndarray of shape (n_samples)
261
+ Cluster labels for each point in the dataset given to fit().
262
+ Noisy samples are given the label -1.
263
+
264
+ n_features_in_ : int
265
+ Number of features seen during :term:`fit`.
266
+
267
+ .. versionadded:: 0.24
268
+
269
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
270
+ Names of features seen during :term:`fit`. Defined only when `X`
271
+ has feature names that are all strings.
272
+
273
+ .. versionadded:: 1.0
274
+
275
+ See Also
276
+ --------
277
+ OPTICS : A similar clustering at multiple values of eps. Our implementation
278
+ is optimized for memory usage.
279
+
280
+ Notes
281
+ -----
282
+ For an example, see :ref:`examples/cluster/plot_dbscan.py
283
+ <sphx_glr_auto_examples_cluster_plot_dbscan.py>`.
284
+
285
+ This implementation bulk-computes all neighborhood queries, which increases
286
+ the memory complexity to O(n.d) where d is the average number of neighbors,
287
+ while original DBSCAN had memory complexity O(n). It may attract a higher
288
+ memory complexity when querying these nearest neighborhoods, depending
289
+ on the ``algorithm``.
290
+
291
+ One way to avoid the query complexity is to pre-compute sparse
292
+ neighborhoods in chunks using
293
+ :func:`NearestNeighbors.radius_neighbors_graph
294
+ <sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>` with
295
+ ``mode='distance'``, then using ``metric='precomputed'`` here.
296
+
297
+ Another way to reduce memory and computation time is to remove
298
+ (near-)duplicate points and use ``sample_weight`` instead.
299
+
300
+ :class:`~sklearn.cluster.OPTICS` provides a similar clustering with lower memory
301
+ usage.
302
+
303
+ References
304
+ ----------
305
+ Ester, M., H. P. Kriegel, J. Sander, and X. Xu, `"A Density-Based
306
+ Algorithm for Discovering Clusters in Large Spatial Databases with Noise"
307
+ <https://www.dbs.ifi.lmu.de/Publikationen/Papers/KDD-96.final.frame.pdf>`_.
308
+ In: Proceedings of the 2nd International Conference on Knowledge Discovery
309
+ and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
310
+
311
+ Schubert, E., Sander, J., Ester, M., Kriegel, H. P., & Xu, X. (2017).
312
+ :doi:`"DBSCAN revisited, revisited: why and how you should (still) use DBSCAN."
313
+ <10.1145/3068335>`
314
+ ACM Transactions on Database Systems (TODS), 42(3), 19.
315
+
316
+ Examples
317
+ --------
318
+ >>> from sklearn.cluster import DBSCAN
319
+ >>> import numpy as np
320
+ >>> X = np.array([[1, 2], [2, 2], [2, 3],
321
+ ... [8, 7], [8, 8], [25, 80]])
322
+ >>> clustering = DBSCAN(eps=3, min_samples=2).fit(X)
323
+ >>> clustering.labels_
324
+ array([ 0, 0, 0, 1, 1, -1])
325
+ >>> clustering
326
+ DBSCAN(eps=3, min_samples=2)
327
+ """
328
+
329
+ _parameter_constraints: dict = {
330
+ "eps": [Interval(Real, 0.0, None, closed="neither")],
331
+ "min_samples": [Interval(Integral, 1, None, closed="left")],
332
+ "metric": [
333
+ StrOptions(set(_VALID_METRICS) | {"precomputed"}),
334
+ callable,
335
+ ],
336
+ "metric_params": [dict, None],
337
+ "algorithm": [StrOptions({"auto", "ball_tree", "kd_tree", "brute"})],
338
+ "leaf_size": [Interval(Integral, 1, None, closed="left")],
339
+ "p": [Interval(Real, 0.0, None, closed="left"), None],
340
+ "n_jobs": [Integral, None],
341
+ }
342
+
343
+ def __init__(
344
+ self,
345
+ eps=0.5,
346
+ *,
347
+ min_samples=5,
348
+ metric="euclidean",
349
+ metric_params=None,
350
+ algorithm="auto",
351
+ leaf_size=30,
352
+ p=None,
353
+ n_jobs=None,
354
+ ):
355
+ self.eps = eps
356
+ self.min_samples = min_samples
357
+ self.metric = metric
358
+ self.metric_params = metric_params
359
+ self.algorithm = algorithm
360
+ self.leaf_size = leaf_size
361
+ self.p = p
362
+ self.n_jobs = n_jobs
363
+
364
+ @_fit_context(
365
+ # DBSCAN.metric is not validated yet
366
+ prefer_skip_nested_validation=False
367
+ )
368
+ def fit(self, X, y=None, sample_weight=None):
369
+ """Perform DBSCAN clustering from features, or distance matrix.
370
+
371
+ Parameters
372
+ ----------
373
+ X : {array-like, sparse matrix} of shape (n_samples, n_features), or \
374
+ (n_samples, n_samples)
375
+ Training instances to cluster, or distances between instances if
376
+ ``metric='precomputed'``. If a sparse matrix is provided, it will
377
+ be converted into a sparse ``csr_matrix``.
378
+
379
+ y : Ignored
380
+ Not used, present here for API consistency by convention.
381
+
382
+ sample_weight : array-like of shape (n_samples,), default=None
383
+ Weight of each sample, such that a sample with a weight of at least
384
+ ``min_samples`` is by itself a core sample; a sample with a
385
+ negative weight may inhibit its eps-neighbor from being core.
386
+ Note that weights are absolute, and default to 1.
387
+
388
+ Returns
389
+ -------
390
+ self : object
391
+ Returns a fitted instance of self.
392
+ """
393
+ X = self._validate_data(X, accept_sparse="csr")
394
+
395
+ if sample_weight is not None:
396
+ sample_weight = _check_sample_weight(sample_weight, X)
397
+
398
+ # Calculate neighborhood for all samples. This leaves the original
399
+ # point in, which needs to be considered later (i.e. point i is in the
400
+ # neighborhood of point i. While true, it's useless information)
401
+ if self.metric == "precomputed" and sparse.issparse(X):
402
+ # set the diagonal to explicit values, as a point is its own
403
+ # neighbor
404
+ X = X.copy() # copy to avoid in-place modification
405
+ with warnings.catch_warnings():
406
+ warnings.simplefilter("ignore", sparse.SparseEfficiencyWarning)
407
+ X.setdiag(X.diagonal())
408
+
409
+ neighbors_model = NearestNeighbors(
410
+ radius=self.eps,
411
+ algorithm=self.algorithm,
412
+ leaf_size=self.leaf_size,
413
+ metric=self.metric,
414
+ metric_params=self.metric_params,
415
+ p=self.p,
416
+ n_jobs=self.n_jobs,
417
+ )
418
+ neighbors_model.fit(X)
419
+ # This has worst case O(n^2) memory complexity
420
+ neighborhoods = neighbors_model.radius_neighbors(X, return_distance=False)
421
+
422
+ if sample_weight is None:
423
+ n_neighbors = np.array([len(neighbors) for neighbors in neighborhoods])
424
+ else:
425
+ n_neighbors = np.array(
426
+ [np.sum(sample_weight[neighbors]) for neighbors in neighborhoods]
427
+ )
428
+
429
+ # Initially, all samples are noise.
430
+ labels = np.full(X.shape[0], -1, dtype=np.intp)
431
+
432
+ # A mask (as uint8) of all core samples found.
433
+ core_samples = np.asarray(n_neighbors >= self.min_samples, dtype=np.uint8)
434
+ dbscan_inner(core_samples, neighborhoods, labels)
435
+
436
+ self.core_sample_indices_ = np.where(core_samples)[0]
437
+ self.labels_ = labels
438
+
439
+ if len(self.core_sample_indices_):
440
+ # fix for scipy sparse indexing issue
441
+ self.components_ = X[self.core_sample_indices_].copy()
442
+ else:
443
+ # no core samples
444
+ self.components_ = np.empty((0, X.shape[1]))
445
+ return self
446
+
447
+ def fit_predict(self, X, y=None, sample_weight=None):
448
+ """Compute clusters from a data or distance matrix and predict labels.
449
+
450
+ Parameters
451
+ ----------
452
+ X : {array-like, sparse matrix} of shape (n_samples, n_features), or \
453
+ (n_samples, n_samples)
454
+ Training instances to cluster, or distances between instances if
455
+ ``metric='precomputed'``. If a sparse matrix is provided, it will
456
+ be converted into a sparse ``csr_matrix``.
457
+
458
+ y : Ignored
459
+ Not used, present here for API consistency by convention.
460
+
461
+ sample_weight : array-like of shape (n_samples,), default=None
462
+ Weight of each sample, such that a sample with a weight of at least
463
+ ``min_samples`` is by itself a core sample; a sample with a
464
+ negative weight may inhibit its eps-neighbor from being core.
465
+ Note that weights are absolute, and default to 1.
466
+
467
+ Returns
468
+ -------
469
+ labels : ndarray of shape (n_samples,)
470
+ Cluster labels. Noisy samples are given the label -1.
471
+ """
472
+ self.fit(X, sample_weight=sample_weight)
473
+ return self.labels_
474
+
475
+ def _more_tags(self):
476
+ return {"pairwise": self.metric == "precomputed"}
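A small sketch of the sample_weight idea from the Notes above: collapse exact duplicates into weights before fitting (illustrative data; not part of the file being added):

import numpy as np
from sklearn.cluster import DBSCAN

X = np.array([[1, 2], [1, 2], [1, 2], [8, 7], [8, 7], [25, 80]], dtype=float)
unique_X, counts = np.unique(X, axis=0, return_counts=True)
db = DBSCAN(eps=3, min_samples=3).fit(unique_X, sample_weight=counts)
# db.labels_ is indexed by the rows of unique_X; use return_inverse=True on
# np.unique to map labels back to the original rows if needed.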
venv/lib/python3.10/site-packages/sklearn/cluster/_feature_agglomeration.py ADDED
@@ -0,0 +1,104 @@
1
+ """
2
+ Feature agglomeration. Base classes and functions for performing feature
3
+ agglomeration.
4
+ """
5
+ # Author: V. Michel, A. Gramfort
6
+ # License: BSD 3 clause
7
+
8
+ import warnings
9
+
10
+ import numpy as np
11
+ from scipy.sparse import issparse
12
+
13
+ from ..base import TransformerMixin
14
+ from ..utils import metadata_routing
15
+ from ..utils.validation import check_is_fitted
16
+
17
+ ###############################################################################
18
+ # Mixin class for feature agglomeration.
19
+
20
+
21
+ class AgglomerationTransform(TransformerMixin):
22
+ """
23
+ A class for feature agglomeration via the transform interface.
24
+ """
25
+
26
+ # This prevents ``set_split_inverse_transform`` to be generated for the
27
+ # non-standard ``Xred`` arg on ``inverse_transform``.
28
+ # TODO(1.5): remove when Xred is removed for inverse_transform.
29
+ __metadata_request__inverse_transform = {"Xred": metadata_routing.UNUSED}
30
+
31
+ def transform(self, X):
32
+ """
33
+ Transform a new matrix using the built clustering.
34
+
35
+ Parameters
36
+ ----------
37
+ X : array-like of shape (n_samples, n_features) or \
38
+ (n_samples, n_samples)
39
+ A M by N array of M observations in N dimensions or a length
40
+ M array of M one-dimensional observations.
41
+
42
+ Returns
43
+ -------
44
+ Y : ndarray of shape (n_samples, n_clusters) or (n_clusters,)
45
+ The pooled values for each feature cluster.
46
+ """
47
+ check_is_fitted(self)
48
+
49
+ X = self._validate_data(X, reset=False)
50
+ if self.pooling_func == np.mean and not issparse(X):
51
+ size = np.bincount(self.labels_)
52
+ n_samples = X.shape[0]
53
+ # a fast way to compute the mean of grouped features
54
+ nX = np.array(
55
+ [np.bincount(self.labels_, X[i, :]) / size for i in range(n_samples)]
56
+ )
57
+ else:
58
+ nX = [
59
+ self.pooling_func(X[:, self.labels_ == l], axis=1)
60
+ for l in np.unique(self.labels_)
61
+ ]
62
+ nX = np.array(nX).T
63
+ return nX
64
+
65
+ def inverse_transform(self, Xt=None, Xred=None):
66
+ """
67
+ Inverse the transformation and return a vector of size `n_features`.
68
+
69
+ Parameters
70
+ ----------
71
+ Xt : array-like of shape (n_samples, n_clusters) or (n_clusters,)
72
+ The values to be assigned to each cluster of samples.
73
+
74
+ Xred : deprecated
75
+ Use `Xt` instead.
76
+
77
+ .. deprecated:: 1.3
78
+
79
+ Returns
80
+ -------
81
+ X : ndarray of shape (n_samples, n_features) or (n_features,)
82
+ A vector of size `n_samples` with the values of `Xred` assigned to
83
+ each of the cluster of samples.
84
+ """
85
+ if Xt is None and Xred is None:
86
+ raise TypeError("Missing required positional argument: Xt")
87
+
88
+ if Xred is not None and Xt is not None:
89
+ raise ValueError("Please provide only `Xt`, and not `Xred`.")
90
+
91
+ if Xred is not None:
92
+ warnings.warn(
93
+ (
94
+ "Input argument `Xred` was renamed to `Xt` in v1.3 and will be"
95
+ " removed in v1.5."
96
+ ),
97
+ FutureWarning,
98
+ )
99
+ Xt = Xred
100
+
101
+ check_is_fitted(self)
102
+
103
+ unil, inverse = np.unique(self.labels_, return_inverse=True)
104
+ return Xt[..., inverse]
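The transform/inverse_transform pair above is normally reached through sklearn.cluster.FeatureAgglomeration; a minimal usage sketch with illustrative data (not part of the file being added):

import numpy as np
from sklearn.cluster import FeatureAgglomeration

X = np.array([[0.0, 1.0, 1.1, 5.0],
              [0.2, 0.9, 1.0, 5.2],
              [0.1, 1.2, 0.9, 4.8]])
agglo = FeatureAgglomeration(n_clusters=2).fit(X)
X_reduced = agglo.transform(X)                   # shape (3, 2): pooled value per feature cluster
X_restored = agglo.inverse_transform(X_reduced)  # shape (3, 4): cluster values broadcast back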
venv/lib/python3.10/site-packages/sklearn/cluster/_k_means_common.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (529 kB).
 
venv/lib/python3.10/site-packages/sklearn/cluster/_k_means_common.pxd ADDED
@@ -0,0 +1,48 @@
1
+ from cython cimport floating
2
+
3
+
4
+ cdef floating _euclidean_dense_dense(
5
+ const floating*,
6
+ const floating*,
7
+ int,
8
+ bint
9
+ ) noexcept nogil
10
+
11
+ cdef floating _euclidean_sparse_dense(
12
+ const floating[::1],
13
+ const int[::1],
14
+ const floating[::1],
15
+ floating,
16
+ bint
17
+ ) noexcept nogil
18
+
19
+ cpdef void _relocate_empty_clusters_dense(
20
+ const floating[:, ::1],
21
+ const floating[::1],
22
+ const floating[:, ::1],
23
+ floating[:, ::1],
24
+ floating[::1],
25
+ const int[::1]
26
+ )
27
+
28
+ cpdef void _relocate_empty_clusters_sparse(
29
+ const floating[::1],
30
+ const int[::1],
31
+ const int[::1],
32
+ const floating[::1],
33
+ const floating[:, ::1],
34
+ floating[:, ::1],
35
+ floating[::1],
36
+ const int[::1]
37
+ )
38
+
39
+ cdef void _average_centers(
40
+ floating[:, ::1],
41
+ const floating[::1]
42
+ )
43
+
44
+ cdef void _center_shift(
45
+ const floating[:, ::1],
46
+ const floating[:, ::1],
47
+ floating[::1]
48
+ )
venv/lib/python3.10/site-packages/sklearn/cluster/_k_means_elkan.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (526 kB).
 
venv/lib/python3.10/site-packages/sklearn/cluster/_k_means_lloyd.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (381 kB).
 
venv/lib/python3.10/site-packages/sklearn/cluster/_k_means_minibatch.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (324 kB).
 
venv/lib/python3.10/site-packages/sklearn/cluster/_mean_shift.py ADDED
@@ -0,0 +1,575 @@
1
+ """Mean shift clustering algorithm.
2
+
3
+ Mean shift clustering aims to discover *blobs* in a smooth density of
4
+ samples. It is a centroid based algorithm, which works by updating candidates
5
+ for centroids to be the mean of the points within a given region. These
6
+ candidates are then filtered in a post-processing stage to eliminate
7
+ near-duplicates to form the final set of centroids.
8
+
9
+ Seeding is performed using a binning technique for scalability.
10
+ """
11
+
12
+ # Authors: Conrad Lee <[email protected]>
13
+ # Alexandre Gramfort <[email protected]>
14
+ # Gael Varoquaux <[email protected]>
15
+ # Martino Sorbaro <[email protected]>
16
+
17
+ import warnings
18
+ from collections import defaultdict
19
+ from numbers import Integral, Real
20
+
21
+ import numpy as np
22
+
23
+ from .._config import config_context
24
+ from ..base import BaseEstimator, ClusterMixin, _fit_context
25
+ from ..metrics.pairwise import pairwise_distances_argmin
26
+ from ..neighbors import NearestNeighbors
27
+ from ..utils import check_array, check_random_state, gen_batches
28
+ from ..utils._param_validation import Interval, validate_params
29
+ from ..utils.parallel import Parallel, delayed
30
+ from ..utils.validation import check_is_fitted
31
+
32
+
33
+ @validate_params(
34
+ {
35
+ "X": ["array-like"],
36
+ "quantile": [Interval(Real, 0, 1, closed="both")],
37
+ "n_samples": [Interval(Integral, 1, None, closed="left"), None],
38
+ "random_state": ["random_state"],
39
+ "n_jobs": [Integral, None],
40
+ },
41
+ prefer_skip_nested_validation=True,
42
+ )
43
+ def estimate_bandwidth(X, *, quantile=0.3, n_samples=None, random_state=0, n_jobs=None):
44
+ """Estimate the bandwidth to use with the mean-shift algorithm.
45
+
46
+ This function takes time at least quadratic in `n_samples`. For large
47
+ datasets, it is wise to subsample by setting `n_samples`. Alternatively,
48
+ the parameter `bandwidth` can be set to a small value without estimating
49
+ it.
50
+
51
+ Parameters
52
+ ----------
53
+ X : array-like of shape (n_samples, n_features)
54
+ Input points.
55
+
56
+ quantile : float, default=0.3
57
+ Should be in the range [0, 1];
58
+ 0.5 means that the median of all pairwise distances is used.
59
+
60
+ n_samples : int, default=None
61
+ The number of samples to use. If not given, all samples are used.
62
+
63
+ random_state : int, RandomState instance, default=None
64
+ The generator used to randomly select the samples from input points
65
+ for bandwidth estimation. Use an int to make the randomness
66
+ deterministic.
67
+ See :term:`Glossary <random_state>`.
68
+
69
+ n_jobs : int, default=None
70
+ The number of parallel jobs to run for neighbors search.
71
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
72
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
73
+ for more details.
74
+
75
+ Returns
76
+ -------
77
+ bandwidth : float
78
+ The bandwidth parameter.
79
+
80
+ Examples
81
+ --------
82
+ >>> import numpy as np
83
+ >>> from sklearn.cluster import estimate_bandwidth
84
+ >>> X = np.array([[1, 1], [2, 1], [1, 0],
85
+ ... [4, 7], [3, 5], [3, 6]])
86
+ >>> estimate_bandwidth(X, quantile=0.5)
87
+ 1.61...
88
+ """
89
+ X = check_array(X)
90
+
91
+ random_state = check_random_state(random_state)
92
+ if n_samples is not None:
93
+ idx = random_state.permutation(X.shape[0])[:n_samples]
94
+ X = X[idx]
95
+ n_neighbors = int(X.shape[0] * quantile)
96
+ if n_neighbors < 1: # cannot fit NearestNeighbors with n_neighbors = 0
97
+ n_neighbors = 1
98
+ nbrs = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=n_jobs)
99
+ nbrs.fit(X)
100
+
101
+ bandwidth = 0.0
102
+ for batch in gen_batches(len(X), 500):
103
+ d, _ = nbrs.kneighbors(X[batch, :], return_distance=True)
104
+ bandwidth += np.max(d, axis=1).sum()
105
+
106
+ return bandwidth / X.shape[0]
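Because the estimate is at least quadratic in n_samples, the docstring recommends subsampling for large datasets; a sketch with illustrative sizes (not part of the file being added):

import numpy as np
from sklearn.cluster import estimate_bandwidth

rng = np.random.RandomState(0)
X_big = rng.normal(size=(50_000, 3))
# Only 2_000 randomly selected rows enter the neighbor queries.
bw = estimate_bandwidth(X_big, quantile=0.3, n_samples=2_000, random_state=0)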
107
+
108
+
109
+ # separate function for each seed's iterative loop
110
+ def _mean_shift_single_seed(my_mean, X, nbrs, max_iter):
111
+ # For each seed, climb gradient until convergence or max_iter
112
+ bandwidth = nbrs.get_params()["radius"]
113
+ stop_thresh = 1e-3 * bandwidth # when mean has converged
114
+ completed_iterations = 0
115
+ while True:
116
+ # Find mean of points within bandwidth
117
+ i_nbrs = nbrs.radius_neighbors([my_mean], bandwidth, return_distance=False)[0]
118
+ points_within = X[i_nbrs]
119
+ if len(points_within) == 0:
120
+ break # Depending on seeding strategy this condition may occur
121
+ my_old_mean = my_mean # save the old mean
122
+ my_mean = np.mean(points_within, axis=0)
123
+ # If converged or at max_iter, adds the cluster
124
+ if (
125
+ np.linalg.norm(my_mean - my_old_mean) < stop_thresh
126
+ or completed_iterations == max_iter
127
+ ):
128
+ break
129
+ completed_iterations += 1
130
+ return tuple(my_mean), len(points_within), completed_iterations
131
+
132
+
133
+ @validate_params(
134
+ {"X": ["array-like"]},
135
+ prefer_skip_nested_validation=False,
136
+ )
137
+ def mean_shift(
138
+ X,
139
+ *,
140
+ bandwidth=None,
141
+ seeds=None,
142
+ bin_seeding=False,
143
+ min_bin_freq=1,
144
+ cluster_all=True,
145
+ max_iter=300,
146
+ n_jobs=None,
147
+ ):
148
+ """Perform mean shift clustering of data using a flat kernel.
149
+
150
+ Read more in the :ref:`User Guide <mean_shift>`.
151
+
152
+ Parameters
153
+ ----------
154
+
155
+ X : array-like of shape (n_samples, n_features)
156
+ Input data.
157
+
158
+ bandwidth : float, default=None
159
+ Kernel bandwidth. If not None, must be in the range [0, +inf).
160
+
161
+ If None, the bandwidth is determined using a heuristic based on
162
+ the median of all pairwise distances. This will take quadratic time in
163
+ the number of samples. The sklearn.cluster.estimate_bandwidth function
164
+ can be used to do this more efficiently.
165
+
166
+ seeds : array-like of shape (n_seeds, n_features) or None
167
+ Point used as initial kernel locations. If None and bin_seeding=False,
168
+ each data point is used as a seed. If None and bin_seeding=True,
169
+ see bin_seeding.
170
+
171
+ bin_seeding : bool, default=False
172
+ If true, initial kernel locations are not locations of all
173
+ points, but rather the location of the discretized version of
174
+ points, where points are binned onto a grid whose coarseness
175
+ corresponds to the bandwidth. Setting this option to True will speed
176
+ up the algorithm because fewer seeds will be initialized.
177
+ Ignored if seeds argument is not None.
178
+
179
+ min_bin_freq : int, default=1
180
+ To speed up the algorithm, accept only those bins with at least
181
+ min_bin_freq points as seeds.
182
+
183
+ cluster_all : bool, default=True
184
+ If true, then all points are clustered, even those orphans that are
185
+ not within any kernel. Orphans are assigned to the nearest kernel.
186
+ If false, then orphans are given cluster label -1.
187
+
188
+ max_iter : int, default=300
189
+ Maximum number of iterations, per seed point before the clustering
190
+ operation terminates (for that seed point), if it has not converged yet.
191
+
192
+ n_jobs : int, default=None
193
+ The number of jobs to use for the computation. The following tasks benefit
194
+ from the parallelization:
195
+
196
+ - The search of nearest neighbors for bandwidth estimation and label
197
+ assignments. See the details in the docstring of the
198
+ ``NearestNeighbors`` class.
199
+ - Hill-climbing optimization for all seeds.
200
+
201
+ See :term:`Glossary <n_jobs>` for more details.
202
+
203
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
204
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
205
+ for more details.
206
+
207
+ .. versionadded:: 0.17
208
+ Parallel Execution using *n_jobs*.
209
+
210
+ Returns
211
+ -------
212
+
213
+ cluster_centers : ndarray of shape (n_clusters, n_features)
214
+ Coordinates of cluster centers.
215
+
216
+ labels : ndarray of shape (n_samples,)
217
+ Cluster labels for each point.
218
+
219
+ Notes
220
+ -----
221
+ For an example, see :ref:`examples/cluster/plot_mean_shift.py
222
+ <sphx_glr_auto_examples_cluster_plot_mean_shift.py>`.
223
+
224
+ Examples
225
+ --------
226
+ >>> import numpy as np
227
+ >>> from sklearn.cluster import mean_shift
228
+ >>> X = np.array([[1, 1], [2, 1], [1, 0],
229
+ ... [4, 7], [3, 5], [3, 6]])
230
+ >>> cluster_centers, labels = mean_shift(X, bandwidth=2)
231
+ >>> cluster_centers
232
+ array([[3.33..., 6. ],
233
+ [1.33..., 0.66...]])
234
+ >>> labels
235
+ array([1, 1, 1, 0, 0, 0])
236
+ """
237
+ model = MeanShift(
238
+ bandwidth=bandwidth,
239
+ seeds=seeds,
240
+ min_bin_freq=min_bin_freq,
241
+ bin_seeding=bin_seeding,
242
+ cluster_all=cluster_all,
243
+ n_jobs=n_jobs,
244
+ max_iter=max_iter,
245
+ ).fit(X)
246
+ return model.cluster_centers_, model.labels_
247
+
248
+
249
+ def get_bin_seeds(X, bin_size, min_bin_freq=1):
250
+ """Find seeds for mean_shift.
251
+
252
+ Finds seeds by first binning data onto a grid whose lines are
253
+ spaced bin_size apart, and then choosing those bins with at least
254
+ min_bin_freq points.
255
+
256
+ Parameters
257
+ ----------
258
+
259
+ X : array-like of shape (n_samples, n_features)
260
+ Input points, the same points that will be used in mean_shift.
261
+
262
+ bin_size : float
263
+ Controls the coarseness of the binning. Smaller values lead
264
+ to more seeding (which is computationally more expensive). If you're
265
+ not sure how to set this, set it to the value of the bandwidth used
266
+ in clustering.mean_shift.
267
+
268
+ min_bin_freq : int, default=1
269
+ Only bins with at least min_bin_freq will be selected as seeds.
270
+ Raising this value decreases the number of seeds found, which
271
+ makes mean_shift computationally cheaper.
272
+
273
+ Returns
274
+ -------
275
+ bin_seeds : array-like of shape (n_samples, n_features)
276
+ Points used as initial kernel positions in clustering.mean_shift.
277
+ """
278
+ if bin_size == 0:
279
+ return X
280
+
281
+ # Bin points
282
+ bin_sizes = defaultdict(int)
283
+ for point in X:
284
+ binned_point = np.round(point / bin_size)
285
+ bin_sizes[tuple(binned_point)] += 1
286
+
287
+ # Select only those bins as seeds which have enough members
288
+ bin_seeds = np.array(
289
+ [point for point, freq in bin_sizes.items() if freq >= min_bin_freq],
290
+ dtype=np.float32,
291
+ )
292
+ if len(bin_seeds) == len(X):
293
+ warnings.warn(
294
+ "Binning data failed with provided bin_size=%f, using data points as seeds."
295
+ % bin_size
296
+ )
297
+ return X
298
+ bin_seeds = bin_seeds * bin_size
299
+ return bin_seeds
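A tiny illustration of the binning performed above (illustrative values; not part of the file being added):

import numpy as np
from sklearn.cluster import get_bin_seeds

X = np.array([[1.0, 1.0], [1.1, 0.9], [1.2, 1.1],
              [4.0, 7.0], [3.9, 6.8], [4.1, 7.2]])
# Each seed is a grid node (spacing bin_size) that attracted at least min_bin_freq
# points; here the two occupied nodes [1, 1] and [4, 7] are returned.
seeds = get_bin_seeds(X, bin_size=1.0, min_bin_freq=2)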
300
+
301
+
302
+ class MeanShift(ClusterMixin, BaseEstimator):
303
+ """Mean shift clustering using a flat kernel.
304
+
305
+ Mean shift clustering aims to discover "blobs" in a smooth density of
306
+ samples. It is a centroid-based algorithm, which works by updating
307
+ candidates for centroids to be the mean of the points within a given
308
+ region. These candidates are then filtered in a post-processing stage to
309
+ eliminate near-duplicates to form the final set of centroids.
310
+
311
+ Seeding is performed using a binning technique for scalability.
312
+
313
+ Read more in the :ref:`User Guide <mean_shift>`.
314
+
315
+ Parameters
316
+ ----------
317
+ bandwidth : float, default=None
318
+ Bandwidth used in the flat kernel.
319
+
320
+ If not given, the bandwidth is estimated using
321
+ sklearn.cluster.estimate_bandwidth; see the documentation for that
322
+ function for hints on scalability (see also the Notes, below).
323
+
324
+ seeds : array-like of shape (n_samples, n_features), default=None
325
+ Seeds used to initialize kernels. If not set,
326
+ the seeds are calculated by clustering.get_bin_seeds
327
+ with bandwidth as the grid size and default values for
328
+ other parameters.
329
+
330
+ bin_seeding : bool, default=False
331
+ If true, initial kernel locations are not locations of all
332
+ points, but rather the location of the discretized version of
333
+ points, where points are binned onto a grid whose coarseness
334
+ corresponds to the bandwidth. Setting this option to True will speed
335
+ up the algorithm because fewer seeds will be initialized.
336
+ The default value is False.
337
+ Ignored if seeds argument is not None.
338
+
339
+ min_bin_freq : int, default=1
340
+ To speed up the algorithm, accept only those bins with at least
341
+ min_bin_freq points as seeds.
342
+
343
+ cluster_all : bool, default=True
344
+ If true, then all points are clustered, even those orphans that are
345
+ not within any kernel. Orphans are assigned to the nearest kernel.
346
+ If false, then orphans are given cluster label -1.
347
+
348
+ n_jobs : int, default=None
349
+ The number of jobs to use for the computation. The following tasks benefit
350
+ from the parallelization:
351
+
352
+ - The search of nearest neighbors for bandwidth estimation and label
353
+ assignments. See the details in the docstring of the
354
+ ``NearestNeighbors`` class.
355
+ - Hill-climbing optimization for all seeds.
356
+
357
+ See :term:`Glossary <n_jobs>` for more details.
358
+
359
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
360
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
361
+ for more details.
362
+
363
+ max_iter : int, default=300
364
+ Maximum number of iterations, per seed point before the clustering
365
+ operation terminates (for that seed point), if it has not converged yet.
366
+
367
+ .. versionadded:: 0.22
368
+
369
+ Attributes
370
+ ----------
371
+ cluster_centers_ : ndarray of shape (n_clusters, n_features)
372
+ Coordinates of cluster centers.
373
+
374
+ labels_ : ndarray of shape (n_samples,)
375
+ Labels of each point.
376
+
377
+ n_iter_ : int
378
+ Maximum number of iterations performed on each seed.
379
+
380
+ .. versionadded:: 0.22
381
+
382
+ n_features_in_ : int
383
+ Number of features seen during :term:`fit`.
384
+
385
+ .. versionadded:: 0.24
386
+
387
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
388
+ Names of features seen during :term:`fit`. Defined only when `X`
389
+ has feature names that are all strings.
390
+
391
+ .. versionadded:: 1.0
392
+
393
+ See Also
394
+ --------
395
+ KMeans : K-Means clustering.
396
+
397
+ Notes
398
+ -----
399
+
400
+ Scalability:
401
+
402
+ Because this implementation uses a flat kernel and
403
+ a Ball Tree to look up members of each kernel, the complexity will tend
404
+ towards O(T*n*log(n)) in lower dimensions, with n the number of samples
405
+ and T the number of points. In higher dimensions the complexity will
406
+ tend towards O(T*n^2).
407
+
408
+ Scalability can be boosted by using fewer seeds, for example by using
409
+ a higher value of min_bin_freq in the get_bin_seeds function.
410
+
411
+ Note that the estimate_bandwidth function is much less scalable than the
412
+ mean shift algorithm and will be the bottleneck if it is used.
413
+
414
+ References
415
+ ----------
416
+
417
+ Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
418
+ feature space analysis". IEEE Transactions on Pattern Analysis and
419
+ Machine Intelligence. 2002. pp. 603-619.
420
+
421
+ Examples
422
+ --------
423
+ >>> from sklearn.cluster import MeanShift
424
+ >>> import numpy as np
425
+ >>> X = np.array([[1, 1], [2, 1], [1, 0],
426
+ ... [4, 7], [3, 5], [3, 6]])
427
+ >>> clustering = MeanShift(bandwidth=2).fit(X)
428
+ >>> clustering.labels_
429
+ array([1, 1, 1, 0, 0, 0])
430
+ >>> clustering.predict([[0, 0], [5, 5]])
431
+ array([1, 0])
432
+ >>> clustering
433
+ MeanShift(bandwidth=2)
434
+ """
435
+
436
+ _parameter_constraints: dict = {
437
+ "bandwidth": [Interval(Real, 0, None, closed="neither"), None],
438
+ "seeds": ["array-like", None],
439
+ "bin_seeding": ["boolean"],
440
+ "min_bin_freq": [Interval(Integral, 1, None, closed="left")],
441
+ "cluster_all": ["boolean"],
442
+ "n_jobs": [Integral, None],
443
+ "max_iter": [Interval(Integral, 0, None, closed="left")],
444
+ }
445
+
446
+ def __init__(
447
+ self,
448
+ *,
449
+ bandwidth=None,
450
+ seeds=None,
451
+ bin_seeding=False,
452
+ min_bin_freq=1,
453
+ cluster_all=True,
454
+ n_jobs=None,
455
+ max_iter=300,
456
+ ):
457
+ self.bandwidth = bandwidth
458
+ self.seeds = seeds
459
+ self.bin_seeding = bin_seeding
460
+ self.cluster_all = cluster_all
461
+ self.min_bin_freq = min_bin_freq
462
+ self.n_jobs = n_jobs
463
+ self.max_iter = max_iter
464
+
465
+ @_fit_context(prefer_skip_nested_validation=True)
466
+ def fit(self, X, y=None):
467
+ """Perform clustering.
468
+
469
+ Parameters
470
+ ----------
471
+ X : array-like of shape (n_samples, n_features)
472
+ Samples to cluster.
473
+
474
+ y : Ignored
475
+ Not used, present for API consistency by convention.
476
+
477
+ Returns
478
+ -------
479
+ self : object
480
+ Fitted instance.
481
+ """
482
+ X = self._validate_data(X)
483
+ bandwidth = self.bandwidth
484
+ if bandwidth is None:
485
+ bandwidth = estimate_bandwidth(X, n_jobs=self.n_jobs)
486
+
487
+ seeds = self.seeds
488
+ if seeds is None:
489
+ if self.bin_seeding:
490
+ seeds = get_bin_seeds(X, bandwidth, self.min_bin_freq)
491
+ else:
492
+ seeds = X
493
+ n_samples, n_features = X.shape
494
+ center_intensity_dict = {}
495
+
496
+ # We use n_jobs=1 because this will be used in nested calls under
497
+ # parallel calls to _mean_shift_single_seed so there is no need for
498
+ # further parallelism.
499
+ nbrs = NearestNeighbors(radius=bandwidth, n_jobs=1).fit(X)
500
+
501
+ # execute iterations on all seeds in parallel
502
+ all_res = Parallel(n_jobs=self.n_jobs)(
503
+ delayed(_mean_shift_single_seed)(seed, X, nbrs, self.max_iter)
504
+ for seed in seeds
505
+ )
506
+ # copy results in a dictionary
507
+ for i in range(len(seeds)):
508
+ if all_res[i][1]: # i.e. len(points_within) > 0
509
+ center_intensity_dict[all_res[i][0]] = all_res[i][1]
510
+
511
+ self.n_iter_ = max([x[2] for x in all_res])
512
+
513
+ if not center_intensity_dict:
514
+ # nothing near seeds
515
+ raise ValueError(
516
+ "No point was within bandwidth=%f of any seed. Try a different seeding"
517
+ " strategy or increase the bandwidth."
518
+ % bandwidth
519
+ )
520
+
521
+ # POST PROCESSING: remove near duplicate points
522
+ # If the distance between two kernels is less than the bandwidth,
523
+ # then we have to remove one because it is a duplicate. Remove the
524
+ # one with fewer points.
525
+
526
+ sorted_by_intensity = sorted(
527
+ center_intensity_dict.items(),
528
+ key=lambda tup: (tup[1], tup[0]),
529
+ reverse=True,
530
+ )
531
+ sorted_centers = np.array([tup[0] for tup in sorted_by_intensity])
532
+ unique = np.ones(len(sorted_centers), dtype=bool)
533
+ nbrs = NearestNeighbors(radius=bandwidth, n_jobs=self.n_jobs).fit(
534
+ sorted_centers
535
+ )
536
+ for i, center in enumerate(sorted_centers):
537
+ if unique[i]:
538
+ neighbor_idxs = nbrs.radius_neighbors([center], return_distance=False)[
539
+ 0
540
+ ]
541
+ unique[neighbor_idxs] = 0
542
+ unique[i] = 1 # leave the current point as unique
543
+ cluster_centers = sorted_centers[unique]
544
+
545
+ # ASSIGN LABELS: a point belongs to the cluster that it is closest to
546
+ nbrs = NearestNeighbors(n_neighbors=1, n_jobs=self.n_jobs).fit(cluster_centers)
547
+ labels = np.zeros(n_samples, dtype=int)
548
+ distances, idxs = nbrs.kneighbors(X)
549
+ if self.cluster_all:
550
+ labels = idxs.flatten()
551
+ else:
552
+ labels.fill(-1)
553
+ bool_selector = distances.flatten() <= bandwidth
554
+ labels[bool_selector] = idxs.flatten()[bool_selector]
555
+
556
+ self.cluster_centers_, self.labels_ = cluster_centers, labels
557
+ return self
558
+
559
+ def predict(self, X):
560
+ """Predict the closest cluster each sample in X belongs to.
561
+
562
+ Parameters
563
+ ----------
564
+ X : array-like of shape (n_samples, n_features)
565
+ New data to predict.
566
+
567
+ Returns
568
+ -------
569
+ labels : ndarray of shape (n_samples,)
570
+ Index of the cluster each sample belongs to.
571
+ """
572
+ check_is_fitted(self)
573
+ X = self._validate_data(X, reset=False)
574
+ with config_context(assume_finite=True):
575
+ return pairwise_distances_argmin(X, self.cluster_centers_)
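A sketch combining the pieces defined in this module: estimate the bandwidth on a subsample, then fit MeanShift with bin seeding (illustrative data and parameters; not part of the file being added):

import numpy as np
from sklearn.cluster import MeanShift, estimate_bandwidth

rng = np.random.RandomState(0)
X = np.vstack([rng.normal(loc, 0.3, size=(200, 2)) for loc in ([0, 0], [5, 5])])
bw = estimate_bandwidth(X, quantile=0.2, n_samples=100, random_state=0)
ms = MeanShift(bandwidth=bw, bin_seeding=True).fit(X)
centers, labels = ms.cluster_centers_, ms.labels_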
venv/lib/python3.10/site-packages/sklearn/cluster/_optics.py ADDED
@@ -0,0 +1,1199 @@
1
+ """Ordering Points To Identify the Clustering Structure (OPTICS)
2
+
3
+ These routines execute the OPTICS algorithm, and implement various
4
+ cluster extraction methods of the ordered list.
5
+
6
+ Authors: Shane Grigsby <[email protected]>
7
+ Adrin Jalali <[email protected]>
8
+ Erich Schubert <[email protected]>
9
+ Hanmin Qin <[email protected]>
10
+ License: BSD 3 clause
11
+ """
12
+
13
+ import warnings
14
+ from numbers import Integral, Real
15
+
16
+ import numpy as np
17
+ from scipy.sparse import SparseEfficiencyWarning, issparse
18
+
19
+ from ..base import BaseEstimator, ClusterMixin, _fit_context
20
+ from ..exceptions import DataConversionWarning
21
+ from ..metrics import pairwise_distances
22
+ from ..metrics.pairwise import _VALID_METRICS, PAIRWISE_BOOLEAN_FUNCTIONS
23
+ from ..neighbors import NearestNeighbors
24
+ from ..utils import gen_batches, get_chunk_n_rows
25
+ from ..utils._param_validation import (
26
+ HasMethods,
27
+ Interval,
28
+ RealNotInt,
29
+ StrOptions,
30
+ validate_params,
31
+ )
32
+ from ..utils.validation import check_memory
33
+
34
+
35
+ class OPTICS(ClusterMixin, BaseEstimator):
36
+ """Estimate clustering structure from vector array.
37
+
38
+ OPTICS (Ordering Points To Identify the Clustering Structure), closely
39
+ related to DBSCAN, finds core samples of high density and expands clusters
40
+ from them [1]_. Unlike DBSCAN, it keeps the cluster hierarchy for a variable
41
+ neighborhood radius. It is better suited for usage on large datasets than the
42
+ current sklearn implementation of DBSCAN.
43
+
44
+ Clusters are then extracted using a DBSCAN-like method
45
+ (cluster_method = 'dbscan') or an automatic
46
+ technique proposed in [1]_ (cluster_method = 'xi').
47
+
48
+ This implementation deviates from the original OPTICS by first performing
49
+ k-nearest-neighborhood searches on all points to identify core sizes, then
50
+ computing only the distances to unprocessed points when constructing the
51
+ cluster order. Note that we do not employ a heap to manage the expansion
52
+ candidates, so the time complexity will be O(n^2).
53
+
54
+ Read more in the :ref:`User Guide <optics>`.
55
+
56
+ Parameters
57
+ ----------
58
+ min_samples : int > 1 or float between 0 and 1, default=5
59
+ The number of samples in a neighborhood for a point to be considered as
60
+ a core point. Also, up and down steep regions can't have more than
61
+ ``min_samples`` consecutive non-steep points. Expressed as an absolute
62
+ number or a fraction of the number of samples (rounded to be at least
63
+ 2).
64
+
65
+ max_eps : float, default=np.inf
66
+ The maximum distance between two samples for one to be considered as
67
+ in the neighborhood of the other. Default value of ``np.inf`` will
68
+ identify clusters across all scales; reducing ``max_eps`` will result
69
+ in shorter run times.
70
+
71
+ metric : str or callable, default='minkowski'
72
+ Metric to use for distance computation. Any metric from scikit-learn
73
+ or scipy.spatial.distance can be used.
74
+
75
+ If metric is a callable function, it is called on each
76
+ pair of instances (rows) and the resulting value recorded. The callable
77
+ should take two arrays as input and return one value indicating the
78
+ distance between them. This works for Scipy's metrics, but is less
79
+ efficient than passing the metric name as a string. If metric is
80
+ "precomputed", `X` is assumed to be a distance matrix and must be
81
+ square.
82
+
83
+ Valid values for metric are:
84
+
85
+ - from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
86
+ 'manhattan']
87
+
88
+ - from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
89
+ 'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
90
+ 'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao',
91
+ 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',
92
+ 'yule']
93
+
94
+ Sparse matrices are only supported by scikit-learn metrics.
95
+ See the documentation for scipy.spatial.distance for details on these
96
+ metrics.
97
+
98
+ .. note::
99
+ `'kulsinski'` is deprecated from SciPy 1.9 and will be removed in SciPy 1.11.
100
+
101
+ p : float, default=2
102
+ Parameter for the Minkowski metric from
103
+ :class:`~sklearn.metrics.pairwise_distances`. When p = 1, this is
104
+ equivalent to using manhattan_distance (l1), and euclidean_distance
105
+ (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
106
+
107
+ metric_params : dict, default=None
108
+ Additional keyword arguments for the metric function.
109
+
110
+ cluster_method : str, default='xi'
111
+ The extraction method used to extract clusters using the calculated
112
+ reachability and ordering. Possible values are "xi" and "dbscan".
113
+
114
+ eps : float, default=None
115
+ The maximum distance between two samples for one to be considered as
116
+ in the neighborhood of the other. By default it assumes the same value
117
+ as ``max_eps``.
118
+ Used only when ``cluster_method='dbscan'``.
119
+
120
+ xi : float between 0 and 1, default=0.05
121
+ Determines the minimum steepness on the reachability plot that
122
+ constitutes a cluster boundary. For example, an upwards point in the
123
+ reachability plot is defined by the ratio from one point to its
124
+ successor being at most 1-xi.
125
+ Used only when ``cluster_method='xi'``.
126
+
127
+ predecessor_correction : bool, default=True
128
+ Correct clusters according to the predecessors calculated by OPTICS
129
+ [2]_. This parameter has minimal effect on most datasets.
130
+ Used only when ``cluster_method='xi'``.
131
+
132
+ min_cluster_size : int > 1 or float between 0 and 1, default=None
133
+ Minimum number of samples in an OPTICS cluster, expressed as an
134
+ absolute number or a fraction of the number of samples (rounded to be
135
+ at least 2). If ``None``, the value of ``min_samples`` is used instead.
136
+ Used only when ``cluster_method='xi'``.
137
+
138
+ algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
139
+ Algorithm used to compute the nearest neighbors:
140
+
141
+ - 'ball_tree' will use :class:`~sklearn.neighbors.BallTree`.
142
+ - 'kd_tree' will use :class:`~sklearn.neighbors.KDTree`.
143
+ - 'brute' will use a brute-force search.
144
+ - 'auto' (default) will attempt to decide the most appropriate
145
+ algorithm based on the values passed to :meth:`fit` method.
146
+
147
+ Note: fitting on sparse input will override the setting of
148
+ this parameter, using brute force.
149
+
150
+ leaf_size : int, default=30
151
+ Leaf size passed to :class:`~sklearn.neighbors.BallTree` or
152
+ :class:`~sklearn.neighbors.KDTree`. This can affect the speed of the
153
+ construction and query, as well as the memory required to store the
154
+ tree. The optimal value depends on the nature of the problem.
155
+
156
+ memory : str or object with the joblib.Memory interface, default=None
157
+ Used to cache the output of the computation of the tree.
158
+ By default, no caching is done. If a string is given, it is the
159
+ path to the caching directory.
160
+
161
+ n_jobs : int, default=None
162
+ The number of parallel jobs to run for neighbors search.
163
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
164
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
165
+ for more details.
166
+
167
+ Attributes
168
+ ----------
169
+ labels_ : ndarray of shape (n_samples,)
170
+ Cluster labels for each point in the dataset given to fit().
171
+ Noisy samples and points which are not included in a leaf cluster
172
+ of ``cluster_hierarchy_`` are labeled as -1.
173
+
174
+ reachability_ : ndarray of shape (n_samples,)
175
+ Reachability distances per sample, indexed by object order. Use
176
+ ``clust.reachability_[clust.ordering_]`` to access in cluster order.
177
+
178
+ ordering_ : ndarray of shape (n_samples,)
179
+ The cluster ordered list of sample indices.
180
+
181
+ core_distances_ : ndarray of shape (n_samples,)
182
+ Distance at which each sample becomes a core point, indexed by object
183
+ order. Points which will never be core have a distance of inf. Use
184
+ ``clust.core_distances_[clust.ordering_]`` to access in cluster order.
185
+
186
+ predecessor_ : ndarray of shape (n_samples,)
187
+ Point that a sample was reached from, indexed by object order.
188
+ Seed points have a predecessor of -1.
189
+
190
+ cluster_hierarchy_ : ndarray of shape (n_clusters, 2)
191
+ The list of clusters in the form of ``[start, end]`` in each row, with
192
+ all indices inclusive. The clusters are ordered according to
193
+ ``(end, -start)`` (ascending) so that larger clusters encompassing
194
+ smaller clusters come after those smaller ones. Since ``labels_`` does
195
+ not reflect the hierarchy, usually
196
+ ``len(cluster_hierarchy_) > len(np.unique(optics.labels_))``. Please also
197
+ note that these indices are of the ``ordering_``, i.e.
198
+ ``X[ordering_][start:end + 1]`` form a cluster.
199
+ Only available when ``cluster_method='xi'``.
200
+
201
+ n_features_in_ : int
202
+ Number of features seen during :term:`fit`.
203
+
204
+ .. versionadded:: 0.24
205
+
206
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
207
+ Names of features seen during :term:`fit`. Defined only when `X`
208
+ has feature names that are all strings.
209
+
210
+ .. versionadded:: 1.0
211
+
212
+ See Also
213
+ --------
214
+ DBSCAN : A similar clustering for a specified neighborhood radius (eps).
215
+ Our implementation is optimized for runtime.
216
+
217
+ References
218
+ ----------
219
+ .. [1] Ankerst, Mihael, Markus M. Breunig, Hans-Peter Kriegel,
220
+ and Jörg Sander. "OPTICS: ordering points to identify the clustering
221
+ structure." ACM SIGMOD Record 28, no. 2 (1999): 49-60.
222
+
223
+ .. [2] Schubert, Erich, Michael Gertz.
224
+ "Improving the Cluster Structure Extracted from OPTICS Plots." Proc. of
225
+ the Conference "Lernen, Wissen, Daten, Analysen" (LWDA) (2018): 318-329.
226
+
227
+ Examples
228
+ --------
229
+ >>> from sklearn.cluster import OPTICS
230
+ >>> import numpy as np
231
+ >>> X = np.array([[1, 2], [2, 5], [3, 6],
232
+ ... [8, 7], [8, 8], [7, 3]])
233
+ >>> clustering = OPTICS(min_samples=2).fit(X)
234
+ >>> clustering.labels_
235
+ array([0, 0, 0, 1, 1, 1])
236
+
237
+ For a more detailed example see
238
+ :ref:`sphx_glr_auto_examples_cluster_plot_optics.py`.
239
+ """
240
+
241
+ _parameter_constraints: dict = {
242
+ "min_samples": [
243
+ Interval(Integral, 2, None, closed="left"),
244
+ Interval(RealNotInt, 0, 1, closed="both"),
245
+ ],
246
+ "max_eps": [Interval(Real, 0, None, closed="both")],
247
+ "metric": [StrOptions(set(_VALID_METRICS) | {"precomputed"}), callable],
248
+ "p": [Interval(Real, 1, None, closed="left")],
249
+ "metric_params": [dict, None],
250
+ "cluster_method": [StrOptions({"dbscan", "xi"})],
251
+ "eps": [Interval(Real, 0, None, closed="both"), None],
252
+ "xi": [Interval(Real, 0, 1, closed="both")],
253
+ "predecessor_correction": ["boolean"],
254
+ "min_cluster_size": [
255
+ Interval(Integral, 2, None, closed="left"),
256
+ Interval(RealNotInt, 0, 1, closed="right"),
257
+ None,
258
+ ],
259
+ "algorithm": [StrOptions({"auto", "brute", "ball_tree", "kd_tree"})],
260
+ "leaf_size": [Interval(Integral, 1, None, closed="left")],
261
+ "memory": [str, HasMethods("cache"), None],
262
+ "n_jobs": [Integral, None],
263
+ }
264
+
265
+ def __init__(
266
+ self,
267
+ *,
268
+ min_samples=5,
269
+ max_eps=np.inf,
270
+ metric="minkowski",
271
+ p=2,
272
+ metric_params=None,
273
+ cluster_method="xi",
274
+ eps=None,
275
+ xi=0.05,
276
+ predecessor_correction=True,
277
+ min_cluster_size=None,
278
+ algorithm="auto",
279
+ leaf_size=30,
280
+ memory=None,
281
+ n_jobs=None,
282
+ ):
283
+ self.max_eps = max_eps
284
+ self.min_samples = min_samples
285
+ self.min_cluster_size = min_cluster_size
286
+ self.algorithm = algorithm
287
+ self.metric = metric
288
+ self.metric_params = metric_params
289
+ self.p = p
290
+ self.leaf_size = leaf_size
291
+ self.cluster_method = cluster_method
292
+ self.eps = eps
293
+ self.xi = xi
294
+ self.predecessor_correction = predecessor_correction
295
+ self.memory = memory
296
+ self.n_jobs = n_jobs
297
+
298
+ @_fit_context(
299
+ # Optics.metric is not validated yet
300
+ prefer_skip_nested_validation=False
301
+ )
302
+ def fit(self, X, y=None):
303
+ """Perform OPTICS clustering.
304
+
305
+ Extracts an ordered list of points and reachability distances, and
306
+ performs initial clustering using ``max_eps`` distance specified at
307
+ OPTICS object instantiation.
308
+
309
+ Parameters
310
+ ----------
311
+ X : {ndarray, sparse matrix} of shape (n_samples, n_features), or \
312
+ (n_samples, n_samples) if metric='precomputed'
313
+ A feature array, or array of distances between samples if
314
+ metric='precomputed'. If a sparse matrix is provided, it will be
315
+ converted into CSR format.
316
+
317
+ y : Ignored
318
+ Not used, present for API consistency by convention.
319
+
320
+ Returns
321
+ -------
322
+ self : object
323
+ Returns a fitted instance of self.
324
+ """
325
+ dtype = bool if self.metric in PAIRWISE_BOOLEAN_FUNCTIONS else float
326
+ if dtype == bool and X.dtype != bool:
327
+ msg = (
328
+ "Data will be converted to boolean for"
329
+ f" metric {self.metric}, to avoid this warning,"
330
+ " you may convert the data prior to calling fit."
331
+ )
332
+ warnings.warn(msg, DataConversionWarning)
333
+
334
+ X = self._validate_data(X, dtype=dtype, accept_sparse="csr")
335
+ if self.metric == "precomputed" and issparse(X):
336
+ with warnings.catch_warnings():
337
+ warnings.simplefilter("ignore", SparseEfficiencyWarning)
338
+ # Set each diagonal to an explicit value so each point is its
339
+ # own neighbor
340
+ X.setdiag(X.diagonal())
341
+ memory = check_memory(self.memory)
342
+
343
+ (
344
+ self.ordering_,
345
+ self.core_distances_,
346
+ self.reachability_,
347
+ self.predecessor_,
348
+ ) = memory.cache(compute_optics_graph)(
349
+ X=X,
350
+ min_samples=self.min_samples,
351
+ algorithm=self.algorithm,
352
+ leaf_size=self.leaf_size,
353
+ metric=self.metric,
354
+ metric_params=self.metric_params,
355
+ p=self.p,
356
+ n_jobs=self.n_jobs,
357
+ max_eps=self.max_eps,
358
+ )
359
+
360
+ # Extract clusters from the calculated orders and reachability
361
+ if self.cluster_method == "xi":
362
+ labels_, clusters_ = cluster_optics_xi(
363
+ reachability=self.reachability_,
364
+ predecessor=self.predecessor_,
365
+ ordering=self.ordering_,
366
+ min_samples=self.min_samples,
367
+ min_cluster_size=self.min_cluster_size,
368
+ xi=self.xi,
369
+ predecessor_correction=self.predecessor_correction,
370
+ )
371
+ self.cluster_hierarchy_ = clusters_
372
+ elif self.cluster_method == "dbscan":
373
+ if self.eps is None:
374
+ eps = self.max_eps
375
+ else:
376
+ eps = self.eps
377
+
378
+ if eps > self.max_eps:
379
+ raise ValueError(
380
+ "Specify an epsilon smaller than %s. Got %s." % (self.max_eps, eps)
381
+ )
382
+
383
+ labels_ = cluster_optics_dbscan(
384
+ reachability=self.reachability_,
385
+ core_distances=self.core_distances_,
386
+ ordering=self.ordering_,
387
+ eps=eps,
388
+ )
389
+
390
+ self.labels_ = labels_
391
+ return self
392
+
393
+
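+ # Editor's sketch (not part of scikit-learn): minimal usage of the estimator
+ # above, assuming only NumPy and the public ``OPTICS`` class. The cache
+ # directory name is hypothetical; passing the same ``memory`` to both
+ # estimators lets joblib reuse the expensive reachability-graph computation
+ # when only the extraction method changes.
+ #
+ #     import numpy as np
+ #     from sklearn.cluster import OPTICS
+ #
+ #     X = np.array([[1, 2], [2, 5], [3, 6], [8, 7], [8, 8], [7, 3]])
+ #     common = dict(min_samples=2, memory="/tmp/optics_cache")
+ #     xi_clust = OPTICS(**common).fit(X)                      # xi extraction
+ #     db_clust = OPTICS(cluster_method="dbscan", eps=2.0, **common).fit(X)
+ #     print(xi_clust.labels_, db_clust.labels_)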
394
+ def _validate_size(size, n_samples, param_name):
395
+ if size > n_samples:
396
+ raise ValueError(
397
+ "%s must be no greater than the number of samples (%d). Got %d"
398
+ % (param_name, n_samples, size)
399
+ )
400
+
401
+
402
+ # OPTICS helper functions
403
+ def _compute_core_distances_(X, neighbors, min_samples, working_memory):
404
+ """Compute the k-th nearest neighbor of each sample.
405
+
406
+ Equivalent to neighbors.kneighbors(X, min_samples)[0][:, -1]
407
+ but with better memory efficiency.
408
+
409
+ Parameters
410
+ ----------
411
+ X : array-like of shape (n_samples, n_features)
412
+ The data.
413
+ neighbors : NearestNeighbors instance
414
+ The fitted nearest neighbors estimator.
+ min_samples : int
+ The number of neighbors used to define the core distance of each sample.
+ working_memory : int, default=None
416
+ The sought maximum memory for temporary distance matrix chunks.
417
+ When None (default), the value of
418
+ ``sklearn.get_config()['working_memory']`` is used.
419
+
420
+ Returns
421
+ -------
422
+ core_distances : ndarray of shape (n_samples,)
423
+ Distance at which each sample becomes a core point.
424
+ Points which will never be core have a distance of inf.
425
+ """
426
+ n_samples = X.shape[0]
427
+ core_distances = np.empty(n_samples)
428
+ core_distances.fill(np.nan)
429
+
430
+ chunk_n_rows = get_chunk_n_rows(
431
+ row_bytes=16 * min_samples, max_n_rows=n_samples, working_memory=working_memory
432
+ )
433
+ slices = gen_batches(n_samples, chunk_n_rows)
434
+ for sl in slices:
435
+ core_distances[sl] = neighbors.kneighbors(X[sl], min_samples)[0][:, -1]
436
+ return core_distances
437
+
438
+
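+ # Editor's sketch (not part of scikit-learn): the chunked helper above should
+ # agree with a single ``kneighbors`` call; ``working_memory`` (in MiB) only
+ # bounds the size of the temporary distance chunks, not the result.
+ #
+ #     import numpy as np
+ #     from sklearn.neighbors import NearestNeighbors
+ #
+ #     rng = np.random.RandomState(0)
+ #     X = rng.rand(100, 3)
+ #     nbrs = NearestNeighbors(n_neighbors=5).fit(X)
+ #     chunked = _compute_core_distances_(X, nbrs, min_samples=5, working_memory=1)
+ #     direct = nbrs.kneighbors(X, 5)[0][:, -1]
+ #     assert np.allclose(chunked, direct)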
439
+ @validate_params(
440
+ {
441
+ "X": [np.ndarray, "sparse matrix"],
442
+ "min_samples": [
443
+ Interval(Integral, 2, None, closed="left"),
444
+ Interval(RealNotInt, 0, 1, closed="both"),
445
+ ],
446
+ "max_eps": [Interval(Real, 0, None, closed="both")],
447
+ "metric": [StrOptions(set(_VALID_METRICS) | {"precomputed"}), callable],
448
+ "p": [Interval(Real, 0, None, closed="right"), None],
449
+ "metric_params": [dict, None],
450
+ "algorithm": [StrOptions({"auto", "brute", "ball_tree", "kd_tree"})],
451
+ "leaf_size": [Interval(Integral, 1, None, closed="left")],
452
+ "n_jobs": [Integral, None],
453
+ },
454
+ prefer_skip_nested_validation=False, # metric is not validated yet
455
+ )
456
+ def compute_optics_graph(
457
+ X, *, min_samples, max_eps, metric, p, metric_params, algorithm, leaf_size, n_jobs
458
+ ):
459
+ """Compute the OPTICS reachability graph.
460
+
461
+ Read more in the :ref:`User Guide <optics>`.
462
+
463
+ Parameters
464
+ ----------
465
+ X : {ndarray, sparse matrix} of shape (n_samples, n_features), or \
466
+ (n_samples, n_samples) if metric='precomputed'
467
+ A feature array, or array of distances between samples if
468
+ metric='precomputed'.
469
+
470
+ min_samples : int > 1 or float between 0 and 1
471
+ The number of samples in a neighborhood for a point to be considered
472
+ as a core point. Expressed as an absolute number or a fraction of the
473
+ number of samples (rounded to be at least 2).
474
+
475
+ max_eps : float, default=np.inf
476
+ The maximum distance between two samples for one to be considered as
477
+ in the neighborhood of the other. Default value of ``np.inf`` will
478
+ identify clusters across all scales; reducing ``max_eps`` will result
479
+ in shorter run times.
480
+
481
+ metric : str or callable, default='minkowski'
482
+ Metric to use for distance computation. Any metric from scikit-learn
483
+ or scipy.spatial.distance can be used.
484
+
485
+ If metric is a callable function, it is called on each
486
+ pair of instances (rows) and the resulting value recorded. The callable
487
+ should take two arrays as input and return one value indicating the
488
+ distance between them. This works for Scipy's metrics, but is less
489
+ efficient than passing the metric name as a string. If metric is
490
+ "precomputed", X is assumed to be a distance matrix and must be square.
491
+
492
+ Valid values for metric are:
493
+
494
+ - from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
495
+ 'manhattan']
496
+
497
+ - from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
498
+ 'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
499
+ 'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao',
500
+ 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',
501
+ 'yule']
502
+
503
+ See the documentation for scipy.spatial.distance for details on these
504
+ metrics.
505
+
506
+ .. note::
507
+ `'kulsinski'` is deprecated from SciPy 1.9 and will be removed in SciPy 1.11.
508
+
509
+ p : float, default=2
510
+ Parameter for the Minkowski metric from
511
+ :class:`~sklearn.metrics.pairwise_distances`. When p = 1, this is
512
+ equivalent to using manhattan_distance (l1), and euclidean_distance
513
+ (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
514
+
515
+ metric_params : dict, default=None
516
+ Additional keyword arguments for the metric function.
517
+
518
+ algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
519
+ Algorithm used to compute the nearest neighbors:
520
+
521
+ - 'ball_tree' will use :class:`~sklearn.neighbors.BallTree`.
522
+ - 'kd_tree' will use :class:`~sklearn.neighbors.KDTree`.
523
+ - 'brute' will use a brute-force search.
524
+ - 'auto' will attempt to decide the most appropriate algorithm
525
+ based on the values passed to `fit` method. (default)
526
+
527
+ Note: fitting on sparse input will override the setting of
528
+ this parameter, using brute force.
529
+
530
+ leaf_size : int, default=30
531
+ Leaf size passed to :class:`~sklearn.neighbors.BallTree` or
532
+ :class:`~sklearn.neighbors.KDTree`. This can affect the speed of the
533
+ construction and query, as well as the memory required to store the
534
+ tree. The optimal value depends on the nature of the problem.
535
+
536
+ n_jobs : int, default=None
537
+ The number of parallel jobs to run for neighbors search.
538
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
539
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
540
+ for more details.
541
+
542
+ Returns
543
+ -------
544
+ ordering_ : array of shape (n_samples,)
545
+ The cluster ordered list of sample indices.
546
+
547
+ core_distances_ : array of shape (n_samples,)
548
+ Distance at which each sample becomes a core point, indexed by object
549
+ order. Points which will never be core have a distance of inf. Use
550
+ ``clust.core_distances_[clust.ordering_]`` to access in cluster order.
551
+
552
+ reachability_ : array of shape (n_samples,)
553
+ Reachability distances per sample, indexed by object order. Use
554
+ ``clust.reachability_[clust.ordering_]`` to access in cluster order.
555
+
556
+ predecessor_ : array of shape (n_samples,)
557
+ Point that a sample was reached from, indexed by object order.
558
+ Seed points have a predecessor of -1.
559
+
560
+ References
561
+ ----------
562
+ .. [1] Ankerst, Mihael, Markus M. Breunig, Hans-Peter Kriegel,
563
+ and Jörg Sander. "OPTICS: ordering points to identify the clustering
564
+ structure." ACM SIGMOD Record 28, no. 2 (1999): 49-60.
565
+
566
+ Examples
567
+ --------
568
+ >>> import numpy as np
569
+ >>> from sklearn.cluster import compute_optics_graph
570
+ >>> X = np.array([[1, 2], [2, 5], [3, 6],
571
+ ... [8, 7], [8, 8], [7, 3]])
572
+ >>> ordering, core_distances, reachability, predecessor = compute_optics_graph(
573
+ ... X,
574
+ ... min_samples=2,
575
+ ... max_eps=np.inf,
576
+ ... metric="minkowski",
577
+ ... p=2,
578
+ ... metric_params=None,
579
+ ... algorithm="auto",
580
+ ... leaf_size=30,
581
+ ... n_jobs=None,
582
+ ... )
583
+ >>> ordering
584
+ array([0, 1, 2, 5, 3, 4])
585
+ >>> core_distances
586
+ array([3.16..., 1.41..., 1.41..., 1. , 1. ,
587
+ 4.12...])
588
+ >>> reachability
589
+ array([ inf, 3.16..., 1.41..., 4.12..., 1. ,
590
+ 5. ])
591
+ >>> predecessor
592
+ array([-1, 0, 1, 5, 3, 2])
593
+ """
594
+ n_samples = X.shape[0]
595
+ _validate_size(min_samples, n_samples, "min_samples")
596
+ if min_samples <= 1:
597
+ min_samples = max(2, int(min_samples * n_samples))
598
+
599
+ # Start all points as 'unprocessed' ##
600
+ reachability_ = np.empty(n_samples)
601
+ reachability_.fill(np.inf)
602
+ predecessor_ = np.empty(n_samples, dtype=int)
603
+ predecessor_.fill(-1)
604
+
605
+ nbrs = NearestNeighbors(
606
+ n_neighbors=min_samples,
607
+ algorithm=algorithm,
608
+ leaf_size=leaf_size,
609
+ metric=metric,
610
+ metric_params=metric_params,
611
+ p=p,
612
+ n_jobs=n_jobs,
613
+ )
614
+
615
+ nbrs.fit(X)
616
+ # Here we first do a kNN query for each point, this differs from
617
+ # the original OPTICS that only used epsilon range queries.
618
+ # TODO: handle working_memory somehow?
619
+ core_distances_ = _compute_core_distances_(
620
+ X=X, neighbors=nbrs, min_samples=min_samples, working_memory=None
621
+ )
622
+ # OPTICS puts an upper limit on these, use inf for undefined.
623
+ core_distances_[core_distances_ > max_eps] = np.inf
624
+ np.around(
625
+ core_distances_,
626
+ decimals=np.finfo(core_distances_.dtype).precision,
627
+ out=core_distances_,
628
+ )
629
+
630
+ # Main OPTICS loop. Not parallelizable. The order that entries are
631
+ # written to the 'ordering_' list is important!
632
+ # Note that this implementation is O(n^2) theoretically, but
633
+ # supposedly with very low constant factors.
634
+ processed = np.zeros(X.shape[0], dtype=bool)
635
+ ordering = np.zeros(X.shape[0], dtype=int)
636
+ for ordering_idx in range(X.shape[0]):
637
+ # Choose next based on smallest reachability distance
638
+ # (And prefer smaller ids on ties, possibly np.inf!)
639
+ index = np.where(processed == 0)[0]
640
+ point = index[np.argmin(reachability_[index])]
641
+
642
+ processed[point] = True
643
+ ordering[ordering_idx] = point
644
+ if core_distances_[point] != np.inf:
645
+ _set_reach_dist(
646
+ core_distances_=core_distances_,
647
+ reachability_=reachability_,
648
+ predecessor_=predecessor_,
649
+ point_index=point,
650
+ processed=processed,
651
+ X=X,
652
+ nbrs=nbrs,
653
+ metric=metric,
654
+ metric_params=metric_params,
655
+ p=p,
656
+ max_eps=max_eps,
657
+ )
658
+ if np.all(np.isinf(reachability_)):
659
+ warnings.warn(
660
+ (
661
+ "All reachability values are inf. Set a larger"
662
+ " max_eps or all data will be considered outliers."
663
+ ),
664
+ UserWarning,
665
+ )
666
+ return ordering, core_distances_, reachability_, predecessor_
667
+
668
+
669
+ def _set_reach_dist(
670
+ core_distances_,
671
+ reachability_,
672
+ predecessor_,
673
+ point_index,
674
+ processed,
675
+ X,
676
+ nbrs,
677
+ metric,
678
+ metric_params,
679
+ p,
680
+ max_eps,
681
+ ):
682
+ P = X[point_index : point_index + 1]
683
+ # Assume that radius_neighbors is faster without distances
684
+ # and we don't need all distances; nevertheless, this means
685
+ # we may be doing some work twice.
686
+ indices = nbrs.radius_neighbors(P, radius=max_eps, return_distance=False)[0]
687
+
688
+ # Getting indices of neighbors that have not been processed
689
+ unproc = np.compress(~np.take(processed, indices), indices)
690
+ # Neighbors of current point are already processed.
691
+ if not unproc.size:
692
+ return
693
+
694
+ # Only compute distances to unprocessed neighbors:
695
+ if metric == "precomputed":
696
+ dists = X[[point_index], unproc]
697
+ if isinstance(dists, np.matrix):
698
+ dists = np.asarray(dists)
699
+ dists = dists.ravel()
700
+ else:
701
+ _params = dict() if metric_params is None else metric_params.copy()
702
+ if metric == "minkowski" and "p" not in _params:
703
+ # the same logic as neighbors, p is ignored if explicitly set
704
+ # in the dict params
705
+ _params["p"] = p
706
+ dists = pairwise_distances(P, X[unproc], metric, n_jobs=None, **_params).ravel()
707
+
708
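+ # The reachability of an unprocessed neighbor o from the current point p is
+ # max(core_distance(p), dist(p, o)), per the OPTICS definition; rounding to
+ # the dtype precision below helps avoid spurious tie-breaking due to
+ # floating point noise.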
+ rdists = np.maximum(dists, core_distances_[point_index])
709
+ np.around(rdists, decimals=np.finfo(rdists.dtype).precision, out=rdists)
710
+ improved = np.where(rdists < np.take(reachability_, unproc))
711
+ reachability_[unproc[improved]] = rdists[improved]
712
+ predecessor_[unproc[improved]] = point_index
713
+
714
+
715
+ @validate_params(
716
+ {
717
+ "reachability": [np.ndarray],
718
+ "core_distances": [np.ndarray],
719
+ "ordering": [np.ndarray],
720
+ "eps": [Interval(Real, 0, None, closed="both")],
721
+ },
722
+ prefer_skip_nested_validation=True,
723
+ )
724
+ def cluster_optics_dbscan(*, reachability, core_distances, ordering, eps):
725
+ """Perform DBSCAN extraction for an arbitrary epsilon.
726
+
727
+ Extracting the clusters runs in linear time. Note that this results in
728
+ ``labels_`` which are close to a :class:`~sklearn.cluster.DBSCAN` with
729
+ similar settings and ``eps``, only if ``eps`` is close to ``max_eps``.
730
+
731
+ Parameters
732
+ ----------
733
+ reachability : ndarray of shape (n_samples,)
734
+ Reachability distances calculated by OPTICS (``reachability_``).
735
+
736
+ core_distances : ndarray of shape (n_samples,)
737
+ Distances at which points become core (``core_distances_``).
738
+
739
+ ordering : ndarray of shape (n_samples,)
740
+ OPTICS ordered point indices (``ordering_``).
741
+
742
+ eps : float
743
+ DBSCAN ``eps`` parameter. Must be set to < ``max_eps``. Results
744
+ will be close to those of the DBSCAN algorithm if ``eps`` and ``max_eps`` are close
745
+ to one another.
746
+
747
+ Returns
748
+ -------
749
+ labels_ : array of shape (n_samples,)
750
+ The estimated labels.
751
+
752
+ Examples
753
+ --------
754
+ >>> import numpy as np
755
+ >>> from sklearn.cluster import cluster_optics_dbscan, compute_optics_graph
756
+ >>> X = np.array([[1, 2], [2, 5], [3, 6],
757
+ ... [8, 7], [8, 8], [7, 3]])
758
+ >>> ordering, core_distances, reachability, predecessor = compute_optics_graph(
759
+ ... X,
760
+ ... min_samples=2,
761
+ ... max_eps=np.inf,
762
+ ... metric="minkowski",
763
+ ... p=2,
764
+ ... metric_params=None,
765
+ ... algorithm="auto",
766
+ ... leaf_size=30,
767
+ ... n_jobs=None,
768
+ ... )
769
+ >>> eps = 4.5
770
+ >>> labels = cluster_optics_dbscan(
771
+ ... reachability=reachability,
772
+ ... core_distances=core_distances,
773
+ ... ordering=ordering,
774
+ ... eps=eps,
775
+ ... )
776
+ >>> labels
777
+ array([0, 0, 0, 1, 1, 1])
778
+ """
779
+ n_samples = len(core_distances)
780
+ labels = np.zeros(n_samples, dtype=int)
781
+
782
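+ # A new cluster starts at each point in the ordering that is not reachable
+ # at ``eps`` from the points before it (reachability > eps) but is itself a
+ # core point at ``eps`` (core distance <= eps); the cumulative sum over the
+ # ordering then yields consecutive cluster ids, and points that are neither
+ # reachable nor core at ``eps`` are relabeled as noise (-1) below.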
+ far_reach = reachability > eps
783
+ near_core = core_distances <= eps
784
+ labels[ordering] = np.cumsum(far_reach[ordering] & near_core[ordering]) - 1
785
+ labels[far_reach & ~near_core] = -1
786
+ return labels
787
+
788
+
789
+ @validate_params(
790
+ {
791
+ "reachability": [np.ndarray],
792
+ "predecessor": [np.ndarray],
793
+ "ordering": [np.ndarray],
794
+ "min_samples": [
795
+ Interval(Integral, 2, None, closed="left"),
796
+ Interval(RealNotInt, 0, 1, closed="both"),
797
+ ],
798
+ "min_cluster_size": [
799
+ Interval(Integral, 2, None, closed="left"),
800
+ Interval(RealNotInt, 0, 1, closed="both"),
801
+ None,
802
+ ],
803
+ "xi": [Interval(Real, 0, 1, closed="both")],
804
+ "predecessor_correction": ["boolean"],
805
+ },
806
+ prefer_skip_nested_validation=True,
807
+ )
808
+ def cluster_optics_xi(
809
+ *,
810
+ reachability,
811
+ predecessor,
812
+ ordering,
813
+ min_samples,
814
+ min_cluster_size=None,
815
+ xi=0.05,
816
+ predecessor_correction=True,
817
+ ):
818
+ """Automatically extract clusters according to the Xi-steep method.
819
+
820
+ Parameters
821
+ ----------
822
+ reachability : ndarray of shape (n_samples,)
823
+ Reachability distances calculated by OPTICS (`reachability_`).
824
+
825
+ predecessor : ndarray of shape (n_samples,)
826
+ Predecessors calculated by OPTICS.
827
+
828
+ ordering : ndarray of shape (n_samples,)
829
+ OPTICS ordered point indices (`ordering_`).
830
+
831
+ min_samples : int > 1 or float between 0 and 1
832
+ The same as the min_samples given to OPTICS. Up and down steep regions
833
+ can't have more than ``min_samples`` consecutive non-steep points.
834
+ Expressed as an absolute number or a fraction of the number of samples
835
+ (rounded to be at least 2).
836
+
837
+ min_cluster_size : int > 1 or float between 0 and 1, default=None
838
+ Minimum number of samples in an OPTICS cluster, expressed as an
839
+ absolute number or a fraction of the number of samples (rounded to be
840
+ at least 2). If ``None``, the value of ``min_samples`` is used instead.
841
+
842
+ xi : float between 0 and 1, default=0.05
843
+ Determines the minimum steepness on the reachability plot that
844
+ constitutes a cluster boundary. For example, an upwards point in the
845
+ reachability plot is defined by the ratio from one point to its
846
+ successor being at most 1-xi.
847
+
848
+ predecessor_correction : bool, default=True
849
+ Correct clusters based on the calculated predecessors.
850
+
851
+ Returns
852
+ -------
853
+ labels : ndarray of shape (n_samples,)
854
+ The labels assigned to samples. Points which are not included
855
+ in any cluster are labeled as -1.
856
+
857
+ clusters : ndarray of shape (n_clusters, 2)
858
+ The list of clusters in the form of ``[start, end]`` in each row, with
859
+ all indices inclusive. The clusters are ordered according to ``(end,
860
+ -start)`` (ascending) so that larger clusters encompassing smaller
861
+ clusters come after such nested smaller clusters. Since ``labels`` does
862
+ not reflect the hierarchy, usually ``len(clusters) >
+ len(np.unique(labels))``.
864
+
865
+ Examples
866
+ --------
867
+ >>> import numpy as np
868
+ >>> from sklearn.cluster import cluster_optics_xi, compute_optics_graph
869
+ >>> X = np.array([[1, 2], [2, 5], [3, 6],
870
+ ... [8, 7], [8, 8], [7, 3]])
871
+ >>> ordering, core_distances, reachability, predecessor = compute_optics_graph(
872
+ ... X,
873
+ ... min_samples=2,
874
+ ... max_eps=np.inf,
875
+ ... metric="minkowski",
876
+ ... p=2,
877
+ ... metric_params=None,
878
+ ... algorithm="auto",
879
+ ... leaf_size=30,
880
+ ... n_jobs=None
881
+ ... )
882
+ >>> min_samples = 2
883
+ >>> labels, clusters = cluster_optics_xi(
884
+ ... reachability=reachability,
885
+ ... predecessor=predecessor,
886
+ ... ordering=ordering,
887
+ ... min_samples=min_samples,
888
+ ... )
889
+ >>> labels
890
+ array([0, 0, 0, 1, 1, 1])
891
+ >>> clusters
892
+ array([[0, 2],
893
+ [3, 5],
894
+ [0, 5]])
895
+ """
896
+ n_samples = len(reachability)
897
+ _validate_size(min_samples, n_samples, "min_samples")
898
+ if min_samples <= 1:
899
+ min_samples = max(2, int(min_samples * n_samples))
900
+ if min_cluster_size is None:
901
+ min_cluster_size = min_samples
902
+ _validate_size(min_cluster_size, n_samples, "min_cluster_size")
903
+ if min_cluster_size <= 1:
904
+ min_cluster_size = max(2, int(min_cluster_size * n_samples))
905
+
906
+ clusters = _xi_cluster(
907
+ reachability[ordering],
908
+ predecessor[ordering],
909
+ ordering,
910
+ xi,
911
+ min_samples,
912
+ min_cluster_size,
913
+ predecessor_correction,
914
+ )
915
+ labels = _extract_xi_labels(ordering, clusters)
916
+ return labels, clusters
917
+
918
+
919
+ def _extend_region(steep_point, xward_point, start, min_samples):
920
+ """Extend the area until it's maximal.
921
+
922
+ It's the same function for both upward and downward regions, depending on
923
+ the given input parameters. Assuming:
924
+
925
+ - steep_{upward/downward}: bool array indicating whether a point is a
926
+ steep {upward/downward};
927
+ - upward/downward: bool array indicating whether a point is
928
+ upward/downward;
929
+
930
+ To extend an upward region, ``steep_point=steep_upward`` and
931
+ ``xward_point=downward`` are expected, and to extend a downward region,
932
+ ``steep_point=steep_downward`` and ``xward_point=upward``.
933
+
934
+ Parameters
935
+ ----------
936
+ steep_point : ndarray of shape (n_samples,), dtype=bool
937
+ True if the point is steep downward (upward).
938
+
939
+ xward_point : ndarray of shape (n_samples,), dtype=bool
940
+ True if the point is an upward (respectively downward) point.
941
+
942
+ start : int
943
+ The start of the xward region.
944
+
945
+ min_samples : int
946
+ The same as the min_samples given to OPTICS. Up and down steep
947
+ regions can't have more than ``min_samples`` consecutive non-steep
948
+ points.
949
+
950
+ Returns
951
+ -------
952
+ index : int
953
+ The current index iterating over all the samples, i.e. where we are up
954
+ to in our search.
955
+
956
+ end : int
957
+ The end of the region, which can be behind the index. The region
958
+ includes the ``end`` index.
959
+ """
960
+ n_samples = len(steep_point)
961
+ non_xward_points = 0
962
+ index = start
963
+ end = start
964
+ # find a maximal area
965
+ while index < n_samples:
966
+ if steep_point[index]:
967
+ non_xward_points = 0
968
+ end = index
969
+ elif not xward_point[index]:
970
+ # it's not a steep point, but still goes up.
971
+ non_xward_points += 1
972
+ # region should include no more than min_samples consecutive
973
+ # non steep xward points.
974
+ if non_xward_points > min_samples:
975
+ break
976
+ else:
977
+ return end
978
+ index += 1
979
+ return end
980
+
981
+
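+ # Editor's sketch (not part of scikit-learn): extending a steep upward region
+ # on a toy reachability plot, assuming xi = 0.1 and min_samples = 2. The two
+ # mildly increasing points at indices 2 and 3 are tolerated, and the region
+ # started at the steep point at index 1 extends through the steep point at
+ # index 4.
+ #
+ #     import numpy as np
+ #
+ #     r = np.array([1.0, 1.0, 2.0, 2.1, 2.1, 5.0, 5.0])
+ #     ratio = r[:-1] / r[1:]
+ #     steep_upward = ratio <= 1 - 0.1
+ #     downward = ratio > 1
+ #     end = _extend_region(steep_upward, downward, start=1, min_samples=2)
+ #     # end == 4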
982
+ def _update_filter_sdas(sdas, mib, xi_complement, reachability_plot):
983
+ """Update steep down areas (SDAs) using the new maximum in between (mib)
984
+ value, and the given complement of xi, i.e. ``1 - xi``.
985
+ """
986
+ if np.isinf(mib):
987
+ return []
988
+ res = [
989
+ sda for sda in sdas if mib <= reachability_plot[sda["start"]] * xi_complement
990
+ ]
991
+ for sda in res:
992
+ sda["mib"] = max(sda["mib"], mib)
993
+ return res
994
+
995
+
996
+ def _correct_predecessor(reachability_plot, predecessor_plot, ordering, s, e):
997
+ """Correct for predecessors.
998
+
999
+ Applies Algorithm 2 of [1]_.
1000
+
1001
+ Input parameters are ordered by the computed OPTICS ordering.
1002
+
1003
+ .. [1] Schubert, Erich, Michael Gertz.
1004
+ "Improving the Cluster Structure Extracted from OPTICS Plots." Proc. of
1005
+ the Conference "Lernen, Wissen, Daten, Analysen" (LWDA) (2018): 318-329.
1006
+ """
1007
+ while s < e:
1008
+ if reachability_plot[s] > reachability_plot[e]:
1009
+ return s, e
1010
+ p_e = predecessor_plot[e]
1011
+ for i in range(s, e):
1012
+ if p_e == ordering[i]:
1013
+ return s, e
1014
+ e -= 1
1015
+ return None, None
1016
+
1017
+
1018
+ def _xi_cluster(
1019
+ reachability_plot,
1020
+ predecessor_plot,
1021
+ ordering,
1022
+ xi,
1023
+ min_samples,
1024
+ min_cluster_size,
1025
+ predecessor_correction,
1026
+ ):
1027
+ """Automatically extract clusters according to the Xi-steep method.
1028
+
1029
+ This is roughly an implementation of Figure 19 of the OPTICS paper.
1030
+
1031
+ Parameters
1032
+ ----------
1033
+ reachability_plot : array-like of shape (n_samples,)
1034
+ The reachability plot, i.e. reachability ordered according to
1035
+ the calculated ordering, all computed by OPTICS.
1036
+
1037
+ predecessor_plot : array-like of shape (n_samples,)
1038
+ Predecessors ordered according to the calculated ordering.
1039
+
1040
+ xi : float, between 0 and 1
1041
+ Determines the minimum steepness on the reachability plot that
1042
+ constitutes a cluster boundary. For example, an upwards point in the
1043
+ reachability plot is defined by the ratio from one point to its
1044
+ successor being at most 1-xi.
1045
+
1046
+ min_samples : int > 1
1047
+ The same as the min_samples given to OPTICS. Up and down steep regions
1048
+ can't have more than ``min_samples`` consecutive non-steep points.
1049
+
1050
+ min_cluster_size : int > 1
1051
+ Minimum number of samples in an OPTICS cluster.
1052
+
1053
+ predecessor_correction : bool
1054
+ Correct clusters based on the calculated predecessors.
1055
+
1056
+ Returns
1057
+ -------
1058
+ clusters : ndarray of shape (n_clusters, 2)
1059
+ The list of clusters in the form of [start, end] in each row, with all
1060
+ indices inclusive. The clusters are ordered in a way that larger
1061
+ clusters encompassing smaller clusters come after those smaller
1062
+ clusters.
1063
+ """
1064
+
1065
+ # Our implementation adds an inf to the end of reachability plot
1066
+ # this helps to find potential clusters at the end of the
1067
+ # reachability plot even if there's no upward region at the end of it.
1068
+ reachability_plot = np.hstack((reachability_plot, np.inf))
1069
+
1070
+ xi_complement = 1 - xi
1071
+ sdas = [] # steep down areas, introduced in section 4.3.2 of the paper
1072
+ clusters = []
1073
+ index = 0
1074
+ mib = 0.0 # maximum in between, section 4.3.2
1075
+
1076
+ # Our implementation corrects a mistake in the original
1077
+ # paper, i.e., in Definition 9 steep downward point,
1078
+ # r(p) * (1 - xi) <= r(p + 1) should be
+ # r(p) * (1 - xi) >= r(p + 1)
1080
+ with np.errstate(invalid="ignore"):
1081
+ ratio = reachability_plot[:-1] / reachability_plot[1:]
1082
+ steep_upward = ratio <= xi_complement
1083
+ steep_downward = ratio >= 1 / xi_complement
1084
+ downward = ratio > 1
1085
+ upward = ratio < 1
1086
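+ # With r = reachability_plot: ``steep_upward[i]`` means r[i] <= r[i+1] * (1 - xi)
+ # (a xi-steep upward point) and ``steep_downward[i]`` means r[i] * (1 - xi) >= r[i+1],
+ # while ``upward``/``downward`` only require r[i] < r[i+1] / r[i] > r[i+1].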
+
1087
+ # the following loop is almost exactly as Figure 19 of the paper.
1088
+ # it jumps over the areas which are not either steep down or up areas
1089
+ for steep_index in iter(np.flatnonzero(steep_upward | steep_downward)):
1090
+ # just continue if steep_index has been a part of a discovered xward
1091
+ # area.
1092
+ if steep_index < index:
1093
+ continue
1094
+
1095
+ mib = max(mib, np.max(reachability_plot[index : steep_index + 1]))
1096
+
1097
+ # steep downward areas
1098
+ if steep_downward[steep_index]:
1099
+ sdas = _update_filter_sdas(sdas, mib, xi_complement, reachability_plot)
1100
+ D_start = steep_index
1101
+ D_end = _extend_region(steep_downward, upward, D_start, min_samples)
1102
+ D = {"start": D_start, "end": D_end, "mib": 0.0}
1103
+ sdas.append(D)
1104
+ index = D_end + 1
1105
+ mib = reachability_plot[index]
1106
+
1107
+ # steep upward areas
1108
+ else:
1109
+ sdas = _update_filter_sdas(sdas, mib, xi_complement, reachability_plot)
1110
+ U_start = steep_index
1111
+ U_end = _extend_region(steep_upward, downward, U_start, min_samples)
1112
+ index = U_end + 1
1113
+ mib = reachability_plot[index]
1114
+
1115
+ U_clusters = []
1116
+ for D in sdas:
1117
+ c_start = D["start"]
1118
+ c_end = U_end
1119
+
1120
+ # line (**), sc2*
1121
+ if reachability_plot[c_end + 1] * xi_complement < D["mib"]:
1122
+ continue
1123
+
1124
+ # Definition 11: criterion 4
1125
+ D_max = reachability_plot[D["start"]]
1126
+ if D_max * xi_complement >= reachability_plot[c_end + 1]:
1127
+ # Find the first index from the left side which is almost
1128
+ # at the same level as the end of the detected cluster.
1129
+ while (
1130
+ reachability_plot[c_start + 1] > reachability_plot[c_end + 1]
1131
+ and c_start < D["end"]
1132
+ ):
1133
+ c_start += 1
1134
+ elif reachability_plot[c_end + 1] * xi_complement >= D_max:
1135
+ # Find the first index from the right side which is almost
1136
+ # at the same level as the beginning of the detected
1137
+ # cluster.
1138
+ # Our implementation corrects a mistake in the original
1139
+ # paper, i.e., in Definition 11 4c, r(x) < r(sD) should be
1140
+ # r(x) > r(sD).
1141
+ while reachability_plot[c_end - 1] > D_max and c_end > U_start:
1142
+ c_end -= 1
1143
+
1144
+ # predecessor correction
1145
+ if predecessor_correction:
1146
+ c_start, c_end = _correct_predecessor(
1147
+ reachability_plot, predecessor_plot, ordering, c_start, c_end
1148
+ )
1149
+ if c_start is None:
1150
+ continue
1151
+
1152
+ # Definition 11: criterion 3.a
1153
+ if c_end - c_start + 1 < min_cluster_size:
1154
+ continue
1155
+
1156
+ # Definition 11: criterion 1
1157
+ if c_start > D["end"]:
1158
+ continue
1159
+
1160
+ # Definition 11: criterion 2
1161
+ if c_end < U_start:
1162
+ continue
1163
+
1164
+ U_clusters.append((c_start, c_end))
1165
+
1166
+ # add smaller clusters first.
1167
+ U_clusters.reverse()
1168
+ clusters.extend(U_clusters)
1169
+
1170
+ return np.array(clusters)
1171
+
1172
+
1173
+ def _extract_xi_labels(ordering, clusters):
1174
+ """Extracts the labels from the clusters returned by `_xi_cluster`.
1175
+ We rely on the fact that clusters are stored
1176
+ with the smaller clusters coming before the larger ones.
1177
+
1178
+ Parameters
1179
+ ----------
1180
+ ordering : array-like of shape (n_samples,)
1181
+ The ordering of points calculated by OPTICS
1182
+
1183
+ clusters : array-like of shape (n_clusters, 2)
1184
+ List of clusters i.e. (start, end) tuples,
1185
+ as returned by `_xi_cluster`.
1186
+
1187
+ Returns
1188
+ -------
1189
+ labels : ndarray of shape (n_samples,)
1190
+ """
1191
+
1192
+ labels = np.full(len(ordering), -1, dtype=int)
1193
+ label = 0
1194
+ for c in clusters:
1195
+ if not np.any(labels[c[0] : (c[1] + 1)] != -1):
1196
+ labels[c[0] : (c[1] + 1)] = label
1197
+ label += 1
1198
+ labels[ordering] = labels.copy()
1199
+ return labels
venv/lib/python3.10/site-packages/sklearn/cluster/_spectral.py ADDED
@@ -0,0 +1,799 @@
1
+ """Algorithms for spectral clustering"""
2
+
3
+ # Author: Gael Varoquaux <[email protected]>
4
+ # Brian Cheung
5
+ # Wei LI <[email protected]>
6
+ # Andrew Knyazev <[email protected]>
7
+ # License: BSD 3 clause
8
+
9
+ import warnings
10
+ from numbers import Integral, Real
11
+
12
+ import numpy as np
13
+ from scipy.linalg import LinAlgError, qr, svd
14
+ from scipy.sparse import csc_matrix
15
+
16
+ from ..base import BaseEstimator, ClusterMixin, _fit_context
17
+ from ..manifold import spectral_embedding
18
+ from ..metrics.pairwise import KERNEL_PARAMS, pairwise_kernels
19
+ from ..neighbors import NearestNeighbors, kneighbors_graph
20
+ from ..utils import as_float_array, check_random_state
21
+ from ..utils._param_validation import Interval, StrOptions, validate_params
22
+ from ._kmeans import k_means
23
+
24
+
25
+ def cluster_qr(vectors):
26
+ """Find the discrete partition closest to the eigenvector embedding.
27
+
28
+ This implementation was proposed in [1]_.
29
+
30
+ .. versionadded:: 1.1
31
+
32
+ Parameters
33
+ ----------
34
+ vectors : array-like, shape: (n_samples, n_clusters)
35
+ The embedding space of the samples.
36
+
37
+ Returns
38
+ -------
39
+ labels : array of integers, shape: n_samples
40
+ The cluster labels of vectors.
41
+
42
+ References
43
+ ----------
44
+ .. [1] :doi:`Simple, direct, and efficient multi-way spectral clustering, 2019
45
+ Anil Damle, Victor Minden, Lexing Ying
46
+ <10.1093/imaiai/iay008>`
47
+
48
+ """
49
+
50
+ k = vectors.shape[1]
51
+ _, _, piv = qr(vectors.T, pivoting=True)
52
+ ut, _, v = svd(vectors[piv[:k], :].T)
53
+ vectors = abs(np.dot(vectors, np.dot(ut, v.conj())))
54
+ return vectors.argmax(axis=1)
55
+
56
+
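+ # Editor's sketch (not part of scikit-learn): ``cluster_qr`` applied to a toy
+ # two-cluster embedding. Each row of ``vectors`` is approximately an indicator
+ # of its cluster, so the first five samples should share one label and the
+ # last five the other (up to label permutation).
+ #
+ #     import numpy as np
+ #
+ #     rng = np.random.RandomState(0)
+ #     vectors = np.vstack([np.tile([1.0, 0.0], (5, 1)),
+ #                          np.tile([0.0, 1.0], (5, 1))]) + 0.01 * rng.randn(10, 2)
+ #     labels = cluster_qr(vectors)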
57
+ def discretize(
58
+ vectors, *, copy=True, max_svd_restarts=30, n_iter_max=20, random_state=None
59
+ ):
60
+ """Search for a partition matrix which is closest to the eigenvector embedding.
61
+
62
+ This implementation was proposed in [1]_.
63
+
64
+ Parameters
65
+ ----------
66
+ vectors : array-like of shape (n_samples, n_clusters)
67
+ The embedding space of the samples.
68
+
69
+ copy : bool, default=True
70
+ Whether to copy vectors, or perform in-place normalization.
71
+
72
+ max_svd_restarts : int, default=30
73
+ Maximum number of attempts to restart SVD if convergence fails.
74
+
75
+ n_iter_max : int, default=20
76
+ Maximum number of iterations to attempt in rotation and partition
77
+ matrix search if machine precision convergence is not reached.
78
+
79
+ random_state : int, RandomState instance, default=None
80
+ Determines random number generation for rotation matrix initialization.
81
+ Use an int to make the randomness deterministic.
82
+ See :term:`Glossary <random_state>`.
83
+
84
+ Returns
85
+ -------
86
+ labels : array of integers, shape: n_samples
87
+ The labels of the clusters.
88
+
89
+ References
90
+ ----------
91
+
92
+ .. [1] `Multiclass spectral clustering, 2003
93
+ Stella X. Yu, Jianbo Shi
94
+ <https://people.eecs.berkeley.edu/~jordan/courses/281B-spring04/readings/yu-shi.pdf>`_
95
+
96
+ Notes
97
+ -----
98
+
99
+ The eigenvector embedding is used to iteratively search for the
100
+ closest discrete partition. First, the eigenvector embedding is
101
+ normalized to the space of partition matrices. An optimal discrete
102
+ partition matrix closest to this normalized embedding multiplied by
103
+ an initial rotation is calculated. Fixing this discrete partition
104
+ matrix, an optimal rotation matrix is calculated. These two
105
+ calculations are performed until convergence. The discrete partition
106
+ matrix is returned as the clustering solution. Used in spectral
107
+ clustering, this method tends to be faster and more robust to random
108
+ initialization than k-means.
109
+
110
+ """
111
+
112
+ random_state = check_random_state(random_state)
113
+
114
+ vectors = as_float_array(vectors, copy=copy)
115
+
116
+ eps = np.finfo(float).eps
117
+ n_samples, n_components = vectors.shape
118
+
119
+ # Normalize the eigenvectors to an equal length of a vector of ones.
120
+ # Reorient the eigenvectors to point in the negative direction with respect
121
+ # to the first element. This may have to do with constraining the
122
+ # eigenvectors to lie in a specific quadrant to make the discretization
123
+ # search easier.
124
+ norm_ones = np.sqrt(n_samples)
125
+ for i in range(vectors.shape[1]):
126
+ vectors[:, i] = (vectors[:, i] / np.linalg.norm(vectors[:, i])) * norm_ones
127
+ if vectors[0, i] != 0:
128
+ vectors[:, i] = -1 * vectors[:, i] * np.sign(vectors[0, i])
129
+
130
+ # Normalize the rows of the eigenvectors. Samples should lie on the unit
131
+ # hypersphere centered at the origin. This transforms the samples in the
132
+ # embedding space to the space of partition matrices.
133
+ vectors = vectors / np.sqrt((vectors**2).sum(axis=1))[:, np.newaxis]
134
+
135
+ svd_restarts = 0
136
+ has_converged = False
137
+
138
+ # If there is an exception we try to randomize and rerun SVD again
139
+ # do this max_svd_restarts times.
140
+ while (svd_restarts < max_svd_restarts) and not has_converged:
141
+ # Initialize first column of rotation matrix with a row of the
142
+ # eigenvectors
143
+ rotation = np.zeros((n_components, n_components))
144
+ rotation[:, 0] = vectors[random_state.randint(n_samples), :].T
145
+
146
+ # To initialize the rest of the rotation matrix, find the rows
147
+ # of the eigenvectors that are as orthogonal to each other as
148
+ # possible
149
+ c = np.zeros(n_samples)
150
+ for j in range(1, n_components):
151
+ # Accumulate c to ensure row is as orthogonal as possible to
152
+ # previous picks as well as current one
153
+ c += np.abs(np.dot(vectors, rotation[:, j - 1]))
154
+ rotation[:, j] = vectors[c.argmin(), :].T
155
+
156
+ last_objective_value = 0.0
157
+ n_iter = 0
158
+
159
+ while not has_converged:
160
+ n_iter += 1
161
+
162
+ t_discrete = np.dot(vectors, rotation)
163
+
164
+ labels = t_discrete.argmax(axis=1)
165
+ vectors_discrete = csc_matrix(
166
+ (np.ones(len(labels)), (np.arange(0, n_samples), labels)),
167
+ shape=(n_samples, n_components),
168
+ )
169
+
170
+ t_svd = vectors_discrete.T * vectors
171
+
172
+ try:
173
+ U, S, Vh = np.linalg.svd(t_svd)
174
+ except LinAlgError:
175
+ svd_restarts += 1
176
+ print("SVD did not converge, randomizing and trying again")
177
+ break
178
+
179
+ ncut_value = 2.0 * (n_samples - S.sum())
180
+ if (abs(ncut_value - last_objective_value) < eps) or (n_iter > n_iter_max):
181
+ has_converged = True
182
+ else:
183
+ # otherwise calculate rotation and continue
184
+ last_objective_value = ncut_value
185
+ rotation = np.dot(Vh.T, U.T)
186
+
187
+ if not has_converged:
188
+ raise LinAlgError("SVD did not converge")
189
+ return labels
190
+
191
+
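+ # Editor's sketch (not part of scikit-learn): ``discretize`` on the same kind
+ # of toy two-cluster embedding; with a fixed ``random_state`` the search is
+ # deterministic and the two blocks of rows should end up with distinct labels.
+ #
+ #     import numpy as np
+ #
+ #     vectors = np.vstack([np.tile([1.0, 0.0], (5, 1)),
+ #                          np.tile([0.0, 1.0], (5, 1))])
+ #     labels = discretize(vectors, random_state=0)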
192
+ @validate_params(
193
+ {"affinity": ["array-like", "sparse matrix"]},
194
+ prefer_skip_nested_validation=False,
195
+ )
196
+ def spectral_clustering(
197
+ affinity,
198
+ *,
199
+ n_clusters=8,
200
+ n_components=None,
201
+ eigen_solver=None,
202
+ random_state=None,
203
+ n_init=10,
204
+ eigen_tol="auto",
205
+ assign_labels="kmeans",
206
+ verbose=False,
207
+ ):
208
+ """Apply clustering to a projection of the normalized Laplacian.
209
+
210
+ In practice Spectral Clustering is very useful when the structure of
211
+ the individual clusters is highly non-convex or more generally when
212
+ a measure of the center and spread of the cluster is not a suitable
213
+ description of the complete cluster. For instance, when clusters are
214
+ nested circles on the 2D plane.
215
+
216
+ If affinity is the adjacency matrix of a graph, this method can be
217
+ used to find normalized graph cuts [1]_, [2]_.
218
+
219
+ Read more in the :ref:`User Guide <spectral_clustering>`.
220
+
221
+ Parameters
222
+ ----------
223
+ affinity : {array-like, sparse matrix} of shape (n_samples, n_samples)
224
+ The affinity matrix describing the relationship of the samples to
225
+ embed. **Must be symmetric**.
226
+
227
+ Possible examples:
228
+ - adjacency matrix of a graph,
229
+ - heat kernel of the pairwise distance matrix of the samples,
230
+ - symmetric k-nearest neighbours connectivity matrix of the samples.
231
+
232
+ n_clusters : int, default=8
233
+ Number of clusters to extract.
234
+
235
+ n_components : int, default=n_clusters
236
+ Number of eigenvectors to use for the spectral embedding.
237
+
238
+ eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
239
+ The eigenvalue decomposition method. If None then ``'arpack'`` is used.
240
+ See [4]_ for more details regarding ``'lobpcg'``.
241
+ Eigensolver ``'amg'`` runs ``'lobpcg'`` with optional
242
+ Algebraic MultiGrid preconditioning and requires pyamg to be installed.
243
+ It can be faster on very large sparse problems [6]_ and [7]_.
244
+
245
+ random_state : int, RandomState instance, default=None
246
+ A pseudo random number generator used for the initialization
247
+ of the lobpcg eigenvectors decomposition when `eigen_solver ==
248
+ 'amg'`, and for the K-Means initialization. Use an int to make
249
+ the results deterministic across calls (See
250
+ :term:`Glossary <random_state>`).
251
+
252
+ .. note::
253
+ When using `eigen_solver == 'amg'`,
254
+ it is necessary to also fix the global numpy seed with
255
+ `np.random.seed(int)` to get deterministic results. See
256
+ https://github.com/pyamg/pyamg/issues/139 for further
257
+ information.
258
+
259
+ n_init : int, default=10
260
+ Number of times the k-means algorithm will be run with different
261
+ centroid seeds. The final results will be the best output of n_init
262
+ consecutive runs in terms of inertia. Only used if
263
+ ``assign_labels='kmeans'``.
264
+
265
+ eigen_tol : float, default="auto"
266
+ Stopping criterion for eigendecomposition of the Laplacian matrix.
267
+ If `eigen_tol="auto"` then the passed tolerance will depend on the
268
+ `eigen_solver`:
269
+
270
+ - If `eigen_solver="arpack"`, then `eigen_tol=0.0`;
271
+ - If `eigen_solver="lobpcg"` or `eigen_solver="amg"`, then
272
+ `eigen_tol=None` which configures the underlying `lobpcg` solver to
273
+ automatically resolve the value according to their heuristics. See,
274
+ :func:`scipy.sparse.linalg.lobpcg` for details.
275
+
276
+ Note that when using `eigen_solver="lobpcg"` or `eigen_solver="amg"`
277
+ values of `tol<1e-5` may lead to convergence issues and should be
278
+ avoided.
279
+
280
+ .. versionadded:: 1.2
281
+ Added 'auto' option.
282
+
283
+ assign_labels : {'kmeans', 'discretize', 'cluster_qr'}, default='kmeans'
284
+ The strategy to use to assign labels in the embedding
285
+ space. There are three ways to assign labels after the Laplacian
286
+ embedding. k-means can be applied and is a popular choice. But it can
287
+ also be sensitive to initialization. Discretization is another
288
+ approach which is less sensitive to random initialization [3]_.
289
+ The cluster_qr method [5]_ directly extracts clusters from eigenvectors
290
+ in spectral clustering. In contrast to k-means and discretization, cluster_qr
291
+ has no tuning parameters and is not an iterative method, yet may outperform
292
+ k-means and discretization in terms of both quality and speed.
293
+
294
+ .. versionchanged:: 1.1
295
+ Added new labeling method 'cluster_qr'.
296
+
297
+ verbose : bool, default=False
298
+ Verbosity mode.
299
+
300
+ .. versionadded:: 0.24
301
+
302
+ Returns
303
+ -------
304
+ labels : array of integers, shape: n_samples
305
+ The labels of the clusters.
306
+
307
+ Notes
308
+ -----
309
+ The graph should contain only one connected component; otherwise
310
+ the results make little sense.
311
+
312
+ This algorithm solves the normalized cut for `k=2`: it is a
313
+ normalized spectral clustering.
314
+
315
+ References
316
+ ----------
317
+
318
+ .. [1] :doi:`Normalized cuts and image segmentation, 2000
319
+ Jianbo Shi, Jitendra Malik
320
+ <10.1109/34.868688>`
321
+
322
+ .. [2] :doi:`A Tutorial on Spectral Clustering, 2007
323
+ Ulrike von Luxburg
324
+ <10.1007/s11222-007-9033-z>`
325
+
326
+ .. [3] `Multiclass spectral clustering, 2003
327
+ Stella X. Yu, Jianbo Shi
328
+ <https://people.eecs.berkeley.edu/~jordan/courses/281B-spring04/readings/yu-shi.pdf>`_
329
+
330
+ .. [4] :doi:`Toward the Optimal Preconditioned Eigensolver:
331
+ Locally Optimal Block Preconditioned Conjugate Gradient Method, 2001
332
+ A. V. Knyazev
333
+ SIAM Journal on Scientific Computing 23, no. 2, pp. 517-541.
334
+ <10.1137/S1064827500366124>`
335
+
336
+ .. [5] :doi:`Simple, direct, and efficient multi-way spectral clustering, 2019
337
+ Anil Damle, Victor Minden, Lexing Ying
338
+ <10.1093/imaiai/iay008>`
339
+
340
+ .. [6] :doi:`Multiscale Spectral Image Segmentation Multiscale preconditioning
341
+ for computing eigenvalues of graph Laplacians in image segmentation, 2006
342
+ Andrew Knyazev
343
+ <10.13140/RG.2.2.35280.02565>`
344
+
345
+ .. [7] :doi:`Preconditioned spectral clustering for stochastic block partition
346
+ streaming graph challenge (Preliminary version at arXiv.)
347
+ David Zhuzhunashvili, Andrew Knyazev
348
+ <10.1109/HPEC.2017.8091045>`
349
+
350
+ Examples
351
+ --------
352
+ >>> import numpy as np
353
+ >>> from sklearn.metrics.pairwise import pairwise_kernels
354
+ >>> from sklearn.cluster import spectral_clustering
355
+ >>> X = np.array([[1, 1], [2, 1], [1, 0],
356
+ ... [4, 7], [3, 5], [3, 6]])
357
+ >>> affinity = pairwise_kernels(X, metric='rbf')
358
+ >>> spectral_clustering(
359
+ ... affinity=affinity, n_clusters=2, assign_labels="discretize", random_state=0
360
+ ... )
361
+ array([1, 1, 1, 0, 0, 0])
362
+ """
363
+
364
+ clusterer = SpectralClustering(
365
+ n_clusters=n_clusters,
366
+ n_components=n_components,
367
+ eigen_solver=eigen_solver,
368
+ random_state=random_state,
369
+ n_init=n_init,
370
+ affinity="precomputed",
371
+ eigen_tol=eigen_tol,
372
+ assign_labels=assign_labels,
373
+ verbose=verbose,
374
+ ).fit(affinity)
375
+
376
+ return clusterer.labels_
377
+
378
+
379
+ class SpectralClustering(ClusterMixin, BaseEstimator):
380
+ """Apply clustering to a projection of the normalized Laplacian.
381
+
382
+ In practice Spectral Clustering is very useful when the structure of
383
+ the individual clusters is highly non-convex, or more generally when
384
+ a measure of the center and spread of the cluster is not a suitable
385
+ description of the complete cluster, such as when clusters are
386
+ nested circles on the 2D plane.
387
+
388
+ If the affinity matrix is the adjacency matrix of a graph, this method
389
+ can be used to find normalized graph cuts [1]_, [2]_.
390
+
391
+ When calling ``fit``, an affinity matrix is constructed using either
392
+ a kernel function such as the Gaussian (aka RBF) kernel with Euclidean
393
+ distance ``d(X, X)``::
394
+
395
+ np.exp(-gamma * d(X,X) ** 2)
396
+
397
+ or a k-nearest neighbors connectivity matrix.
398
+
399
+ Alternatively, a user-provided affinity matrix can be specified by
400
+ setting ``affinity='precomputed'``.
401
+
402
+ Read more in the :ref:`User Guide <spectral_clustering>`.
403
+
404
+ Parameters
405
+ ----------
406
+ n_clusters : int, default=8
407
+ The dimension of the projection subspace.
408
+
409
+ eigen_solver : {'arpack', 'lobpcg', 'amg'}, default=None
410
+ The eigenvalue decomposition strategy to use. AMG requires pyamg
411
+ to be installed. It can be faster on very large, sparse problems,
412
+ but may also lead to instabilities. If None, then ``'arpack'`` is
413
+ used. See [4]_ for more details regarding `'lobpcg'`.
414
+
415
+ n_components : int, default=None
416
+ Number of eigenvectors to use for the spectral embedding. If None,
417
+ defaults to `n_clusters`.
418
+
419
+ random_state : int, RandomState instance, default=None
420
+ A pseudo random number generator used for the initialization
421
+ of the lobpcg eigenvectors decomposition when `eigen_solver ==
422
+ 'amg'`, and for the K-Means initialization. Use an int to make
423
+ the results deterministic across calls (See
424
+ :term:`Glossary <random_state>`).
425
+
426
+ .. note::
427
+ When using `eigen_solver == 'amg'`,
428
+ it is necessary to also fix the global numpy seed with
429
+ `np.random.seed(int)` to get deterministic results. See
430
+ https://github.com/pyamg/pyamg/issues/139 for further
431
+ information.
432
+
433
+ n_init : int, default=10
434
+ Number of times the k-means algorithm will be run with different
435
+ centroid seeds. The final results will be the best output of n_init
436
+ consecutive runs in terms of inertia. Only used if
437
+ ``assign_labels='kmeans'``.
438
+
439
+ gamma : float, default=1.0
440
+ Kernel coefficient for rbf, poly, sigmoid, laplacian and chi2 kernels.
441
+ Ignored for ``affinity='nearest_neighbors'``.
442
+
443
+ affinity : str or callable, default='rbf'
444
+ How to construct the affinity matrix.
445
+ - 'nearest_neighbors': construct the affinity matrix by computing a
446
+ graph of nearest neighbors.
447
+ - 'rbf': construct the affinity matrix using a radial basis function
448
+ (RBF) kernel.
449
+ - 'precomputed': interpret ``X`` as a precomputed affinity matrix,
450
+ where larger values indicate greater similarity between instances.
451
+ - 'precomputed_nearest_neighbors': interpret ``X`` as a sparse graph
452
+ of precomputed distances, and construct a binary affinity matrix
453
+ from the ``n_neighbors`` nearest neighbors of each instance.
454
+ - one of the kernels supported by
455
+ :func:`~sklearn.metrics.pairwise.pairwise_kernels`.
456
+
457
+ Only kernels that produce similarity scores (non-negative values that
458
+ increase with similarity) should be used. This property is not checked
459
+ by the clustering algorithm.
460
+
461
+ n_neighbors : int, default=10
462
+ Number of neighbors to use when constructing the affinity matrix using
463
+ the nearest neighbors method. Ignored for ``affinity='rbf'``.
464
+
465
+ eigen_tol : float, default="auto"
466
+ Stopping criterion for eigen decomposition of the Laplacian matrix.
467
+ If `eigen_tol="auto"` then the passed tolerance will depend on the
468
+ `eigen_solver`:
469
+
470
+ - If `eigen_solver="arpack"`, then `eigen_tol=0.0`;
471
+ - If `eigen_solver="lobpcg"` or `eigen_solver="amg"`, then
472
+ `eigen_tol=None` which configures the underlying `lobpcg` solver to
473
+ automatically resolve the value according to its heuristics. See
474
+ :func:`scipy.sparse.linalg.lobpcg` for details.
475
+
476
+ Note that when using `eigen_solver="lobpcg"` or `eigen_solver="amg"`
477
+ values of `tol<1e-5` may lead to convergence issues and should be
478
+ avoided.
479
+
480
+ .. versionadded:: 1.2
481
+ Added 'auto' option.
482
+
483
+ assign_labels : {'kmeans', 'discretize', 'cluster_qr'}, default='kmeans'
484
+ The strategy for assigning labels in the embedding space. There are three
485
+ ways to assign labels after the Laplacian embedding. k-means is a
486
+ popular choice, but it can be sensitive to initialization.
487
+ Discretization is another approach which is less sensitive to random
488
+ initialization [3]_.
489
+ The cluster_qr method [5]_ directly extracts clusters from eigenvectors
490
+ in spectral clustering. In contrast to k-means and discretization, cluster_qr
491
+ has no tuning parameters and runs no iterations, yet may outperform
492
+ k-means and discretization in terms of both quality and speed.
493
+
494
+ .. versionchanged:: 1.1
495
+ Added new labeling method 'cluster_qr'.
496
+
497
+ degree : float, default=3
498
+ Degree of the polynomial kernel. Ignored by other kernels.
499
+
500
+ coef0 : float, default=1
501
+ Zero coefficient for polynomial and sigmoid kernels.
502
+ Ignored by other kernels.
503
+
504
+ kernel_params : dict of str to any, default=None
505
+ Parameters (keyword arguments) and values for kernel passed as
506
+ callable object. Ignored by other kernels.
507
+
508
+ n_jobs : int, default=None
509
+ The number of parallel jobs to run when `affinity='nearest_neighbors'`
510
+ or `affinity='precomputed_nearest_neighbors'`. The neighbors search
511
+ will be done in parallel.
512
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
513
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
514
+ for more details.
515
+
516
+ verbose : bool, default=False
517
+ Verbosity mode.
518
+
519
+ .. versionadded:: 0.24
520
+
521
+ Attributes
522
+ ----------
523
+ affinity_matrix_ : array-like of shape (n_samples, n_samples)
524
+ Affinity matrix used for clustering. Available only after calling
525
+ ``fit``.
526
+
527
+ labels_ : ndarray of shape (n_samples,)
528
+ Labels of each point.
529
+
530
+ n_features_in_ : int
531
+ Number of features seen during :term:`fit`.
532
+
533
+ .. versionadded:: 0.24
534
+
535
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
536
+ Names of features seen during :term:`fit`. Defined only when `X`
537
+ has feature names that are all strings.
538
+
539
+ .. versionadded:: 1.0
540
+
541
+ See Also
542
+ --------
543
+ sklearn.cluster.KMeans : K-Means clustering.
544
+ sklearn.cluster.DBSCAN : Density-Based Spatial Clustering of
545
+ Applications with Noise.
546
+
547
+ Notes
548
+ -----
549
+ A distance matrix for which 0 indicates identical elements and high values
550
+ indicate very dissimilar elements can be transformed into an affinity /
551
+ similarity matrix that is well-suited for the algorithm by
552
+ applying the Gaussian (aka RBF, heat) kernel::
553
+
554
+ np.exp(- dist_matrix ** 2 / (2. * delta ** 2))
555
+
556
+ where ``delta`` is a free parameter representing the width of the Gaussian
557
+ kernel.
558
+
559
+ An alternative is to take a symmetric version of the k-nearest neighbors
560
+ connectivity matrix of the points.
561
+
562
+ If the pyamg package is installed, it is used: this greatly
563
+ speeds up computation.
564
+
565
+ References
566
+ ----------
567
+ .. [1] :doi:`Normalized cuts and image segmentation, 2000
568
+ Jianbo Shi, Jitendra Malik
569
+ <10.1109/34.868688>`
570
+
571
+ .. [2] :doi:`A Tutorial on Spectral Clustering, 2007
572
+ Ulrike von Luxburg
573
+ <10.1007/s11222-007-9033-z>`
574
+
575
+ .. [3] `Multiclass spectral clustering, 2003
576
+ Stella X. Yu, Jianbo Shi
577
+ <https://people.eecs.berkeley.edu/~jordan/courses/281B-spring04/readings/yu-shi.pdf>`_
578
+
579
+ .. [4] :doi:`Toward the Optimal Preconditioned Eigensolver:
580
+ Locally Optimal Block Preconditioned Conjugate Gradient Method, 2001
581
+ A. V. Knyazev
582
+ SIAM Journal on Scientific Computing 23, no. 2, pp. 517-541.
583
+ <10.1137/S1064827500366124>`
584
+
585
+ .. [5] :doi:`Simple, direct, and efficient multi-way spectral clustering, 2019
586
+ Anil Damle, Victor Minden, Lexing Ying
587
+ <10.1093/imaiai/iay008>`
588
+
589
+ Examples
590
+ --------
591
+ >>> from sklearn.cluster import SpectralClustering
592
+ >>> import numpy as np
593
+ >>> X = np.array([[1, 1], [2, 1], [1, 0],
594
+ ... [4, 7], [3, 5], [3, 6]])
595
+ >>> clustering = SpectralClustering(n_clusters=2,
596
+ ... assign_labels='discretize',
597
+ ... random_state=0).fit(X)
598
+ >>> clustering.labels_
599
+ array([1, 1, 1, 0, 0, 0])
600
+ >>> clustering
601
+ SpectralClustering(assign_labels='discretize', n_clusters=2,
602
+ random_state=0)
603
+ """
604
+
605
+    _parameter_constraints: dict = {
+        "n_clusters": [Interval(Integral, 1, None, closed="left")],
+        "eigen_solver": [StrOptions({"arpack", "lobpcg", "amg"}), None],
+        "n_components": [Interval(Integral, 1, None, closed="left"), None],
+        "random_state": ["random_state"],
+        "n_init": [Interval(Integral, 1, None, closed="left")],
+        "gamma": [Interval(Real, 0, None, closed="left")],
+        "affinity": [
+            callable,
+            StrOptions(
+                set(KERNEL_PARAMS)
+                | {"nearest_neighbors", "precomputed", "precomputed_nearest_neighbors"}
+            ),
+        ],
+        "n_neighbors": [Interval(Integral, 1, None, closed="left")],
+        "eigen_tol": [
+            Interval(Real, 0.0, None, closed="left"),
+            StrOptions({"auto"}),
+        ],
+        "assign_labels": [StrOptions({"kmeans", "discretize", "cluster_qr"})],
+        "degree": [Interval(Real, 0, None, closed="left")],
+        "coef0": [Interval(Real, None, None, closed="neither")],
+        "kernel_params": [dict, None],
+        "n_jobs": [Integral, None],
+        "verbose": ["verbose"],
+    }
+
+    def __init__(
+        self,
+        n_clusters=8,
+        *,
+        eigen_solver=None,
+        n_components=None,
+        random_state=None,
+        n_init=10,
+        gamma=1.0,
+        affinity="rbf",
+        n_neighbors=10,
+        eigen_tol="auto",
+        assign_labels="kmeans",
+        degree=3,
+        coef0=1,
+        kernel_params=None,
+        n_jobs=None,
+        verbose=False,
+    ):
+        self.n_clusters = n_clusters
+        self.eigen_solver = eigen_solver
+        self.n_components = n_components
+        self.random_state = random_state
+        self.n_init = n_init
+        self.gamma = gamma
+        self.affinity = affinity
+        self.n_neighbors = n_neighbors
+        self.eigen_tol = eigen_tol
+        self.assign_labels = assign_labels
+        self.degree = degree
+        self.coef0 = coef0
+        self.kernel_params = kernel_params
+        self.n_jobs = n_jobs
+        self.verbose = verbose
+
+    @_fit_context(prefer_skip_nested_validation=True)
+    def fit(self, X, y=None):
+        """Perform spectral clustering from features, or affinity matrix.
+
+        Parameters
+        ----------
+        X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
+                (n_samples, n_samples)
+            Training instances to cluster, similarities / affinities between
+            instances if ``affinity='precomputed'``, or distances between
+            instances if ``affinity='precomputed_nearest_neighbors'``. If a
+            sparse matrix is provided in a format other than ``csr_matrix``,
+            ``csc_matrix``, or ``coo_matrix``, it will be converted into a
+            sparse ``csr_matrix``.
+
+        y : Ignored
+            Not used, present here for API consistency by convention.
+
+        Returns
+        -------
+        self : object
+            A fitted instance of the estimator.
+        """
+        X = self._validate_data(
+            X,
+            accept_sparse=["csr", "csc", "coo"],
+            dtype=np.float64,
+            ensure_min_samples=2,
+        )
+        allow_squared = self.affinity in [
+            "precomputed",
+            "precomputed_nearest_neighbors",
+        ]
+        if X.shape[0] == X.shape[1] and not allow_squared:
+            warnings.warn(
+                "The spectral clustering API has changed. ``fit`` "
+                "now constructs an affinity matrix from data. To use"
+                " a custom affinity matrix, "
+                "set ``affinity=precomputed``."
+            )
+
+        if self.affinity == "nearest_neighbors":
+            connectivity = kneighbors_graph(
+                X, n_neighbors=self.n_neighbors, include_self=True, n_jobs=self.n_jobs
+            )
+            self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
+        elif self.affinity == "precomputed_nearest_neighbors":
+            estimator = NearestNeighbors(
+                n_neighbors=self.n_neighbors, n_jobs=self.n_jobs, metric="precomputed"
+            ).fit(X)
+            connectivity = estimator.kneighbors_graph(X=X, mode="connectivity")
+            self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
+        elif self.affinity == "precomputed":
+            self.affinity_matrix_ = X
+        else:
+            params = self.kernel_params
+            if params is None:
+                params = {}
+            if not callable(self.affinity):
+                params["gamma"] = self.gamma
+                params["degree"] = self.degree
+                params["coef0"] = self.coef0
+            self.affinity_matrix_ = pairwise_kernels(
+                X, metric=self.affinity, filter_params=True, **params
+            )
+
+        random_state = check_random_state(self.random_state)
+        n_components = (
+            self.n_clusters if self.n_components is None else self.n_components
+        )
+        # We now obtain the real valued solution matrix to the
+        # relaxed Ncut problem, solving the eigenvalue problem
+        # L_sym x = lambda x and recovering u = D^-1/2 x.
+        # The first eigenvector is constant only for fully connected graphs
+        # and should be kept for spectral clustering (drop_first = False)
+        # See spectral_embedding documentation.
+        maps = spectral_embedding(
+            self.affinity_matrix_,
+            n_components=n_components,
+            eigen_solver=self.eigen_solver,
+            random_state=random_state,
+            eigen_tol=self.eigen_tol,
+            drop_first=False,
+        )
+        if self.verbose:
+            print(f"Computing label assignment using {self.assign_labels}")
+
+        if self.assign_labels == "kmeans":
+            _, self.labels_, _ = k_means(
+                maps,
+                self.n_clusters,
+                random_state=random_state,
+                n_init=self.n_init,
+                verbose=self.verbose,
+            )
+        elif self.assign_labels == "cluster_qr":
+            self.labels_ = cluster_qr(maps)
+        else:
+            self.labels_ = discretize(maps, random_state=random_state)
+
+        return self
+
+    def fit_predict(self, X, y=None):
+        """Perform spectral clustering on `X` and return cluster labels.
+
+        Parameters
+        ----------
+        X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
+                (n_samples, n_samples)
+            Training instances to cluster, similarities / affinities between
+            instances if ``affinity='precomputed'``, or distances between
+            instances if ``affinity='precomputed_nearest_neighbors'``. If a
+            sparse matrix is provided in a format other than ``csr_matrix``,
+            ``csc_matrix``, or ``coo_matrix``, it will be converted into a
+            sparse ``csr_matrix``.
+
+        y : Ignored
+            Not used, present here for API consistency by convention.
+
+        Returns
+        -------
+        labels : ndarray of shape (n_samples,)
+            Cluster labels.
+        """
+        return super().fit_predict(X, y)
+
+    def _more_tags(self):
+        return {
+            "pairwise": self.affinity in [
+                "precomputed",
+                "precomputed_nearest_neighbors",
+            ]
+        }
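
For illustration only (this snippet is not part of the file added above): the Notes section of the docstring describes turning a precomputed distance matrix into an affinity matrix with the Gaussian (RBF) kernel and then clustering it with ``affinity="precomputed"``; ``delta`` below is the free bandwidth parameter mentioned there. A minimal sketch:

import numpy as np
from sklearn.cluster import SpectralClustering
from sklearn.metrics import pairwise_distances

X = np.array([[1, 1], [2, 1], [1, 0], [4, 7], [3, 5], [3, 6]], dtype=float)
dist_matrix = pairwise_distances(X)   # zeros on the diagonal, large values = dissimilar
delta = 1.0                           # free kernel-width parameter
affinity = np.exp(-dist_matrix ** 2 / (2.0 * delta ** 2))

labels = SpectralClustering(
    n_clusters=2, affinity="precomputed", assign_labels="discretize", random_state=0
).fit_predict(affinity)
print(labels)                         # two clusters, e.g. [1 1 1 0 0 0]

The symmetric k-nearest-neighbors alternative mentioned in the Notes corresponds to ``affinity="nearest_neighbors"``, which ``fit`` above builds as ``0.5 * (connectivity + connectivity.T)``.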
venv/lib/python3.10/site-packages/sklearn/compose/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (618 Bytes).
venv/lib/python3.10/site-packages/sklearn/compose/__pycache__/_column_transformer.cpython-310.pyc ADDED
Binary file (49.4 kB).
venv/lib/python3.10/site-packages/sklearn/compose/__pycache__/_target.cpython-310.pyc ADDED
Binary file (9.68 kB).
venv/lib/python3.10/site-packages/sklearn/compose/tests/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/sklearn/compose/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (189 Bytes).
venv/lib/python3.10/site-packages/sklearn/compose/tests/__pycache__/test_column_transformer.cpython-310.pyc ADDED
Binary file (60.6 kB).
venv/lib/python3.10/site-packages/sklearn/compose/tests/__pycache__/test_target.cpython-310.pyc ADDED
Binary file (11.9 kB).
venv/lib/python3.10/site-packages/sklearn/compose/tests/test_column_transformer.py ADDED
@@ -0,0 +1,2582 @@
+"""
+Test the ColumnTransformer.
+"""
+
+import pickle
+import re
+import warnings
+
+import numpy as np
+import pytest
+from numpy.testing import assert_allclose
+from scipy import sparse
+
+from sklearn.base import BaseEstimator, TransformerMixin
+from sklearn.compose import (
+    ColumnTransformer,
+    make_column_selector,
+    make_column_transformer,
+)
+from sklearn.exceptions import NotFittedError
+from sklearn.feature_selection import VarianceThreshold
+from sklearn.preprocessing import (
+    FunctionTransformer,
+    Normalizer,
+    OneHotEncoder,
+    StandardScaler,
+)
+from sklearn.tests.metadata_routing_common import (
+    ConsumingTransformer,
+    _Registry,
+    check_recorded_metadata,
+)
+from sklearn.utils._testing import (
+    _convert_container,
+    assert_allclose_dense_sparse,
+    assert_almost_equal,
+    assert_array_equal,
+)
+from sklearn.utils.fixes import CSR_CONTAINERS
+
+
+class Trans(TransformerMixin, BaseEstimator):
+    def fit(self, X, y=None):
+        return self
+
+    def transform(self, X, y=None):
+        # 1D Series -> 2D DataFrame
+        if hasattr(X, "to_frame"):
+            return X.to_frame()
+        # 1D array -> 2D array
+        if getattr(X, "ndim", 2) == 1:
+            return np.atleast_2d(X).T
+        return X
+
+
+class DoubleTrans(BaseEstimator):
+    def fit(self, X, y=None):
+        return self
+
+    def transform(self, X):
+        return 2 * X
+
+
+class SparseMatrixTrans(BaseEstimator):
+    def __init__(self, csr_container):
+        self.csr_container = csr_container
+
+    def fit(self, X, y=None):
+        return self
+
+    def transform(self, X, y=None):
+        n_samples = len(X)
+        return self.csr_container(sparse.eye(n_samples, n_samples))
+
+
+class TransNo2D(BaseEstimator):
+    def fit(self, X, y=None):
+        return self
+
+    def transform(self, X, y=None):
+        return X
+
+
+class TransRaise(BaseEstimator):
+    def fit(self, X, y=None):
+        raise ValueError("specific message")
+
+    def transform(self, X, y=None):
+        raise ValueError("specific message")
+
+
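
As an aside (this snippet is not part of the committed test file): the Trans helper above compensates for the fact that a scalar column specification hands a transformer a single 1-D column, while a list specification hands it a 2-D block, and the transformer's output must always be 2-D. A minimal, self-contained sketch of that difference:

import numpy as np
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import FunctionTransformer

X = np.array([[0, 1, 2], [2, 4, 6]]).T  # shape (3, 2), as used throughout these tests

# Scalar column spec: the callable receives a 1-D column and must return 2-D output.
to_2d = FunctionTransformer(lambda col: np.asarray(col).reshape(-1, 1))
ct = ColumnTransformer([("first", to_2d, 0)], remainder="drop")
print(ct.fit_transform(X).shape)  # (3, 1)

# List column spec: the transformer already receives a 2-D block.
ct = ColumnTransformer([("both", "passthrough", [0, 1])])
print(ct.fit_transform(X).shape)  # (3, 2)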
+ def test_column_transformer():
93
+ X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
94
+
95
+ X_res_first1D = np.array([0, 1, 2])
96
+ X_res_second1D = np.array([2, 4, 6])
97
+ X_res_first = X_res_first1D.reshape(-1, 1)
98
+ X_res_both = X_array
99
+
100
+ cases = [
101
+ # single column 1D / 2D
102
+ (0, X_res_first),
103
+ ([0], X_res_first),
104
+ # list-like
105
+ ([0, 1], X_res_both),
106
+ (np.array([0, 1]), X_res_both),
107
+ # slice
108
+ (slice(0, 1), X_res_first),
109
+ (slice(0, 2), X_res_both),
110
+ # boolean mask
111
+ (np.array([True, False]), X_res_first),
112
+ ([True, False], X_res_first),
113
+ (np.array([True, True]), X_res_both),
114
+ ([True, True], X_res_both),
115
+ ]
116
+
117
+ for selection, res in cases:
118
+ ct = ColumnTransformer([("trans", Trans(), selection)], remainder="drop")
119
+ assert_array_equal(ct.fit_transform(X_array), res)
120
+ assert_array_equal(ct.fit(X_array).transform(X_array), res)
121
+
122
+ # callable that returns any of the allowed specifiers
123
+ ct = ColumnTransformer(
124
+ [("trans", Trans(), lambda x: selection)], remainder="drop"
125
+ )
126
+ assert_array_equal(ct.fit_transform(X_array), res)
127
+ assert_array_equal(ct.fit(X_array).transform(X_array), res)
128
+
129
+ ct = ColumnTransformer([("trans1", Trans(), [0]), ("trans2", Trans(), [1])])
130
+ assert_array_equal(ct.fit_transform(X_array), X_res_both)
131
+ assert_array_equal(ct.fit(X_array).transform(X_array), X_res_both)
132
+ assert len(ct.transformers_) == 2
133
+
134
+ # test with transformer_weights
135
+ transformer_weights = {"trans1": 0.1, "trans2": 10}
136
+ both = ColumnTransformer(
137
+ [("trans1", Trans(), [0]), ("trans2", Trans(), [1])],
138
+ transformer_weights=transformer_weights,
139
+ )
140
+ res = np.vstack(
141
+ [
142
+ transformer_weights["trans1"] * X_res_first1D,
143
+ transformer_weights["trans2"] * X_res_second1D,
144
+ ]
145
+ ).T
146
+ assert_array_equal(both.fit_transform(X_array), res)
147
+ assert_array_equal(both.fit(X_array).transform(X_array), res)
148
+ assert len(both.transformers_) == 2
149
+
150
+ both = ColumnTransformer(
151
+ [("trans", Trans(), [0, 1])], transformer_weights={"trans": 0.1}
152
+ )
153
+ assert_array_equal(both.fit_transform(X_array), 0.1 * X_res_both)
154
+ assert_array_equal(both.fit(X_array).transform(X_array), 0.1 * X_res_both)
155
+ assert len(both.transformers_) == 1
156
+
157
+
158
+ def test_column_transformer_tuple_transformers_parameter():
159
+ X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
160
+
161
+ transformers = [("trans1", Trans(), [0]), ("trans2", Trans(), [1])]
162
+
163
+ ct_with_list = ColumnTransformer(transformers)
164
+ ct_with_tuple = ColumnTransformer(tuple(transformers))
165
+
166
+ assert_array_equal(
167
+ ct_with_list.fit_transform(X_array), ct_with_tuple.fit_transform(X_array)
168
+ )
169
+ assert_array_equal(
170
+ ct_with_list.fit(X_array).transform(X_array),
171
+ ct_with_tuple.fit(X_array).transform(X_array),
172
+ )
173
+
174
+
175
+ @pytest.mark.parametrize("constructor_name", ["dataframe", "polars"])
176
+ def test_column_transformer_dataframe(constructor_name):
177
+ if constructor_name == "dataframe":
178
+ dataframe_lib = pytest.importorskip("pandas")
179
+ else:
180
+ dataframe_lib = pytest.importorskip(constructor_name)
181
+
182
+ X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
183
+ X_df = _convert_container(
184
+ X_array, constructor_name, columns_name=["first", "second"]
185
+ )
186
+
187
+ X_res_first = np.array([0, 1, 2]).reshape(-1, 1)
188
+ X_res_both = X_array
189
+
190
+ cases = [
191
+ # String keys: label based
192
+ # list
193
+ (["first"], X_res_first),
194
+ (["first", "second"], X_res_both),
195
+ # slice
196
+ (slice("first", "second"), X_res_both),
197
+ # int keys: positional
198
+ # list
199
+ ([0], X_res_first),
200
+ ([0, 1], X_res_both),
201
+ (np.array([0, 1]), X_res_both),
202
+ # slice
203
+ (slice(0, 1), X_res_first),
204
+ (slice(0, 2), X_res_both),
205
+ # boolean mask
206
+ (np.array([True, False]), X_res_first),
207
+ ([True, False], X_res_first),
208
+ ]
209
+ if constructor_name == "dataframe":
210
+ # Scalars are only supported for pandas dataframes.
211
+ cases.extend(
212
+ [
213
+ # scalar
214
+ (0, X_res_first),
215
+ ("first", X_res_first),
216
+ (
217
+ dataframe_lib.Series([True, False], index=["first", "second"]),
218
+ X_res_first,
219
+ ),
220
+ ]
221
+ )
222
+
223
+ for selection, res in cases:
224
+ ct = ColumnTransformer([("trans", Trans(), selection)], remainder="drop")
225
+ assert_array_equal(ct.fit_transform(X_df), res)
226
+ assert_array_equal(ct.fit(X_df).transform(X_df), res)
227
+
228
+ # callable that returns any of the allowed specifiers
229
+ ct = ColumnTransformer(
230
+ [("trans", Trans(), lambda X: selection)], remainder="drop"
231
+ )
232
+ assert_array_equal(ct.fit_transform(X_df), res)
233
+ assert_array_equal(ct.fit(X_df).transform(X_df), res)
234
+
235
+ ct = ColumnTransformer(
236
+ [("trans1", Trans(), ["first"]), ("trans2", Trans(), ["second"])]
237
+ )
238
+ assert_array_equal(ct.fit_transform(X_df), X_res_both)
239
+ assert_array_equal(ct.fit(X_df).transform(X_df), X_res_both)
240
+ assert len(ct.transformers_) == 2
241
+ assert ct.transformers_[-1][0] != "remainder"
242
+
243
+ ct = ColumnTransformer([("trans1", Trans(), [0]), ("trans2", Trans(), [1])])
244
+ assert_array_equal(ct.fit_transform(X_df), X_res_both)
245
+ assert_array_equal(ct.fit(X_df).transform(X_df), X_res_both)
246
+ assert len(ct.transformers_) == 2
247
+ assert ct.transformers_[-1][0] != "remainder"
248
+
249
+ # test with transformer_weights
250
+ transformer_weights = {"trans1": 0.1, "trans2": 10}
251
+ both = ColumnTransformer(
252
+ [("trans1", Trans(), ["first"]), ("trans2", Trans(), ["second"])],
253
+ transformer_weights=transformer_weights,
254
+ )
255
+ res = np.vstack(
256
+ [
257
+ transformer_weights["trans1"] * X_df["first"],
258
+ transformer_weights["trans2"] * X_df["second"],
259
+ ]
260
+ ).T
261
+ assert_array_equal(both.fit_transform(X_df), res)
262
+ assert_array_equal(both.fit(X_df).transform(X_df), res)
263
+ assert len(both.transformers_) == 2
264
+ assert both.transformers_[-1][0] != "remainder"
265
+
266
+ # test multiple columns
267
+ both = ColumnTransformer(
268
+ [("trans", Trans(), ["first", "second"])], transformer_weights={"trans": 0.1}
269
+ )
270
+ assert_array_equal(both.fit_transform(X_df), 0.1 * X_res_both)
271
+ assert_array_equal(both.fit(X_df).transform(X_df), 0.1 * X_res_both)
272
+ assert len(both.transformers_) == 1
273
+ assert both.transformers_[-1][0] != "remainder"
274
+
275
+ both = ColumnTransformer(
276
+ [("trans", Trans(), [0, 1])], transformer_weights={"trans": 0.1}
277
+ )
278
+ assert_array_equal(both.fit_transform(X_df), 0.1 * X_res_both)
279
+ assert_array_equal(both.fit(X_df).transform(X_df), 0.1 * X_res_both)
280
+ assert len(both.transformers_) == 1
281
+ assert both.transformers_[-1][0] != "remainder"
282
+
283
+ # ensure pandas object is passed through
284
+
285
+ class TransAssert(BaseEstimator):
286
+ def __init__(self, expected_type_transform):
287
+ self.expected_type_transform = expected_type_transform
288
+
289
+ def fit(self, X, y=None):
290
+ return self
291
+
292
+ def transform(self, X, y=None):
293
+ assert isinstance(X, self.expected_type_transform)
294
+ if isinstance(X, dataframe_lib.Series):
295
+ X = X.to_frame()
296
+ return X
297
+
298
+ ct = ColumnTransformer(
299
+ [
300
+ (
301
+ "trans",
302
+ TransAssert(expected_type_transform=dataframe_lib.DataFrame),
303
+ ["first", "second"],
304
+ )
305
+ ]
306
+ )
307
+ ct.fit_transform(X_df)
308
+
309
+ if constructor_name == "dataframe":
310
+ # DataFrame protocol does not have 1d columns, so we only test on Pandas
311
+ # dataframes.
312
+ ct = ColumnTransformer(
313
+ [
314
+ (
315
+ "trans",
316
+ TransAssert(expected_type_transform=dataframe_lib.Series),
317
+ "first",
318
+ )
319
+ ],
320
+ remainder="drop",
321
+ )
322
+ ct.fit_transform(X_df)
323
+
324
+ # Only test on pandas because the dataframe protocol requires string column
325
+ # names
326
+ # integer column spec + integer column names -> still use positional
327
+ X_df2 = X_df.copy()
328
+ X_df2.columns = [1, 0]
329
+ ct = ColumnTransformer([("trans", Trans(), 0)], remainder="drop")
330
+ assert_array_equal(ct.fit_transform(X_df2), X_res_first)
331
+ assert_array_equal(ct.fit(X_df2).transform(X_df2), X_res_first)
332
+
333
+ assert len(ct.transformers_) == 2
334
+ assert ct.transformers_[-1][0] == "remainder"
335
+ assert ct.transformers_[-1][1] == "drop"
336
+ assert_array_equal(ct.transformers_[-1][2], [1])
337
+
338
+
339
+ @pytest.mark.parametrize("pandas", [True, False], ids=["pandas", "numpy"])
340
+ @pytest.mark.parametrize(
341
+ "column_selection",
342
+ [[], np.array([False, False]), [False, False]],
343
+ ids=["list", "bool", "bool_int"],
344
+ )
345
+ @pytest.mark.parametrize("callable_column", [False, True])
346
+ def test_column_transformer_empty_columns(pandas, column_selection, callable_column):
347
+ # test case that ensures that the column transformer does also work when
348
+ # a given transformer doesn't have any columns to work on
349
+ X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
350
+ X_res_both = X_array
351
+
352
+ if pandas:
353
+ pd = pytest.importorskip("pandas")
354
+ X = pd.DataFrame(X_array, columns=["first", "second"])
355
+ else:
356
+ X = X_array
357
+
358
+ if callable_column:
359
+ column = lambda X: column_selection # noqa
360
+ else:
361
+ column = column_selection
362
+
363
+ ct = ColumnTransformer(
364
+ [("trans1", Trans(), [0, 1]), ("trans2", TransRaise(), column)]
365
+ )
366
+ assert_array_equal(ct.fit_transform(X), X_res_both)
367
+ assert_array_equal(ct.fit(X).transform(X), X_res_both)
368
+ assert len(ct.transformers_) == 2
369
+ assert isinstance(ct.transformers_[1][1], TransRaise)
370
+
371
+ ct = ColumnTransformer(
372
+ [("trans1", TransRaise(), column), ("trans2", Trans(), [0, 1])]
373
+ )
374
+ assert_array_equal(ct.fit_transform(X), X_res_both)
375
+ assert_array_equal(ct.fit(X).transform(X), X_res_both)
376
+ assert len(ct.transformers_) == 2
377
+ assert isinstance(ct.transformers_[0][1], TransRaise)
378
+
379
+ ct = ColumnTransformer([("trans", TransRaise(), column)], remainder="passthrough")
380
+ assert_array_equal(ct.fit_transform(X), X_res_both)
381
+ assert_array_equal(ct.fit(X).transform(X), X_res_both)
382
+ assert len(ct.transformers_) == 2 # including remainder
383
+ assert isinstance(ct.transformers_[0][1], TransRaise)
384
+
385
+ fixture = np.array([[], [], []])
386
+ ct = ColumnTransformer([("trans", TransRaise(), column)], remainder="drop")
387
+ assert_array_equal(ct.fit_transform(X), fixture)
388
+ assert_array_equal(ct.fit(X).transform(X), fixture)
389
+ assert len(ct.transformers_) == 2 # including remainder
390
+ assert isinstance(ct.transformers_[0][1], TransRaise)
391
+
392
+
393
+ def test_column_transformer_output_indices():
394
+ # Checks for the output_indices_ attribute
395
+ X_array = np.arange(6).reshape(3, 2)
396
+
397
+ ct = ColumnTransformer([("trans1", Trans(), [0]), ("trans2", Trans(), [1])])
398
+ X_trans = ct.fit_transform(X_array)
399
+ assert ct.output_indices_ == {
400
+ "trans1": slice(0, 1),
401
+ "trans2": slice(1, 2),
402
+ "remainder": slice(0, 0),
403
+ }
404
+ assert_array_equal(X_trans[:, [0]], X_trans[:, ct.output_indices_["trans1"]])
405
+ assert_array_equal(X_trans[:, [1]], X_trans[:, ct.output_indices_["trans2"]])
406
+
407
+ # test with transformer_weights and multiple columns
408
+ ct = ColumnTransformer(
409
+ [("trans", Trans(), [0, 1])], transformer_weights={"trans": 0.1}
410
+ )
411
+ X_trans = ct.fit_transform(X_array)
412
+ assert ct.output_indices_ == {"trans": slice(0, 2), "remainder": slice(0, 0)}
413
+ assert_array_equal(X_trans[:, [0, 1]], X_trans[:, ct.output_indices_["trans"]])
414
+ assert_array_equal(X_trans[:, []], X_trans[:, ct.output_indices_["remainder"]])
415
+
416
+ # test case that ensures that the attribute does also work when
417
+ # a given transformer doesn't have any columns to work on
418
+ ct = ColumnTransformer([("trans1", Trans(), [0, 1]), ("trans2", TransRaise(), [])])
419
+ X_trans = ct.fit_transform(X_array)
420
+ assert ct.output_indices_ == {
421
+ "trans1": slice(0, 2),
422
+ "trans2": slice(0, 0),
423
+ "remainder": slice(0, 0),
424
+ }
425
+ assert_array_equal(X_trans[:, [0, 1]], X_trans[:, ct.output_indices_["trans1"]])
426
+ assert_array_equal(X_trans[:, []], X_trans[:, ct.output_indices_["trans2"]])
427
+ assert_array_equal(X_trans[:, []], X_trans[:, ct.output_indices_["remainder"]])
428
+
429
+ ct = ColumnTransformer([("trans", TransRaise(), [])], remainder="passthrough")
430
+ X_trans = ct.fit_transform(X_array)
431
+ assert ct.output_indices_ == {"trans": slice(0, 0), "remainder": slice(0, 2)}
432
+ assert_array_equal(X_trans[:, []], X_trans[:, ct.output_indices_["trans"]])
433
+ assert_array_equal(X_trans[:, [0, 1]], X_trans[:, ct.output_indices_["remainder"]])
434
+
435
+
436
+ def test_column_transformer_output_indices_df():
437
+ # Checks for the output_indices_ attribute with data frames
438
+ pd = pytest.importorskip("pandas")
439
+
440
+ X_df = pd.DataFrame(np.arange(6).reshape(3, 2), columns=["first", "second"])
441
+
442
+ ct = ColumnTransformer(
443
+ [("trans1", Trans(), ["first"]), ("trans2", Trans(), ["second"])]
444
+ )
445
+ X_trans = ct.fit_transform(X_df)
446
+ assert ct.output_indices_ == {
447
+ "trans1": slice(0, 1),
448
+ "trans2": slice(1, 2),
449
+ "remainder": slice(0, 0),
450
+ }
451
+ assert_array_equal(X_trans[:, [0]], X_trans[:, ct.output_indices_["trans1"]])
452
+ assert_array_equal(X_trans[:, [1]], X_trans[:, ct.output_indices_["trans2"]])
453
+ assert_array_equal(X_trans[:, []], X_trans[:, ct.output_indices_["remainder"]])
454
+
455
+ ct = ColumnTransformer([("trans1", Trans(), [0]), ("trans2", Trans(), [1])])
456
+ X_trans = ct.fit_transform(X_df)
457
+ assert ct.output_indices_ == {
458
+ "trans1": slice(0, 1),
459
+ "trans2": slice(1, 2),
460
+ "remainder": slice(0, 0),
461
+ }
462
+ assert_array_equal(X_trans[:, [0]], X_trans[:, ct.output_indices_["trans1"]])
463
+ assert_array_equal(X_trans[:, [1]], X_trans[:, ct.output_indices_["trans2"]])
464
+ assert_array_equal(X_trans[:, []], X_trans[:, ct.output_indices_["remainder"]])
465
+
466
+
467
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
468
+ def test_column_transformer_sparse_array(csr_container):
469
+ X_sparse = csr_container(sparse.eye(3, 2))
470
+
471
+ # no distinction between 1D and 2D
472
+ X_res_first = X_sparse[:, [0]]
473
+ X_res_both = X_sparse
474
+
475
+ for col in [(0,), [0], slice(0, 1)]:
476
+ for remainder, res in [("drop", X_res_first), ("passthrough", X_res_both)]:
477
+ ct = ColumnTransformer(
478
+ [("trans", Trans(), col)], remainder=remainder, sparse_threshold=0.8
479
+ )
480
+ assert sparse.issparse(ct.fit_transform(X_sparse))
481
+ assert_allclose_dense_sparse(ct.fit_transform(X_sparse), res)
482
+ assert_allclose_dense_sparse(ct.fit(X_sparse).transform(X_sparse), res)
483
+
484
+ for col in [[0, 1], slice(0, 2)]:
485
+ ct = ColumnTransformer([("trans", Trans(), col)], sparse_threshold=0.8)
486
+ assert sparse.issparse(ct.fit_transform(X_sparse))
487
+ assert_allclose_dense_sparse(ct.fit_transform(X_sparse), X_res_both)
488
+ assert_allclose_dense_sparse(ct.fit(X_sparse).transform(X_sparse), X_res_both)
489
+
490
+
491
+ def test_column_transformer_list():
492
+ X_list = [[1, float("nan"), "a"], [0, 0, "b"]]
493
+ expected_result = np.array(
494
+ [
495
+ [1, float("nan"), 1, 0],
496
+ [-1, 0, 0, 1],
497
+ ]
498
+ )
499
+
500
+ ct = ColumnTransformer(
501
+ [
502
+ ("numerical", StandardScaler(), [0, 1]),
503
+ ("categorical", OneHotEncoder(), [2]),
504
+ ]
505
+ )
506
+
507
+ assert_array_equal(ct.fit_transform(X_list), expected_result)
508
+ assert_array_equal(ct.fit(X_list).transform(X_list), expected_result)
509
+
510
+
511
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
512
+ def test_column_transformer_sparse_stacking(csr_container):
513
+ X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
514
+ col_trans = ColumnTransformer(
515
+ [("trans1", Trans(), [0]), ("trans2", SparseMatrixTrans(csr_container), 1)],
516
+ sparse_threshold=0.8,
517
+ )
518
+ col_trans.fit(X_array)
519
+ X_trans = col_trans.transform(X_array)
520
+ assert sparse.issparse(X_trans)
521
+ assert X_trans.shape == (X_trans.shape[0], X_trans.shape[0] + 1)
522
+ assert_array_equal(X_trans.toarray()[:, 1:], np.eye(X_trans.shape[0]))
523
+ assert len(col_trans.transformers_) == 2
524
+ assert col_trans.transformers_[-1][0] != "remainder"
525
+
526
+ col_trans = ColumnTransformer(
527
+ [("trans1", Trans(), [0]), ("trans2", SparseMatrixTrans(csr_container), 1)],
528
+ sparse_threshold=0.1,
529
+ )
530
+ col_trans.fit(X_array)
531
+ X_trans = col_trans.transform(X_array)
532
+ assert not sparse.issparse(X_trans)
533
+ assert X_trans.shape == (X_trans.shape[0], X_trans.shape[0] + 1)
534
+ assert_array_equal(X_trans[:, 1:], np.eye(X_trans.shape[0]))
535
+
536
+
537
+ def test_column_transformer_mixed_cols_sparse():
538
+ df = np.array([["a", 1, True], ["b", 2, False]], dtype="O")
539
+
540
+ ct = make_column_transformer(
541
+ (OneHotEncoder(), [0]), ("passthrough", [1, 2]), sparse_threshold=1.0
542
+ )
543
+
544
+ # this shouldn't fail, since boolean can be coerced into a numeric
545
+ # See: https://github.com/scikit-learn/scikit-learn/issues/11912
546
+ X_trans = ct.fit_transform(df)
547
+ assert X_trans.getformat() == "csr"
548
+ assert_array_equal(X_trans.toarray(), np.array([[1, 0, 1, 1], [0, 1, 2, 0]]))
549
+
550
+ ct = make_column_transformer(
551
+ (OneHotEncoder(), [0]), ("passthrough", [0]), sparse_threshold=1.0
552
+ )
553
+ with pytest.raises(ValueError, match="For a sparse output, all columns should"):
554
+ # this fails since strings `a` and `b` cannot be
555
+ # coerced into a numeric.
556
+ ct.fit_transform(df)
557
+
558
+
559
+ def test_column_transformer_sparse_threshold():
560
+ X_array = np.array([["a", "b"], ["A", "B"]], dtype=object).T
561
+ # above data has sparsity of 4 / 8 = 0.5
562
+
563
+ # apply threshold even if all sparse
564
+ col_trans = ColumnTransformer(
565
+ [("trans1", OneHotEncoder(), [0]), ("trans2", OneHotEncoder(), [1])],
566
+ sparse_threshold=0.2,
567
+ )
568
+ res = col_trans.fit_transform(X_array)
569
+ assert not sparse.issparse(res)
570
+ assert not col_trans.sparse_output_
571
+
572
+ # mixed -> sparsity of (4 + 2) / 8 = 0.75
573
+ for thres in [0.75001, 1]:
574
+ col_trans = ColumnTransformer(
575
+ [
576
+ ("trans1", OneHotEncoder(sparse_output=True), [0]),
577
+ ("trans2", OneHotEncoder(sparse_output=False), [1]),
578
+ ],
579
+ sparse_threshold=thres,
580
+ )
581
+ res = col_trans.fit_transform(X_array)
582
+ assert sparse.issparse(res)
583
+ assert col_trans.sparse_output_
584
+
585
+ for thres in [0.75, 0]:
586
+ col_trans = ColumnTransformer(
587
+ [
588
+ ("trans1", OneHotEncoder(sparse_output=True), [0]),
589
+ ("trans2", OneHotEncoder(sparse_output=False), [1]),
590
+ ],
591
+ sparse_threshold=thres,
592
+ )
593
+ res = col_trans.fit_transform(X_array)
594
+ assert not sparse.issparse(res)
595
+ assert not col_trans.sparse_output_
596
+
597
+ # if nothing is sparse -> no sparse
598
+ for thres in [0.33, 0, 1]:
599
+ col_trans = ColumnTransformer(
600
+ [
601
+ ("trans1", OneHotEncoder(sparse_output=False), [0]),
602
+ ("trans2", OneHotEncoder(sparse_output=False), [1]),
603
+ ],
604
+ sparse_threshold=thres,
605
+ )
606
+ res = col_trans.fit_transform(X_array)
607
+ assert not sparse.issparse(res)
608
+ assert not col_trans.sparse_output_
609
+
610
+
611
+ def test_column_transformer_error_msg_1D():
612
+ X_array = np.array([[0.0, 1.0, 2.0], [2.0, 4.0, 6.0]]).T
613
+
614
+ col_trans = ColumnTransformer([("trans", StandardScaler(), 0)])
615
+ msg = "1D data passed to a transformer"
616
+ with pytest.raises(ValueError, match=msg):
617
+ col_trans.fit(X_array)
618
+
619
+ with pytest.raises(ValueError, match=msg):
620
+ col_trans.fit_transform(X_array)
621
+
622
+ col_trans = ColumnTransformer([("trans", TransRaise(), 0)])
623
+ for func in [col_trans.fit, col_trans.fit_transform]:
624
+ with pytest.raises(ValueError, match="specific message"):
625
+ func(X_array)
626
+
627
+
628
+ def test_2D_transformer_output():
629
+ X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
630
+
631
+ # if one transformer is dropped, test that name is still correct
632
+ ct = ColumnTransformer([("trans1", "drop", 0), ("trans2", TransNo2D(), 1)])
633
+
634
+ msg = "the 'trans2' transformer should be 2D"
635
+ with pytest.raises(ValueError, match=msg):
636
+ ct.fit_transform(X_array)
637
+ # because fit is also doing transform, this raises already on fit
638
+ with pytest.raises(ValueError, match=msg):
639
+ ct.fit(X_array)
640
+
641
+
642
+ def test_2D_transformer_output_pandas():
643
+ pd = pytest.importorskip("pandas")
644
+
645
+ X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
646
+ X_df = pd.DataFrame(X_array, columns=["col1", "col2"])
647
+
648
+ # if one transformer is dropped, test that name is still correct
649
+ ct = ColumnTransformer([("trans1", TransNo2D(), "col1")])
650
+ msg = "the 'trans1' transformer should be 2D"
651
+ with pytest.raises(ValueError, match=msg):
652
+ ct.fit_transform(X_df)
653
+ # because fit is also doing transform, this raises already on fit
654
+ with pytest.raises(ValueError, match=msg):
655
+ ct.fit(X_df)
656
+
657
+
658
+ @pytest.mark.parametrize("remainder", ["drop", "passthrough"])
659
+ def test_column_transformer_invalid_columns(remainder):
660
+ X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
661
+
662
+ # general invalid
663
+ for col in [1.5, ["string", 1], slice(1, "s"), np.array([1.0])]:
664
+ ct = ColumnTransformer([("trans", Trans(), col)], remainder=remainder)
665
+ with pytest.raises(ValueError, match="No valid specification"):
666
+ ct.fit(X_array)
667
+
668
+ # invalid for arrays
669
+ for col in ["string", ["string", "other"], slice("a", "b")]:
670
+ ct = ColumnTransformer([("trans", Trans(), col)], remainder=remainder)
671
+ with pytest.raises(ValueError, match="Specifying the columns"):
672
+ ct.fit(X_array)
673
+
674
+ # transformed n_features does not match fitted n_features
675
+ col = [0, 1]
676
+ ct = ColumnTransformer([("trans", Trans(), col)], remainder=remainder)
677
+ ct.fit(X_array)
678
+ X_array_more = np.array([[0, 1, 2], [2, 4, 6], [3, 6, 9]]).T
679
+ msg = "X has 3 features, but ColumnTransformer is expecting 2 features as input."
680
+ with pytest.raises(ValueError, match=msg):
681
+ ct.transform(X_array_more)
682
+ X_array_fewer = np.array(
683
+ [
684
+ [0, 1, 2],
685
+ ]
686
+ ).T
687
+ err_msg = (
688
+ "X has 1 features, but ColumnTransformer is expecting 2 features as input."
689
+ )
690
+ with pytest.raises(ValueError, match=err_msg):
691
+ ct.transform(X_array_fewer)
692
+
693
+
694
+ def test_column_transformer_invalid_transformer():
695
+ class NoTrans(BaseEstimator):
696
+ def fit(self, X, y=None):
697
+ return self
698
+
699
+ def predict(self, X):
700
+ return X
701
+
702
+ X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
703
+ ct = ColumnTransformer([("trans", NoTrans(), [0])])
704
+ msg = "All estimators should implement fit and transform"
705
+ with pytest.raises(TypeError, match=msg):
706
+ ct.fit(X_array)
707
+
708
+
709
+ def test_make_column_transformer():
710
+ scaler = StandardScaler()
711
+ norm = Normalizer()
712
+ ct = make_column_transformer((scaler, "first"), (norm, ["second"]))
713
+ names, transformers, columns = zip(*ct.transformers)
714
+ assert names == ("standardscaler", "normalizer")
715
+ assert transformers == (scaler, norm)
716
+ assert columns == ("first", ["second"])
717
+
718
+
719
+ def test_make_column_transformer_pandas():
720
+ pd = pytest.importorskip("pandas")
721
+ X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
722
+ X_df = pd.DataFrame(X_array, columns=["first", "second"])
723
+ norm = Normalizer()
724
+ ct1 = ColumnTransformer([("norm", Normalizer(), X_df.columns)])
725
+ ct2 = make_column_transformer((norm, X_df.columns))
726
+ assert_almost_equal(ct1.fit_transform(X_df), ct2.fit_transform(X_df))
727
+
728
+
729
+ def test_make_column_transformer_kwargs():
730
+ scaler = StandardScaler()
731
+ norm = Normalizer()
732
+ ct = make_column_transformer(
733
+ (scaler, "first"),
734
+ (norm, ["second"]),
735
+ n_jobs=3,
736
+ remainder="drop",
737
+ sparse_threshold=0.5,
738
+ )
739
+ assert (
740
+ ct.transformers
741
+ == make_column_transformer((scaler, "first"), (norm, ["second"])).transformers
742
+ )
743
+ assert ct.n_jobs == 3
744
+ assert ct.remainder == "drop"
745
+ assert ct.sparse_threshold == 0.5
746
+ # invalid keyword parameters should raise an error message
747
+ msg = re.escape(
748
+ "make_column_transformer() got an unexpected "
749
+ "keyword argument 'transformer_weights'"
750
+ )
751
+ with pytest.raises(TypeError, match=msg):
752
+ make_column_transformer(
753
+ (scaler, "first"),
754
+ (norm, ["second"]),
755
+ transformer_weights={"pca": 10, "Transf": 1},
756
+ )
757
+
758
+
759
+ def test_make_column_transformer_remainder_transformer():
760
+ scaler = StandardScaler()
761
+ norm = Normalizer()
762
+ remainder = StandardScaler()
763
+ ct = make_column_transformer(
764
+ (scaler, "first"), (norm, ["second"]), remainder=remainder
765
+ )
766
+ assert ct.remainder == remainder
767
+
768
+
769
+ def test_column_transformer_get_set_params():
770
+ ct = ColumnTransformer(
771
+ [("trans1", StandardScaler(), [0]), ("trans2", StandardScaler(), [1])]
772
+ )
773
+
774
+ exp = {
775
+ "n_jobs": None,
776
+ "remainder": "drop",
777
+ "sparse_threshold": 0.3,
778
+ "trans1": ct.transformers[0][1],
779
+ "trans1__copy": True,
780
+ "trans1__with_mean": True,
781
+ "trans1__with_std": True,
782
+ "trans2": ct.transformers[1][1],
783
+ "trans2__copy": True,
784
+ "trans2__with_mean": True,
785
+ "trans2__with_std": True,
786
+ "transformers": ct.transformers,
787
+ "transformer_weights": None,
788
+ "verbose_feature_names_out": True,
789
+ "verbose": False,
790
+ }
791
+
792
+ assert ct.get_params() == exp
793
+
794
+ ct.set_params(trans1__with_mean=False)
795
+ assert not ct.get_params()["trans1__with_mean"]
796
+
797
+ ct.set_params(trans1="passthrough")
798
+ exp = {
799
+ "n_jobs": None,
800
+ "remainder": "drop",
801
+ "sparse_threshold": 0.3,
802
+ "trans1": "passthrough",
803
+ "trans2": ct.transformers[1][1],
804
+ "trans2__copy": True,
805
+ "trans2__with_mean": True,
806
+ "trans2__with_std": True,
807
+ "transformers": ct.transformers,
808
+ "transformer_weights": None,
809
+ "verbose_feature_names_out": True,
810
+ "verbose": False,
811
+ }
812
+
813
+ assert ct.get_params() == exp
814
+
815
+
816
+ def test_column_transformer_named_estimators():
817
+ X_array = np.array([[0.0, 1.0, 2.0], [2.0, 4.0, 6.0]]).T
818
+ ct = ColumnTransformer(
819
+ [
820
+ ("trans1", StandardScaler(), [0]),
821
+ ("trans2", StandardScaler(with_std=False), [1]),
822
+ ]
823
+ )
824
+ assert not hasattr(ct, "transformers_")
825
+ ct.fit(X_array)
826
+ assert hasattr(ct, "transformers_")
827
+ assert isinstance(ct.named_transformers_["trans1"], StandardScaler)
828
+ assert isinstance(ct.named_transformers_.trans1, StandardScaler)
829
+ assert isinstance(ct.named_transformers_["trans2"], StandardScaler)
830
+ assert isinstance(ct.named_transformers_.trans2, StandardScaler)
831
+ assert not ct.named_transformers_.trans2.with_std
832
+ # check it are fitted transformers
833
+ assert ct.named_transformers_.trans1.mean_ == 1.0
834
+
835
+
836
+ def test_column_transformer_cloning():
837
+ X_array = np.array([[0.0, 1.0, 2.0], [2.0, 4.0, 6.0]]).T
838
+
839
+ ct = ColumnTransformer([("trans", StandardScaler(), [0])])
840
+ ct.fit(X_array)
841
+ assert not hasattr(ct.transformers[0][1], "mean_")
842
+ assert hasattr(ct.transformers_[0][1], "mean_")
843
+
844
+ ct = ColumnTransformer([("trans", StandardScaler(), [0])])
845
+ ct.fit_transform(X_array)
846
+ assert not hasattr(ct.transformers[0][1], "mean_")
847
+ assert hasattr(ct.transformers_[0][1], "mean_")
848
+
849
+
850
+ def test_column_transformer_get_feature_names():
851
+ X_array = np.array([[0.0, 1.0, 2.0], [2.0, 4.0, 6.0]]).T
852
+ ct = ColumnTransformer([("trans", Trans(), [0, 1])])
853
+ # raise correct error when not fitted
854
+ with pytest.raises(NotFittedError):
855
+ ct.get_feature_names_out()
856
+ # raise correct error when no feature names are available
857
+ ct.fit(X_array)
858
+ msg = re.escape(
859
+ "Transformer trans (type Trans) does not provide get_feature_names_out"
860
+ )
861
+ with pytest.raises(AttributeError, match=msg):
862
+ ct.get_feature_names_out()
863
+
864
+
865
+ def test_column_transformer_special_strings():
866
+ # one 'drop' -> ignore
867
+ X_array = np.array([[0.0, 1.0, 2.0], [2.0, 4.0, 6.0]]).T
868
+ ct = ColumnTransformer([("trans1", Trans(), [0]), ("trans2", "drop", [1])])
869
+ exp = np.array([[0.0], [1.0], [2.0]])
870
+ assert_array_equal(ct.fit_transform(X_array), exp)
871
+ assert_array_equal(ct.fit(X_array).transform(X_array), exp)
872
+ assert len(ct.transformers_) == 2
873
+ assert ct.transformers_[-1][0] != "remainder"
874
+
875
+ # all 'drop' -> return shape 0 array
876
+ ct = ColumnTransformer([("trans1", "drop", [0]), ("trans2", "drop", [1])])
877
+ assert_array_equal(ct.fit(X_array).transform(X_array).shape, (3, 0))
878
+ assert_array_equal(ct.fit_transform(X_array).shape, (3, 0))
879
+ assert len(ct.transformers_) == 2
880
+ assert ct.transformers_[-1][0] != "remainder"
881
+
882
+ # 'passthrough'
883
+ X_array = np.array([[0.0, 1.0, 2.0], [2.0, 4.0, 6.0]]).T
884
+ ct = ColumnTransformer([("trans1", Trans(), [0]), ("trans2", "passthrough", [1])])
885
+ exp = X_array
886
+ assert_array_equal(ct.fit_transform(X_array), exp)
887
+ assert_array_equal(ct.fit(X_array).transform(X_array), exp)
888
+ assert len(ct.transformers_) == 2
889
+ assert ct.transformers_[-1][0] != "remainder"
890
+
891
+
892
+ def test_column_transformer_remainder():
893
+ X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
894
+
895
+ X_res_first = np.array([0, 1, 2]).reshape(-1, 1)
896
+ X_res_second = np.array([2, 4, 6]).reshape(-1, 1)
897
+ X_res_both = X_array
898
+
899
+ # default drop
900
+ ct = ColumnTransformer([("trans1", Trans(), [0])])
901
+ assert_array_equal(ct.fit_transform(X_array), X_res_first)
902
+ assert_array_equal(ct.fit(X_array).transform(X_array), X_res_first)
903
+ assert len(ct.transformers_) == 2
904
+ assert ct.transformers_[-1][0] == "remainder"
905
+ assert ct.transformers_[-1][1] == "drop"
906
+ assert_array_equal(ct.transformers_[-1][2], [1])
907
+
908
+ # specify passthrough
909
+ ct = ColumnTransformer([("trans", Trans(), [0])], remainder="passthrough")
910
+ assert_array_equal(ct.fit_transform(X_array), X_res_both)
911
+ assert_array_equal(ct.fit(X_array).transform(X_array), X_res_both)
912
+ assert len(ct.transformers_) == 2
913
+ assert ct.transformers_[-1][0] == "remainder"
914
+ assert isinstance(ct.transformers_[-1][1], FunctionTransformer)
915
+ assert_array_equal(ct.transformers_[-1][2], [1])
916
+
917
+ # column order is not preserved (passed through added to end)
918
+ ct = ColumnTransformer([("trans1", Trans(), [1])], remainder="passthrough")
919
+ assert_array_equal(ct.fit_transform(X_array), X_res_both[:, ::-1])
920
+ assert_array_equal(ct.fit(X_array).transform(X_array), X_res_both[:, ::-1])
921
+ assert len(ct.transformers_) == 2
922
+ assert ct.transformers_[-1][0] == "remainder"
923
+ assert isinstance(ct.transformers_[-1][1], FunctionTransformer)
924
+ assert_array_equal(ct.transformers_[-1][2], [0])
925
+
926
+ # passthrough when all actual transformers are skipped
927
+ ct = ColumnTransformer([("trans1", "drop", [0])], remainder="passthrough")
928
+ assert_array_equal(ct.fit_transform(X_array), X_res_second)
929
+ assert_array_equal(ct.fit(X_array).transform(X_array), X_res_second)
930
+ assert len(ct.transformers_) == 2
931
+ assert ct.transformers_[-1][0] == "remainder"
932
+ assert isinstance(ct.transformers_[-1][1], FunctionTransformer)
933
+ assert_array_equal(ct.transformers_[-1][2], [1])
934
+
935
+ # check default for make_column_transformer
936
+ ct = make_column_transformer((Trans(), [0]))
937
+ assert ct.remainder == "drop"
938
+
939
+
940
+ @pytest.mark.parametrize(
941
+ "key", [[0], np.array([0]), slice(0, 1), np.array([True, False])]
942
+ )
943
+ def test_column_transformer_remainder_numpy(key):
944
+ # test different ways that columns are specified with passthrough
945
+ X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
946
+ X_res_both = X_array
947
+
948
+ ct = ColumnTransformer([("trans1", Trans(), key)], remainder="passthrough")
949
+ assert_array_equal(ct.fit_transform(X_array), X_res_both)
950
+ assert_array_equal(ct.fit(X_array).transform(X_array), X_res_both)
951
+ assert len(ct.transformers_) == 2
952
+ assert ct.transformers_[-1][0] == "remainder"
953
+ assert isinstance(ct.transformers_[-1][1], FunctionTransformer)
954
+ assert_array_equal(ct.transformers_[-1][2], [1])
955
+
956
+
957
+ @pytest.mark.parametrize(
958
+ "key",
959
+ [
960
+ [0],
961
+ slice(0, 1),
962
+ np.array([True, False]),
963
+ ["first"],
964
+ "pd-index",
965
+ np.array(["first"]),
966
+ np.array(["first"], dtype=object),
967
+ slice(None, "first"),
968
+ slice("first", "first"),
969
+ ],
970
+ )
971
+ def test_column_transformer_remainder_pandas(key):
972
+ # test different ways that columns are specified with passthrough
973
+ pd = pytest.importorskip("pandas")
974
+ if isinstance(key, str) and key == "pd-index":
975
+ key = pd.Index(["first"])
976
+
977
+ X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
978
+ X_df = pd.DataFrame(X_array, columns=["first", "second"])
979
+ X_res_both = X_array
980
+
981
+ ct = ColumnTransformer([("trans1", Trans(), key)], remainder="passthrough")
982
+ assert_array_equal(ct.fit_transform(X_df), X_res_both)
983
+ assert_array_equal(ct.fit(X_df).transform(X_df), X_res_both)
984
+ assert len(ct.transformers_) == 2
985
+ assert ct.transformers_[-1][0] == "remainder"
986
+ assert isinstance(ct.transformers_[-1][1], FunctionTransformer)
987
+ assert_array_equal(ct.transformers_[-1][2], [1])
988
+
989
+
990
+ @pytest.mark.parametrize(
991
+ "key", [[0], np.array([0]), slice(0, 1), np.array([True, False, False])]
992
+ )
993
+ def test_column_transformer_remainder_transformer(key):
994
+ X_array = np.array([[0, 1, 2], [2, 4, 6], [8, 6, 4]]).T
995
+ X_res_both = X_array.copy()
996
+
997
+ # second and third columns are doubled when remainder = DoubleTrans
998
+ X_res_both[:, 1:3] *= 2
999
+
1000
+ ct = ColumnTransformer([("trans1", Trans(), key)], remainder=DoubleTrans())
1001
+
1002
+ assert_array_equal(ct.fit_transform(X_array), X_res_both)
1003
+ assert_array_equal(ct.fit(X_array).transform(X_array), X_res_both)
1004
+ assert len(ct.transformers_) == 2
1005
+ assert ct.transformers_[-1][0] == "remainder"
1006
+ assert isinstance(ct.transformers_[-1][1], DoubleTrans)
1007
+ assert_array_equal(ct.transformers_[-1][2], [1, 2])
1008
+
1009
+
1010
+ def test_column_transformer_no_remaining_remainder_transformer():
1011
+ X_array = np.array([[0, 1, 2], [2, 4, 6], [8, 6, 4]]).T
1012
+
1013
+ ct = ColumnTransformer([("trans1", Trans(), [0, 1, 2])], remainder=DoubleTrans())
1014
+
1015
+ assert_array_equal(ct.fit_transform(X_array), X_array)
1016
+ assert_array_equal(ct.fit(X_array).transform(X_array), X_array)
1017
+ assert len(ct.transformers_) == 1
1018
+ assert ct.transformers_[-1][0] != "remainder"
1019
+
1020
+
1021
+ def test_column_transformer_drops_all_remainder_transformer():
1022
+ X_array = np.array([[0, 1, 2], [2, 4, 6], [8, 6, 4]]).T
1023
+
1024
+ # columns are doubled when remainder = DoubleTrans
1025
+ X_res_both = 2 * X_array.copy()[:, 1:3]
1026
+
1027
+ ct = ColumnTransformer([("trans1", "drop", [0])], remainder=DoubleTrans())
1028
+
1029
+ assert_array_equal(ct.fit_transform(X_array), X_res_both)
1030
+ assert_array_equal(ct.fit(X_array).transform(X_array), X_res_both)
1031
+ assert len(ct.transformers_) == 2
1032
+ assert ct.transformers_[-1][0] == "remainder"
1033
+ assert isinstance(ct.transformers_[-1][1], DoubleTrans)
1034
+ assert_array_equal(ct.transformers_[-1][2], [1, 2])
1035
+
1036
+
1037
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
1038
+ def test_column_transformer_sparse_remainder_transformer(csr_container):
1039
+ X_array = np.array([[0, 1, 2], [2, 4, 6], [8, 6, 4]]).T
1040
+
1041
+ ct = ColumnTransformer(
1042
+ [("trans1", Trans(), [0])],
1043
+ remainder=SparseMatrixTrans(csr_container),
1044
+ sparse_threshold=0.8,
1045
+ )
1046
+
1047
+ X_trans = ct.fit_transform(X_array)
1048
+ assert sparse.issparse(X_trans)
1049
+ # SparseMatrixTrans creates 3 features for each column. There is
1050
+ # one column in ``transformers``, thus:
1051
+ assert X_trans.shape == (3, 3 + 1)
1052
+
1053
+ exp_array = np.hstack((X_array[:, 0].reshape(-1, 1), np.eye(3)))
1054
+ assert_array_equal(X_trans.toarray(), exp_array)
1055
+ assert len(ct.transformers_) == 2
1056
+ assert ct.transformers_[-1][0] == "remainder"
1057
+ assert isinstance(ct.transformers_[-1][1], SparseMatrixTrans)
1058
+ assert_array_equal(ct.transformers_[-1][2], [1, 2])
1059
+
1060
+
1061
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
1062
+ def test_column_transformer_drop_all_sparse_remainder_transformer(csr_container):
1063
+ X_array = np.array([[0, 1, 2], [2, 4, 6], [8, 6, 4]]).T
1064
+ ct = ColumnTransformer(
1065
+ [("trans1", "drop", [0])],
1066
+ remainder=SparseMatrixTrans(csr_container),
1067
+ sparse_threshold=0.8,
1068
+ )
1069
+
1070
+ X_trans = ct.fit_transform(X_array)
1071
+ assert sparse.issparse(X_trans)
1072
+
1073
+ # SparseMatrixTrans creates 3 features for each column, thus:
1074
+ assert X_trans.shape == (3, 3)
1075
+ assert_array_equal(X_trans.toarray(), np.eye(3))
1076
+ assert len(ct.transformers_) == 2
1077
+ assert ct.transformers_[-1][0] == "remainder"
1078
+ assert isinstance(ct.transformers_[-1][1], SparseMatrixTrans)
1079
+ assert_array_equal(ct.transformers_[-1][2], [1, 2])
1080
+
1081
+
1082
+ def test_column_transformer_get_set_params_with_remainder():
1083
+ ct = ColumnTransformer(
1084
+ [("trans1", StandardScaler(), [0])], remainder=StandardScaler()
1085
+ )
1086
+
1087
+ exp = {
1088
+ "n_jobs": None,
1089
+ "remainder": ct.remainder,
1090
+ "remainder__copy": True,
1091
+ "remainder__with_mean": True,
1092
+ "remainder__with_std": True,
1093
+ "sparse_threshold": 0.3,
1094
+ "trans1": ct.transformers[0][1],
1095
+ "trans1__copy": True,
1096
+ "trans1__with_mean": True,
1097
+ "trans1__with_std": True,
1098
+ "transformers": ct.transformers,
1099
+ "transformer_weights": None,
1100
+ "verbose_feature_names_out": True,
1101
+ "verbose": False,
1102
+ }
1103
+
1104
+ assert ct.get_params() == exp
1105
+
1106
+ ct.set_params(remainder__with_std=False)
1107
+ assert not ct.get_params()["remainder__with_std"]
1108
+
1109
+ ct.set_params(trans1="passthrough")
1110
+ exp = {
1111
+ "n_jobs": None,
1112
+ "remainder": ct.remainder,
1113
+ "remainder__copy": True,
1114
+ "remainder__with_mean": True,
1115
+ "remainder__with_std": False,
1116
+ "sparse_threshold": 0.3,
1117
+ "trans1": "passthrough",
1118
+ "transformers": ct.transformers,
1119
+ "transformer_weights": None,
1120
+ "verbose_feature_names_out": True,
1121
+ "verbose": False,
1122
+ }
1123
+ assert ct.get_params() == exp
1124
+
1125
+
1126
+ def test_column_transformer_no_estimators():
1127
+ X_array = np.array([[0, 1, 2], [2, 4, 6], [8, 6, 4]]).astype("float").T
1128
+ ct = ColumnTransformer([], remainder=StandardScaler())
1129
+
1130
+ params = ct.get_params()
1131
+ assert params["remainder__with_mean"]
1132
+
1133
+ X_trans = ct.fit_transform(X_array)
1134
+ assert X_trans.shape == X_array.shape
1135
+ assert len(ct.transformers_) == 1
1136
+ assert ct.transformers_[-1][0] == "remainder"
1137
+ assert ct.transformers_[-1][2] == [0, 1, 2]
1138
+
1139
+
1140
+ @pytest.mark.parametrize(
1141
+ ["est", "pattern"],
1142
+ [
1143
+ (
1144
+ ColumnTransformer(
1145
+ [("trans1", Trans(), [0]), ("trans2", Trans(), [1])],
1146
+ remainder=DoubleTrans(),
1147
+ ),
1148
+ (
1149
+ r"\[ColumnTransformer\].*\(1 of 3\) Processing trans1.* total=.*\n"
1150
+ r"\[ColumnTransformer\].*\(2 of 3\) Processing trans2.* total=.*\n"
1151
+ r"\[ColumnTransformer\].*\(3 of 3\) Processing remainder.* total=.*\n$"
1152
+ ),
1153
+ ),
1154
+ (
1155
+ ColumnTransformer(
1156
+ [("trans1", Trans(), [0]), ("trans2", Trans(), [1])],
1157
+ remainder="passthrough",
1158
+ ),
1159
+ (
1160
+ r"\[ColumnTransformer\].*\(1 of 3\) Processing trans1.* total=.*\n"
1161
+ r"\[ColumnTransformer\].*\(2 of 3\) Processing trans2.* total=.*\n"
1162
+ r"\[ColumnTransformer\].*\(3 of 3\) Processing remainder.* total=.*\n$"
1163
+ ),
1164
+ ),
1165
+ (
1166
+ ColumnTransformer(
1167
+ [("trans1", Trans(), [0]), ("trans2", "drop", [1])],
1168
+ remainder="passthrough",
1169
+ ),
1170
+ (
1171
+ r"\[ColumnTransformer\].*\(1 of 2\) Processing trans1.* total=.*\n"
1172
+ r"\[ColumnTransformer\].*\(2 of 2\) Processing remainder.* total=.*\n$"
1173
+ ),
1174
+ ),
1175
+ (
1176
+ ColumnTransformer(
1177
+ [("trans1", Trans(), [0]), ("trans2", "passthrough", [1])],
1178
+ remainder="passthrough",
1179
+ ),
1180
+ (
1181
+ r"\[ColumnTransformer\].*\(1 of 3\) Processing trans1.* total=.*\n"
1182
+ r"\[ColumnTransformer\].*\(2 of 3\) Processing trans2.* total=.*\n"
1183
+ r"\[ColumnTransformer\].*\(3 of 3\) Processing remainder.* total=.*\n$"
1184
+ ),
1185
+ ),
1186
+ (
1187
+ ColumnTransformer([("trans1", Trans(), [0])], remainder="passthrough"),
1188
+ (
1189
+ r"\[ColumnTransformer\].*\(1 of 2\) Processing trans1.* total=.*\n"
1190
+ r"\[ColumnTransformer\].*\(2 of 2\) Processing remainder.* total=.*\n$"
1191
+ ),
1192
+ ),
1193
+ (
1194
+ ColumnTransformer(
1195
+ [("trans1", Trans(), [0]), ("trans2", Trans(), [1])], remainder="drop"
1196
+ ),
1197
+ (
1198
+ r"\[ColumnTransformer\].*\(1 of 2\) Processing trans1.* total=.*\n"
1199
+ r"\[ColumnTransformer\].*\(2 of 2\) Processing trans2.* total=.*\n$"
1200
+ ),
1201
+ ),
1202
+ (
1203
+ ColumnTransformer([("trans1", Trans(), [0])], remainder="drop"),
1204
+ r"\[ColumnTransformer\].*\(1 of 1\) Processing trans1.* total=.*\n$",
1205
+ ),
1206
+ ],
1207
+ )
1208
+ @pytest.mark.parametrize("method", ["fit", "fit_transform"])
1209
+ def test_column_transformer_verbose(est, pattern, method, capsys):
1210
+ X_array = np.array([[0, 1, 2], [2, 4, 6], [8, 6, 4]]).T
1211
+
1212
+ func = getattr(est, method)
1213
+ est.set_params(verbose=False)
1214
+ func(X_array)
1215
+ assert not capsys.readouterr().out, "Got output for verbose=False"
1216
+
1217
+ est.set_params(verbose=True)
1218
+ func(X_array)
1219
+ assert re.match(pattern, capsys.readouterr()[0])
1220
+
1221
+
1222
+ def test_column_transformer_no_estimators_set_params():
1223
+ ct = ColumnTransformer([]).set_params(n_jobs=2)
1224
+ assert ct.n_jobs == 2
1225
+
1226
+
1227
+ def test_column_transformer_callable_specifier():
1228
+ # assert that function gets the full array
1229
+ X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
1230
+ X_res_first = np.array([[0, 1, 2]]).T
1231
+
1232
+ def func(X):
1233
+ assert_array_equal(X, X_array)
1234
+ return [0]
1235
+
1236
+ ct = ColumnTransformer([("trans", Trans(), func)], remainder="drop")
1237
+ assert_array_equal(ct.fit_transform(X_array), X_res_first)
1238
+ assert_array_equal(ct.fit(X_array).transform(X_array), X_res_first)
1239
+ assert callable(ct.transformers[0][2])
1240
+ assert ct.transformers_[0][2] == [0]
1241
+
1242
+
1243
+ def test_column_transformer_callable_specifier_dataframe():
1244
+ # assert that function gets the full dataframe
1245
+ pd = pytest.importorskip("pandas")
1246
+ X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
1247
+ X_res_first = np.array([[0, 1, 2]]).T
1248
+
1249
+ X_df = pd.DataFrame(X_array, columns=["first", "second"])
1250
+
1251
+ def func(X):
1252
+ assert_array_equal(X.columns, X_df.columns)
1253
+ assert_array_equal(X.values, X_df.values)
1254
+ return ["first"]
1255
+
1256
+ ct = ColumnTransformer([("trans", Trans(), func)], remainder="drop")
1257
+ assert_array_equal(ct.fit_transform(X_df), X_res_first)
1258
+ assert_array_equal(ct.fit(X_df).transform(X_df), X_res_first)
1259
+ assert callable(ct.transformers[0][2])
1260
+ assert ct.transformers_[0][2] == ["first"]
1261
+
1262
+
1263
+ def test_column_transformer_negative_column_indexes():
1264
+ X = np.random.randn(2, 2)
1265
+ X_categories = np.array([[1], [2]])
1266
+ X = np.concatenate([X, X_categories], axis=1)
1267
+
1268
+ ohe = OneHotEncoder()
1269
+
1270
+ tf_1 = ColumnTransformer([("ohe", ohe, [-1])], remainder="passthrough")
1271
+ tf_2 = ColumnTransformer([("ohe", ohe, [2])], remainder="passthrough")
1272
+ assert_array_equal(tf_1.fit_transform(X), tf_2.fit_transform(X))
1273
+
1274
+
1275
+ @pytest.mark.parametrize("array_type", [np.asarray, *CSR_CONTAINERS])
1276
+ def test_column_transformer_mask_indexing(array_type):
1277
+ # Regression test for #14510
1278
+ # Boolean array-like does not behave as boolean array with sparse matrices.
1279
+ X = np.transpose([[1, 2, 3], [4, 5, 6], [5, 6, 7], [8, 9, 10]])
1280
+ X = array_type(X)
1281
+ column_transformer = ColumnTransformer(
1282
+ [("identity", FunctionTransformer(), [False, True, False, True])]
1283
+ )
1284
+ X_trans = column_transformer.fit_transform(X)
1285
+ assert X_trans.shape == (3, 2)
1286
+
1287
+
1288
+ def test_n_features_in():
1289
+ # make sure n_features_in_ matches the number of features passed to the column
1290
+ # transformer.
1291
+
1292
+ X = [[1, 2], [3, 4], [5, 6]]
1293
+ ct = ColumnTransformer([("a", DoubleTrans(), [0]), ("b", DoubleTrans(), [1])])
1294
+ assert not hasattr(ct, "n_features_in_")
1295
+ ct.fit(X)
1296
+ assert ct.n_features_in_ == 2
1297
+
1298
+
1299
+ @pytest.mark.parametrize(
1300
+ "cols, pattern, include, exclude",
1301
+ [
1302
+ (["col_int", "col_float"], None, np.number, None),
1303
+ (["col_int", "col_float"], None, None, object),
1304
+ (["col_int", "col_float"], None, [int, float], None),
1305
+ (["col_str"], None, [object], None),
1306
+ (["col_str"], None, object, None),
1307
+ (["col_float"], None, float, None),
1308
+ (["col_float"], "at$", [np.number], None),
1309
+ (["col_int"], None, [int], None),
1310
+ (["col_int"], "^col_int", [np.number], None),
1311
+ (["col_float", "col_str"], "float|str", None, None),
1312
+ (["col_str"], "^col_s", None, [int]),
1313
+ ([], "str$", float, None),
1314
+ (["col_int", "col_float", "col_str"], None, [np.number, object], None),
1315
+ ],
1316
+ )
1317
+ def test_make_column_selector_with_select_dtypes(cols, pattern, include, exclude):
1318
+ pd = pytest.importorskip("pandas")
1319
+
1320
+ X_df = pd.DataFrame(
1321
+ {
1322
+ "col_int": np.array([0, 1, 2], dtype=int),
1323
+ "col_float": np.array([0.0, 1.0, 2.0], dtype=float),
1324
+ "col_str": ["one", "two", "three"],
1325
+ },
1326
+ columns=["col_int", "col_float", "col_str"],
1327
+ )
1328
+
1329
+ selector = make_column_selector(
1330
+ dtype_include=include, dtype_exclude=exclude, pattern=pattern
1331
+ )
1332
+
1333
+ assert_array_equal(selector(X_df), cols)
1334
+
1335
+
1336
+ def test_column_transformer_with_make_column_selector():
1337
+ # Functional test for column transformer + column selector
1338
+ pd = pytest.importorskip("pandas")
1339
+ X_df = pd.DataFrame(
1340
+ {
1341
+ "col_int": np.array([0, 1, 2], dtype=int),
1342
+ "col_float": np.array([0.0, 1.0, 2.0], dtype=float),
1343
+ "col_cat": ["one", "two", "one"],
1344
+ "col_str": ["low", "middle", "high"],
1345
+ },
1346
+ columns=["col_int", "col_float", "col_cat", "col_str"],
1347
+ )
1348
+ X_df["col_str"] = X_df["col_str"].astype("category")
1349
+
1350
+ cat_selector = make_column_selector(dtype_include=["category", object])
1351
+ num_selector = make_column_selector(dtype_include=np.number)
1352
+
1353
+ ohe = OneHotEncoder()
1354
+ scaler = StandardScaler()
1355
+
1356
+ ct_selector = make_column_transformer((ohe, cat_selector), (scaler, num_selector))
1357
+ ct_direct = make_column_transformer(
1358
+ (ohe, ["col_cat", "col_str"]), (scaler, ["col_float", "col_int"])
1359
+ )
1360
+
1361
+ X_selector = ct_selector.fit_transform(X_df)
1362
+ X_direct = ct_direct.fit_transform(X_df)
1363
+
1364
+ assert_allclose(X_selector, X_direct)
1365
+
1366
+
1367
+ def test_make_column_selector_error():
1368
+ selector = make_column_selector(dtype_include=np.number)
1369
+ X = np.array([[0.1, 0.2]])
1370
+ msg = "make_column_selector can only be applied to pandas dataframes"
1371
+ with pytest.raises(ValueError, match=msg):
1372
+ selector(X)
1373
+
1374
+
1375
+ def test_make_column_selector_pickle():
1376
+ pd = pytest.importorskip("pandas")
1377
+
1378
+ X_df = pd.DataFrame(
1379
+ {
1380
+ "col_int": np.array([0, 1, 2], dtype=int),
1381
+ "col_float": np.array([0.0, 1.0, 2.0], dtype=float),
1382
+ "col_str": ["one", "two", "three"],
1383
+ },
1384
+ columns=["col_int", "col_float", "col_str"],
1385
+ )
1386
+
1387
+ selector = make_column_selector(dtype_include=[object])
1388
+ selector_picked = pickle.loads(pickle.dumps(selector))
1389
+
1390
+ assert_array_equal(selector(X_df), selector_picked(X_df))
1391
+
1392
+
1393
+ @pytest.mark.parametrize(
1394
+ "empty_col",
1395
+ [[], np.array([], dtype=int), lambda x: []],
1396
+ ids=["list", "array", "callable"],
1397
+ )
1398
+ def test_feature_names_empty_columns(empty_col):
1399
+ pd = pytest.importorskip("pandas")
1400
+
1401
+ df = pd.DataFrame({"col1": ["a", "a", "b"], "col2": ["z", "z", "z"]})
1402
+
1403
+ ct = ColumnTransformer(
1404
+ transformers=[
1405
+ ("ohe", OneHotEncoder(), ["col1", "col2"]),
1406
+ ("empty_features", OneHotEncoder(), empty_col),
1407
+ ],
1408
+ )
1409
+
1410
+ ct.fit(df)
1411
+ assert_array_equal(
1412
+ ct.get_feature_names_out(), ["ohe__col1_a", "ohe__col1_b", "ohe__col2_z"]
1413
+ )
1414
+
1415
+
1416
+ @pytest.mark.parametrize(
1417
+ "selector",
1418
+ [
1419
+ [1],
1420
+ lambda x: [1],
1421
+ ["col2"],
1422
+ lambda x: ["col2"],
1423
+ [False, True],
1424
+ lambda x: [False, True],
1425
+ ],
1426
+ )
1427
+ def test_feature_names_out_pandas(selector):
1428
+ """Checks name when selecting only the second column"""
1429
+ pd = pytest.importorskip("pandas")
1430
+ df = pd.DataFrame({"col1": ["a", "a", "b"], "col2": ["z", "z", "z"]})
1431
+ ct = ColumnTransformer([("ohe", OneHotEncoder(), selector)])
1432
+ ct.fit(df)
1433
+
1434
+ assert_array_equal(ct.get_feature_names_out(), ["ohe__col2_z"])
1435
+
1436
+
1437
+ @pytest.mark.parametrize(
1438
+ "selector", [[1], lambda x: [1], [False, True], lambda x: [False, True]]
1439
+ )
1440
+ def test_feature_names_out_non_pandas(selector):
1441
+ """Checks name when selecting the second column with numpy array"""
1442
+ X = [["a", "z"], ["a", "z"], ["b", "z"]]
1443
+ ct = ColumnTransformer([("ohe", OneHotEncoder(), selector)])
1444
+ ct.fit(X)
1445
+
1446
+ assert_array_equal(ct.get_feature_names_out(), ["ohe__x1_z"])
1447
+
1448
+
1449
+ @pytest.mark.parametrize("remainder", ["passthrough", StandardScaler()])
1450
+ def test_sk_visual_block_remainder(remainder):
1451
+ # remainder='passthrough' or an estimator will be shown in repr_html
1452
+ ohe = OneHotEncoder()
1453
+ ct = ColumnTransformer(
1454
+ transformers=[("ohe", ohe, ["col1", "col2"])], remainder=remainder
1455
+ )
1456
+ visual_block = ct._sk_visual_block_()
1457
+ assert visual_block.names == ("ohe", "remainder")
1458
+ assert visual_block.name_details == (["col1", "col2"], "")
1459
+ assert visual_block.estimators == (ohe, remainder)
1460
+
1461
+
1462
+ def test_sk_visual_block_remainder_drop():
1463
+ # remainder='drop' is not shown in repr_html
1464
+ ohe = OneHotEncoder()
1465
+ ct = ColumnTransformer(transformers=[("ohe", ohe, ["col1", "col2"])])
1466
+ visual_block = ct._sk_visual_block_()
1467
+ assert visual_block.names == ("ohe",)
1468
+ assert visual_block.name_details == (["col1", "col2"],)
1469
+ assert visual_block.estimators == (ohe,)
1470
+
1471
+
1472
+ @pytest.mark.parametrize("remainder", ["passthrough", StandardScaler()])
1473
+ def test_sk_visual_block_remainder_fitted_pandas(remainder):
1474
+ # Remainder shows the columns after fitting
1475
+ pd = pytest.importorskip("pandas")
1476
+ ohe = OneHotEncoder()
1477
+ ct = ColumnTransformer(
1478
+ transformers=[("ohe", ohe, ["col1", "col2"])], remainder=remainder
1479
+ )
1480
+ df = pd.DataFrame(
1481
+ {
1482
+ "col1": ["a", "b", "c"],
1483
+ "col2": ["z", "z", "z"],
1484
+ "col3": [1, 2, 3],
1485
+ "col4": [3, 4, 5],
1486
+ }
1487
+ )
1488
+ ct.fit(df)
1489
+ visual_block = ct._sk_visual_block_()
1490
+ assert visual_block.names == ("ohe", "remainder")
1491
+ assert visual_block.name_details == (["col1", "col2"], ["col3", "col4"])
1492
+ assert visual_block.estimators == (ohe, remainder)
1493
+
1494
+
1495
+ @pytest.mark.parametrize("remainder", ["passthrough", StandardScaler()])
1496
+ def test_sk_visual_block_remainder_fitted_numpy(remainder):
1497
+ # Remainder shows the indices after fitting
1498
+ X = np.array([[1, 2, 3], [4, 5, 6]], dtype=float)
1499
+ scaler = StandardScaler()
1500
+ ct = ColumnTransformer(
1501
+ transformers=[("scale", scaler, [0, 2])], remainder=remainder
1502
+ )
1503
+ ct.fit(X)
1504
+ visual_block = ct._sk_visual_block_()
1505
+ assert visual_block.names == ("scale", "remainder")
1506
+ assert visual_block.name_details == ([0, 2], [1])
1507
+ assert visual_block.estimators == (scaler, remainder)
1508
+
1509
+
1510
+ @pytest.mark.parametrize("explicit_colname", ["first", "second", 0, 1])
1511
+ @pytest.mark.parametrize("remainder", [Trans(), "passthrough", "drop"])
1512
+ def test_column_transformer_reordered_column_names_remainder(
1513
+ explicit_colname, remainder
1514
+ ):
1515
+ """Test the interaction between remainder and column transformer"""
1516
+ pd = pytest.importorskip("pandas")
1517
+
1518
+ X_fit_array = np.array([[0, 1, 2], [2, 4, 6]]).T
1519
+ X_fit_df = pd.DataFrame(X_fit_array, columns=["first", "second"])
1520
+
1521
+ X_trans_array = np.array([[2, 4, 6], [0, 1, 2]]).T
1522
+ X_trans_df = pd.DataFrame(X_trans_array, columns=["second", "first"])
1523
+
1524
+ tf = ColumnTransformer([("bycol", Trans(), explicit_colname)], remainder=remainder)
1525
+
1526
+ tf.fit(X_fit_df)
1527
+ X_fit_trans = tf.transform(X_fit_df)
1528
+
1529
+ # Changing the order still works
1530
+ X_trans = tf.transform(X_trans_df)
1531
+ assert_allclose(X_trans, X_fit_trans)
1532
+
1533
+ # extra columns are ignored
1534
+ X_extended_df = X_fit_df.copy()
1535
+ X_extended_df["third"] = [3, 6, 9]
1536
+ X_trans = tf.transform(X_extended_df)
1537
+ assert_allclose(X_trans, X_fit_trans)
1538
+
1539
+ if isinstance(explicit_colname, str):
1540
+ # Raise error if columns are specified by names but input only allows
1541
+ # to specify by position, e.g. numpy array instead of a pandas df.
1542
+ X_array = X_fit_array.copy()
1543
+ err_msg = "Specifying the columns"
1544
+ with pytest.raises(ValueError, match=err_msg):
1545
+ tf.transform(X_array)
1546
+
1547
+
1548
+ def test_feature_name_validation_missing_columns_drop_passthough():
1549
+ """Test the interaction between {'drop', 'passthrough'} and
1550
+ missing column names."""
1551
+ pd = pytest.importorskip("pandas")
1552
+
1553
+ X = np.ones(shape=(3, 4))
1554
+ df = pd.DataFrame(X, columns=["a", "b", "c", "d"])
1555
+
1556
+ df_dropped = df.drop("c", axis=1)
1557
+
1558
+ # with remainder='passthrough', all columns seen during `fit` must be
1559
+ # present
1560
+ tf = ColumnTransformer([("bycol", Trans(), [1])], remainder="passthrough")
1561
+ tf.fit(df)
1562
+ msg = r"columns are missing: {'c'}"
1563
+ with pytest.raises(ValueError, match=msg):
1564
+ tf.transform(df_dropped)
1565
+
1566
+ # with remainder='drop', it is allowed to have column 'c' missing
1567
+ tf = ColumnTransformer([("bycol", Trans(), [1])], remainder="drop")
1568
+ tf.fit(df)
1569
+
1570
+ df_dropped_trans = tf.transform(df_dropped)
1571
+ df_fit_trans = tf.transform(df)
1572
+ assert_allclose(df_dropped_trans, df_fit_trans)
1573
+
1574
+ # bycol drops 'c', thus it is allowed for 'c' to be missing
1575
+ tf = ColumnTransformer([("bycol", "drop", ["c"])], remainder="passthrough")
1576
+ tf.fit(df)
1577
+ df_dropped_trans = tf.transform(df_dropped)
1578
+ df_fit_trans = tf.transform(df)
1579
+ assert_allclose(df_dropped_trans, df_fit_trans)
1580
+
1581
+
1582
+ def test_feature_names_in_():
1583
+ """Feature names are stored in column transformer.
1584
+
1585
+ Column transformer deliberately does not check for column name consistency.
1586
+ It only checks that the non-dropped names seen in `fit` are seen
1587
+ in `transform`. This behavior is already tested in
1588
+ `test_feature_name_validation_missing_columns_drop_passthough`"""
1589
+
1590
+ pd = pytest.importorskip("pandas")
1591
+
1592
+ feature_names = ["a", "c", "d"]
1593
+ df = pd.DataFrame([[1, 2, 3]], columns=feature_names)
1594
+ ct = ColumnTransformer([("bycol", Trans(), ["a", "d"])], remainder="passthrough")
1595
+
1596
+ ct.fit(df)
1597
+ assert_array_equal(ct.feature_names_in_, feature_names)
1598
+ assert isinstance(ct.feature_names_in_, np.ndarray)
1599
+ assert ct.feature_names_in_.dtype == object
1600
+
1601
+
1602
+ class TransWithNames(Trans):
1603
+ def __init__(self, feature_names_out=None):
1604
+ self.feature_names_out = feature_names_out
1605
+
1606
+ def get_feature_names_out(self, input_features=None):
1607
+ if self.feature_names_out is not None:
1608
+ return np.asarray(self.feature_names_out, dtype=object)
1609
+ return input_features
1610
+
1611
+
1612
+ @pytest.mark.parametrize(
1613
+ "transformers, remainder, expected_names",
1614
+ [
1615
+ (
1616
+ [
1617
+ ("bycol1", TransWithNames(), ["d", "c"]),
1618
+ ("bycol2", "passthrough", ["d"]),
1619
+ ],
1620
+ "passthrough",
1621
+ ["bycol1__d", "bycol1__c", "bycol2__d", "remainder__a", "remainder__b"],
1622
+ ),
1623
+ (
1624
+ [
1625
+ ("bycol1", TransWithNames(), ["d", "c"]),
1626
+ ("bycol2", "passthrough", ["d"]),
1627
+ ],
1628
+ "drop",
1629
+ ["bycol1__d", "bycol1__c", "bycol2__d"],
1630
+ ),
1631
+ (
1632
+ [
1633
+ ("bycol1", TransWithNames(), ["b"]),
1634
+ ("bycol2", "drop", ["d"]),
1635
+ ],
1636
+ "passthrough",
1637
+ ["bycol1__b", "remainder__a", "remainder__c"],
1638
+ ),
1639
+ (
1640
+ [
1641
+ ("bycol1", TransWithNames(["pca1", "pca2"]), ["a", "b", "d"]),
1642
+ ],
1643
+ "passthrough",
1644
+ ["bycol1__pca1", "bycol1__pca2", "remainder__c"],
1645
+ ),
1646
+ (
1647
+ [
1648
+ ("bycol1", TransWithNames(["a", "b"]), ["d"]),
1649
+ ("bycol2", "passthrough", ["b"]),
1650
+ ],
1651
+ "drop",
1652
+ ["bycol1__a", "bycol1__b", "bycol2__b"],
1653
+ ),
1654
+ (
1655
+ [
1656
+ ("bycol1", TransWithNames([f"pca{i}" for i in range(2)]), ["b"]),
1657
+ ("bycol2", TransWithNames([f"pca{i}" for i in range(2)]), ["b"]),
1658
+ ],
1659
+ "passthrough",
1660
+ [
1661
+ "bycol1__pca0",
1662
+ "bycol1__pca1",
1663
+ "bycol2__pca0",
1664
+ "bycol2__pca1",
1665
+ "remainder__a",
1666
+ "remainder__c",
1667
+ "remainder__d",
1668
+ ],
1669
+ ),
1670
+ (
1671
+ [
1672
+ ("bycol1", "drop", ["d"]),
1673
+ ],
1674
+ "drop",
1675
+ [],
1676
+ ),
1677
+ (
1678
+ [
1679
+ ("bycol1", TransWithNames(), slice(1, 3)),
1680
+ ],
1681
+ "drop",
1682
+ ["bycol1__b", "bycol1__c"],
1683
+ ),
1684
+ (
1685
+ [
1686
+ ("bycol1", TransWithNames(), ["b"]),
1687
+ ("bycol2", "drop", slice(3, 4)),
1688
+ ],
1689
+ "passthrough",
1690
+ ["bycol1__b", "remainder__a", "remainder__c"],
1691
+ ),
1692
+ (
1693
+ [
1694
+ ("bycol1", TransWithNames(), ["d", "c"]),
1695
+ ("bycol2", "passthrough", slice(3, 4)),
1696
+ ],
1697
+ "passthrough",
1698
+ ["bycol1__d", "bycol1__c", "bycol2__d", "remainder__a", "remainder__b"],
1699
+ ),
1700
+ (
1701
+ [
1702
+ ("bycol1", TransWithNames(), slice("b", "c")),
1703
+ ],
1704
+ "drop",
1705
+ ["bycol1__b", "bycol1__c"],
1706
+ ),
1707
+ (
1708
+ [
1709
+ ("bycol1", TransWithNames(), ["b"]),
1710
+ ("bycol2", "drop", slice("c", "d")),
1711
+ ],
1712
+ "passthrough",
1713
+ ["bycol1__b", "remainder__a"],
1714
+ ),
1715
+ (
1716
+ [
1717
+ ("bycol1", TransWithNames(), ["d", "c"]),
1718
+ ("bycol2", "passthrough", slice("c", "d")),
1719
+ ],
1720
+ "passthrough",
1721
+ [
1722
+ "bycol1__d",
1723
+ "bycol1__c",
1724
+ "bycol2__c",
1725
+ "bycol2__d",
1726
+ "remainder__a",
1727
+ "remainder__b",
1728
+ ],
1729
+ ),
1730
+ ],
1731
+ )
1732
+ def test_verbose_feature_names_out_true(transformers, remainder, expected_names):
1733
+ """Check feature_names_out for verbose_feature_names_out=True (default)"""
1734
+ pd = pytest.importorskip("pandas")
1735
+ df = pd.DataFrame([[1, 2, 3, 4]], columns=["a", "b", "c", "d"])
1736
+ ct = ColumnTransformer(
1737
+ transformers,
1738
+ remainder=remainder,
1739
+ )
1740
+ ct.fit(df)
1741
+
1742
+ names = ct.get_feature_names_out()
1743
+ assert isinstance(names, np.ndarray)
1744
+ assert names.dtype == object
1745
+ assert_array_equal(names, expected_names)
1746
+
1747
+
1748
+ @pytest.mark.parametrize(
1749
+ "transformers, remainder, expected_names",
1750
+ [
1751
+ (
1752
+ [
1753
+ ("bycol1", TransWithNames(), ["d", "c"]),
1754
+ ("bycol2", "passthrough", ["a"]),
1755
+ ],
1756
+ "passthrough",
1757
+ ["d", "c", "a", "b"],
1758
+ ),
1759
+ (
1760
+ [
1761
+ ("bycol1", TransWithNames(["a"]), ["d", "c"]),
1762
+ ("bycol2", "passthrough", ["d"]),
1763
+ ],
1764
+ "drop",
1765
+ ["a", "d"],
1766
+ ),
1767
+ (
1768
+ [
1769
+ ("bycol1", TransWithNames(), ["b"]),
1770
+ ("bycol2", "drop", ["d"]),
1771
+ ],
1772
+ "passthrough",
1773
+ ["b", "a", "c"],
1774
+ ),
1775
+ (
1776
+ [
1777
+ ("bycol1", TransWithNames(["pca1", "pca2"]), ["a", "b", "d"]),
1778
+ ],
1779
+ "passthrough",
1780
+ ["pca1", "pca2", "c"],
1781
+ ),
1782
+ (
1783
+ [
1784
+ ("bycol1", TransWithNames(["a", "c"]), ["d"]),
1785
+ ("bycol2", "passthrough", ["d"]),
1786
+ ],
1787
+ "drop",
1788
+ ["a", "c", "d"],
1789
+ ),
1790
+ (
1791
+ [
1792
+ ("bycol1", TransWithNames([f"pca{i}" for i in range(2)]), ["b"]),
1793
+ ("bycol2", TransWithNames([f"kpca{i}" for i in range(2)]), ["b"]),
1794
+ ],
1795
+ "passthrough",
1796
+ ["pca0", "pca1", "kpca0", "kpca1", "a", "c", "d"],
1797
+ ),
1798
+ (
1799
+ [
1800
+ ("bycol1", "drop", ["d"]),
1801
+ ],
1802
+ "drop",
1803
+ [],
1804
+ ),
1805
+ (
1806
+ [
1807
+ ("bycol1", TransWithNames(), slice(1, 2)),
1808
+ ("bycol2", "drop", ["d"]),
1809
+ ],
1810
+ "passthrough",
1811
+ ["b", "a", "c"],
1812
+ ),
1813
+ (
1814
+ [
1815
+ ("bycol1", TransWithNames(), ["b"]),
1816
+ ("bycol2", "drop", slice(3, 4)),
1817
+ ],
1818
+ "passthrough",
1819
+ ["b", "a", "c"],
1820
+ ),
1821
+ (
1822
+ [
1823
+ ("bycol1", TransWithNames(), ["d", "c"]),
1824
+ ("bycol2", "passthrough", slice(0, 2)),
1825
+ ],
1826
+ "drop",
1827
+ ["d", "c", "a", "b"],
1828
+ ),
1829
+ (
1830
+ [
1831
+ ("bycol1", TransWithNames(), slice("a", "b")),
1832
+ ("bycol2", "drop", ["d"]),
1833
+ ],
1834
+ "passthrough",
1835
+ ["a", "b", "c"],
1836
+ ),
1837
+ (
1838
+ [
1839
+ ("bycol1", TransWithNames(), ["b"]),
1840
+ ("bycol2", "drop", slice("c", "d")),
1841
+ ],
1842
+ "passthrough",
1843
+ ["b", "a"],
1844
+ ),
1845
+ (
1846
+ [
1847
+ ("bycol1", TransWithNames(), ["d", "c"]),
1848
+ ("bycol2", "passthrough", slice("a", "b")),
1849
+ ],
1850
+ "drop",
1851
+ ["d", "c", "a", "b"],
1852
+ ),
1853
+ (
1854
+ [
1855
+ ("bycol1", TransWithNames(), ["d", "c"]),
1856
+ ("bycol2", "passthrough", slice("b", "b")),
1857
+ ],
1858
+ "drop",
1859
+ ["d", "c", "b"],
1860
+ ),
1861
+ ],
1862
+ )
1863
+ def test_verbose_feature_names_out_false(transformers, remainder, expected_names):
1864
+ """Check feature_names_out for verbose_feature_names_out=False"""
1865
+ pd = pytest.importorskip("pandas")
1866
+ df = pd.DataFrame([[1, 2, 3, 4]], columns=["a", "b", "c", "d"])
1867
+ ct = ColumnTransformer(
1868
+ transformers,
1869
+ remainder=remainder,
1870
+ verbose_feature_names_out=False,
1871
+ )
1872
+ ct.fit(df)
1873
+
1874
+ names = ct.get_feature_names_out()
1875
+ assert isinstance(names, np.ndarray)
1876
+ assert names.dtype == object
1877
+ assert_array_equal(names, expected_names)
1878
+
1879
+
1880
+ @pytest.mark.parametrize(
1881
+ "transformers, remainder, colliding_columns",
1882
+ [
1883
+ (
1884
+ [
1885
+ ("bycol1", TransWithNames(), ["b"]),
1886
+ ("bycol2", "passthrough", ["b"]),
1887
+ ],
1888
+ "drop",
1889
+ "['b']",
1890
+ ),
1891
+ (
1892
+ [
1893
+ ("bycol1", TransWithNames(["c", "d"]), ["c"]),
1894
+ ("bycol2", "passthrough", ["c"]),
1895
+ ],
1896
+ "drop",
1897
+ "['c']",
1898
+ ),
1899
+ (
1900
+ [
1901
+ ("bycol1", TransWithNames(["a"]), ["b"]),
1902
+ ("bycol2", "passthrough", ["b"]),
1903
+ ],
1904
+ "passthrough",
1905
+ "['a']",
1906
+ ),
1907
+ (
1908
+ [
1909
+ ("bycol1", TransWithNames(["a"]), ["b"]),
1910
+ ("bycol2", "drop", ["b"]),
1911
+ ],
1912
+ "passthrough",
1913
+ "['a']",
1914
+ ),
1915
+ (
1916
+ [
1917
+ ("bycol1", TransWithNames(["c", "b"]), ["b"]),
1918
+ ("bycol2", "passthrough", ["c", "b"]),
1919
+ ],
1920
+ "drop",
1921
+ "['b', 'c']",
1922
+ ),
1923
+ (
1924
+ [
1925
+ ("bycol1", TransWithNames(["a"]), ["b"]),
1926
+ ("bycol2", "passthrough", ["a"]),
1927
+ ("bycol3", TransWithNames(["a"]), ["b"]),
1928
+ ],
1929
+ "passthrough",
1930
+ "['a']",
1931
+ ),
1932
+ (
1933
+ [
1934
+ ("bycol1", TransWithNames(["a", "b"]), ["b"]),
1935
+ ("bycol2", "passthrough", ["a"]),
1936
+ ("bycol3", TransWithNames(["b"]), ["c"]),
1937
+ ],
1938
+ "passthrough",
1939
+ "['a', 'b']",
1940
+ ),
1941
+ (
1942
+ [
1943
+ ("bycol1", TransWithNames([f"pca{i}" for i in range(6)]), ["b"]),
1944
+ ("bycol2", TransWithNames([f"pca{i}" for i in range(6)]), ["b"]),
1945
+ ],
1946
+ "passthrough",
1947
+ "['pca0', 'pca1', 'pca2', 'pca3', 'pca4', ...]",
1948
+ ),
1949
+ (
1950
+ [
1951
+ ("bycol1", TransWithNames(["a", "b"]), slice(1, 2)),
1952
+ ("bycol2", "passthrough", ["a"]),
1953
+ ("bycol3", TransWithNames(["b"]), ["c"]),
1954
+ ],
1955
+ "passthrough",
1956
+ "['a', 'b']",
1957
+ ),
1958
+ (
1959
+ [
1960
+ ("bycol1", TransWithNames(["a", "b"]), ["b"]),
1961
+ ("bycol2", "passthrough", slice(0, 1)),
1962
+ ("bycol3", TransWithNames(["b"]), ["c"]),
1963
+ ],
1964
+ "passthrough",
1965
+ "['a', 'b']",
1966
+ ),
1967
+ (
1968
+ [
1969
+ ("bycol1", TransWithNames(["a", "b"]), slice("b", "c")),
1970
+ ("bycol2", "passthrough", ["a"]),
1971
+ ("bycol3", TransWithNames(["b"]), ["c"]),
1972
+ ],
1973
+ "passthrough",
1974
+ "['a', 'b']",
1975
+ ),
1976
+ (
1977
+ [
1978
+ ("bycol1", TransWithNames(["a", "b"]), ["b"]),
1979
+ ("bycol2", "passthrough", slice("a", "a")),
1980
+ ("bycol3", TransWithNames(["b"]), ["c"]),
1981
+ ],
1982
+ "passthrough",
1983
+ "['a', 'b']",
1984
+ ),
1985
+ ],
1986
+ )
1987
+ def test_verbose_feature_names_out_false_errors(
1988
+ transformers, remainder, colliding_columns
1989
+ ):
1990
+ """Check that colliding names raise an error when verbose_feature_names_out=False"""
1991
+
1992
+ pd = pytest.importorskip("pandas")
1993
+ df = pd.DataFrame([[1, 2, 3, 4]], columns=["a", "b", "c", "d"])
1994
+ ct = ColumnTransformer(
1995
+ transformers,
1996
+ remainder=remainder,
1997
+ verbose_feature_names_out=False,
1998
+ )
1999
+ ct.fit(df)
2000
+
2001
+ msg = re.escape(
2002
+ f"Output feature names: {colliding_columns} are not unique. Please set "
2003
+ "verbose_feature_names_out=True to add prefixes to feature names"
2004
+ )
2005
+ with pytest.raises(ValueError, match=msg):
2006
+ ct.get_feature_names_out()
2007
+
2008
+
2009
+ @pytest.mark.parametrize("verbose_feature_names_out", [True, False])
2010
+ @pytest.mark.parametrize("remainder", ["drop", "passthrough"])
2011
+ def test_column_transformer_set_output(verbose_feature_names_out, remainder):
2012
+ """Check column transformer behavior with set_output."""
2013
+ pd = pytest.importorskip("pandas")
2014
+ df = pd.DataFrame([[1, 2, 3, 4]], columns=["a", "b", "c", "d"], index=[10])
2015
+ ct = ColumnTransformer(
2016
+ [("first", TransWithNames(), ["a", "c"]), ("second", TransWithNames(), ["d"])],
2017
+ remainder=remainder,
2018
+ verbose_feature_names_out=verbose_feature_names_out,
2019
+ )
2020
+ X_trans = ct.fit_transform(df)
2021
+ assert isinstance(X_trans, np.ndarray)
2022
+
2023
+ ct.set_output(transform="pandas")
2024
+
2025
+ df_test = pd.DataFrame([[1, 2, 3, 4]], columns=df.columns, index=[20])
2026
+ X_trans = ct.transform(df_test)
2027
+ assert isinstance(X_trans, pd.DataFrame)
2028
+
2029
+ feature_names_out = ct.get_feature_names_out()
2030
+ assert_array_equal(X_trans.columns, feature_names_out)
2031
+ assert_array_equal(X_trans.index, df_test.index)
2032
+
2033
+
2034
+ @pytest.mark.parametrize("remainder", ["drop", "passthrough"])
2035
+ @pytest.mark.parametrize("fit_transform", [True, False])
2036
+ def test_column_transform_set_output_mixed(remainder, fit_transform):
2037
+ """Check ColumnTransformer outputs mixed types correctly."""
2038
+ pd = pytest.importorskip("pandas")
2039
+ df = pd.DataFrame(
2040
+ {
2041
+ "pet": pd.Series(["dog", "cat", "snake"], dtype="category"),
2042
+ "color": pd.Series(["green", "blue", "red"], dtype="object"),
2043
+ "age": [1.4, 2.1, 4.4],
2044
+ "height": [20, 40, 10],
2045
+ "distance": pd.Series([20, pd.NA, 100], dtype="Int32"),
2046
+ }
2047
+ )
2048
+ ct = ColumnTransformer(
2049
+ [
2050
+ (
2051
+ "color_encode",
2052
+ OneHotEncoder(sparse_output=False, dtype="int8"),
2053
+ ["color"],
2054
+ ),
2055
+ ("age", StandardScaler(), ["age"]),
2056
+ ],
2057
+ remainder=remainder,
2058
+ verbose_feature_names_out=False,
2059
+ ).set_output(transform="pandas")
2060
+ if fit_transform:
2061
+ X_trans = ct.fit_transform(df)
2062
+ else:
2063
+ X_trans = ct.fit(df).transform(df)
2064
+
2065
+ assert isinstance(X_trans, pd.DataFrame)
2066
+ assert_array_equal(X_trans.columns, ct.get_feature_names_out())
2067
+
2068
+ expected_dtypes = {
2069
+ "color_blue": "int8",
2070
+ "color_green": "int8",
2071
+ "color_red": "int8",
2072
+ "age": "float64",
2073
+ "pet": "category",
2074
+ "height": "int64",
2075
+ "distance": "Int32",
2076
+ }
2077
+ for col, dtype in X_trans.dtypes.items():
2078
+ assert dtype == expected_dtypes[col]
2079
+
2080
+
2081
+ @pytest.mark.parametrize("remainder", ["drop", "passthrough"])
2082
+ def test_column_transform_set_output_after_fitting(remainder):
2083
+ pd = pytest.importorskip("pandas")
2084
+ df = pd.DataFrame(
2085
+ {
2086
+ "pet": pd.Series(["dog", "cat", "snake"], dtype="category"),
2087
+ "age": [1.4, 2.1, 4.4],
2088
+ "height": [20, 40, 10],
2089
+ }
2090
+ )
2091
+ ct = ColumnTransformer(
2092
+ [
2093
+ (
2094
+ "color_encode",
2095
+ OneHotEncoder(sparse_output=False, dtype="int16"),
2096
+ ["pet"],
2097
+ ),
2098
+ ("age", StandardScaler(), ["age"]),
2099
+ ],
2100
+ remainder=remainder,
2101
+ verbose_feature_names_out=False,
2102
+ )
2103
+
2104
+ # fit without calling set_output
2105
+ X_trans = ct.fit_transform(df)
2106
+ assert isinstance(X_trans, np.ndarray)
2107
+ assert X_trans.dtype == "float64"
2108
+
2109
+ ct.set_output(transform="pandas")
2110
+ X_trans_df = ct.transform(df)
2111
+ expected_dtypes = {
2112
+ "pet_cat": "int16",
2113
+ "pet_dog": "int16",
2114
+ "pet_snake": "int16",
2115
+ "height": "int64",
2116
+ "age": "float64",
2117
+ }
2118
+ for col, dtype in X_trans_df.dtypes.items():
2119
+ assert dtype == expected_dtypes[col]
2120
+
2121
+
2122
+ # PandasOutTransformer that does not define get_feature_names_out and always expects
2123
+ # the input to be a DataFrame.
2124
+ class PandasOutTransformer(BaseEstimator):
2125
+ def __init__(self, offset=1.0):
2126
+ self.offset = offset
2127
+
2128
+ def fit(self, X, y=None):
2129
+ pd = pytest.importorskip("pandas")
2130
+ assert isinstance(X, pd.DataFrame)
2131
+ return self
2132
+
2133
+ def transform(self, X, y=None):
2134
+ pd = pytest.importorskip("pandas")
2135
+ assert isinstance(X, pd.DataFrame)
2136
+ return X - self.offset
2137
+
2138
+ def set_output(self, transform=None):
2139
+ # This transformer will always output a DataFrame regardless of the
2140
+ # configuration.
2141
+ return self
2142
+
2143
+
2144
+ @pytest.mark.parametrize(
2145
+ "trans_1, expected_verbose_names, expected_non_verbose_names",
2146
+ [
2147
+ (
2148
+ PandasOutTransformer(offset=2.0),
2149
+ ["trans_0__feat1", "trans_1__feat0"],
2150
+ ["feat1", "feat0"],
2151
+ ),
2152
+ (
2153
+ "drop",
2154
+ ["trans_0__feat1"],
2155
+ ["feat1"],
2156
+ ),
2157
+ (
2158
+ "passthrough",
2159
+ ["trans_0__feat1", "trans_1__feat0"],
2160
+ ["feat1", "feat0"],
2161
+ ),
2162
+ ],
2163
+ )
2164
+ def test_transformers_with_pandas_out_but_not_feature_names_out(
2165
+ trans_1, expected_verbose_names, expected_non_verbose_names
2166
+ ):
2167
+ """Check that set_config(transform="pandas") is compatible with more transformers.
2168
+
2169
+ Specifically, if a transformer returns a DataFrame but does not define
2170
+ `get_feature_names_out`.
2171
+ """
2172
+ pd = pytest.importorskip("pandas")
2173
+
2174
+ X_df = pd.DataFrame({"feat0": [1.0, 2.0, 3.0], "feat1": [2.0, 3.0, 4.0]})
2175
+ ct = ColumnTransformer(
2176
+ [
2177
+ ("trans_0", PandasOutTransformer(offset=3.0), ["feat1"]),
2178
+ ("trans_1", trans_1, ["feat0"]),
2179
+ ]
2180
+ )
2181
+ X_trans_np = ct.fit_transform(X_df)
2182
+ assert isinstance(X_trans_np, np.ndarray)
2183
+
2184
+ # `ct` does not have `get_feature_names_out` because `PandasOutTransformer` does
2185
+ # not define the method.
2186
+ with pytest.raises(AttributeError, match="not provide get_feature_names_out"):
2187
+ ct.get_feature_names_out()
2188
+
2189
+ # The feature names are prefixed because verbose_feature_names_out=True is the default
2190
+ ct.set_output(transform="pandas")
2191
+ X_trans_df0 = ct.fit_transform(X_df)
2192
+ assert_array_equal(X_trans_df0.columns, expected_verbose_names)
2193
+
2194
+ ct.set_params(verbose_feature_names_out=False)
2195
+ X_trans_df1 = ct.fit_transform(X_df)
2196
+ assert_array_equal(X_trans_df1.columns, expected_non_verbose_names)
2197
+
2198
+
2199
+ @pytest.mark.parametrize(
2200
+ "empty_selection",
2201
+ [[], np.array([False, False]), [False, False]],
2202
+ ids=["list", "bool", "bool_int"],
2203
+ )
2204
+ def test_empty_selection_pandas_output(empty_selection):
2205
+ """Check that pandas output works when there is an empty selection.
2206
+
2207
+ Non-regression test for gh-25487
2208
+ """
2209
+ pd = pytest.importorskip("pandas")
2210
+
2211
+ X = pd.DataFrame([[1.0, 2.2], [3.0, 1.0]], columns=["a", "b"])
2212
+ ct = ColumnTransformer(
2213
+ [
2214
+ ("categorical", "passthrough", empty_selection),
2215
+ ("numerical", StandardScaler(), ["a", "b"]),
2216
+ ],
2217
+ verbose_feature_names_out=True,
2218
+ )
2219
+ ct.set_output(transform="pandas")
2220
+ X_out = ct.fit_transform(X)
2221
+ assert_array_equal(X_out.columns, ["numerical__a", "numerical__b"])
2222
+
2223
+ ct.set_params(verbose_feature_names_out=False)
2224
+ X_out = ct.fit_transform(X)
2225
+ assert_array_equal(X_out.columns, ["a", "b"])
2226
+
2227
+
2228
+ def test_raise_error_if_index_not_aligned():
2229
+ """Check column transformer raises error if indices are not aligned.
2230
+
2231
+ Non-regression test for gh-26210.
2232
+ """
2233
+ pd = pytest.importorskip("pandas")
2234
+
2235
+ X = pd.DataFrame([[1.0, 2.2], [3.0, 1.0]], columns=["a", "b"], index=[8, 3])
2236
+ reset_index_transformer = FunctionTransformer(
2237
+ lambda x: x.reset_index(drop=True), feature_names_out="one-to-one"
2238
+ )
2239
+
2240
+ ct = ColumnTransformer(
2241
+ [
2242
+ ("num1", "passthrough", ["a"]),
2243
+ ("num2", reset_index_transformer, ["b"]),
2244
+ ],
2245
+ )
2246
+ ct.set_output(transform="pandas")
2247
+ msg = (
2248
+ "Concatenating DataFrames from the transformer's output lead to"
2249
+ " an inconsistent number of samples. The output may have Pandas"
2250
+ " Indexes that do not match."
2251
+ )
2252
+ with pytest.raises(ValueError, match=msg):
2253
+ ct.fit_transform(X)
2254
+
2255
+
2256
+ def test_remainder_set_output():
2257
+ """Check that the output is set for the remainder.
2258
+
2259
+ Non-regression test for #26306.
2260
+ """
2261
+
2262
+ pd = pytest.importorskip("pandas")
2263
+ df = pd.DataFrame({"a": [True, False, True], "b": [1, 2, 3]})
2264
+
2265
+ ct = make_column_transformer(
2266
+ (VarianceThreshold(), make_column_selector(dtype_include=bool)),
2267
+ remainder=VarianceThreshold(),
2268
+ verbose_feature_names_out=False,
2269
+ )
2270
+ ct.set_output(transform="pandas")
2271
+
2272
+ out = ct.fit_transform(df)
2273
+ pd.testing.assert_frame_equal(out, df)
2274
+
2275
+ ct.set_output(transform="default")
2276
+ out = ct.fit_transform(df)
2277
+ assert isinstance(out, np.ndarray)
2278
+
2279
+
2280
+ # TODO(1.6): replace the warning by a ValueError exception
2281
+ def test_transform_pd_na():
2282
+ """Check behavior when a transformer's output contains pandas.NA
2283
+
2284
+ It should emit a warning unless the output config is set to 'pandas'.
2285
+ """
2286
+ pd = pytest.importorskip("pandas")
2287
+ if not hasattr(pd, "Float64Dtype"):
2288
+ pytest.skip(
2289
+ "The issue with pd.NA tested here does not happen in old versions that do"
2290
+ " not have the extension dtypes"
2291
+ )
2292
+ df = pd.DataFrame({"a": [1.5, None]})
2293
+ ct = make_column_transformer(("passthrough", ["a"]))
2294
+ # No warning with non-extension dtypes and np.nan
2295
+ with warnings.catch_warnings():
2296
+ warnings.simplefilter("error")
2297
+ ct.fit_transform(df)
2298
+ df = df.convert_dtypes()
2299
+ # Warning with extension dtype and pd.NA (will become an error in 1.6)
2300
+ with pytest.warns(FutureWarning, match=r"set_output\(transform='pandas'\)"):
2301
+ ct.fit_transform(df)
2302
+ # No warning when output is set to pandas
2303
+ with warnings.catch_warnings():
2304
+ warnings.simplefilter("error")
2305
+ ct.set_output(transform="pandas")
2306
+ ct.fit_transform(df)
2307
+ ct.set_output(transform="default")
2308
+ # No warning when there are no pd.NA
2309
+ with warnings.catch_warnings():
2310
+ warnings.simplefilter("error")
2311
+ ct.fit_transform(df.fillna(-1.0))
2312
+
2313
+
2314
+ def test_dataframe_different_dataframe_libraries():
2315
+ """Check fitting and transforming on pandas and polars dataframes."""
2316
+ pd = pytest.importorskip("pandas")
2317
+ pl = pytest.importorskip("polars")
2318
+ X_train_np = np.array([[0, 1], [2, 4], [4, 5]])
2319
+ X_test_np = np.array([[1, 2], [1, 3], [2, 3]])
2320
+
2321
+ # Fit on pandas and transform on polars
2322
+ X_train_pd = pd.DataFrame(X_train_np, columns=["a", "b"])
2323
+ X_test_pl = pl.DataFrame(X_test_np, schema=["a", "b"])
2324
+
2325
+ ct = make_column_transformer((Trans(), [0, 1]))
2326
+ ct.fit(X_train_pd)
2327
+
2328
+ out_pl_in = ct.transform(X_test_pl)
2329
+ assert_array_equal(out_pl_in, X_test_np)
2330
+
2331
+ # Fit on polars and transform on pandas
2332
+ X_train_pl = pl.DataFrame(X_train_np, schema=["a", "b"])
2333
+ X_test_pd = pd.DataFrame(X_test_np, columns=["a", "b"])
2334
+ ct.fit(X_train_pl)
2335
+
2336
+ out_pd_in = ct.transform(X_test_pd)
2337
+ assert_array_equal(out_pd_in, X_test_np)
2338
+
2339
+
2340
+ @pytest.mark.parametrize("transform_output", ["default", "pandas"])
2341
+ def test_column_transformer_remainder_passthrough_naming_consistency(transform_output):
2342
+ """Check that when `remainder="passthrough"`, inconsistent naming is handled
2343
+ correctly by the underlying `FunctionTransformer`.
2344
+
2345
+ Non-regression test for:
2346
+ https://github.com/scikit-learn/scikit-learn/issues/28232
2347
+ """
2348
+ pd = pytest.importorskip("pandas")
2349
+ X = pd.DataFrame(np.random.randn(10, 4))
2350
+
2351
+ preprocessor = ColumnTransformer(
2352
+ transformers=[("scaler", StandardScaler(), [0, 1])],
2353
+ remainder="passthrough",
2354
+ ).set_output(transform=transform_output)
2355
+ X_trans = preprocessor.fit_transform(X)
2356
+ assert X_trans.shape == X.shape
2357
+
2358
+ expected_column_names = [
2359
+ "scaler__x0",
2360
+ "scaler__x1",
2361
+ "remainder__x2",
2362
+ "remainder__x3",
2363
+ ]
2364
+ if hasattr(X_trans, "columns"):
2365
+ assert X_trans.columns.tolist() == expected_column_names
2366
+ assert preprocessor.get_feature_names_out().tolist() == expected_column_names
2367
+
2368
+
2369
+ @pytest.mark.parametrize("dataframe_lib", ["pandas", "polars"])
2370
+ def test_column_transformer_column_renaming(dataframe_lib):
2371
+ """Check that we properly rename columns when using `ColumnTransformer` and
2372
+ selected columns are redundant between transformers.
2373
+
2374
+ Non-regression test for:
2375
+ https://github.com/scikit-learn/scikit-learn/issues/28260
2376
+ """
2377
+ lib = pytest.importorskip(dataframe_lib)
2378
+
2379
+ df = lib.DataFrame({"x1": [1, 2, 3], "x2": [10, 20, 30], "x3": [100, 200, 300]})
2380
+
2381
+ transformer = ColumnTransformer(
2382
+ transformers=[
2383
+ ("A", "passthrough", ["x1", "x2", "x3"]),
2384
+ ("B", FunctionTransformer(), ["x1", "x2"]),
2385
+ ("C", StandardScaler(), ["x1", "x3"]),
2386
+ # special case of empty transformer
2387
+ ("D", FunctionTransformer(lambda x: x[[]]), ["x1", "x2", "x3"]),
2388
+ ],
2389
+ verbose_feature_names_out=True,
2390
+ ).set_output(transform=dataframe_lib)
2391
+ df_trans = transformer.fit_transform(df)
2392
+ assert list(df_trans.columns) == [
2393
+ "A__x1",
2394
+ "A__x2",
2395
+ "A__x3",
2396
+ "B__x1",
2397
+ "B__x2",
2398
+ "C__x1",
2399
+ "C__x3",
2400
+ ]
2401
+
2402
+
2403
+ @pytest.mark.parametrize("dataframe_lib", ["pandas", "polars"])
2404
+ def test_column_transformer_error_with_duplicated_columns(dataframe_lib):
2405
+ """Check that we raise an error when using `ColumnTransformer` and
2406
+ the column names are duplicated between transformers."""
2407
+ lib = pytest.importorskip(dataframe_lib)
2408
+
2409
+ df = lib.DataFrame({"x1": [1, 2, 3], "x2": [10, 20, 30], "x3": [100, 200, 300]})
2410
+
2411
+ transformer = ColumnTransformer(
2412
+ transformers=[
2413
+ ("A", "passthrough", ["x1", "x2", "x3"]),
2414
+ ("B", FunctionTransformer(), ["x1", "x2"]),
2415
+ ("C", StandardScaler(), ["x1", "x3"]),
2416
+ # special case of empty transformer
2417
+ ("D", FunctionTransformer(lambda x: x[[]]), ["x1", "x2", "x3"]),
2418
+ ],
2419
+ verbose_feature_names_out=False,
2420
+ ).set_output(transform=dataframe_lib)
2421
+ err_msg = re.escape(
2422
+ "Duplicated feature names found before concatenating the outputs of the "
2423
+ "transformers: ['x1', 'x2', 'x3'].\n"
2424
+ "Transformer A has conflicting columns names: ['x1', 'x2', 'x3'].\n"
2425
+ "Transformer B has conflicting columns names: ['x1', 'x2'].\n"
2426
+ "Transformer C has conflicting columns names: ['x1', 'x3'].\n"
2427
+ )
2428
+ with pytest.raises(ValueError, match=err_msg):
2429
+ transformer.fit_transform(df)
2430
+
2431
+
2432
+ # Metadata Routing Tests
2433
+ # ======================
2434
+
2435
+
2436
+ @pytest.mark.parametrize("method", ["transform", "fit_transform", "fit"])
2437
+ def test_routing_passed_metadata_not_supported(method):
2438
+ """Test that the right error message is raised when metadata is passed while
2439
+ not supported when `enable_metadata_routing=False`."""
2440
+
2441
+ X = np.array([[0, 1, 2], [2, 4, 6]]).T
2442
+ y = [1, 2, 3]
2443
+ trs = ColumnTransformer([("trans", Trans(), [0])]).fit(X, y)
2444
+
2445
+ with pytest.raises(
2446
+ ValueError, match="is only supported if enable_metadata_routing=True"
2447
+ ):
2448
+ getattr(trs, method)([[1]], sample_weight=[1], prop="a")
2449
+
2450
+
2451
+ @pytest.mark.usefixtures("enable_slep006")
2452
+ @pytest.mark.parametrize("method", ["transform", "fit_transform", "fit"])
2453
+ def test_metadata_routing_for_column_transformer(method):
2454
+ """Test that metadata is routed correctly for column transformer."""
2455
+ X = np.array([[0, 1, 2], [2, 4, 6]]).T
2456
+ y = [1, 2, 3]
2457
+ registry = _Registry()
2458
+ sample_weight, metadata = [1], "a"
2459
+ trs = ColumnTransformer(
2460
+ [
2461
+ (
2462
+ "trans",
2463
+ ConsumingTransformer(registry=registry)
2464
+ .set_fit_request(sample_weight=True, metadata=True)
2465
+ .set_transform_request(sample_weight=True, metadata=True),
2466
+ [0],
2467
+ )
2468
+ ]
2469
+ )
2470
+
2471
+ if method == "transform":
2472
+ trs.fit(X, y)
2473
+ trs.transform(X, sample_weight=sample_weight, metadata=metadata)
2474
+ else:
2475
+ getattr(trs, method)(X, y, sample_weight=sample_weight, metadata=metadata)
2476
+
2477
+ assert len(registry)
2478
+ for _trs in registry:
2479
+ check_recorded_metadata(
2480
+ obj=_trs, method=method, sample_weight=sample_weight, metadata=metadata
2481
+ )
2482
+
2483
+
2484
+ @pytest.mark.usefixtures("enable_slep006")
2485
+ def test_metadata_routing_no_fit_transform():
2486
+ """Test metadata routing when the sub-estimator doesn't implement
2487
+ ``fit_transform``."""
2488
+
2489
+ class NoFitTransform(BaseEstimator):
2490
+ def fit(self, X, y=None, sample_weight=None, metadata=None):
2491
+ assert sample_weight
2492
+ assert metadata
2493
+ return self
2494
+
2495
+ def transform(self, X, sample_weight=None, metadata=None):
2496
+ assert sample_weight
2497
+ assert metadata
2498
+ return X
2499
+
2500
+ X = np.array([[0, 1, 2], [2, 4, 6]]).T
2501
+ y = [1, 2, 3]
2502
+ _Registry()
2503
+ sample_weight, metadata = [1], "a"
2504
+ trs = ColumnTransformer(
2505
+ [
2506
+ (
2507
+ "trans",
2508
+ NoFitTransform()
2509
+ .set_fit_request(sample_weight=True, metadata=True)
2510
+ .set_transform_request(sample_weight=True, metadata=True),
2511
+ [0],
2512
+ )
2513
+ ]
2514
+ )
2515
+
2516
+ trs.fit(X, y, sample_weight=sample_weight, metadata=metadata)
2517
+ trs.fit_transform(X, y, sample_weight=sample_weight, metadata=metadata)
2518
+
2519
+
2520
+ @pytest.mark.usefixtures("enable_slep006")
2521
+ @pytest.mark.parametrize("method", ["transform", "fit_transform", "fit"])
2522
+ def test_metadata_routing_error_for_column_transformer(method):
2523
+ """Test that the right error is raised when metadata is not requested."""
2524
+ X = np.array([[0, 1, 2], [2, 4, 6]]).T
2525
+ y = [1, 2, 3]
2526
+ sample_weight, metadata = [1], "a"
2527
+ trs = ColumnTransformer([("trans", ConsumingTransformer(), [0])])
2528
+
2529
+ error_message = (
2530
+ "[sample_weight, metadata] are passed but are not explicitly set as requested"
2531
+ f" or not for ConsumingTransformer.{method}"
2532
+ )
2533
+ with pytest.raises(ValueError, match=re.escape(error_message)):
2534
+ if method == "transform":
2535
+ trs.fit(X, y)
2536
+ trs.transform(X, sample_weight=sample_weight, metadata=metadata)
2537
+ else:
2538
+ getattr(trs, method)(X, y, sample_weight=sample_weight, metadata=metadata)
2539
+
2540
+
2541
+ @pytest.mark.usefixtures("enable_slep006")
2542
+ def test_get_metadata_routing_works_without_fit():
2543
+ # Regression test for https://github.com/scikit-learn/scikit-learn/issues/28186
2544
+ # Make sure ct.get_metadata_routing() works w/o having called fit.
2545
+ ct = ColumnTransformer([("trans", ConsumingTransformer(), [0])])
2546
+ ct.get_metadata_routing()
2547
+
2548
+
2549
+ @pytest.mark.usefixtures("enable_slep006")
2550
+ def test_remainder_request_always_present():
2551
+ # Test that remainder request is always present.
2552
+ ct = ColumnTransformer(
2553
+ [("trans", StandardScaler(), [0])],
2554
+ remainder=ConsumingTransformer()
2555
+ .set_fit_request(metadata=True)
2556
+ .set_transform_request(metadata=True),
2557
+ )
2558
+ router = ct.get_metadata_routing()
2559
+ assert router.consumes("fit", ["metadata"]) == set(["metadata"])
2560
+
2561
+
2562
+ @pytest.mark.usefixtures("enable_slep006")
2563
+ def test_unused_transformer_request_present():
2564
+ # Test that the request of a transformer is always present even when not
2565
+ # used due to no selected columns.
2566
+ ct = ColumnTransformer(
2567
+ [
2568
+ (
2569
+ "trans",
2570
+ ConsumingTransformer()
2571
+ .set_fit_request(metadata=True)
2572
+ .set_transform_request(metadata=True),
2573
+ lambda X: [],
2574
+ )
2575
+ ]
2576
+ )
2577
+ router = ct.get_metadata_routing()
2578
+ assert router.consumes("fit", ["metadata"]) == set(["metadata"])
2579
+
2580
+
2581
+ # End of Metadata Routing Tests
2582
+ # =============================
venv/lib/python3.10/site-packages/sklearn/compose/tests/test_target.py ADDED
@@ -0,0 +1,387 @@
1
+ import numpy as np
2
+ import pytest
3
+
4
+ from sklearn import datasets
5
+ from sklearn.base import BaseEstimator, TransformerMixin, clone
6
+ from sklearn.compose import TransformedTargetRegressor
7
+ from sklearn.dummy import DummyRegressor
8
+ from sklearn.linear_model import LinearRegression, OrthogonalMatchingPursuit
9
+ from sklearn.pipeline import Pipeline
10
+ from sklearn.preprocessing import FunctionTransformer, StandardScaler
11
+ from sklearn.utils._testing import assert_allclose, assert_no_warnings
12
+
13
+ friedman = datasets.make_friedman1(random_state=0)
14
+
15
+
16
+ def test_transform_target_regressor_error():
17
+ X, y = friedman
18
+ # provide a transformer and functions at the same time
19
+ regr = TransformedTargetRegressor(
20
+ regressor=LinearRegression(),
21
+ transformer=StandardScaler(),
22
+ func=np.exp,
23
+ inverse_func=np.log,
24
+ )
25
+ with pytest.raises(
26
+ ValueError,
27
+ match="'transformer' and functions 'func'/'inverse_func' cannot both be set.",
28
+ ):
29
+ regr.fit(X, y)
30
+ # fit with sample_weight with a regressor which does not support it
31
+ sample_weight = np.ones((y.shape[0],))
32
+ regr = TransformedTargetRegressor(
33
+ regressor=OrthogonalMatchingPursuit(), transformer=StandardScaler()
34
+ )
35
+ with pytest.raises(
36
+ TypeError,
37
+ match=r"fit\(\) got an unexpected " "keyword argument 'sample_weight'",
38
+ ):
39
+ regr.fit(X, y, sample_weight=sample_weight)
40
+ # func is given but inverse_func is not
41
+ regr = TransformedTargetRegressor(func=np.exp)
42
+ with pytest.raises(
43
+ ValueError,
44
+ match="When 'func' is provided, 'inverse_func' must also be provided",
45
+ ):
46
+ regr.fit(X, y)
47
+
48
+
49
+ def test_transform_target_regressor_invertible():
50
+ X, y = friedman
51
+ regr = TransformedTargetRegressor(
52
+ regressor=LinearRegression(),
53
+ func=np.sqrt,
54
+ inverse_func=np.log,
55
+ check_inverse=True,
56
+ )
57
+ with pytest.warns(
58
+ UserWarning,
59
+ match=(
60
+ "The provided functions or"
61
+ " transformer are not strictly inverse of each other."
62
+ ),
63
+ ):
64
+ regr.fit(X, y)
65
+ regr = TransformedTargetRegressor(
66
+ regressor=LinearRegression(), func=np.sqrt, inverse_func=np.log
67
+ )
68
+ regr.set_params(check_inverse=False)
69
+ assert_no_warnings(regr.fit, X, y)
70
+
71
+
72
+ def _check_standard_scaled(y, y_pred):
73
+ y_mean = np.mean(y, axis=0)
74
+ y_std = np.std(y, axis=0)
75
+ assert_allclose((y - y_mean) / y_std, y_pred)
76
+
77
+
78
+ def _check_shifted_by_one(y, y_pred):
79
+ assert_allclose(y + 1, y_pred)
80
+
81
+
82
+ def test_transform_target_regressor_functions():
83
+ X, y = friedman
84
+ regr = TransformedTargetRegressor(
85
+ regressor=LinearRegression(), func=np.log, inverse_func=np.exp
86
+ )
87
+ y_pred = regr.fit(X, y).predict(X)
88
+ # check the transformer output
89
+ y_tran = regr.transformer_.transform(y.reshape(-1, 1)).squeeze()
90
+ assert_allclose(np.log(y), y_tran)
91
+ assert_allclose(
92
+ y, regr.transformer_.inverse_transform(y_tran.reshape(-1, 1)).squeeze()
93
+ )
94
+ assert y.shape == y_pred.shape
95
+ assert_allclose(y_pred, regr.inverse_func(regr.regressor_.predict(X)))
96
+ # check the regressor output
97
+ lr = LinearRegression().fit(X, regr.func(y))
98
+ assert_allclose(regr.regressor_.coef_.ravel(), lr.coef_.ravel())
99
+
100
+
101
+ def test_transform_target_regressor_functions_multioutput():
102
+ X = friedman[0]
103
+ y = np.vstack((friedman[1], friedman[1] ** 2 + 1)).T
104
+ regr = TransformedTargetRegressor(
105
+ regressor=LinearRegression(), func=np.log, inverse_func=np.exp
106
+ )
107
+ y_pred = regr.fit(X, y).predict(X)
108
+ # check the transformer output
109
+ y_tran = regr.transformer_.transform(y)
110
+ assert_allclose(np.log(y), y_tran)
111
+ assert_allclose(y, regr.transformer_.inverse_transform(y_tran))
112
+ assert y.shape == y_pred.shape
113
+ assert_allclose(y_pred, regr.inverse_func(regr.regressor_.predict(X)))
114
+ # check the regressor output
115
+ lr = LinearRegression().fit(X, regr.func(y))
116
+ assert_allclose(regr.regressor_.coef_.ravel(), lr.coef_.ravel())
117
+
118
+
119
+ @pytest.mark.parametrize(
120
+ "X,y", [friedman, (friedman[0], np.vstack((friedman[1], friedman[1] ** 2 + 1)).T)]
121
+ )
122
+ def test_transform_target_regressor_1d_transformer(X, y):
123
+ # All transformers in scikit-learn expect 2D data. FunctionTransformer with
124
+ # validate=False lifts this constraint without checking that the input is a
125
+ # 2D vector. We check the consistency of the data shape using a 1D and 2D y
126
+ # array.
127
+ transformer = FunctionTransformer(
128
+ func=lambda x: x + 1, inverse_func=lambda x: x - 1
129
+ )
130
+ regr = TransformedTargetRegressor(
131
+ regressor=LinearRegression(), transformer=transformer
132
+ )
133
+ y_pred = regr.fit(X, y).predict(X)
134
+ assert y.shape == y_pred.shape
135
+ # consistency forward transform
136
+ y_tran = regr.transformer_.transform(y)
137
+ _check_shifted_by_one(y, y_tran)
138
+ assert y.shape == y_pred.shape
139
+ # consistency inverse transform
140
+ assert_allclose(y, regr.transformer_.inverse_transform(y_tran).squeeze())
141
+ # consistency of the regressor
142
+ lr = LinearRegression()
143
+ transformer2 = clone(transformer)
144
+ lr.fit(X, transformer2.fit_transform(y))
145
+ y_lr_pred = lr.predict(X)
146
+ assert_allclose(y_pred, transformer2.inverse_transform(y_lr_pred))
147
+ assert_allclose(regr.regressor_.coef_, lr.coef_)
148
+
149
+
150
+ @pytest.mark.parametrize(
151
+ "X,y", [friedman, (friedman[0], np.vstack((friedman[1], friedman[1] ** 2 + 1)).T)]
152
+ )
153
+ def test_transform_target_regressor_2d_transformer(X, y):
154
+ # Check consistency with transformer accepting only 2D array and a 1D/2D y
155
+ # array.
156
+ transformer = StandardScaler()
157
+ regr = TransformedTargetRegressor(
158
+ regressor=LinearRegression(), transformer=transformer
159
+ )
160
+ y_pred = regr.fit(X, y).predict(X)
161
+ assert y.shape == y_pred.shape
162
+ # consistency forward transform
163
+ if y.ndim == 1: # create a 2D array and squeeze results
164
+ y_tran = regr.transformer_.transform(y.reshape(-1, 1))
165
+ else:
166
+ y_tran = regr.transformer_.transform(y)
167
+ _check_standard_scaled(y, y_tran.squeeze())
168
+ assert y.shape == y_pred.shape
169
+ # consistency inverse transform
170
+ assert_allclose(y, regr.transformer_.inverse_transform(y_tran).squeeze())
171
+ # consistency of the regressor
172
+ lr = LinearRegression()
173
+ transformer2 = clone(transformer)
174
+ if y.ndim == 1: # create a 2D array and squeeze results
175
+ lr.fit(X, transformer2.fit_transform(y.reshape(-1, 1)).squeeze())
176
+ y_lr_pred = lr.predict(X).reshape(-1, 1)
177
+ y_pred2 = transformer2.inverse_transform(y_lr_pred).squeeze()
178
+ else:
179
+ lr.fit(X, transformer2.fit_transform(y))
180
+ y_lr_pred = lr.predict(X)
181
+ y_pred2 = transformer2.inverse_transform(y_lr_pred)
182
+
183
+ assert_allclose(y_pred, y_pred2)
184
+ assert_allclose(regr.regressor_.coef_, lr.coef_)
185
+
186
+
187
+ def test_transform_target_regressor_2d_transformer_multioutput():
188
+ # Check consistency with transformer accepting only 2D array and a 2D y
189
+ # array.
190
+ X = friedman[0]
191
+ y = np.vstack((friedman[1], friedman[1] ** 2 + 1)).T
192
+ transformer = StandardScaler()
193
+ regr = TransformedTargetRegressor(
194
+ regressor=LinearRegression(), transformer=transformer
195
+ )
196
+ y_pred = regr.fit(X, y).predict(X)
197
+ assert y.shape == y_pred.shape
198
+ # consistency forward transform
199
+ y_tran = regr.transformer_.transform(y)
200
+ _check_standard_scaled(y, y_tran)
201
+ assert y.shape == y_pred.shape
202
+ # consistency inverse transform
203
+ assert_allclose(y, regr.transformer_.inverse_transform(y_tran).squeeze())
204
+ # consistency of the regressor
205
+ lr = LinearRegression()
206
+ transformer2 = clone(transformer)
207
+ lr.fit(X, transformer2.fit_transform(y))
208
+ y_lr_pred = lr.predict(X)
209
+ assert_allclose(y_pred, transformer2.inverse_transform(y_lr_pred))
210
+ assert_allclose(regr.regressor_.coef_, lr.coef_)
211
+
212
+
213
+ def test_transform_target_regressor_3d_target():
214
+ # Non-regression test for:
215
+ # https://github.com/scikit-learn/scikit-learn/issues/18866
216
+ # Check with a 3D target with a transformer that reshapes the target
217
+ X = friedman[0]
218
+ y = np.tile(friedman[1].reshape(-1, 1, 1), [1, 3, 2])
219
+
220
+ def flatten_data(data):
221
+ return data.reshape(data.shape[0], -1)
222
+
223
+ def unflatten_data(data):
224
+ return data.reshape(data.shape[0], -1, 2)
225
+
226
+ transformer = FunctionTransformer(func=flatten_data, inverse_func=unflatten_data)
227
+ regr = TransformedTargetRegressor(
228
+ regressor=LinearRegression(), transformer=transformer
229
+ )
230
+ y_pred = regr.fit(X, y).predict(X)
231
+ assert y.shape == y_pred.shape
232
+
233
+
234
+ def test_transform_target_regressor_multi_to_single():
235
+ X = friedman[0]
236
+ y = np.transpose([friedman[1], (friedman[1] ** 2 + 1)])
237
+
238
+ def func(y):
239
+ out = np.sqrt(y[:, 0] ** 2 + y[:, 1] ** 2)
240
+ return out[:, np.newaxis]
241
+
242
+ def inverse_func(y):
243
+ return y
244
+
245
+ tt = TransformedTargetRegressor(
246
+ func=func, inverse_func=inverse_func, check_inverse=False
247
+ )
248
+ tt.fit(X, y)
249
+ y_pred_2d_func = tt.predict(X)
250
+ assert y_pred_2d_func.shape == (100, 1)
251
+
252
+ # force the function to only return a 1D array
253
+ def func(y):
254
+ return np.sqrt(y[:, 0] ** 2 + y[:, 1] ** 2)
255
+
256
+ tt = TransformedTargetRegressor(
257
+ func=func, inverse_func=inverse_func, check_inverse=False
258
+ )
259
+ tt.fit(X, y)
260
+ y_pred_1d_func = tt.predict(X)
261
+ assert y_pred_1d_func.shape == (100, 1)
262
+
263
+ assert_allclose(y_pred_1d_func, y_pred_2d_func)
264
+
265
+
266
+ class DummyCheckerArrayTransformer(TransformerMixin, BaseEstimator):
267
+ def fit(self, X, y=None):
268
+ assert isinstance(X, np.ndarray)
269
+ return self
270
+
271
+ def transform(self, X):
272
+ assert isinstance(X, np.ndarray)
273
+ return X
274
+
275
+ def inverse_transform(self, X):
276
+ assert isinstance(X, np.ndarray)
277
+ return X
278
+
279
+
280
+ class DummyCheckerListRegressor(DummyRegressor):
281
+ def fit(self, X, y, sample_weight=None):
282
+ assert isinstance(X, list)
283
+ return super().fit(X, y, sample_weight)
284
+
285
+ def predict(self, X):
286
+ assert isinstance(X, list)
287
+ return super().predict(X)
288
+
289
+
290
+ def test_transform_target_regressor_ensure_y_array():
291
+ # check that the target ``y`` passed to the transformer will always be a
292
+ # numpy array. Similarly, if ``X`` is passed as a list, we check that the
293
+ # predictor receives it as is.
294
+ X, y = friedman
295
+ tt = TransformedTargetRegressor(
296
+ transformer=DummyCheckerArrayTransformer(),
297
+ regressor=DummyCheckerListRegressor(),
298
+ check_inverse=False,
299
+ )
300
+ tt.fit(X.tolist(), y.tolist())
301
+ tt.predict(X.tolist())
302
+ with pytest.raises(AssertionError):
303
+ tt.fit(X, y.tolist())
304
+ with pytest.raises(AssertionError):
305
+ tt.predict(X)
306
+
307
+
308
+ class DummyTransformer(TransformerMixin, BaseEstimator):
309
+ """Dummy transformer which count how many time fit was called."""
310
+
311
+ def __init__(self, fit_counter=0):
312
+ self.fit_counter = fit_counter
313
+
314
+ def fit(self, X, y=None):
315
+ self.fit_counter += 1
316
+ return self
317
+
318
+ def transform(self, X):
319
+ return X
320
+
321
+ def inverse_transform(self, X):
322
+ return X
323
+
324
+
325
+ @pytest.mark.parametrize("check_inverse", [False, True])
326
+ def test_transform_target_regressor_count_fit(check_inverse):
327
+ # regression test for gh-issue #11618
328
+ # check that we only call a single time fit for the transformer
329
+ X, y = friedman
330
+ ttr = TransformedTargetRegressor(
331
+ transformer=DummyTransformer(), check_inverse=check_inverse
332
+ )
333
+ ttr.fit(X, y)
334
+ assert ttr.transformer_.fit_counter == 1
335
+
336
+
337
+ class DummyRegressorWithExtraFitParams(DummyRegressor):
338
+ def fit(self, X, y, sample_weight=None, check_input=True):
339
+ # on the test below we force this to false, we make sure this is
340
+ # actually passed to the regressor
341
+ assert not check_input
342
+ return super().fit(X, y, sample_weight)
343
+
344
+
345
+ def test_transform_target_regressor_pass_fit_parameters():
346
+ X, y = friedman
347
+ regr = TransformedTargetRegressor(
348
+ regressor=DummyRegressorWithExtraFitParams(), transformer=DummyTransformer()
349
+ )
350
+
351
+ regr.fit(X, y, check_input=False)
352
+ assert regr.transformer_.fit_counter == 1
353
+
354
+
355
+ def test_transform_target_regressor_route_pipeline():
356
+ X, y = friedman
357
+
358
+ regr = TransformedTargetRegressor(
359
+ regressor=DummyRegressorWithExtraFitParams(), transformer=DummyTransformer()
360
+ )
361
+ estimators = [("normalize", StandardScaler()), ("est", regr)]
362
+
363
+ pip = Pipeline(estimators)
364
+ pip.fit(X, y, **{"est__check_input": False})
365
+
366
+ assert regr.transformer_.fit_counter == 1
367
+
368
+
369
+ class DummyRegressorWithExtraPredictParams(DummyRegressor):
370
+ def predict(self, X, check_input=True):
371
+ # In the test below we make sure that the check input parameter is
372
+ # passed as false
373
+ self.predict_called = True
374
+ assert not check_input
375
+ return super().predict(X)
376
+
377
+
378
+ def test_transform_target_regressor_pass_extra_predict_parameters():
379
+ # Checks that predict kwargs are passed to regressor.
380
+ X, y = friedman
381
+ regr = TransformedTargetRegressor(
382
+ regressor=DummyRegressorWithExtraPredictParams(), transformer=DummyTransformer()
383
+ )
384
+
385
+ regr.fit(X, y)
386
+ regr.predict(X, check_input=False)
387
+ assert regr.regressor_.predict_called
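For orientation, the public API exercised by the tests above can be sketched as follows. This is an illustrative example only: the toy data, variable names, and the log/exp transform pair are arbitrary choices, not taken from the test file.

import numpy as np
from sklearn.compose import TransformedTargetRegressor
from sklearn.linear_model import LinearRegression

rng = np.random.RandomState(0)
X = rng.uniform(size=(100, 3))
y = np.exp(1.0 + X @ [1.0, 2.0, 3.0] + rng.normal(scale=0.01, size=100))

# fit the inner regressor on func(y) and map predictions back through inverse_func
regr = TransformedTargetRegressor(
    regressor=LinearRegression(), func=np.log, inverse_func=np.exp
)
regr.fit(X, y)
assert regr.predict(X).shape == y.shape

Because predictions are always passed back through the inverse transform, the target keeps its original shape, which is why the tests above repeatedly assert that y_pred has the same shape as y.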
venv/lib/python3.10/site-packages/sklearn/tests/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (181 Bytes).
venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/metadata_routing_common.cpython-310.pyc ADDED
Binary file (15.4 kB).
venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/random_seed.cpython-310.pyc ADDED
Binary file (3.23 kB).
venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_base.cpython-310.pyc ADDED
Binary file (29.9 kB).
venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_build.cpython-310.pyc ADDED
Binary file (1.26 kB).
venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_calibration.cpython-310.pyc ADDED
Binary file (29 kB).
venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_check_build.cpython-310.pyc ADDED
Binary file (525 Bytes).
venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_common.cpython-310.pyc ADDED
Binary file (15.9 kB).
venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_config.cpython-310.pyc ADDED
Binary file (5.71 kB).
venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_discriminant_analysis.cpython-310.pyc ADDED
Binary file (16.5 kB).
venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_docstring_parameters.cpython-310.pyc ADDED
Binary file (8.12 kB).
venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_docstrings.cpython-310.pyc ADDED
Binary file (4.35 kB).
venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_dummy.cpython-310.pyc ADDED
Binary file (18.5 kB).
venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_init.cpython-310.pyc ADDED
Binary file (487 Bytes).
venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_isotonic.cpython-310.pyc ADDED
Binary file (18.6 kB).
venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_kernel_approximation.cpython-310.pyc ADDED
Binary file (13.5 kB).
venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_kernel_ridge.cpython-310.pyc ADDED
Binary file (2.98 kB).
venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_metadata_routing.cpython-310.pyc ADDED
Binary file (27.6 kB).
venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_metaestimators.cpython-310.pyc ADDED
Binary file (8.48 kB).
venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_metaestimators_metadata_routing.cpython-310.pyc ADDED
Binary file (12.1 kB).
venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_min_dependencies_readme.cpython-310.pyc ADDED
Binary file (2.26 kB).
venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_multiclass.cpython-310.pyc ADDED
Binary file (24.6 kB).
venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_multioutput.cpython-310.pyc ADDED
Binary file (23.2 kB).
venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_naive_bayes.cpython-310.pyc ADDED
Binary file (22.6 kB).
venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_pipeline.cpython-310.pyc ADDED
Binary file (50.1 kB).
venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_public_functions.cpython-310.pyc ADDED
Binary file (13.6 kB).
venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_random_projection.cpython-310.pyc ADDED
Binary file (12.6 kB).
venv/lib/python3.10/site-packages/sklearn/tests/metadata_routing_common.py ADDED
@@ -0,0 +1,456 @@
+ from functools import partial
+
+ import numpy as np
+
+ from sklearn.base import (
+     BaseEstimator,
+     ClassifierMixin,
+     MetaEstimatorMixin,
+     RegressorMixin,
+     TransformerMixin,
+     clone,
+ )
+ from sklearn.metrics._scorer import _Scorer, mean_squared_error
+ from sklearn.model_selection import BaseCrossValidator
+ from sklearn.model_selection._split import GroupsConsumerMixin
+ from sklearn.utils._metadata_requests import (
+     SIMPLE_METHODS,
+ )
+ from sklearn.utils.metadata_routing import (
+     MetadataRouter,
+     process_routing,
+ )
+ from sklearn.utils.multiclass import _check_partial_fit_first_call
+
+
+ def record_metadata(obj, method, record_default=True, **kwargs):
+     """Utility function to store passed metadata to a method.
+
+     If record_default is False, kwargs whose values are "default" are skipped.
+     This is so that checks on keyword arguments whose default was not changed
+     are skipped.
+
+     """
+     if not hasattr(obj, "_records"):
+         obj._records = {}
+     if not record_default:
+         kwargs = {
+             key: val
+             for key, val in kwargs.items()
+             if not isinstance(val, str) or (val != "default")
+         }
+     obj._records[method] = kwargs
+
+
+ def check_recorded_metadata(obj, method, split_params=tuple(), **kwargs):
+     """Check whether the expected metadata is passed to the object's method.
+
+     Parameters
+     ----------
+     obj : estimator object
+         sub-estimator to check routed params for
+     method : str
+         sub-estimator's method where metadata is routed to
+     split_params : tuple, default=empty
+         specifies any parameters which are to be checked as being a subset
+         of the original values.
+     """
+     records = getattr(obj, "_records", dict()).get(method, dict())
+     assert set(kwargs.keys()) == set(records.keys())
+     for key, value in kwargs.items():
+         recorded_value = records[key]
+         # The following condition is used to check for any specified parameters
+         # being a subset of the original values
+         if key in split_params and recorded_value is not None:
+             assert np.isin(recorded_value, value).all()
+         else:
+             assert recorded_value is value
+
+
+ record_metadata_not_default = partial(record_metadata, record_default=False)
+
+
+ def assert_request_is_empty(metadata_request, exclude=None):
+     """Check if a metadata request dict is empty.
+
+     One can exclude a method or a list of methods from the check using the
+     ``exclude`` parameter. If metadata_request is a MetadataRouter, then
+     ``exclude`` can be of the form ``{"object" : [method, ...]}``.
+     """
+     if isinstance(metadata_request, MetadataRouter):
+         for name, route_mapping in metadata_request:
+             if exclude is not None and name in exclude:
+                 _exclude = exclude[name]
+             else:
+                 _exclude = None
+             assert_request_is_empty(route_mapping.router, exclude=_exclude)
+         return
+
+     exclude = [] if exclude is None else exclude
+     for method in SIMPLE_METHODS:
+         if method in exclude:
+             continue
+         mmr = getattr(metadata_request, method)
+         props = [
+             prop
+             for prop, alias in mmr.requests.items()
+             if isinstance(alias, str) or alias is not None
+         ]
+         assert not props
+
+
+ def assert_request_equal(request, dictionary):
+     for method, requests in dictionary.items():
+         mmr = getattr(request, method)
+         assert mmr.requests == requests
+
+     empty_methods = [method for method in SIMPLE_METHODS if method not in dictionary]
+     for method in empty_methods:
+         assert not len(getattr(request, method).requests)
+
+
+ class _Registry(list):
+     # This list is used to get a reference to the sub-estimators, which are not
+     # necessarily stored on the metaestimator. We need to override __deepcopy__
+     # because the sub-estimators are probably cloned, which would result in a
+     # new copy of the list, but we need copy and deep copy both to return the
+     # same instance.
+     def __deepcopy__(self, memo):
+         return self
+
+     def __copy__(self):
+         return self
+
+
+ class ConsumingRegressor(RegressorMixin, BaseEstimator):
+     """A regressor consuming metadata.
+
+     Parameters
+     ----------
+     registry : list, default=None
+         If a list, the estimator will append itself to the list in order to have
+         a reference to the estimator later on. Since that reference is not
+         required in all tests, registration can be skipped by leaving this value
+         as None.
+     """
+
+     def __init__(self, registry=None):
+         self.registry = registry
+
+     def partial_fit(self, X, y, sample_weight="default", metadata="default"):
+         if self.registry is not None:
+             self.registry.append(self)
+
+         record_metadata_not_default(
+             self, "partial_fit", sample_weight=sample_weight, metadata=metadata
+         )
+         return self
+
+     def fit(self, X, y, sample_weight="default", metadata="default"):
+         if self.registry is not None:
+             self.registry.append(self)
+
+         record_metadata_not_default(
+             self, "fit", sample_weight=sample_weight, metadata=metadata
+         )
+         return self
+
+     def predict(self, X, sample_weight="default", metadata="default"):
+         pass  # pragma: no cover
+
+         # when needed, uncomment the implementation
+         # record_metadata_not_default(
+         #     self, "predict", sample_weight=sample_weight, metadata=metadata
+         # )
+         # return np.zeros(shape=(len(X),))
+
+
+ class NonConsumingClassifier(ClassifierMixin, BaseEstimator):
+     """A classifier which accepts no metadata on any method."""
+
+     def __init__(self, alpha=0.0):
+         self.alpha = alpha
+
+     def fit(self, X, y):
+         self.classes_ = np.unique(y)
+         return self
+
+     def partial_fit(self, X, y, classes=None):
+         return self
+
+     def decision_function(self, X):
+         return self.predict(X)
+
+     def predict(self, X):
+         return np.ones(len(X))
+
+
+ class NonConsumingRegressor(RegressorMixin, BaseEstimator):
+     """A regressor which accepts no metadata on any method."""
+
+     def fit(self, X, y):
+         return self
+
+     def partial_fit(self, X, y):
+         return self
+
+     def predict(self, X):
+         return np.ones(len(X))  # pragma: no cover
+
+
+ class ConsumingClassifier(ClassifierMixin, BaseEstimator):
+     """A classifier consuming metadata.
+
+     Parameters
+     ----------
+     registry : list, default=None
+         If a list, the estimator will append itself to the list in order to have
+         a reference to the estimator later on. Since that reference is not
+         required in all tests, registration can be skipped by leaving this value
+         as None.
+
+     alpha : float, default=0
+         This parameter is only used to test the ``*SearchCV`` objects, and
+         doesn't do anything.
+     """
+
+     def __init__(self, registry=None, alpha=0.0):
+         self.alpha = alpha
+         self.registry = registry
+
+     def partial_fit(
+         self, X, y, classes=None, sample_weight="default", metadata="default"
+     ):
+         if self.registry is not None:
+             self.registry.append(self)
+
+         record_metadata_not_default(
+             self, "partial_fit", sample_weight=sample_weight, metadata=metadata
+         )
+         _check_partial_fit_first_call(self, classes)
+         return self
+
+     def fit(self, X, y, sample_weight="default", metadata="default"):
+         if self.registry is not None:
+             self.registry.append(self)
+
+         record_metadata_not_default(
+             self, "fit", sample_weight=sample_weight, metadata=metadata
+         )
+         self.classes_ = np.unique(y)
+         return self
+
+     def predict(self, X, sample_weight="default", metadata="default"):
+         record_metadata_not_default(
+             self, "predict", sample_weight=sample_weight, metadata=metadata
+         )
+         return np.zeros(shape=(len(X),))
+
+     def predict_proba(self, X, sample_weight="default", metadata="default"):
+         pass  # pragma: no cover
+
+         # uncomment when needed
+         # record_metadata_not_default(
+         #     self, "predict_proba", sample_weight=sample_weight, metadata=metadata
+         # )
+         # return np.asarray([[0.0, 1.0]] * len(X))
+
+     def predict_log_proba(self, X, sample_weight="default", metadata="default"):
+         pass  # pragma: no cover
+
+         # uncomment when needed
+         # record_metadata_not_default(
+         #     self, "predict_log_proba", sample_weight=sample_weight, metadata=metadata
+         # )
+         # return np.zeros(shape=(len(X), 2))
+
+     def decision_function(self, X, sample_weight="default", metadata="default"):
+         record_metadata_not_default(
+             self, "predict_proba", sample_weight=sample_weight, metadata=metadata
+         )
+         return np.zeros(shape=(len(X),))
+
+
+ class ConsumingTransformer(TransformerMixin, BaseEstimator):
+     """A transformer which accepts metadata on fit and transform.
+
+     Parameters
+     ----------
+     registry : list, default=None
+         If a list, the estimator will append itself to the list in order to have
+         a reference to the estimator later on. Since that reference is not
+         required in all tests, registration can be skipped by leaving this value
+         as None.
+     """
+
+     def __init__(self, registry=None):
+         self.registry = registry
+
+     def fit(self, X, y=None, sample_weight=None, metadata=None):
+         if self.registry is not None:
+             self.registry.append(self)
+
+         record_metadata_not_default(
+             self, "fit", sample_weight=sample_weight, metadata=metadata
+         )
+         return self
+
+     def transform(self, X, sample_weight=None, metadata=None):
+         record_metadata(
+             self, "transform", sample_weight=sample_weight, metadata=metadata
+         )
+         return X
+
+     def fit_transform(self, X, y, sample_weight=None, metadata=None):
+         # implementing ``fit_transform`` is necessary since
+         # ``TransformerMixin.fit_transform`` doesn't route any metadata to
+         # ``transform``, while here we want ``transform`` to receive
+         # ``sample_weight`` and ``metadata``.
+         record_metadata(
+             self, "fit_transform", sample_weight=sample_weight, metadata=metadata
+         )
+         return self.fit(X, y, sample_weight=sample_weight, metadata=metadata).transform(
+             X, sample_weight=sample_weight, metadata=metadata
+         )
+
+     def inverse_transform(self, X, sample_weight=None, metadata=None):
+         record_metadata(
+             self, "inverse_transform", sample_weight=sample_weight, metadata=metadata
+         )
+         return X
+
+
+ class ConsumingScorer(_Scorer):
+     def __init__(self, registry=None):
+         super().__init__(
+             score_func=mean_squared_error, sign=1, kwargs={}, response_method="predict"
+         )
+         self.registry = registry
+
+     def _score(self, method_caller, clf, X, y, **kwargs):
+         if self.registry is not None:
+             self.registry.append(self)
+
+         record_metadata_not_default(self, "score", **kwargs)
+
+         sample_weight = kwargs.get("sample_weight", None)
+         return super()._score(method_caller, clf, X, y, sample_weight=sample_weight)
+
+
+ class ConsumingSplitter(BaseCrossValidator, GroupsConsumerMixin):
+     def __init__(self, registry=None):
+         self.registry = registry
+
+     def split(self, X, y=None, groups="default", metadata="default"):
+         if self.registry is not None:
+             self.registry.append(self)
+
+         record_metadata_not_default(self, "split", groups=groups, metadata=metadata)
+
+         split_index = len(X) // 2
+         train_indices = list(range(0, split_index))
+         test_indices = list(range(split_index, len(X)))
+         yield test_indices, train_indices
+         yield train_indices, test_indices
+
+     def get_n_splits(self, X=None, y=None, groups=None, metadata=None):
+         return 2
+
+     def _iter_test_indices(self, X=None, y=None, groups=None):
+         split_index = len(X) // 2
+         train_indices = list(range(0, split_index))
+         test_indices = list(range(split_index, len(X)))
+         yield test_indices
+         yield train_indices
+
+
+ class MetaRegressor(MetaEstimatorMixin, RegressorMixin, BaseEstimator):
+     """A meta-regressor which is only a router."""
+
+     def __init__(self, estimator):
+         self.estimator = estimator
+
+     def fit(self, X, y, **fit_params):
+         params = process_routing(self, "fit", **fit_params)
+         self.estimator_ = clone(self.estimator).fit(X, y, **params.estimator.fit)
+
+     def get_metadata_routing(self):
+         router = MetadataRouter(owner=self.__class__.__name__).add(
+             estimator=self.estimator, method_mapping="one-to-one"
+         )
+         return router
+
+
+ class WeightedMetaRegressor(MetaEstimatorMixin, RegressorMixin, BaseEstimator):
+     """A meta-regressor which is also a consumer."""
+
+     def __init__(self, estimator, registry=None):
+         self.estimator = estimator
+         self.registry = registry
+
+     def fit(self, X, y, sample_weight=None, **fit_params):
+         if self.registry is not None:
+             self.registry.append(self)
+
+         record_metadata(self, "fit", sample_weight=sample_weight)
+         params = process_routing(self, "fit", sample_weight=sample_weight, **fit_params)
+         self.estimator_ = clone(self.estimator).fit(X, y, **params.estimator.fit)
+         return self
+
+     def predict(self, X, **predict_params):
+         params = process_routing(self, "predict", **predict_params)
+         return self.estimator_.predict(X, **params.estimator.predict)
+
+     def get_metadata_routing(self):
+         router = (
+             MetadataRouter(owner=self.__class__.__name__)
+             .add_self_request(self)
+             .add(estimator=self.estimator, method_mapping="one-to-one")
+         )
+         return router
+
+
+ class WeightedMetaClassifier(MetaEstimatorMixin, ClassifierMixin, BaseEstimator):
+     """A meta-estimator which also consumes sample_weight itself in ``fit``."""
+
+     def __init__(self, estimator, registry=None):
+         self.estimator = estimator
+         self.registry = registry
+
+     def fit(self, X, y, sample_weight=None, **kwargs):
+         if self.registry is not None:
+             self.registry.append(self)
+
+         record_metadata(self, "fit", sample_weight=sample_weight)
+         params = process_routing(self, "fit", sample_weight=sample_weight, **kwargs)
+         self.estimator_ = clone(self.estimator).fit(X, y, **params.estimator.fit)
+         return self
+
+     def get_metadata_routing(self):
+         router = (
+             MetadataRouter(owner=self.__class__.__name__)
+             .add_self_request(self)
+             .add(estimator=self.estimator, method_mapping="fit")
+         )
+         return router
+
+
+ class MetaTransformer(MetaEstimatorMixin, TransformerMixin, BaseEstimator):
+     """A simple meta-transformer."""
+
+     def __init__(self, transformer):
+         self.transformer = transformer
+
+     def fit(self, X, y=None, **fit_params):
+         params = process_routing(self, "fit", **fit_params)
+         self.transformer_ = clone(self.transformer).fit(X, y, **params.transformer.fit)
+         return self
+
+     def transform(self, X, y=None, **transform_params):
+         params = process_routing(self, "transform", **transform_params)
+         return self.transformer_.transform(X, **params.transformer.transform)
+
+     def get_metadata_routing(self):
+         return MetadataRouter(owner=self.__class__.__name__).add(
+             transformer=self.transformer, method_mapping="one-to-one"
+         )
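For orientation, these helpers are typically combined along the following lines. This is an illustrative sketch with arbitrary toy data; it assumes a scikit-learn build where metadata routing can be switched on via sklearn.set_config(enable_metadata_routing=True).

import numpy as np
import sklearn
from sklearn.tests.metadata_routing_common import (
    ConsumingRegressor,
    MetaRegressor,
    _Registry,
    check_recorded_metadata,
)

sklearn.set_config(enable_metadata_routing=True)

rng = np.random.RandomState(0)
X, y = rng.normal(size=(20, 4)), rng.normal(size=20)
sample_weight = np.ones(20)

# the registry keeps a handle on the cloned sub-estimator fitted inside the router
registry = _Registry()
estimator = ConsumingRegressor(registry=registry).set_fit_request(sample_weight=True)
MetaRegressor(estimator=estimator).fit(X, y, sample_weight=sample_weight)

# the cloned sub-estimator recorded the metadata routed to its fit method
check_recorded_metadata(registry[-1], method="fit", sample_weight=sample_weight)

The shared _Registry survives cloning because it overrides __copy__ and __deepcopy__, which is what lets the check retrieve the fitted clone that actually received the routed sample_weight.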