Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- ckpts/universal/global_step40/zero/14.mlp.dense_4h_to_h.weight/fp32.pt +3 -0
- ckpts/universal/global_step40/zero/22.attention.query_key_value.weight/exp_avg.pt +3 -0
- ckpts/universal/global_step40/zero/22.attention.query_key_value.weight/exp_avg_sq.pt +3 -0
- ckpts/universal/global_step40/zero/22.attention.query_key_value.weight/fp32.pt +3 -0
- ckpts/universal/global_step40/zero/4.mlp.dense_h_to_4h.weight/exp_avg.pt +3 -0
- ckpts/universal/global_step40/zero/4.mlp.dense_h_to_4h.weight/exp_avg_sq.pt +3 -0
- ckpts/universal/global_step40/zero/4.mlp.dense_h_to_4h.weight/fp32.pt +3 -0
- ckpts/universal/global_step40/zero/5.attention.dense.weight/exp_avg.pt +3 -0
- ckpts/universal/global_step40/zero/5.attention.dense.weight/exp_avg_sq.pt +3 -0
- ckpts/universal/global_step40/zero/5.attention.dense.weight/fp32.pt +3 -0
- venv/lib/python3.10/site-packages/sklearn/cluster/__init__.py +56 -0
- venv/lib/python3.10/site-packages/sklearn/cluster/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_affinity_propagation.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_agglomerative.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_bicluster.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_birch.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_bisect_k_means.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_dbscan.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_feature_agglomeration.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_kmeans.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_mean_shift.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_optics.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_spectral.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/cluster/_affinity_propagation.py +604 -0
- venv/lib/python3.10/site-packages/sklearn/cluster/_agglomerative.py +1336 -0
- venv/lib/python3.10/site-packages/sklearn/cluster/_bicluster.py +622 -0
- venv/lib/python3.10/site-packages/sklearn/cluster/_dbscan_inner.cpython-310-x86_64-linux-gnu.so +0 -0
- venv/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/__init__.py +0 -0
- venv/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/__pycache__/hdbscan.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/_linkage.cpython-310-x86_64-linux-gnu.so +0 -0
- venv/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/_reachability.cpython-310-x86_64-linux-gnu.so +0 -0
- venv/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/_tree.cpython-310-x86_64-linux-gnu.so +0 -0
- venv/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/_tree.pxd +49 -0
- venv/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/hdbscan.py +1018 -0
- venv/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/tests/__init__.py +0 -0
- venv/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/tests/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/tests/__pycache__/test_reachibility.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/tests/test_reachibility.py +63 -0
- venv/lib/python3.10/site-packages/sklearn/cluster/_hierarchical_fast.cpython-310-x86_64-linux-gnu.so +0 -0
- venv/lib/python3.10/site-packages/sklearn/cluster/_hierarchical_fast.pxd +9 -0
- venv/lib/python3.10/site-packages/sklearn/cluster/_kmeans.py +2318 -0
- venv/lib/python3.10/site-packages/sklearn/cluster/tests/__init__.py +0 -0
- venv/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/common.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_affinity_propagation.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_bicluster.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_birch.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_bisect_k_means.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_dbscan.cpython-310.pyc +0 -0
ckpts/universal/global_step40/zero/14.mlp.dense_4h_to_h.weight/fp32.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3dcc4f15d99faf64fa1a42ca9ef283e9de214645ac2b982fb8f8ab9ebe175680
+size 33555533
ckpts/universal/global_step40/zero/22.attention.query_key_value.weight/exp_avg.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5b9ac618dac9dcb48bdd7092028de9b66854c94ea938ec8568a6b7eb4676a1f5
+size 50332828
ckpts/universal/global_step40/zero/22.attention.query_key_value.weight/exp_avg_sq.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:da06feecd6d1e718d9b9167e25651b0e8a80cc781019405e3f4e2f7e4f3caaaf
+size 50332843
ckpts/universal/global_step40/zero/22.attention.query_key_value.weight/fp32.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:884dd2ccf06672fbfd2a1a6967c1c6009be0cf2f6fccffc267fe1c7e107020a8
+size 50332749
ckpts/universal/global_step40/zero/4.mlp.dense_h_to_4h.weight/exp_avg.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:32d106debe5c31a8b7a7adc8a86f1fbf36057d61a321b48f758e2d527ad96239
+size 33555612
ckpts/universal/global_step40/zero/4.mlp.dense_h_to_4h.weight/exp_avg_sq.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9d33edf71fffdb23d152d4eb93bbd429470641ea212b0732c5a41fd6039f6417
+size 33555627
ckpts/universal/global_step40/zero/4.mlp.dense_h_to_4h.weight/fp32.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7513cb9647fb584b4b7f27664d47405ce13bcb1ceaeadd82cb27e2483fc7a6e7
+size 33555533
ckpts/universal/global_step40/zero/5.attention.dense.weight/exp_avg.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f2070dc0cf506fcb7ab6cd339af1893f171326c5036101c10165116f6bb89c9c
+size 16778396
ckpts/universal/global_step40/zero/5.attention.dense.weight/exp_avg_sq.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:565b00d218ce0f593c4250f8b2b43c531b090f8676cd6f398a86ccfb9ed5100a
+size 16778411
ckpts/universal/global_step40/zero/5.attention.dense.weight/fp32.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:acaf21d177b9f556a4e87b2a4af8ff7756dd368faa9fe6c54f4b7f1bab938219
+size 16778317
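The ten checkpoint entries above are Git LFS pointer files, not the tensors themselves: each is a three-line text stub recording the pointer spec version, the SHA-256 object id of the payload, and its size in bytes. A minimal sketch of reading one such stub (the helper name is hypothetical and not part of this commit):

# Sketch: parse a Git LFS pointer file into its key/value fields.
# Assumes the standard three-line "version / oid / size" format shown above.
def parse_lfs_pointer(path):
    fields = {}
    with open(path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    # e.g. {"version": "https://git-lfs.github.com/spec/v1",
    #       "oid": "sha256:3dcc...", "size": "33555533"}
    return fields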
venv/lib/python3.10/site-packages/sklearn/cluster/__init__.py
ADDED
@@ -0,0 +1,56 @@
+"""
+The :mod:`sklearn.cluster` module gathers popular unsupervised clustering
+algorithms.
+"""
+
+from ._affinity_propagation import AffinityPropagation, affinity_propagation
+from ._agglomerative import (
+    AgglomerativeClustering,
+    FeatureAgglomeration,
+    linkage_tree,
+    ward_tree,
+)
+from ._bicluster import SpectralBiclustering, SpectralCoclustering
+from ._birch import Birch
+from ._bisect_k_means import BisectingKMeans
+from ._dbscan import DBSCAN, dbscan
+from ._hdbscan.hdbscan import HDBSCAN
+from ._kmeans import KMeans, MiniBatchKMeans, k_means, kmeans_plusplus
+from ._mean_shift import MeanShift, estimate_bandwidth, get_bin_seeds, mean_shift
+from ._optics import (
+    OPTICS,
+    cluster_optics_dbscan,
+    cluster_optics_xi,
+    compute_optics_graph,
+)
+from ._spectral import SpectralClustering, spectral_clustering
+
+__all__ = [
+    "AffinityPropagation",
+    "AgglomerativeClustering",
+    "Birch",
+    "DBSCAN",
+    "OPTICS",
+    "cluster_optics_dbscan",
+    "cluster_optics_xi",
+    "compute_optics_graph",
+    "KMeans",
+    "BisectingKMeans",
+    "FeatureAgglomeration",
+    "MeanShift",
+    "MiniBatchKMeans",
+    "SpectralClustering",
+    "affinity_propagation",
+    "dbscan",
+    "estimate_bandwidth",
+    "get_bin_seeds",
+    "k_means",
+    "kmeans_plusplus",
+    "linkage_tree",
+    "mean_shift",
+    "spectral_clustering",
+    "ward_tree",
+    "SpectralBiclustering",
+    "SpectralCoclustering",
+    "HDBSCAN",
+]
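Everything `sklearn/cluster/__init__.py` above does is re-export the estimators from their private submodules so they are importable from the package root. A quick usage sketch of that public surface (illustrative only; the toy data mirrors the docstring examples further down):

import numpy as np
from sklearn.cluster import DBSCAN, KMeans

X = np.array([[1, 2], [1, 4], [1, 0], [4, 2], [4, 4], [4, 0]])
print(KMeans(n_clusters=2, n_init="auto").fit_predict(X))  # two centroid-based clusters
print(DBSCAN(eps=3, min_samples=2).fit_predict(X))         # density-based labels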
venv/lib/python3.10/site-packages/sklearn/cluster/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (1.39 kB)
venv/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_affinity_propagation.cpython-310.pyc
ADDED
Binary file (17.2 kB)
venv/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_agglomerative.cpython-310.pyc
ADDED
Binary file (37.4 kB)
venv/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_bicluster.cpython-310.pyc
ADDED
Binary file (19.9 kB)
venv/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_birch.cpython-310.pyc
ADDED
Binary file (19.4 kB)
venv/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_bisect_k_means.cpython-310.pyc
ADDED
Binary file (16 kB)
venv/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_dbscan.cpython-310.pyc
ADDED
Binary file (17.1 kB)
venv/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_feature_agglomeration.cpython-310.pyc
ADDED
Binary file (3.33 kB)
venv/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_kmeans.cpython-310.pyc
ADDED
Binary file (62 kB)
venv/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_mean_shift.cpython-310.pyc
ADDED
Binary file (17.9 kB)
venv/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_optics.cpython-310.pyc
ADDED
Binary file (35.4 kB)
venv/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_spectral.cpython-310.pyc
ADDED
Binary file (26.3 kB)
venv/lib/python3.10/site-packages/sklearn/cluster/_affinity_propagation.py
ADDED
@@ -0,0 +1,604 @@
+"""Affinity Propagation clustering algorithm."""
+
+# Author: Alexandre Gramfort [email protected]
+#         Gael Varoquaux [email protected]
+
+# License: BSD 3 clause
+
+import warnings
+from numbers import Integral, Real
+
+import numpy as np
+
+from .._config import config_context
+from ..base import BaseEstimator, ClusterMixin, _fit_context
+from ..exceptions import ConvergenceWarning
+from ..metrics import euclidean_distances, pairwise_distances_argmin
+from ..utils import check_random_state
+from ..utils._param_validation import Interval, StrOptions, validate_params
+from ..utils.validation import check_is_fitted
+
+
+def _equal_similarities_and_preferences(S, preference):
+    def all_equal_preferences():
+        return np.all(preference == preference.flat[0])
+
+    def all_equal_similarities():
+        # Create mask to ignore diagonal of S
+        mask = np.ones(S.shape, dtype=bool)
+        np.fill_diagonal(mask, 0)
+
+        return np.all(S[mask].flat == S[mask].flat[0])
+
+    return all_equal_preferences() and all_equal_similarities()
+
+
+def _affinity_propagation(
+    S,
+    *,
+    preference,
+    convergence_iter,
+    max_iter,
+    damping,
+    verbose,
+    return_n_iter,
+    random_state,
+):
+    """Main affinity propagation algorithm."""
+    n_samples = S.shape[0]
+    if n_samples == 1 or _equal_similarities_and_preferences(S, preference):
+        # It makes no sense to run the algorithm in this case, so return 1 or
+        # n_samples clusters, depending on preferences
+        warnings.warn(
+            "All samples have mutually equal similarities. "
+            "Returning arbitrary cluster center(s)."
+        )
+        if preference.flat[0] > S.flat[n_samples - 1]:
+            return (
+                (np.arange(n_samples), np.arange(n_samples), 0)
+                if return_n_iter
+                else (np.arange(n_samples), np.arange(n_samples))
+            )
+        else:
+            return (
+                (np.array([0]), np.array([0] * n_samples), 0)
+                if return_n_iter
+                else (np.array([0]), np.array([0] * n_samples))
+            )
+
+    # Place preference on the diagonal of S
+    S.flat[:: (n_samples + 1)] = preference
+
+    A = np.zeros((n_samples, n_samples))
+    R = np.zeros((n_samples, n_samples))  # Initialize messages
+    # Intermediate results
+    tmp = np.zeros((n_samples, n_samples))
+
+    # Remove degeneracies
+    S += (
+        np.finfo(S.dtype).eps * S + np.finfo(S.dtype).tiny * 100
+    ) * random_state.standard_normal(size=(n_samples, n_samples))
+
+    # Execute parallel affinity propagation updates
+    e = np.zeros((n_samples, convergence_iter))
+
+    ind = np.arange(n_samples)
+
+    for it in range(max_iter):
+        # tmp = A + S; compute responsibilities
+        np.add(A, S, tmp)
+        I = np.argmax(tmp, axis=1)
+        Y = tmp[ind, I]  # np.max(A + S, axis=1)
+        tmp[ind, I] = -np.inf
+        Y2 = np.max(tmp, axis=1)
+
+        # tmp = Rnew
+        np.subtract(S, Y[:, None], tmp)
+        tmp[ind, I] = S[ind, I] - Y2
+
+        # Damping
+        tmp *= 1 - damping
+        R *= damping
+        R += tmp
+
+        # tmp = Rp; compute availabilities
+        np.maximum(R, 0, tmp)
+        tmp.flat[:: n_samples + 1] = R.flat[:: n_samples + 1]
+
+        # tmp = -Anew
+        tmp -= np.sum(tmp, axis=0)
+        dA = np.diag(tmp).copy()
+        tmp.clip(0, np.inf, tmp)
+        tmp.flat[:: n_samples + 1] = dA
+
+        # Damping
+        tmp *= 1 - damping
+        A *= damping
+        A -= tmp
+
+        # Check for convergence
+        E = (np.diag(A) + np.diag(R)) > 0
+        e[:, it % convergence_iter] = E
+        K = np.sum(E, axis=0)
+
+        if it >= convergence_iter:
+            se = np.sum(e, axis=1)
+            unconverged = np.sum((se == convergence_iter) + (se == 0)) != n_samples
+            if (not unconverged and (K > 0)) or (it == max_iter):
+                never_converged = False
+                if verbose:
+                    print("Converged after %d iterations." % it)
+                break
+    else:
+        never_converged = True
+        if verbose:
+            print("Did not converge")
+
+    I = np.flatnonzero(E)
+    K = I.size  # Identify exemplars
+
+    if K > 0:
+        if never_converged:
+            warnings.warn(
+                (
+                    "Affinity propagation did not converge, this model "
+                    "may return degenerate cluster centers and labels."
+                ),
+                ConvergenceWarning,
+            )
+        c = np.argmax(S[:, I], axis=1)
+        c[I] = np.arange(K)  # Identify clusters
+        # Refine the final set of exemplars and clusters and return results
+        for k in range(K):
+            ii = np.where(c == k)[0]
+            j = np.argmax(np.sum(S[ii[:, np.newaxis], ii], axis=0))
+            I[k] = ii[j]
+
+        c = np.argmax(S[:, I], axis=1)
+        c[I] = np.arange(K)
+        labels = I[c]
+        # Reduce labels to a sorted, gapless, list
+        cluster_centers_indices = np.unique(labels)
+        labels = np.searchsorted(cluster_centers_indices, labels)
+    else:
+        warnings.warn(
+            (
+                "Affinity propagation did not converge and this model "
+                "will not have any cluster centers."
+            ),
+            ConvergenceWarning,
+        )
+        labels = np.array([-1] * n_samples)
+        cluster_centers_indices = []
+
+    if return_n_iter:
+        return cluster_centers_indices, labels, it + 1
+    else:
+        return cluster_centers_indices, labels
+
+
+###############################################################################
+# Public API
+
+
+@validate_params(
+    {
+        "S": ["array-like"],
+        "return_n_iter": ["boolean"],
+    },
+    prefer_skip_nested_validation=False,
+)
+def affinity_propagation(
+    S,
+    *,
+    preference=None,
+    convergence_iter=15,
+    max_iter=200,
+    damping=0.5,
+    copy=True,
+    verbose=False,
+    return_n_iter=False,
+    random_state=None,
+):
+    """Perform Affinity Propagation Clustering of data.
+
+    Read more in the :ref:`User Guide <affinity_propagation>`.
+
+    Parameters
+    ----------
+    S : array-like of shape (n_samples, n_samples)
+        Matrix of similarities between points.
+
+    preference : array-like of shape (n_samples,) or float, default=None
+        Preferences for each point - points with larger values of
+        preferences are more likely to be chosen as exemplars. The number of
+        exemplars, i.e. of clusters, is influenced by the input preferences
+        value. If the preferences are not passed as arguments, they will be
+        set to the median of the input similarities (resulting in a moderate
+        number of clusters). For a smaller amount of clusters, this can be set
+        to the minimum value of the similarities.
+
+    convergence_iter : int, default=15
+        Number of iterations with no change in the number
+        of estimated clusters that stops the convergence.
+
+    max_iter : int, default=200
+        Maximum number of iterations.
+
+    damping : float, default=0.5
+        Damping factor between 0.5 and 1.
+
+    copy : bool, default=True
+        If copy is False, the affinity matrix is modified inplace by the
+        algorithm, for memory efficiency.
+
+    verbose : bool, default=False
+        The verbosity level.
+
+    return_n_iter : bool, default=False
+        Whether or not to return the number of iterations.
+
+    random_state : int, RandomState instance or None, default=None
+        Pseudo-random number generator to control the starting state.
+        Use an int for reproducible results across function calls.
+        See the :term:`Glossary <random_state>`.
+
+        .. versionadded:: 0.23
+            this parameter was previously hardcoded as 0.
+
+    Returns
+    -------
+    cluster_centers_indices : ndarray of shape (n_clusters,)
+        Index of cluster centers.
+
+    labels : ndarray of shape (n_samples,)
+        Cluster labels for each point.
+
+    n_iter : int
+        Number of iterations run. Returned only if `return_n_iter` is
+        set to True.
+
+    Notes
+    -----
+    For an example, see :ref:`examples/cluster/plot_affinity_propagation.py
+    <sphx_glr_auto_examples_cluster_plot_affinity_propagation.py>`.
+
+    When the algorithm does not converge, it will still return arrays of
+    ``cluster_center_indices`` and labels if there are any exemplars/clusters,
+    however they may be degenerate and should be used with caution.
+
+    When all training samples have equal similarities and equal preferences,
+    the assignment of cluster centers and labels depends on the preference.
+    If the preference is smaller than the similarities, a single cluster center
+    and label ``0`` for every sample will be returned. Otherwise, every
+    training sample becomes its own cluster center and is assigned a unique
+    label.
+
+    References
+    ----------
+    Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
+    Between Data Points", Science Feb. 2007
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from sklearn.cluster import affinity_propagation
+    >>> from sklearn.metrics.pairwise import euclidean_distances
+    >>> X = np.array([[1, 2], [1, 4], [1, 0],
+    ...               [4, 2], [4, 4], [4, 0]])
+    >>> S = -euclidean_distances(X, squared=True)
+    >>> cluster_centers_indices, labels = affinity_propagation(S, random_state=0)
+    >>> cluster_centers_indices
+    array([0, 3])
+    >>> labels
+    array([0, 0, 0, 1, 1, 1])
+    """
+    estimator = AffinityPropagation(
+        damping=damping,
+        max_iter=max_iter,
+        convergence_iter=convergence_iter,
+        copy=copy,
+        preference=preference,
+        affinity="precomputed",
+        verbose=verbose,
+        random_state=random_state,
+    ).fit(S)
+
+    if return_n_iter:
+        return estimator.cluster_centers_indices_, estimator.labels_, estimator.n_iter_
+    return estimator.cluster_centers_indices_, estimator.labels_
+
+
+class AffinityPropagation(ClusterMixin, BaseEstimator):
+    """Perform Affinity Propagation Clustering of data.
+
+    Read more in the :ref:`User Guide <affinity_propagation>`.
+
+    Parameters
+    ----------
+    damping : float, default=0.5
+        Damping factor in the range `[0.5, 1.0)` is the extent to
+        which the current value is maintained relative to
+        incoming values (weighted 1 - damping). This is in order
+        to avoid numerical oscillations when updating these
+        values (messages).
+
+    max_iter : int, default=200
+        Maximum number of iterations.
+
+    convergence_iter : int, default=15
+        Number of iterations with no change in the number
+        of estimated clusters that stops the convergence.
+
+    copy : bool, default=True
+        Make a copy of input data.
+
+    preference : array-like of shape (n_samples,) or float, default=None
+        Preferences for each point - points with larger values of
+        preferences are more likely to be chosen as exemplars. The number
+        of exemplars, i.e. of clusters, is influenced by the input
+        preferences value. If the preferences are not passed as arguments,
+        they will be set to the median of the input similarities.
+
+    affinity : {'euclidean', 'precomputed'}, default='euclidean'
+        Which affinity to use. At the moment 'precomputed' and
+        ``euclidean`` are supported. 'euclidean' uses the
+        negative squared Euclidean distance between points.
+
+    verbose : bool, default=False
+        Whether to be verbose.
+
+    random_state : int, RandomState instance or None, default=None
+        Pseudo-random number generator to control the starting state.
+        Use an int for reproducible results across function calls.
+        See the :term:`Glossary <random_state>`.
+
+        .. versionadded:: 0.23
+            this parameter was previously hardcoded as 0.
+
+    Attributes
+    ----------
+    cluster_centers_indices_ : ndarray of shape (n_clusters,)
+        Indices of cluster centers.
+
+    cluster_centers_ : ndarray of shape (n_clusters, n_features)
+        Cluster centers (if affinity != ``precomputed``).
+
+    labels_ : ndarray of shape (n_samples,)
+        Labels of each point.
+
+    affinity_matrix_ : ndarray of shape (n_samples, n_samples)
+        Stores the affinity matrix used in ``fit``.
+
+    n_iter_ : int
+        Number of iterations taken to converge.
+
+    n_features_in_ : int
+        Number of features seen during :term:`fit`.
+
+        .. versionadded:: 0.24
+
+    feature_names_in_ : ndarray of shape (`n_features_in_`,)
+        Names of features seen during :term:`fit`. Defined only when `X`
+        has feature names that are all strings.
+
+        .. versionadded:: 1.0
+
+    See Also
+    --------
+    AgglomerativeClustering : Recursively merges the pair of
+        clusters that minimally increases a given linkage distance.
+    FeatureAgglomeration : Similar to AgglomerativeClustering,
+        but recursively merges features instead of samples.
+    KMeans : K-Means clustering.
+    MiniBatchKMeans : Mini-Batch K-Means clustering.
+    MeanShift : Mean shift clustering using a flat kernel.
+    SpectralClustering : Apply clustering to a projection
+        of the normalized Laplacian.
+
+    Notes
+    -----
+    For an example, see :ref:`examples/cluster/plot_affinity_propagation.py
+    <sphx_glr_auto_examples_cluster_plot_affinity_propagation.py>`.
+
+    The algorithmic complexity of affinity propagation is quadratic
+    in the number of points.
+
+    When the algorithm does not converge, it will still return arrays of
+    ``cluster_center_indices`` and labels if there are any exemplars/clusters,
+    however they may be degenerate and should be used with caution.
+
+    When ``fit`` does not converge, ``cluster_centers_`` is still populated
+    however it may be degenerate. In such a case, proceed with caution.
+    If ``fit`` does not converge and fails to produce any ``cluster_centers_``
+    then ``predict`` will label every sample as ``-1``.
+
+    When all training samples have equal similarities and equal preferences,
+    the assignment of cluster centers and labels depends on the preference.
+    If the preference is smaller than the similarities, ``fit`` will result in
+    a single cluster center and label ``0`` for every sample. Otherwise, every
+    training sample becomes its own cluster center and is assigned a unique
+    label.
+
+    References
+    ----------
+
+    Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
+    Between Data Points", Science Feb. 2007
+
+    Examples
+    --------
+    >>> from sklearn.cluster import AffinityPropagation
+    >>> import numpy as np
+    >>> X = np.array([[1, 2], [1, 4], [1, 0],
+    ...               [4, 2], [4, 4], [4, 0]])
+    >>> clustering = AffinityPropagation(random_state=5).fit(X)
+    >>> clustering
+    AffinityPropagation(random_state=5)
+    >>> clustering.labels_
+    array([0, 0, 0, 1, 1, 1])
+    >>> clustering.predict([[0, 0], [4, 4]])
+    array([0, 1])
+    >>> clustering.cluster_centers_
+    array([[1, 2],
+           [4, 2]])
+    """
+
+    _parameter_constraints: dict = {
+        "damping": [Interval(Real, 0.5, 1.0, closed="left")],
+        "max_iter": [Interval(Integral, 1, None, closed="left")],
+        "convergence_iter": [Interval(Integral, 1, None, closed="left")],
+        "copy": ["boolean"],
+        "preference": [
+            "array-like",
+            Interval(Real, None, None, closed="neither"),
+            None,
+        ],
+        "affinity": [StrOptions({"euclidean", "precomputed"})],
+        "verbose": ["verbose"],
+        "random_state": ["random_state"],
+    }
+
+    def __init__(
+        self,
+        *,
+        damping=0.5,
+        max_iter=200,
+        convergence_iter=15,
+        copy=True,
+        preference=None,
+        affinity="euclidean",
+        verbose=False,
+        random_state=None,
+    ):
+        self.damping = damping
+        self.max_iter = max_iter
+        self.convergence_iter = convergence_iter
+        self.copy = copy
+        self.verbose = verbose
+        self.preference = preference
+        self.affinity = affinity
+        self.random_state = random_state
+
+    def _more_tags(self):
+        return {"pairwise": self.affinity == "precomputed"}
+
+    @_fit_context(prefer_skip_nested_validation=True)
+    def fit(self, X, y=None):
+        """Fit the clustering from features, or affinity matrix.
+
+        Parameters
+        ----------
+        X : {array-like, sparse matrix} of shape (n_samples, n_features), or \
+                array-like of shape (n_samples, n_samples)
+            Training instances to cluster, or similarities / affinities between
+            instances if ``affinity='precomputed'``. If a sparse feature matrix
+            is provided, it will be converted into a sparse ``csr_matrix``.
+
+        y : Ignored
+            Not used, present here for API consistency by convention.
+
+        Returns
+        -------
+        self
+            Returns the instance itself.
+        """
+        if self.affinity == "precomputed":
+            accept_sparse = False
+        else:
+            accept_sparse = "csr"
+        X = self._validate_data(X, accept_sparse=accept_sparse)
+        if self.affinity == "precomputed":
+            self.affinity_matrix_ = X.copy() if self.copy else X
+        else:  # self.affinity == "euclidean"
+            self.affinity_matrix_ = -euclidean_distances(X, squared=True)
+
+        if self.affinity_matrix_.shape[0] != self.affinity_matrix_.shape[1]:
+            raise ValueError(
+                "The matrix of similarities must be a square array. "
+                f"Got {self.affinity_matrix_.shape} instead."
+            )
+
+        if self.preference is None:
+            preference = np.median(self.affinity_matrix_)
+        else:
+            preference = self.preference
+        preference = np.asarray(preference)
+
+        random_state = check_random_state(self.random_state)
+
+        (
+            self.cluster_centers_indices_,
+            self.labels_,
+            self.n_iter_,
+        ) = _affinity_propagation(
+            self.affinity_matrix_,
+            max_iter=self.max_iter,
+            convergence_iter=self.convergence_iter,
+            preference=preference,
+            damping=self.damping,
+            verbose=self.verbose,
+            return_n_iter=True,
+            random_state=random_state,
+        )
+
+        if self.affinity != "precomputed":
+            self.cluster_centers_ = X[self.cluster_centers_indices_].copy()
+
+        return self
+
+    def predict(self, X):
+        """Predict the closest cluster each sample in X belongs to.
+
+        Parameters
+        ----------
+        X : {array-like, sparse matrix} of shape (n_samples, n_features)
+            New data to predict. If a sparse matrix is provided, it will be
+            converted into a sparse ``csr_matrix``.
+
+        Returns
+        -------
+        labels : ndarray of shape (n_samples,)
+            Cluster labels.
+        """
+        check_is_fitted(self)
+        X = self._validate_data(X, reset=False, accept_sparse="csr")
+        if not hasattr(self, "cluster_centers_"):
+            raise ValueError(
+                "Predict method is not supported when affinity='precomputed'."
+            )
+
+        if self.cluster_centers_.shape[0] > 0:
+            with config_context(assume_finite=True):
+                return pairwise_distances_argmin(X, self.cluster_centers_)
+        else:
+            warnings.warn(
+                (
+                    "This model does not have any cluster centers "
+                    "because affinity propagation did not converge. "
+                    "Labeling every sample as '-1'."
+                ),
+                ConvergenceWarning,
+            )
+            return np.array([-1] * X.shape[0])
+
+    def fit_predict(self, X, y=None):
+        """Fit clustering from features/affinity matrix; return cluster labels.
+
+        Parameters
+        ----------
+        X : {array-like, sparse matrix} of shape (n_samples, n_features), or \
+                array-like of shape (n_samples, n_samples)
+            Training instances to cluster, or similarities / affinities between
+            instances if ``affinity='precomputed'``. If a sparse feature matrix
+            is provided, it will be converted into a sparse ``csr_matrix``.
+
+        y : Ignored
+            Not used, present here for API consistency by convention.
+
+        Returns
+        -------
+        labels : ndarray of shape (n_samples,)
+            Cluster labels.
+        """
+        return super().fit_predict(X, y)
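One detail of `_affinity_propagation` above worth isolating: both message matrices are updated with exponential damping, R = damping * R + (1 - damping) * R_new (and likewise for A), which the loop implements as the in-place sequence `tmp *= 1 - damping; R *= damping; R += tmp`. A standalone sketch of that update (illustrative only, not part of the file):

import numpy as np

def damped_update(old, new, damping=0.5):
    # Blend the previous message matrix with the freshly computed one to
    # avoid the numerical oscillations the class docstring mentions.
    return damping * old + (1 - damping) * new

print(damped_update(np.zeros((2, 2)), np.ones((2, 2))))  # all entries 0.5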
venv/lib/python3.10/site-packages/sklearn/cluster/_agglomerative.py
ADDED
@@ -0,0 +1,1336 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Hierarchical Agglomerative Clustering
|
2 |
+
|
3 |
+
These routines perform some hierarchical agglomerative clustering of some
|
4 |
+
input data.
|
5 |
+
|
6 |
+
Authors : Vincent Michel, Bertrand Thirion, Alexandre Gramfort,
|
7 |
+
Gael Varoquaux
|
8 |
+
License: BSD 3 clause
|
9 |
+
"""
|
10 |
+
import warnings
|
11 |
+
from heapq import heapify, heappop, heappush, heappushpop
|
12 |
+
from numbers import Integral, Real
|
13 |
+
|
14 |
+
import numpy as np
|
15 |
+
from scipy import sparse
|
16 |
+
from scipy.sparse.csgraph import connected_components
|
17 |
+
|
18 |
+
from ..base import (
|
19 |
+
BaseEstimator,
|
20 |
+
ClassNamePrefixFeaturesOutMixin,
|
21 |
+
ClusterMixin,
|
22 |
+
_fit_context,
|
23 |
+
)
|
24 |
+
from ..metrics import DistanceMetric
|
25 |
+
from ..metrics._dist_metrics import METRIC_MAPPING64
|
26 |
+
from ..metrics.pairwise import _VALID_METRICS, paired_distances
|
27 |
+
from ..utils import check_array
|
28 |
+
from ..utils._fast_dict import IntFloatDict
|
29 |
+
from ..utils._param_validation import (
|
30 |
+
HasMethods,
|
31 |
+
Hidden,
|
32 |
+
Interval,
|
33 |
+
StrOptions,
|
34 |
+
validate_params,
|
35 |
+
)
|
36 |
+
from ..utils.graph import _fix_connected_components
|
37 |
+
from ..utils.validation import check_memory
|
38 |
+
|
39 |
+
# mypy error: Module 'sklearn.cluster' has no attribute '_hierarchical_fast'
|
40 |
+
from . import _hierarchical_fast as _hierarchical # type: ignore
|
41 |
+
from ._feature_agglomeration import AgglomerationTransform
|
42 |
+
|
43 |
+
###############################################################################
|
44 |
+
# For non fully-connected graphs
|
45 |
+
|
46 |
+
|
47 |
+
def _fix_connectivity(X, connectivity, affinity):
|
48 |
+
"""
|
49 |
+
Fixes the connectivity matrix.
|
50 |
+
|
51 |
+
The different steps are:
|
52 |
+
|
53 |
+
- copies it
|
54 |
+
- makes it symmetric
|
55 |
+
- converts it to LIL if necessary
|
56 |
+
- completes it if necessary.
|
57 |
+
|
58 |
+
Parameters
|
59 |
+
----------
|
60 |
+
X : array-like of shape (n_samples, n_features)
|
61 |
+
Feature matrix representing `n_samples` samples to be clustered.
|
62 |
+
|
63 |
+
connectivity : sparse matrix, default=None
|
64 |
+
Connectivity matrix. Defines for each sample the neighboring samples
|
65 |
+
following a given structure of the data. The matrix is assumed to
|
66 |
+
be symmetric and only the upper triangular half is used.
|
67 |
+
Default is `None`, i.e, the Ward algorithm is unstructured.
|
68 |
+
|
69 |
+
affinity : {"euclidean", "precomputed"}, default="euclidean"
|
70 |
+
Which affinity to use. At the moment `precomputed` and
|
71 |
+
``euclidean`` are supported. `euclidean` uses the
|
72 |
+
negative squared Euclidean distance between points.
|
73 |
+
|
74 |
+
Returns
|
75 |
+
-------
|
76 |
+
connectivity : sparse matrix
|
77 |
+
The fixed connectivity matrix.
|
78 |
+
|
79 |
+
n_connected_components : int
|
80 |
+
The number of connected components in the graph.
|
81 |
+
"""
|
82 |
+
n_samples = X.shape[0]
|
83 |
+
if connectivity.shape[0] != n_samples or connectivity.shape[1] != n_samples:
|
84 |
+
raise ValueError(
|
85 |
+
"Wrong shape for connectivity matrix: %s when X is %s"
|
86 |
+
% (connectivity.shape, X.shape)
|
87 |
+
)
|
88 |
+
|
89 |
+
# Make the connectivity matrix symmetric:
|
90 |
+
connectivity = connectivity + connectivity.T
|
91 |
+
|
92 |
+
# Convert connectivity matrix to LIL
|
93 |
+
if not sparse.issparse(connectivity):
|
94 |
+
connectivity = sparse.lil_matrix(connectivity)
|
95 |
+
|
96 |
+
# `connectivity` is a sparse matrix at this point
|
97 |
+
if connectivity.format != "lil":
|
98 |
+
connectivity = connectivity.tolil()
|
99 |
+
|
100 |
+
# Compute the number of nodes
|
101 |
+
n_connected_components, labels = connected_components(connectivity)
|
102 |
+
|
103 |
+
if n_connected_components > 1:
|
104 |
+
warnings.warn(
|
105 |
+
"the number of connected components of the "
|
106 |
+
"connectivity matrix is %d > 1. Completing it to avoid "
|
107 |
+
"stopping the tree early." % n_connected_components,
|
108 |
+
stacklevel=2,
|
109 |
+
)
|
110 |
+
# XXX: Can we do without completing the matrix?
|
111 |
+
connectivity = _fix_connected_components(
|
112 |
+
X=X,
|
113 |
+
graph=connectivity,
|
114 |
+
n_connected_components=n_connected_components,
|
115 |
+
component_labels=labels,
|
116 |
+
metric=affinity,
|
117 |
+
mode="connectivity",
|
118 |
+
)
|
119 |
+
|
120 |
+
return connectivity, n_connected_components
|
121 |
+
|
122 |
+
|
123 |
+
def _single_linkage_tree(
|
124 |
+
connectivity,
|
125 |
+
n_samples,
|
126 |
+
n_nodes,
|
127 |
+
n_clusters,
|
128 |
+
n_connected_components,
|
129 |
+
return_distance,
|
130 |
+
):
|
131 |
+
"""
|
132 |
+
Perform single linkage clustering on sparse data via the minimum
|
133 |
+
spanning tree from scipy.sparse.csgraph, then using union-find to label.
|
134 |
+
The parent array is then generated by walking through the tree.
|
135 |
+
"""
|
136 |
+
from scipy.sparse.csgraph import minimum_spanning_tree
|
137 |
+
|
138 |
+
# explicitly cast connectivity to ensure safety
|
139 |
+
connectivity = connectivity.astype(np.float64, copy=False)
|
140 |
+
|
141 |
+
# Ensure zero distances aren't ignored by setting them to "epsilon"
|
142 |
+
epsilon_value = np.finfo(dtype=connectivity.data.dtype).eps
|
143 |
+
connectivity.data[connectivity.data == 0] = epsilon_value
|
144 |
+
|
145 |
+
# Use scipy.sparse.csgraph to generate a minimum spanning tree
|
146 |
+
mst = minimum_spanning_tree(connectivity.tocsr())
|
147 |
+
|
148 |
+
# Convert the graph to scipy.cluster.hierarchy array format
|
149 |
+
mst = mst.tocoo()
|
150 |
+
|
151 |
+
# Undo the epsilon values
|
152 |
+
mst.data[mst.data == epsilon_value] = 0
|
153 |
+
|
154 |
+
mst_array = np.vstack([mst.row, mst.col, mst.data]).T
|
155 |
+
|
156 |
+
# Sort edges of the min_spanning_tree by weight
|
157 |
+
mst_array = mst_array[np.argsort(mst_array.T[2], kind="mergesort"), :]
|
158 |
+
|
159 |
+
# Convert edge list into standard hierarchical clustering format
|
160 |
+
single_linkage_tree = _hierarchical._single_linkage_label(mst_array)
|
161 |
+
children_ = single_linkage_tree[:, :2].astype(int)
|
162 |
+
|
163 |
+
# Compute parents
|
164 |
+
parent = np.arange(n_nodes, dtype=np.intp)
|
165 |
+
for i, (left, right) in enumerate(children_, n_samples):
|
166 |
+
if n_clusters is not None and i >= n_nodes:
|
167 |
+
break
|
168 |
+
if left < n_nodes:
|
169 |
+
parent[left] = i
|
170 |
+
if right < n_nodes:
|
171 |
+
parent[right] = i
|
172 |
+
|
173 |
+
if return_distance:
|
174 |
+
distances = single_linkage_tree[:, 2]
|
175 |
+
return children_, n_connected_components, n_samples, parent, distances
|
176 |
+
return children_, n_connected_components, n_samples, parent
|
177 |
+
|
178 |
+
|
179 |
+
###############################################################################
|
180 |
+
# Hierarchical tree building functions
|
181 |
+
|
182 |
+
|
183 |
+
@validate_params(
|
184 |
+
{
|
185 |
+
"X": ["array-like"],
|
186 |
+
"connectivity": ["array-like", "sparse matrix", None],
|
187 |
+
"n_clusters": [Interval(Integral, 1, None, closed="left"), None],
|
188 |
+
"return_distance": ["boolean"],
|
189 |
+
},
|
190 |
+
prefer_skip_nested_validation=True,
|
191 |
+
)
|
192 |
+
def ward_tree(X, *, connectivity=None, n_clusters=None, return_distance=False):
|
193 |
+
"""Ward clustering based on a Feature matrix.
|
194 |
+
|
195 |
+
Recursively merges the pair of clusters that minimally increases
|
196 |
+
within-cluster variance.
|
197 |
+
|
198 |
+
The inertia matrix uses a Heapq-based representation.
|
199 |
+
|
200 |
+
This is the structured version, that takes into account some topological
|
201 |
+
structure between samples.
|
202 |
+
|
203 |
+
Read more in the :ref:`User Guide <hierarchical_clustering>`.
|
204 |
+
|
205 |
+
Parameters
|
206 |
+
----------
|
207 |
+
X : array-like of shape (n_samples, n_features)
|
208 |
+
Feature matrix representing `n_samples` samples to be clustered.
|
209 |
+
|
210 |
+
connectivity : {array-like, sparse matrix}, default=None
|
211 |
+
Connectivity matrix. Defines for each sample the neighboring samples
|
212 |
+
following a given structure of the data. The matrix is assumed to
|
213 |
+
be symmetric and only the upper triangular half is used.
|
214 |
+
Default is None, i.e, the Ward algorithm is unstructured.
|
215 |
+
|
216 |
+
n_clusters : int, default=None
|
217 |
+
`n_clusters` should be less than `n_samples`. Stop early the
|
218 |
+
construction of the tree at `n_clusters.` This is useful to decrease
|
219 |
+
computation time if the number of clusters is not small compared to the
|
220 |
+
number of samples. In this case, the complete tree is not computed, thus
|
221 |
+
the 'children' output is of limited use, and the 'parents' output should
|
222 |
+
rather be used. This option is valid only when specifying a connectivity
|
223 |
+
matrix.
|
224 |
+
|
225 |
+
return_distance : bool, default=False
|
226 |
+
If `True`, return the distance between the clusters.
|
227 |
+
|
228 |
+
Returns
|
229 |
+
-------
|
230 |
+
children : ndarray of shape (n_nodes-1, 2)
|
231 |
+
The children of each non-leaf node. Values less than `n_samples`
|
232 |
+
correspond to leaves of the tree which are the original samples.
|
233 |
+
A node `i` greater than or equal to `n_samples` is a non-leaf
|
234 |
+
node and has children `children_[i - n_samples]`. Alternatively
|
235 |
+
at the i-th iteration, children[i][0] and children[i][1]
|
236 |
+
are merged to form node `n_samples + i`.
|
237 |
+
|
238 |
+
n_connected_components : int
|
239 |
+
The number of connected components in the graph.
|
240 |
+
|
241 |
+
n_leaves : int
|
242 |
+
The number of leaves in the tree.
|
243 |
+
|
244 |
+
parents : ndarray of shape (n_nodes,) or None
|
245 |
+
The parent of each node. Only returned when a connectivity matrix
|
246 |
+
is specified, elsewhere 'None' is returned.
|
247 |
+
|
248 |
+
distances : ndarray of shape (n_nodes-1,)
|
249 |
+
Only returned if `return_distance` is set to `True` (for compatibility).
|
250 |
+
The distances between the centers of the nodes. `distances[i]`
|
251 |
+
corresponds to a weighted Euclidean distance between
|
252 |
+
the nodes `children[i, 1]` and `children[i, 2]`. If the nodes refer to
|
253 |
+
leaves of the tree, then `distances[i]` is their unweighted Euclidean
|
254 |
+
distance. Distances are updated in the following way
|
255 |
+
(from scipy.hierarchy.linkage):
|
256 |
+
|
257 |
+
The new entry :math:`d(u,v)` is computed as follows,
|
258 |
+
|
259 |
+
.. math::
|
260 |
+
|
261 |
+
d(u,v) = \\sqrt{\\frac{|v|+|s|}
|
262 |
+
{T}d(v,s)^2
|
263 |
+
+ \\frac{|v|+|t|}
|
264 |
+
{T}d(v,t)^2
|
265 |
+
- \\frac{|v|}
|
266 |
+
{T}d(s,t)^2}
|
267 |
+
|
268 |
+
where :math:`u` is the newly joined cluster consisting of
|
269 |
+
clusters :math:`s` and :math:`t`, :math:`v` is an unused
|
270 |
+
cluster in the forest, :math:`T=|v|+|s|+|t|`, and
|
271 |
+
:math:`|*|` is the cardinality of its argument. This is also
|
272 |
+
known as the incremental algorithm.
|
273 |
+
|
274 |
+
Examples
|
275 |
+
--------
|
276 |
+
>>> import numpy as np
|
277 |
+
>>> from sklearn.cluster import ward_tree
|
278 |
+
>>> X = np.array([[1, 2], [1, 4], [1, 0],
|
279 |
+
... [4, 2], [4, 4], [4, 0]])
|
280 |
+
>>> children, n_connected_components, n_leaves, parents = ward_tree(X)
|
281 |
+
>>> children
|
282 |
+
array([[0, 1],
|
283 +           [3, 5],
284 +           [2, 6],
285 +           [4, 7],
286 +           [8, 9]])
287 +    >>> n_connected_components
288 +    1
289 +    >>> n_leaves
290 +    6
291 +    """
292 +    X = np.asarray(X)
293 +    if X.ndim == 1:
294 +        X = np.reshape(X, (-1, 1))
295 +    n_samples, n_features = X.shape
296 +
297 +    if connectivity is None:
298 +        from scipy.cluster import hierarchy  # imports PIL
299 +
300 +        if n_clusters is not None:
301 +            warnings.warn(
302 +                (
303 +                    "Partial build of the tree is implemented "
304 +                    "only for structured clustering (i.e. with "
305 +                    "explicit connectivity). The algorithm "
306 +                    "will build the full tree and only "
307 +                    "retain the lower branches required "
308 +                    "for the specified number of clusters"
309 +                ),
310 +                stacklevel=2,
311 +            )
312 +        X = np.require(X, requirements="W")
313 +        out = hierarchy.ward(X)
314 +        children_ = out[:, :2].astype(np.intp)
315 +
316 +        if return_distance:
317 +            distances = out[:, 2]
318 +            return children_, 1, n_samples, None, distances
319 +        else:
320 +            return children_, 1, n_samples, None
321 +
322 +    connectivity, n_connected_components = _fix_connectivity(
323 +        X, connectivity, affinity="euclidean"
324 +    )
325 +    if n_clusters is None:
326 +        n_nodes = 2 * n_samples - 1
327 +    else:
328 +        if n_clusters > n_samples:
329 +            raise ValueError(
330 +                "Cannot provide more clusters than samples. "
331 +                "%i n_clusters was asked, and there are %i "
332 +                "samples." % (n_clusters, n_samples)
333 +            )
334 +        n_nodes = 2 * n_samples - n_clusters
335 +
336 +    # create inertia matrix
337 +    coord_row = []
338 +    coord_col = []
339 +    A = []
340 +    for ind, row in enumerate(connectivity.rows):
341 +        A.append(row)
342 +        # We keep only the upper triangular for the moments
343 +        # Generator expressions are faster than arrays on the following
344 +        row = [i for i in row if i < ind]
345 +        coord_row.extend(
346 +            len(row)
347 +            * [
348 +                ind,
349 +            ]
350 +        )
351 +        coord_col.extend(row)
352 +
353 +    coord_row = np.array(coord_row, dtype=np.intp, order="C")
354 +    coord_col = np.array(coord_col, dtype=np.intp, order="C")
355 +
356 +    # build moments as a list
357 +    moments_1 = np.zeros(n_nodes, order="C")
358 +    moments_1[:n_samples] = 1
359 +    moments_2 = np.zeros((n_nodes, n_features), order="C")
360 +    moments_2[:n_samples] = X
361 +    inertia = np.empty(len(coord_row), dtype=np.float64, order="C")
362 +    _hierarchical.compute_ward_dist(moments_1, moments_2, coord_row, coord_col, inertia)
363 +    inertia = list(zip(inertia, coord_row, coord_col))
364 +    heapify(inertia)
365 +
366 +    # prepare the main fields
367 +    parent = np.arange(n_nodes, dtype=np.intp)
368 +    used_node = np.ones(n_nodes, dtype=bool)
369 +    children = []
370 +    if return_distance:
371 +        distances = np.empty(n_nodes - n_samples)
372 +
373 +    not_visited = np.empty(n_nodes, dtype=bool, order="C")
374 +
375 +    # recursive merge loop
376 +    for k in range(n_samples, n_nodes):
377 +        # identify the merge
378 +        while True:
379 +            inert, i, j = heappop(inertia)
380 +            if used_node[i] and used_node[j]:
381 +                break
382 +        parent[i], parent[j] = k, k
383 +        children.append((i, j))
384 +        used_node[i] = used_node[j] = False
385 +        if return_distance:  # store inertia value
386 +            distances[k - n_samples] = inert
387 +
388 +        # update the moments
389 +        moments_1[k] = moments_1[i] + moments_1[j]
390 +        moments_2[k] = moments_2[i] + moments_2[j]
391 +
392 +        # update the structure matrix A and the inertia matrix
393 +        coord_col = []
394 +        not_visited.fill(1)
395 +        not_visited[k] = 0
396 +        _hierarchical._get_parents(A[i], coord_col, parent, not_visited)
397 +        _hierarchical._get_parents(A[j], coord_col, parent, not_visited)
398 +        # List comprehension is faster than a for loop
399 +        [A[col].append(k) for col in coord_col]
400 +        A.append(coord_col)
401 +        coord_col = np.array(coord_col, dtype=np.intp, order="C")
402 +        coord_row = np.empty(coord_col.shape, dtype=np.intp, order="C")
403 +        coord_row.fill(k)
404 +        n_additions = len(coord_row)
405 +        ini = np.empty(n_additions, dtype=np.float64, order="C")
406 +
407 +        _hierarchical.compute_ward_dist(moments_1, moments_2, coord_row, coord_col, ini)
408 +
409 +        # List comprehension is faster than a for loop
410 +        [heappush(inertia, (ini[idx], k, coord_col[idx])) for idx in range(n_additions)]
411 +
412 +    # Separate leaves in children (empty lists up to now)
413 +    n_leaves = n_samples
414 +    # sort children to get consistent output with unstructured version
415 +    children = [c[::-1] for c in children]
416 +    children = np.array(children)  # return numpy array for efficient caching
417 +
418 +    if return_distance:
419 +        # 2 is scaling factor to compare w/ unstructured version
420 +        distances = np.sqrt(2.0 * distances)
421 +        return children, n_connected_components, n_leaves, parent, distances
422 +    else:
423 +        return children, n_connected_components, n_leaves, parent
424 +
425 +
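For orientation, a minimal usage sketch of the `ward_tree` function added above, mirroring its docstring example; on the unstructured path (no connectivity) the parents output is None:

    import numpy as np
    from sklearn.cluster import ward_tree

    X = np.array([[1, 2], [1, 4], [1, 0],
                  [4, 2], [4, 4], [4, 0]])
    # No connectivity: the full tree is built via scipy's hierarchy.ward
    children, n_connected_components, n_leaves, parents = ward_tree(X)
    print(children.shape)   # (n_samples - 1, 2): one row per merge
    print(parents)          # None on the unstructured path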
426 +# single, average and complete linkage
427 +def linkage_tree(
428 +    X,
429 +    connectivity=None,
430 +    n_clusters=None,
431 +    linkage="complete",
432 +    affinity="euclidean",
433 +    return_distance=False,
434 +):
435 +    """Linkage agglomerative clustering based on a Feature matrix.
436 +
437 +    The inertia matrix uses a Heapq-based representation.
438 +
439 +    This is the structured version, that takes into account some topological
440 +    structure between samples.
441 +
442 +    Read more in the :ref:`User Guide <hierarchical_clustering>`.
443 +
444 +    Parameters
445 +    ----------
446 +    X : array-like of shape (n_samples, n_features)
447 +        Feature matrix representing `n_samples` samples to be clustered.
448 +
449 +    connectivity : sparse matrix, default=None
450 +        Connectivity matrix. Defines for each sample the neighboring samples
451 +        following a given structure of the data. The matrix is assumed to
452 +        be symmetric and only the upper triangular half is used.
453 +        Default is `None`, i.e., the Ward algorithm is unstructured.
454 +
455 +    n_clusters : int, default=None
456 +        Stop early the construction of the tree at `n_clusters`. This is
457 +        useful to decrease computation time if the number of clusters is
458 +        not small compared to the number of samples. In this case, the
459 +        complete tree is not computed, thus the 'children' output is of
460 +        limited use, and the 'parents' output should rather be used.
461 +        This option is valid only when specifying a connectivity matrix.
462 +
463 +    linkage : {"average", "complete", "single"}, default="complete"
464 +        Which linkage criterion to use. The linkage criterion determines which
465 +        distance to use between sets of observations.
466 +        - "average" uses the average of the distances of each observation of
467 +          the two sets.
468 +        - "complete" or maximum linkage uses the maximum distances between
469 +          all observations of the two sets.
470 +        - "single" uses the minimum of the distances between all
471 +          observations of the two sets.
472 +
473 +    affinity : str or callable, default='euclidean'
474 +        Which metric to use. Can be 'euclidean', 'manhattan', or any
475 +        distance known to paired distance (see metric.pairwise).
476 +
477 +    return_distance : bool, default=False
478 +        Whether or not to return the distances between the clusters.
479 +
480 +    Returns
481 +    -------
482 +    children : ndarray of shape (n_nodes-1, 2)
483 +        The children of each non-leaf node. Values less than `n_samples`
484 +        correspond to leaves of the tree which are the original samples.
485 +        A node `i` greater than or equal to `n_samples` is a non-leaf
486 +        node and has children `children_[i - n_samples]`. Alternatively
487 +        at the i-th iteration, children[i][0] and children[i][1]
488 +        are merged to form node `n_samples + i`.
489 +
490 +    n_connected_components : int
491 +        The number of connected components in the graph.
492 +
493 +    n_leaves : int
494 +        The number of leaves in the tree.
495 +
496 +    parents : ndarray of shape (n_nodes, ) or None
497 +        The parent of each node. Only returned when a connectivity matrix
498 +        is specified; otherwise 'None' is returned.
499 +
500 +    distances : ndarray of shape (n_nodes-1,)
501 +        Returned when `return_distance` is set to `True`.
502 +
503 +        distances[i] refers to the distance between children[i][0] and
504 +        children[i][1] when they are merged.
505 +
506 +    See Also
507 +    --------
508 +    ward_tree : Hierarchical clustering with ward linkage.
509 +    """
510 +    X = np.asarray(X)
511 +    if X.ndim == 1:
512 +        X = np.reshape(X, (-1, 1))
513 +    n_samples, n_features = X.shape
514 +
515 +    linkage_choices = {
516 +        "complete": _hierarchical.max_merge,
517 +        "average": _hierarchical.average_merge,
518 +        "single": None,
519 +    }  # Single linkage is handled differently
520 +    try:
521 +        join_func = linkage_choices[linkage]
522 +    except KeyError as e:
523 +        raise ValueError(
524 +            "Unknown linkage option, linkage should be one of %s, but %s was given"
525 +            % (linkage_choices.keys(), linkage)
526 +        ) from e
527 +
528 +    if affinity == "cosine" and np.any(~np.any(X, axis=1)):
529 +        raise ValueError("Cosine affinity cannot be used when X contains zero vectors")
530 +
531 +    if connectivity is None:
532 +        from scipy.cluster import hierarchy  # imports PIL
533 +
534 +        if n_clusters is not None:
535 +            warnings.warn(
536 +                (
537 +                    "Partial build of the tree is implemented "
538 +                    "only for structured clustering (i.e. with "
539 +                    "explicit connectivity). The algorithm "
540 +                    "will build the full tree and only "
541 +                    "retain the lower branches required "
542 +                    "for the specified number of clusters"
543 +                ),
544 +                stacklevel=2,
545 +            )
546 +
547 +        if affinity == "precomputed":
548 +            # for the linkage function of hierarchy to work on precomputed
549 +            # data, provide as first argument an ndarray of the shape returned
550 +            # by sklearn.metrics.pairwise_distances.
551 +            if X.shape[0] != X.shape[1]:
552 +                raise ValueError(
553 +                    f"Distance matrix should be square, got matrix of shape {X.shape}"
554 +                )
555 +            i, j = np.triu_indices(X.shape[0], k=1)
556 +            X = X[i, j]
557 +        elif affinity == "l2":
558 +            # Translate to something understood by scipy
559 +            affinity = "euclidean"
560 +        elif affinity in ("l1", "manhattan"):
561 +            affinity = "cityblock"
562 +        elif callable(affinity):
563 +            X = affinity(X)
564 +            i, j = np.triu_indices(X.shape[0], k=1)
565 +            X = X[i, j]
566 +        if (
567 +            linkage == "single"
568 +            and affinity != "precomputed"
569 +            and not callable(affinity)
570 +            and affinity in METRIC_MAPPING64
571 +        ):
572 +            # We need the fast cythonized metric from neighbors
573 +            dist_metric = DistanceMetric.get_metric(affinity)
574 +
575 +            # The Cython routines used require contiguous arrays
576 +            X = np.ascontiguousarray(X, dtype=np.double)
577 +
578 +            mst = _hierarchical.mst_linkage_core(X, dist_metric)
579 +            # Sort edges of the min_spanning_tree by weight
580 +            mst = mst[np.argsort(mst.T[2], kind="mergesort"), :]
581 +
582 +            # Convert edge list into standard hierarchical clustering format
583 +            out = _hierarchical.single_linkage_label(mst)
584 +        else:
585 +            out = hierarchy.linkage(X, method=linkage, metric=affinity)
586 +        children_ = out[:, :2].astype(int, copy=False)
587 +
588 +        if return_distance:
589 +            distances = out[:, 2]
590 +            return children_, 1, n_samples, None, distances
591 +        return children_, 1, n_samples, None
592 +
593 +    connectivity, n_connected_components = _fix_connectivity(
594 +        X, connectivity, affinity=affinity
595 +    )
596 +    connectivity = connectivity.tocoo()
597 +    # Put the diagonal to zero
598 +    diag_mask = connectivity.row != connectivity.col
599 +    connectivity.row = connectivity.row[diag_mask]
600 +    connectivity.col = connectivity.col[diag_mask]
601 +    connectivity.data = connectivity.data[diag_mask]
602 +    del diag_mask
603 +
604 +    if affinity == "precomputed":
605 +        distances = X[connectivity.row, connectivity.col].astype(np.float64, copy=False)
606 +    else:
607 +        # FIXME We compute all the distances, while we could have only computed
608 +        # the "interesting" distances
609 +        distances = paired_distances(
610 +            X[connectivity.row], X[connectivity.col], metric=affinity
611 +        )
612 +    connectivity.data = distances
613 +
614 +    if n_clusters is None:
615 +        n_nodes = 2 * n_samples - 1
616 +    else:
617 +        assert n_clusters <= n_samples
618 +        n_nodes = 2 * n_samples - n_clusters
619 +
620 +    if linkage == "single":
621 +        return _single_linkage_tree(
622 +            connectivity,
623 +            n_samples,
624 +            n_nodes,
625 +            n_clusters,
626 +            n_connected_components,
627 +            return_distance,
628 +        )
629 +
630 +    if return_distance:
631 +        distances = np.empty(n_nodes - n_samples)
632 +    # create inertia heap and connection matrix
633 +    A = np.empty(n_nodes, dtype=object)
634 +    inertia = list()
635 +
636 +    # LIL seems to be the best format to access the rows quickly,
637 +    # without the numpy overhead of slicing CSR indices and data.
638 +    connectivity = connectivity.tolil()
639 +    # We are storing the graph in a list of IntFloatDict
640 +    for ind, (data, row) in enumerate(zip(connectivity.data, connectivity.rows)):
641 +        A[ind] = IntFloatDict(
642 +            np.asarray(row, dtype=np.intp), np.asarray(data, dtype=np.float64)
643 +        )
644 +        # We keep only the upper triangular for the heap
645 +        # Generator expressions are faster than arrays on the following
646 +        inertia.extend(
647 +            _hierarchical.WeightedEdge(d, ind, r) for r, d in zip(row, data) if r < ind
648 +        )
649 +    del connectivity
650 +
651 +    heapify(inertia)
652 +
653 +    # prepare the main fields
654 +    parent = np.arange(n_nodes, dtype=np.intp)
655 +    used_node = np.ones(n_nodes, dtype=np.intp)
656 +    children = []
657 +
658 +    # recursive merge loop
659 +    for k in range(n_samples, n_nodes):
660 +        # identify the merge
661 +        while True:
662 +            edge = heappop(inertia)
663 +            if used_node[edge.a] and used_node[edge.b]:
664 +                break
665 +        i = edge.a
666 +        j = edge.b
667 +
668 +        if return_distance:
669 +            # store distances
670 +            distances[k - n_samples] = edge.weight
671 +
672 +        parent[i] = parent[j] = k
673 +        children.append((i, j))
674 +        # Keep track of the number of elements per cluster
675 +        n_i = used_node[i]
676 +        n_j = used_node[j]
677 +        used_node[k] = n_i + n_j
678 +        used_node[i] = used_node[j] = False
679 +
680 +        # update the structure matrix A and the inertia matrix
681 +        # a clever 'min', or 'max' operation between A[i] and A[j]
682 +        coord_col = join_func(A[i], A[j], used_node, n_i, n_j)
683 +        for col, d in coord_col:
684 +            A[col].append(k, d)
685 +            # Here we use the information from coord_col (containing the
686 +            # distances) to update the heap
687 +            heappush(inertia, _hierarchical.WeightedEdge(d, k, col))
688 +        A[k] = coord_col
689 +        # Clear A[i] and A[j] to save memory
690 +        A[i] = A[j] = 0
691 +
692 +    # Separate leaves in children (empty lists up to now)
693 +    n_leaves = n_samples
694 +
695 +    # return numpy array for efficient caching
696 +    children = np.array(children)[:, ::-1]
697 +
698 +    if return_distance:
699 +        return children, n_connected_components, n_leaves, parent, distances
700 +    return children, n_connected_components, n_leaves, parent
701 +
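A hedged sketch of the structured path through `linkage_tree` above, restricting merges with a k-nearest-neighbors connectivity graph (the sample data and the `n_neighbors` value are illustrative):

    import numpy as np
    from sklearn.neighbors import kneighbors_graph
    # linkage_tree is a private helper; this import path matches the file in this diff
    from sklearn.cluster._agglomerative import linkage_tree

    X = np.random.RandomState(0).rand(20, 3)
    connectivity = kneighbors_graph(X, n_neighbors=5, include_self=False)
    children, n_cc, n_leaves, parents, distances = linkage_tree(
        X, connectivity=connectivity, linkage="average", return_distance=True
    )
    print(n_cc, distances[:3])  # connected components, first merge distances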
702 +
703 +# Matching names to tree-building strategies
704 +def _complete_linkage(*args, **kwargs):
705 +    kwargs["linkage"] = "complete"
706 +    return linkage_tree(*args, **kwargs)
707 +
708 +
709 +def _average_linkage(*args, **kwargs):
710 +    kwargs["linkage"] = "average"
711 +    return linkage_tree(*args, **kwargs)
712 +
713 +
714 +def _single_linkage(*args, **kwargs):
715 +    kwargs["linkage"] = "single"
716 +    return linkage_tree(*args, **kwargs)
717 +
718 +
719 +_TREE_BUILDERS = dict(
720 +    ward=ward_tree,
721 +    complete=_complete_linkage,
722 +    average=_average_linkage,
723 +    single=_single_linkage,
724 +)
725 +
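`_TREE_BUILDERS` is a plain dispatch table mapping the `linkage` string to a builder; `AgglomerativeClustering._fit` further down looks its entry up and calls it. A small sketch of that lookup in isolation (private import, illustrative data):

    import numpy as np
    from sklearn.cluster._agglomerative import _TREE_BUILDERS

    X = np.random.RandomState(0).rand(10, 2)
    tree_builder = _TREE_BUILDERS["average"]  # resolves to _average_linkage above
    children, n_cc, n_leaves, parents = tree_builder(
        X, connectivity=None, n_clusters=None
    )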
726 +###############################################################################
727 +# Functions for cutting hierarchical clustering tree
728 +
729 +
730 +def _hc_cut(n_clusters, children, n_leaves):
731 +    """Function cutting the ward tree for a given number of clusters.
732 +
733 +    Parameters
734 +    ----------
735 +    n_clusters : int or ndarray
736 +        The number of clusters to form.
737 +
738 +    children : ndarray of shape (n_nodes-1, 2)
739 +        The children of each non-leaf node. Values less than `n_samples`
740 +        correspond to leaves of the tree which are the original samples.
741 +        A node `i` greater than or equal to `n_samples` is a non-leaf
742 +        node and has children `children_[i - n_samples]`. Alternatively
743 +        at the i-th iteration, children[i][0] and children[i][1]
744 +        are merged to form node `n_samples + i`.
745 +
746 +    n_leaves : int
747 +        Number of leaves of the tree.
748 +
749 +    Returns
750 +    -------
751 +    labels : array [n_samples]
752 +        Cluster labels for each point.
753 +    """
754 +    if n_clusters > n_leaves:
755 +        raise ValueError(
756 +            "Cannot extract more clusters than samples: "
757 +            "%s clusters were given for a tree with %s leaves."
758 +            % (n_clusters, n_leaves)
759 +        )
760 +    # In this function, we store nodes as a heap to avoid recomputing
761 +    # the max of the nodes: the first element is always the smallest
762 +    # We use negated indices as heaps work on smallest elements, and we
763 +    # are interested in largest elements
764 +    # children[-1] is the root of the tree
765 +    nodes = [-(max(children[-1]) + 1)]
766 +    for _ in range(n_clusters - 1):
767 +        # As we have a heap, nodes[0] is the smallest element
768 +        these_children = children[-nodes[0] - n_leaves]
769 +        # Insert the 2 children and remove the largest node
770 +        heappush(nodes, -these_children[0])
771 +        heappushpop(nodes, -these_children[1])
772 +    label = np.zeros(n_leaves, dtype=np.intp)
773 +    for i, node in enumerate(nodes):
774 +        label[_hierarchical._hc_get_descendent(-node, children, n_leaves)] = i
775 +    return label
776 +
777 +
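A small worked example of `_hc_cut`: starting from the root, it repeatedly splits the most recently formed node until `n_clusters` heads remain, then labels the leaves under each head. Sketch via the private import path, with illustrative 1-D data:

    import numpy as np
    from sklearn.cluster._agglomerative import _hc_cut, ward_tree

    X = np.array([[0.0], [0.1], [5.0], [5.1], [10.0]])
    children, _, n_leaves, _ = ward_tree(X)
    labels = _hc_cut(3, children, n_leaves)
    print(labels)  # three groups: {0, 1}, {2, 3}, {4}, up to label permutation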
778 +###############################################################################
779 +
780 +
781 +class AgglomerativeClustering(ClusterMixin, BaseEstimator):
782 +    """
783 +    Agglomerative Clustering.
784 +
785 +    Recursively merges pairs of clusters of sample data; uses linkage distance.
786 +
787 +    Read more in the :ref:`User Guide <hierarchical_clustering>`.
788 +
789 +    Parameters
790 +    ----------
791 +    n_clusters : int or None, default=2
792 +        The number of clusters to find. It must be ``None`` if
793 +        ``distance_threshold`` is not ``None``.
794 +
795 +    metric : str or callable, default="euclidean"
796 +        Metric used to compute the linkage. Can be "euclidean", "l1", "l2",
797 +        "manhattan", "cosine", or "precomputed". If linkage is "ward", only
798 +        "euclidean" is accepted. If "precomputed", a distance matrix is needed
799 +        as input for the fit method.
800 +
801 +        .. versionadded:: 1.2
802 +
803 +        .. deprecated:: 1.4
804 +            `metric=None` is deprecated in 1.4 and will be removed in 1.6.
805 +            Let `metric` be the default value (i.e. `"euclidean"`) instead.
806 +
807 +    memory : str or object with the joblib.Memory interface, default=None
808 +        Used to cache the output of the computation of the tree.
809 +        By default, no caching is done. If a string is given, it is the
810 +        path to the caching directory.
811 +
812 +    connectivity : array-like or callable, default=None
813 +        Connectivity matrix. Defines for each sample the neighboring
814 +        samples following a given structure of the data.
815 +        This can be a connectivity matrix itself or a callable that transforms
816 +        the data into a connectivity matrix, such as derived from
817 +        `kneighbors_graph`. Default is ``None``, i.e., the
818 +        hierarchical clustering algorithm is unstructured.
819 +
820 +    compute_full_tree : 'auto' or bool, default='auto'
821 +        Stop early the construction of the tree at ``n_clusters``. This is
822 +        useful to decrease computation time if the number of clusters is not
823 +        small compared to the number of samples. This option is useful only
824 +        when specifying a connectivity matrix. Note also that when varying the
825 +        number of clusters and using caching, it may be advantageous to compute
826 +        the full tree. It must be ``True`` if ``distance_threshold`` is not
827 +        ``None``. By default `compute_full_tree` is "auto", which is equivalent
828 +        to `True` when `distance_threshold` is not `None` or when `n_clusters`
829 +        is lower than the maximum between 100 and `0.02 * n_samples`.
830 +        Otherwise, "auto" is equivalent to `False`.
831 +
832 +    linkage : {'ward', 'complete', 'average', 'single'}, default='ward'
833 +        Which linkage criterion to use. The linkage criterion determines which
834 +        distance to use between sets of observations. The algorithm will merge
835 +        the pairs of clusters that minimize this criterion.
836 +
837 +        - 'ward' minimizes the variance of the clusters being merged.
838 +        - 'average' uses the average of the distances of each observation of
839 +          the two sets.
840 +        - 'complete' or 'maximum' linkage uses the maximum distances between
841 +          all observations of the two sets.
842 +        - 'single' uses the minimum of the distances between all observations
843 +          of the two sets.
844 +
845 +        .. versionadded:: 0.20
846 +            Added the 'single' option
847 +
848 +    distance_threshold : float, default=None
849 +        The linkage distance threshold at or above which clusters will not be
850 +        merged. If not ``None``, ``n_clusters`` must be ``None`` and
851 +        ``compute_full_tree`` must be ``True``.
852 +
853 +        .. versionadded:: 0.21
854 +
855 +    compute_distances : bool, default=False
856 +        Computes distances between clusters even if `distance_threshold` is not
857 +        used. This can be used to make dendrogram visualization, but introduces
858 +        a computational and memory overhead.
859 +
860 +        .. versionadded:: 0.24
861 +
862 +    Attributes
863 +    ----------
864 +    n_clusters_ : int
865 +        The number of clusters found by the algorithm. If
866 +        ``distance_threshold=None``, it will be equal to the given
867 +        ``n_clusters``.
868 +
869 +    labels_ : ndarray of shape (n_samples)
870 +        Cluster labels for each point.
871 +
872 +    n_leaves_ : int
873 +        Number of leaves in the hierarchical tree.
874 +
875 +    n_connected_components_ : int
876 +        The estimated number of connected components in the graph.
877 +
878 +        .. versionadded:: 0.21
879 +            ``n_connected_components_`` was added to replace ``n_components_``.
880 +
881 +    n_features_in_ : int
882 +        Number of features seen during :term:`fit`.
883 +
884 +        .. versionadded:: 0.24
885 +
886 +    feature_names_in_ : ndarray of shape (`n_features_in_`,)
887 +        Names of features seen during :term:`fit`. Defined only when `X`
888 +        has feature names that are all strings.
889 +
890 +        .. versionadded:: 1.0
891 +
892 +    children_ : array-like of shape (n_samples-1, 2)
893 +        The children of each non-leaf node. Values less than `n_samples`
894 +        correspond to leaves of the tree which are the original samples.
895 +        A node `i` greater than or equal to `n_samples` is a non-leaf
896 +        node and has children `children_[i - n_samples]`. Alternatively
897 +        at the i-th iteration, children[i][0] and children[i][1]
898 +        are merged to form node `n_samples + i`.
899 +
900 +    distances_ : array-like of shape (n_nodes-1,)
901 +        Distances between nodes in the corresponding place in `children_`.
902 +        Only computed if `distance_threshold` is used or `compute_distances`
903 +        is set to `True`.
904 +
905 +    See Also
906 +    --------
907 +    FeatureAgglomeration : Agglomerative clustering but for features instead of
908 +        samples.
909 +    ward_tree : Hierarchical clustering with ward linkage.
910 +
911 +    Examples
912 +    --------
913 +    >>> from sklearn.cluster import AgglomerativeClustering
914 +    >>> import numpy as np
915 +    >>> X = np.array([[1, 2], [1, 4], [1, 0],
916 +    ...               [4, 2], [4, 4], [4, 0]])
917 +    >>> clustering = AgglomerativeClustering().fit(X)
918 +    >>> clustering
919 +    AgglomerativeClustering()
920 +    >>> clustering.labels_
921 +    array([1, 1, 1, 0, 0, 0])
922 +    """
923 +
924 +    _parameter_constraints: dict = {
925 +        "n_clusters": [Interval(Integral, 1, None, closed="left"), None],
926 +        "metric": [
927 +            StrOptions(set(_VALID_METRICS) | {"precomputed"}),
928 +            callable,
929 +            Hidden(None),
930 +        ],
931 +        "memory": [str, HasMethods("cache"), None],
932 +        "connectivity": ["array-like", callable, None],
933 +        "compute_full_tree": [StrOptions({"auto"}), "boolean"],
934 +        "linkage": [StrOptions(set(_TREE_BUILDERS.keys()))],
935 +        "distance_threshold": [Interval(Real, 0, None, closed="left"), None],
936 +        "compute_distances": ["boolean"],
937 +    }
938 +
939 +    def __init__(
940 +        self,
941 +        n_clusters=2,
942 +        *,
943 +        metric="euclidean",
944 +        memory=None,
945 +        connectivity=None,
946 +        compute_full_tree="auto",
947 +        linkage="ward",
948 +        distance_threshold=None,
949 +        compute_distances=False,
950 +    ):
951 +        self.n_clusters = n_clusters
952 +        self.distance_threshold = distance_threshold
953 +        self.memory = memory
954 +        self.connectivity = connectivity
955 +        self.compute_full_tree = compute_full_tree
956 +        self.linkage = linkage
957 +        self.metric = metric
958 +        self.compute_distances = compute_distances
959 +
960 +    @_fit_context(prefer_skip_nested_validation=True)
961 +    def fit(self, X, y=None):
962 +        """Fit the hierarchical clustering from features, or distance matrix.
963 +
964 +        Parameters
965 +        ----------
966 +        X : array-like, shape (n_samples, n_features) or \
967 +                (n_samples, n_samples)
968 +            Training instances to cluster, or distances between instances if
969 +            ``metric='precomputed'``.
970 +
971 +        y : Ignored
972 +            Not used, present here for API consistency by convention.
973 +
974 +        Returns
975 +        -------
976 +        self : object
977 +            Returns the fitted instance.
978 +        """
979 +        X = self._validate_data(X, ensure_min_samples=2)
980 +        return self._fit(X)
981 +
982 +    def _fit(self, X):
983 +        """Fit without validation
984 +
985 +        Parameters
986 +        ----------
987 +        X : ndarray of shape (n_samples, n_features) or (n_samples, n_samples)
988 +            Training instances to cluster, or distances between instances if
989 +            ``affinity='precomputed'``.
990 +
991 +        Returns
992 +        -------
993 +        self : object
994 +            Returns the fitted instance.
995 +        """
996 +        memory = check_memory(self.memory)
997 +
998 +        # TODO(1.6): remove in 1.6
999 +        if self.metric is None:
1000 +            warnings.warn(
1001 +                (
1002 +                    "`metric=None` is deprecated in version 1.4 and will be removed in "
1003 +                    "version 1.6. Let `metric` be the default value "
1004 +                    "(i.e. `'euclidean'`) instead."
1005 +                ),
1006 +                FutureWarning,
1007 +            )
1008 +            self._metric = "euclidean"
1009 +        else:
1010 +            self._metric = self.metric
1011 +
1012 +        if not ((self.n_clusters is None) ^ (self.distance_threshold is None)):
1013 +            raise ValueError(
1014 +                "Exactly one of n_clusters and "
1015 +                "distance_threshold has to be set, and the other "
1016 +                "needs to be None."
1017 +            )
1018 +
1019 +        if self.distance_threshold is not None and not self.compute_full_tree:
1020 +            raise ValueError(
1021 +                "compute_full_tree must be True if distance_threshold is set."
1022 +            )
1023 +
1024 +        if self.linkage == "ward" and self._metric != "euclidean":
1025 +            raise ValueError(
1026 +                f"{self._metric} was provided as metric. Ward can only "
1027 +                "work with euclidean distances."
1028 +            )
1029 +
1030 +        tree_builder = _TREE_BUILDERS[self.linkage]
1031 +
1032 +        connectivity = self.connectivity
1033 +        if self.connectivity is not None:
1034 +            if callable(self.connectivity):
1035 +                connectivity = self.connectivity(X)
1036 +            connectivity = check_array(
1037 +                connectivity, accept_sparse=["csr", "coo", "lil"]
1038 +            )
1039 +
1040 +        n_samples = len(X)
1041 +        compute_full_tree = self.compute_full_tree
1042 +        if self.connectivity is None:
1043 +            compute_full_tree = True
1044 +        if compute_full_tree == "auto":
1045 +            if self.distance_threshold is not None:
1046 +                compute_full_tree = True
1047 +            else:
1048 +                # Early stopping is likely to give a speed up only for
1049 +                # a large number of clusters. The actual threshold
1050 +                # implemented here is heuristic
1051 +                compute_full_tree = self.n_clusters < max(100, 0.02 * n_samples)
1052 +        n_clusters = self.n_clusters
1053 +        if compute_full_tree:
1054 +            n_clusters = None
1055 +
1056 +        # Construct the tree
1057 +        kwargs = {}
1058 +        if self.linkage != "ward":
1059 +            kwargs["linkage"] = self.linkage
1060 +            kwargs["affinity"] = self._metric
1061 +
1062 +        distance_threshold = self.distance_threshold
1063 +
1064 +        return_distance = (distance_threshold is not None) or self.compute_distances
1065 +
1066 +        out = memory.cache(tree_builder)(
1067 +            X,
1068 +            connectivity=connectivity,
1069 +            n_clusters=n_clusters,
1070 +            return_distance=return_distance,
1071 +            **kwargs,
1072 +        )
1073 +        (self.children_, self.n_connected_components_, self.n_leaves_, parents) = out[
1074 +            :4
1075 +        ]
1076 +
1077 +        if return_distance:
1078 +            self.distances_ = out[-1]
1079 +
1080 +        if self.distance_threshold is not None:  # distance_threshold is used
1081 +            self.n_clusters_ = (
1082 +                np.count_nonzero(self.distances_ >= distance_threshold) + 1
1083 +            )
1084 +        else:  # n_clusters is used
1085 +            self.n_clusters_ = self.n_clusters
1086 +
1087 +        # Cut the tree
1088 +        if compute_full_tree:
1089 +            self.labels_ = _hc_cut(self.n_clusters_, self.children_, self.n_leaves_)
1090 +        else:
1091 +            labels = _hierarchical.hc_get_heads(parents, copy=False)
1092 +            # copy to avoid holding a reference on the original array
1093 +            labels = np.copy(labels[:n_samples])
1094 +            # Reassign cluster numbers
1095 +            self.labels_ = np.searchsorted(np.unique(labels), labels)
1096 +        return self
1097 +
1098 +    def fit_predict(self, X, y=None):
1099 +        """Fit and return the result of each sample's clustering assignment.
1100 +
1101 +        In addition to fitting, this method also returns the result of the
1102 +        clustering assignment for each sample in the training set.
1103 +
1104 +        Parameters
1105 +        ----------
1106 +        X : array-like of shape (n_samples, n_features) or \
1107 +                (n_samples, n_samples)
1108 +            Training instances to cluster, or distances between instances if
1109 +            ``affinity='precomputed'``.
1110 +
1111 +        y : Ignored
1112 +            Not used, present here for API consistency by convention.
1113 +
1114 +        Returns
1115 +        -------
1116 +        labels : ndarray of shape (n_samples,)
1117 +            Cluster labels.
1118 +        """
1119 +        return super().fit_predict(X, y)
1120 +
1121 +
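Complementing the fixed-`n_clusters` docstring example, a sketch of the `distance_threshold` path, where `n_clusters_` is derived from the merge distances and `distances_` is populated because `return_distance` is forced to True (threshold value chosen for illustration):

    import numpy as np
    from sklearn.cluster import AgglomerativeClustering

    X = np.array([[1, 2], [1, 4], [1, 0],
                  [4, 2], [4, 4], [4, 0]])
    model = AgglomerativeClustering(n_clusters=None, distance_threshold=3.0).fit(X)
    print(model.n_clusters_, model.labels_)
    print(model.distances_)  # one merge distance per internal node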
1122 +class FeatureAgglomeration(
1123 +    ClassNamePrefixFeaturesOutMixin, AgglomerativeClustering, AgglomerationTransform
1124 +):
1125 +    """Agglomerate features.
1126 +
1127 +    Recursively merges pairs of clusters of features.
1128 +
1129 +    Read more in the :ref:`User Guide <hierarchical_clustering>`.
1130 +
1131 +    Parameters
1132 +    ----------
1133 +    n_clusters : int or None, default=2
1134 +        The number of clusters to find. It must be ``None`` if
1135 +        ``distance_threshold`` is not ``None``.
1136 +
1137 +    metric : str or callable, default="euclidean"
1138 +        Metric used to compute the linkage. Can be "euclidean", "l1", "l2",
1139 +        "manhattan", "cosine", or "precomputed". If linkage is "ward", only
1140 +        "euclidean" is accepted. If "precomputed", a distance matrix is needed
1141 +        as input for the fit method.
1142 +
1143 +        .. versionadded:: 1.2
1144 +
1145 +        .. deprecated:: 1.4
1146 +            `metric=None` is deprecated in 1.4 and will be removed in 1.6.
1147 +            Let `metric` be the default value (i.e. `"euclidean"`) instead.
1148 +
1149 +    memory : str or object with the joblib.Memory interface, default=None
1150 +        Used to cache the output of the computation of the tree.
1151 +        By default, no caching is done. If a string is given, it is the
1152 +        path to the caching directory.
1153 +
1154 +    connectivity : array-like or callable, default=None
1155 +        Connectivity matrix. Defines for each feature the neighboring
1156 +        features following a given structure of the data.
1157 +        This can be a connectivity matrix itself or a callable that transforms
1158 +        the data into a connectivity matrix, such as derived from
1159 +        `kneighbors_graph`. Default is `None`, i.e., the
1160 +        hierarchical clustering algorithm is unstructured.
1161 +
1162 +    compute_full_tree : 'auto' or bool, default='auto'
1163 +        Stop early the construction of the tree at `n_clusters`. This is useful
1164 +        to decrease computation time if the number of clusters is not small
1165 +        compared to the number of features. This option is useful only when
1166 +        specifying a connectivity matrix. Note also that when varying the
1167 +        number of clusters and using caching, it may be advantageous to compute
1168 +        the full tree. It must be ``True`` if ``distance_threshold`` is not
1169 +        ``None``. By default `compute_full_tree` is "auto", which is equivalent
1170 +        to `True` when `distance_threshold` is not `None` or when `n_clusters`
1171 +        is lower than the maximum between 100 and `0.02 * n_samples`.
1172 +        Otherwise, "auto" is equivalent to `False`.
1173 +
1174 +    linkage : {"ward", "complete", "average", "single"}, default="ward"
1175 +        Which linkage criterion to use. The linkage criterion determines which
1176 +        distance to use between sets of features. The algorithm will merge
1177 +        the pairs of clusters that minimize this criterion.
1178 +
1179 +        - "ward" minimizes the variance of the clusters being merged.
1180 +        - "complete" or maximum linkage uses the maximum distances between
1181 +          all features of the two sets.
1182 +        - "average" uses the average of the distances of each feature of
1183 +          the two sets.
1184 +        - "single" uses the minimum of the distances between all features
1185 +          of the two sets.
1186 +
1187 +    pooling_func : callable, default=np.mean
1188 +        This combines the values of agglomerated features into a single
1189 +        value, and should accept an array of shape [M, N] and the keyword
1190 +        argument `axis=1`, and reduce it to an array of size [M].
1191 +
1192 +    distance_threshold : float, default=None
1193 +        The linkage distance threshold at or above which clusters will not be
1194 +        merged. If not ``None``, ``n_clusters`` must be ``None`` and
1195 +        ``compute_full_tree`` must be ``True``.
1196 +
1197 +        .. versionadded:: 0.21
1198 +
1199 +    compute_distances : bool, default=False
1200 +        Computes distances between clusters even if `distance_threshold` is not
1201 +        used. This can be used to make dendrogram visualization, but introduces
1202 +        a computational and memory overhead.
1203 +
1204 +        .. versionadded:: 0.24
1205 +
1206 +    Attributes
1207 +    ----------
1208 +    n_clusters_ : int
1209 +        The number of clusters found by the algorithm. If
1210 +        ``distance_threshold=None``, it will be equal to the given
1211 +        ``n_clusters``.
1212 +
1213 +    labels_ : array-like of (n_features,)
1214 +        Cluster labels for each feature.
1215 +
1216 +    n_leaves_ : int
1217 +        Number of leaves in the hierarchical tree.
1218 +
1219 +    n_connected_components_ : int
1220 +        The estimated number of connected components in the graph.
1221 +
1222 +        .. versionadded:: 0.21
1223 +            ``n_connected_components_`` was added to replace ``n_components_``.
1224 +
1225 +    n_features_in_ : int
1226 +        Number of features seen during :term:`fit`.
1227 +
1228 +        .. versionadded:: 0.24
1229 +
1230 +    feature_names_in_ : ndarray of shape (`n_features_in_`,)
1231 +        Names of features seen during :term:`fit`. Defined only when `X`
1232 +        has feature names that are all strings.
1233 +
1234 +        .. versionadded:: 1.0
1235 +
1236 +    children_ : array-like of shape (n_nodes-1, 2)
1237 +        The children of each non-leaf node. Values less than `n_features`
1238 +        correspond to leaves of the tree which are the original samples.
1239 +        A node `i` greater than or equal to `n_features` is a non-leaf
1240 +        node and has children `children_[i - n_features]`. Alternatively
1241 +        at the i-th iteration, children[i][0] and children[i][1]
1242 +        are merged to form node `n_features + i`.
1243 +
1244 +    distances_ : array-like of shape (n_nodes-1,)
1245 +        Distances between nodes in the corresponding place in `children_`.
1246 +        Only computed if `distance_threshold` is used or `compute_distances`
1247 +        is set to `True`.
1248 +
1249 +    See Also
1250 +    --------
1251 +    AgglomerativeClustering : Agglomerative clustering of samples instead of
1252 +        features.
1253 +    ward_tree : Hierarchical clustering with ward linkage.
1254 +
1255 +    Examples
1256 +    --------
1257 +    >>> import numpy as np
1258 +    >>> from sklearn import datasets, cluster
1259 +    >>> digits = datasets.load_digits()
1260 +    >>> images = digits.images
1261 +    >>> X = np.reshape(images, (len(images), -1))
1262 +    >>> agglo = cluster.FeatureAgglomeration(n_clusters=32)
1263 +    >>> agglo.fit(X)
1264 +    FeatureAgglomeration(n_clusters=32)
1265 +    >>> X_reduced = agglo.transform(X)
1266 +    >>> X_reduced.shape
1267 +    (1797, 32)
1268 +    """
1269 +
1270 +    _parameter_constraints: dict = {
1271 +        "n_clusters": [Interval(Integral, 1, None, closed="left"), None],
1272 +        "metric": [
1273 +            StrOptions(set(_VALID_METRICS) | {"precomputed"}),
1274 +            callable,
1275 +            Hidden(None),
1276 +        ],
1277 +        "memory": [str, HasMethods("cache"), None],
1278 +        "connectivity": ["array-like", callable, None],
1279 +        "compute_full_tree": [StrOptions({"auto"}), "boolean"],
1280 +        "linkage": [StrOptions(set(_TREE_BUILDERS.keys()))],
1281 +        "pooling_func": [callable],
1282 +        "distance_threshold": [Interval(Real, 0, None, closed="left"), None],
1283 +        "compute_distances": ["boolean"],
1284 +    }
1285 +
1286 +    def __init__(
1287 +        self,
1288 +        n_clusters=2,
1289 +        *,
1290 +        metric="euclidean",
1291 +        memory=None,
1292 +        connectivity=None,
1293 +        compute_full_tree="auto",
1294 +        linkage="ward",
1295 +        pooling_func=np.mean,
1296 +        distance_threshold=None,
1297 +        compute_distances=False,
1298 +    ):
1299 +        super().__init__(
1300 +            n_clusters=n_clusters,
1301 +            memory=memory,
1302 +            connectivity=connectivity,
1303 +            compute_full_tree=compute_full_tree,
1304 +            linkage=linkage,
1305 +            metric=metric,
1306 +            distance_threshold=distance_threshold,
1307 +            compute_distances=compute_distances,
1308 +        )
1309 +        self.pooling_func = pooling_func
1310 +
1311 +    @_fit_context(prefer_skip_nested_validation=True)
1312 +    def fit(self, X, y=None):
1313 +        """Fit the hierarchical clustering on the data.
1314 +
1315 +        Parameters
1316 +        ----------
1317 +        X : array-like of shape (n_samples, n_features)
1318 +            The data.
1319 +
1320 +        y : Ignored
1321 +            Not used, present here for API consistency by convention.
1322 +
1323 +        Returns
1324 +        -------
1325 +        self : object
1326 +            Returns the transformer.
1327 +        """
1328 +        X = self._validate_data(X, ensure_min_features=2)
1329 +        super()._fit(X.T)
1330 +        self._n_features_out = self.n_clusters_
1331 +        return self
1332 +
1333 +    @property
1334 +    def fit_predict(self):
1335 +        """Fit and return the result of each sample's clustering assignment."""
1336 +        raise AttributeError
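Beyond the digits example in the docstring, a sketch of the transform/inverse_transform round trip: `transform` pools each feature cluster to one value via `pooling_func`, and `inverse_transform` broadcasts the pooled values back to the original feature layout (random data for illustration):

    import numpy as np
    from sklearn.cluster import FeatureAgglomeration

    X = np.random.RandomState(0).rand(50, 8)
    agglo = FeatureAgglomeration(n_clusters=3).fit(X)
    X_reduced = agglo.transform(X)                 # shape (50, 3)
    X_approx = agglo.inverse_transform(X_reduced)  # back to shape (50, 8)
    print(X_reduced.shape, X_approx.shape)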
venv/lib/python3.10/site-packages/sklearn/cluster/_bicluster.py
ADDED
@@ -0,0 +1,622 @@
1 +"""Spectral biclustering algorithms."""
2 +# Authors : Kemal Eren
3 +# License: BSD 3 clause
4 +
5 +from abc import ABCMeta, abstractmethod
6 +from numbers import Integral
7 +
8 +import numpy as np
9 +from scipy.linalg import norm
10 +from scipy.sparse import dia_matrix, issparse
11 +from scipy.sparse.linalg import eigsh, svds
12 +
13 +from ..base import BaseEstimator, BiclusterMixin, _fit_context
14 +from ..utils import check_random_state, check_scalar
15 +from ..utils._param_validation import Interval, StrOptions
16 +from ..utils.extmath import make_nonnegative, randomized_svd, safe_sparse_dot
17 +from ..utils.validation import assert_all_finite
18 +from ._kmeans import KMeans, MiniBatchKMeans
19 +
20 +__all__ = ["SpectralCoclustering", "SpectralBiclustering"]
21 +
22 +
23 +def _scale_normalize(X):
24 +    """Normalize ``X`` by scaling rows and columns independently.
25 +
26 +    Returns the normalized matrix and the row and column scaling
27 +    factors.
28 +    """
29 +    X = make_nonnegative(X)
30 +    row_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=1))).squeeze()
31 +    col_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=0))).squeeze()
32 +    row_diag = np.where(np.isnan(row_diag), 0, row_diag)
33 +    col_diag = np.where(np.isnan(col_diag), 0, col_diag)
34 +    if issparse(X):
35 +        n_rows, n_cols = X.shape
36 +        r = dia_matrix((row_diag, [0]), shape=(n_rows, n_rows))
37 +        c = dia_matrix((col_diag, [0]), shape=(n_cols, n_cols))
38 +        an = r * X * c
39 +    else:
40 +        an = row_diag[:, np.newaxis] * X * col_diag
41 +    return an, row_diag, col_diag
42 +
43 +
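A quick numerical check of what `_scale_normalize` returns on the dense branch: the normalized matrix equals `row_diag[:, None] * X * col_diag`, with the diagonals being inverse square roots of the row and column sums (private import, toy matrix):

    import numpy as np
    from sklearn.cluster._bicluster import _scale_normalize

    X = np.array([[1.0, 2.0], [3.0, 4.0]])
    an, row_diag, col_diag = _scale_normalize(X)
    assert np.allclose(an, row_diag[:, None] * X * col_diag)
    assert np.allclose(row_diag, 1.0 / np.sqrt(X.sum(axis=1)))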
44 +def _bistochastic_normalize(X, max_iter=1000, tol=1e-5):
45 +    """Normalize rows and columns of ``X`` simultaneously so that all
46 +    rows sum to one constant and all columns sum to a different
47 +    constant.
48 +    """
49 +    # According to paper, this can also be done more efficiently with
50 +    # deviation reduction and balancing algorithms.
51 +    X = make_nonnegative(X)
52 +    X_scaled = X
53 +    for _ in range(max_iter):
54 +        X_new, _, _ = _scale_normalize(X_scaled)
55 +        if issparse(X):
56 +            dist = norm(X_scaled.data - X.data)
57 +        else:
58 +            dist = norm(X_scaled - X_new)
59 +        X_scaled = X_new
60 +        if dist is not None and dist < tol:
61 +            break
62 +    return X_scaled
63 +
64 +
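The loop above simply alternates `_scale_normalize` until successive iterates stop moving. A sketch checking that the dense result ends up with (nearly) constant row sums, as the docstring promises:

    import numpy as np
    from sklearn.cluster._bicluster import _bistochastic_normalize

    X = np.random.RandomState(0).rand(5, 7)
    X_scaled = _bistochastic_normalize(X)
    row_sums = X_scaled.sum(axis=1)
    print(row_sums.max() - row_sums.min())  # ~0 once the iteration has converged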
65 +def _log_normalize(X):
66 +    """Normalize ``X`` according to Kluger's log-interactions scheme."""
67 +    X = make_nonnegative(X, min_value=1)
68 +    if issparse(X):
69 +        raise ValueError(
70 +            "Cannot compute log of a sparse matrix,"
71 +            " because log(x) diverges to -infinity as x"
72 +            " goes to 0."
73 +        )
74 +    L = np.log(X)
75 +    row_avg = L.mean(axis=1)[:, np.newaxis]
76 +    col_avg = L.mean(axis=0)
77 +    avg = L.mean()
78 +    return L - row_avg - col_avg + avg
79 +
80 +
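`_log_normalize` is a double-centering of `log(X)`: it returns `L - row_avg - col_avg + avg`, so every row mean and every column mean of the result is zero. A minimal check:

    import numpy as np
    from sklearn.cluster._bicluster import _log_normalize

    X = np.random.RandomState(0).rand(4, 6) + 1.0
    L = _log_normalize(X)
    print(np.allclose(L.mean(axis=1), 0), np.allclose(L.mean(axis=0), 0))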
81 +class BaseSpectral(BiclusterMixin, BaseEstimator, metaclass=ABCMeta):
82 +    """Base class for spectral biclustering."""
83 +
84 +    _parameter_constraints: dict = {
85 +        "svd_method": [StrOptions({"randomized", "arpack"})],
86 +        "n_svd_vecs": [Interval(Integral, 0, None, closed="left"), None],
87 +        "mini_batch": ["boolean"],
88 +        "init": [StrOptions({"k-means++", "random"}), np.ndarray],
89 +        "n_init": [Interval(Integral, 1, None, closed="left")],
90 +        "random_state": ["random_state"],
91 +    }
92 +
93 +    @abstractmethod
94 +    def __init__(
95 +        self,
96 +        n_clusters=3,
97 +        svd_method="randomized",
98 +        n_svd_vecs=None,
99 +        mini_batch=False,
100 +        init="k-means++",
101 +        n_init=10,
102 +        random_state=None,
103 +    ):
104 +        self.n_clusters = n_clusters
105 +        self.svd_method = svd_method
106 +        self.n_svd_vecs = n_svd_vecs
107 +        self.mini_batch = mini_batch
108 +        self.init = init
109 +        self.n_init = n_init
110 +        self.random_state = random_state
111 +
112 +    @abstractmethod
113 +    def _check_parameters(self, n_samples):
114 +        """Validate parameters depending on the input data."""
115 +
116 +    @_fit_context(prefer_skip_nested_validation=True)
117 +    def fit(self, X, y=None):
118 +        """Create a biclustering for X.
119 +
120 +        Parameters
121 +        ----------
122 +        X : array-like of shape (n_samples, n_features)
123 +            Training data.
124 +
125 +        y : Ignored
126 +            Not used, present for API consistency by convention.
127 +
128 +        Returns
129 +        -------
130 +        self : object
131 +            SpectralBiclustering instance.
132 +        """
133 +        X = self._validate_data(X, accept_sparse="csr", dtype=np.float64)
134 +        self._check_parameters(X.shape[0])
135 +        self._fit(X)
136 +        return self
137 +
138 +    def _svd(self, array, n_components, n_discard):
139 +        """Returns first `n_components` left and right singular
140 +        vectors u and v, discarding the first `n_discard`.
141 +        """
142 +        if self.svd_method == "randomized":
143 +            kwargs = {}
144 +            if self.n_svd_vecs is not None:
145 +                kwargs["n_oversamples"] = self.n_svd_vecs
146 +            u, _, vt = randomized_svd(
147 +                array, n_components, random_state=self.random_state, **kwargs
148 +            )
149 +
150 +        elif self.svd_method == "arpack":
151 +            u, _, vt = svds(array, k=n_components, ncv=self.n_svd_vecs)
152 +            if np.any(np.isnan(vt)):
153 +                # some eigenvalues of A * A.T are negative, causing
154 +                # sqrt() to be np.nan. This causes some vectors in vt
155 +                # to be np.nan.
156 +                A = safe_sparse_dot(array.T, array)
157 +                random_state = check_random_state(self.random_state)
158 +                # initialize with [-1,1] as in ARPACK
159 +                v0 = random_state.uniform(-1, 1, A.shape[0])
160 +                _, v = eigsh(A, ncv=self.n_svd_vecs, v0=v0)
161 +                vt = v.T
162 +            if np.any(np.isnan(u)):
163 +                A = safe_sparse_dot(array, array.T)
164 +                random_state = check_random_state(self.random_state)
165 +                # initialize with [-1,1] as in ARPACK
166 +                v0 = random_state.uniform(-1, 1, A.shape[0])
167 +                _, u = eigsh(A, ncv=self.n_svd_vecs, v0=v0)
168 +
169 +        assert_all_finite(u)
170 +        assert_all_finite(vt)
171 +        u = u[:, n_discard:]
172 +        vt = vt[n_discard:]
173 +        return u, vt.T
174 +
175 +    def _k_means(self, data, n_clusters):
176 +        if self.mini_batch:
177 +            model = MiniBatchKMeans(
178 +                n_clusters,
179 +                init=self.init,
180 +                n_init=self.n_init,
181 +                random_state=self.random_state,
182 +            )
183 +        else:
184 +            model = KMeans(
185 +                n_clusters,
186 +                init=self.init,
187 +                n_init=self.n_init,
188 +                random_state=self.random_state,
189 +            )
190 +        model.fit(data)
191 +        centroid = model.cluster_centers_
192 +        labels = model.labels_
193 +        return centroid, labels
194 +
195 +    def _more_tags(self):
196 +        return {
197 +            "_xfail_checks": {
198 +                "check_estimators_dtypes": "raises nan error",
199 +                "check_fit2d_1sample": "_scale_normalize fails",
200 +                "check_fit2d_1feature": "raises apply_along_axis error",
201 +                "check_estimator_sparse_data": "does not fail gracefully",
202 +                "check_methods_subset_invariance": "empty array passed inside",
203 +                "check_dont_overwrite_parameters": "empty array passed inside",
204 +                "check_fit2d_predict1d": "empty array passed inside",
205 +            }
206 +        }
207 +
208 +
209 +class SpectralCoclustering(BaseSpectral):
210 +    """Spectral Co-Clustering algorithm (Dhillon, 2001).
211 +
212 +    Clusters rows and columns of an array `X` to solve the relaxed
213 +    normalized cut of the bipartite graph created from `X` as follows:
214 +    the edge between row vertex `i` and column vertex `j` has weight
215 +    `X[i, j]`.
216 +
217 +    The resulting bicluster structure is block-diagonal, since each
218 +    row and each column belongs to exactly one bicluster.
219 +
220 +    Supports sparse matrices, as long as they are nonnegative.
221 +
222 +    Read more in the :ref:`User Guide <spectral_coclustering>`.
223 +
224 +    Parameters
225 +    ----------
226 +    n_clusters : int, default=3
227 +        The number of biclusters to find.
228 +
229 +    svd_method : {'randomized', 'arpack'}, default='randomized'
230 +        Selects the algorithm for finding singular vectors. May be
231 +        'randomized' or 'arpack'. If 'randomized', use
232 +        :func:`sklearn.utils.extmath.randomized_svd`, which may be faster
233 +        for large matrices. If 'arpack', use
234 +        :func:`scipy.sparse.linalg.svds`, which is more accurate, but
235 +        possibly slower in some cases.
236 +
237 +    n_svd_vecs : int, default=None
238 +        Number of vectors to use in calculating the SVD. Corresponds
239 +        to `ncv` when `svd_method='arpack'` and `n_oversamples` when
240 +        `svd_method` is 'randomized'.
241 +
242 +    mini_batch : bool, default=False
243 +        Whether to use mini-batch k-means, which is faster but may get
244 +        different results.
245 +
246 +    init : {'k-means++', 'random'}, or ndarray of shape \
247 +            (n_clusters, n_features), default='k-means++'
248 +        Method for initialization of k-means algorithm; defaults to
249 +        'k-means++'.
250 +
251 +    n_init : int, default=10
252 +        Number of random initializations that are tried with the
253 +        k-means algorithm.
254 +
255 +        If mini-batch k-means is used, the best initialization is
256 +        chosen and the algorithm runs once. Otherwise, the algorithm
257 +        is run for each initialization and the best solution chosen.
258 +
259 +    random_state : int, RandomState instance, default=None
260 +        Used for randomizing the singular value decomposition and the k-means
261 +        initialization. Use an int to make the randomness deterministic.
262 +        See :term:`Glossary <random_state>`.
263 +
264 +    Attributes
265 +    ----------
266 +    rows_ : array-like of shape (n_row_clusters, n_rows)
267 +        Results of the clustering. `rows[i, r]` is True if
268 +        cluster `i` contains row `r`. Available only after calling ``fit``.
269 +
270 +    columns_ : array-like of shape (n_column_clusters, n_columns)
271 +        Results of the clustering, like `rows`.
272 +
273 +    row_labels_ : array-like of shape (n_rows,)
274 +        The bicluster label of each row.
275 +
276 +    column_labels_ : array-like of shape (n_cols,)
277 +        The bicluster label of each column.
278 +
279 +    biclusters_ : tuple of two ndarrays
280 +        The tuple contains the `rows_` and `columns_` arrays.
281 +
282 +    n_features_in_ : int
283 +        Number of features seen during :term:`fit`.
284 +
285 +        .. versionadded:: 0.24
286 +
287 +    feature_names_in_ : ndarray of shape (`n_features_in_`,)
288 +        Names of features seen during :term:`fit`. Defined only when `X`
289 +        has feature names that are all strings.
290 +
291 +        .. versionadded:: 1.0
292 +
293 +    See Also
294 +    --------
295 +    SpectralBiclustering : Partitions rows and columns under the assumption
296 +        that the data has an underlying checkerboard structure.
297 +
298 +    References
299 +    ----------
300 +    * :doi:`Dhillon, Inderjit S, 2001. Co-clustering documents and words using
301 +      bipartite spectral graph partitioning.
302 +      <10.1145/502512.502550>`
303 +
304 +    Examples
305 +    --------
306 +    >>> from sklearn.cluster import SpectralCoclustering
307 +    >>> import numpy as np
308 +    >>> X = np.array([[1, 1], [2, 1], [1, 0],
309 +    ...               [4, 7], [3, 5], [3, 6]])
310 +    >>> clustering = SpectralCoclustering(n_clusters=2, random_state=0).fit(X)
311 +    >>> clustering.row_labels_ #doctest: +SKIP
312 +    array([0, 1, 1, 0, 0, 0], dtype=int32)
313 +    >>> clustering.column_labels_ #doctest: +SKIP
314 +    array([0, 0], dtype=int32)
315 +    >>> clustering
316 +    SpectralCoclustering(n_clusters=2, random_state=0)
317 +    """
318 +
319 +    _parameter_constraints: dict = {
320 +        **BaseSpectral._parameter_constraints,
321 +        "n_clusters": [Interval(Integral, 1, None, closed="left")],
322 +    }
323 +
324 +    def __init__(
325 +        self,
326 +        n_clusters=3,
327 +        *,
328 +        svd_method="randomized",
329 +        n_svd_vecs=None,
330 +        mini_batch=False,
331 +        init="k-means++",
332 +        n_init=10,
333 +        random_state=None,
334 +    ):
335 +        super().__init__(
336 +            n_clusters, svd_method, n_svd_vecs, mini_batch, init, n_init, random_state
337 +        )
338 +
339 +    def _check_parameters(self, n_samples):
340 +        if self.n_clusters > n_samples:
341 +            raise ValueError(
342 +                f"n_clusters should be <= n_samples={n_samples}. Got"
343 +                f" {self.n_clusters} instead."
344 +            )
345 +
346 +    def _fit(self, X):
347 +        normalized_data, row_diag, col_diag = _scale_normalize(X)
348 +        n_sv = 1 + int(np.ceil(np.log2(self.n_clusters)))
349 +        u, v = self._svd(normalized_data, n_sv, n_discard=1)
350 +        z = np.vstack((row_diag[:, np.newaxis] * u, col_diag[:, np.newaxis] * v))
351 +
352 +        _, labels = self._k_means(z, self.n_clusters)
353 +
354 +        n_rows = X.shape[0]
355 +        self.row_labels_ = labels[:n_rows]
356 +        self.column_labels_ = labels[n_rows:]
357 +
358 +        self.rows_ = np.vstack([self.row_labels_ == c for c in range(self.n_clusters)])
359 +        self.columns_ = np.vstack(
360 +            [self.column_labels_ == c for c in range(self.n_clusters)]
361 +        )
362 +
363 +
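Following `_fit` above, a sketch that fits the co-clustering on a shuffled block-diagonal matrix and uses the fitted row/column labels to recover the block structure (a common way to inspect the biclusters; data generated with `make_biclusters` for illustration):

    import numpy as np
    from sklearn.cluster import SpectralCoclustering
    from sklearn.datasets import make_biclusters

    data, rows, cols = make_biclusters((30, 30), n_clusters=3, shuffle=True,
                                       random_state=0)
    model = SpectralCoclustering(n_clusters=3, random_state=0).fit(data)
    # Sorting rows and columns by bicluster label makes the blocks contiguous
    fit_data = data[np.argsort(model.row_labels_)]
    fit_data = fit_data[:, np.argsort(model.column_labels_)]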
364 +class SpectralBiclustering(BaseSpectral):
365 +    """Spectral biclustering (Kluger, 2003).
366 +
367 +    Partitions rows and columns under the assumption that the data has
368 +    an underlying checkerboard structure. For instance, if there are
369 +    two row partitions and three column partitions, each row will
370 +    belong to three biclusters, and each column will belong to two
371 +    biclusters. The outer product of the corresponding row and column
372 +    label vectors gives this checkerboard structure.
373 +
374 +    Read more in the :ref:`User Guide <spectral_biclustering>`.
375 +
376 +    Parameters
377 +    ----------
378 +    n_clusters : int or tuple (n_row_clusters, n_column_clusters), default=3
379 +        The number of row and column clusters in the checkerboard
380 +        structure.
381 +
382 +    method : {'bistochastic', 'scale', 'log'}, default='bistochastic'
383 +        Method of normalizing and converting singular vectors into
384 +        biclusters. May be one of 'scale', 'bistochastic', or 'log'.
385 +        The authors recommend using 'log'. If the data is sparse,
386 +        however, log normalization will not work, which is why the
387 +        default is 'bistochastic'.
388 +
389 +        .. warning::
390 +           if `method='log'`, the data must not be sparse.
391 +
392 +    n_components : int, default=6
393 +        Number of singular vectors to check.
394 +
395 +    n_best : int, default=3
396 +        Number of best singular vectors to which to project the data
397 +        for clustering.
398 +
399 +    svd_method : {'randomized', 'arpack'}, default='randomized'
400 +        Selects the algorithm for finding singular vectors. May be
401 +        'randomized' or 'arpack'. If 'randomized', uses
402 +        :func:`~sklearn.utils.extmath.randomized_svd`, which may be faster
403 +        for large matrices. If 'arpack', uses
404 +        `scipy.sparse.linalg.svds`, which is more accurate, but
405 +        possibly slower in some cases.
406 +
407 +    n_svd_vecs : int, default=None
408 +        Number of vectors to use in calculating the SVD. Corresponds
409 +        to `ncv` when `svd_method='arpack'` and `n_oversamples` when
410 +        `svd_method` is 'randomized'.
411 +
412 +    mini_batch : bool, default=False
413 +        Whether to use mini-batch k-means, which is faster but may get
414 +        different results.
415 +
416 +    init : {'k-means++', 'random'} or ndarray of shape (n_clusters, n_features), \
417 +            default='k-means++'
418 +        Method for initialization of k-means algorithm; defaults to
419 +        'k-means++'.
420 +
421 +    n_init : int, default=10
422 +        Number of random initializations that are tried with the
423 +        k-means algorithm.
424 +
425 +        If mini-batch k-means is used, the best initialization is
426 +        chosen and the algorithm runs once. Otherwise, the algorithm
427 +        is run for each initialization and the best solution chosen.
428 +
429 +    random_state : int, RandomState instance, default=None
430 +        Used for randomizing the singular value decomposition and the k-means
431 +        initialization. Use an int to make the randomness deterministic.
432 +        See :term:`Glossary <random_state>`.
433 +
434 +    Attributes
435 +    ----------
436 +    rows_ : array-like of shape (n_row_clusters, n_rows)
437 +        Results of the clustering. `rows[i, r]` is True if
438 +        cluster `i` contains row `r`. Available only after calling ``fit``.
439 +
440 +    columns_ : array-like of shape (n_column_clusters, n_columns)
441 +        Results of the clustering, like `rows`.
442 +
443 +    row_labels_ : array-like of shape (n_rows,)
444 +        Row partition labels.
445 +
446 +    column_labels_ : array-like of shape (n_cols,)
447 +        Column partition labels.
448 +
449 +    biclusters_ : tuple of two ndarrays
450 +        The tuple contains the `rows_` and `columns_` arrays.
451 +
452 +    n_features_in_ : int
453 +        Number of features seen during :term:`fit`.
454 +
455 +        .. versionadded:: 0.24
456 +
457 +    feature_names_in_ : ndarray of shape (`n_features_in_`,)
458 +        Names of features seen during :term:`fit`. Defined only when `X`
459 +        has feature names that are all strings.
460 +
461 +        .. versionadded:: 1.0
462 +
463 +    See Also
464 +    --------
465 +    SpectralCoclustering : Spectral Co-Clustering algorithm (Dhillon, 2001).
466 +
467 +    References
468 +    ----------
469 +
470 +    * :doi:`Kluger, Yuval, et al., 2003. Spectral biclustering of microarray
471 +      data: coclustering genes and conditions.
472 +      <10.1101/gr.648603>`
473 +
474 +    Examples
475 +    --------
476 +    >>> from sklearn.cluster import SpectralBiclustering
477 +    >>> import numpy as np
478 +    >>> X = np.array([[1, 1], [2, 1], [1, 0],
|
479 |
+
... [4, 7], [3, 5], [3, 6]])
|
480 |
+
>>> clustering = SpectralBiclustering(n_clusters=2, random_state=0).fit(X)
|
481 |
+
>>> clustering.row_labels_
|
482 |
+
array([1, 1, 1, 0, 0, 0], dtype=int32)
|
483 |
+
>>> clustering.column_labels_
|
484 |
+
array([1, 0], dtype=int32)
|
485 |
+
>>> clustering
|
486 |
+
SpectralBiclustering(n_clusters=2, random_state=0)
|
487 |
+
"""
|
488 |
+
|
489 |
+
_parameter_constraints: dict = {
|
490 |
+
**BaseSpectral._parameter_constraints,
|
491 |
+
"n_clusters": [Interval(Integral, 1, None, closed="left"), tuple],
|
492 |
+
"method": [StrOptions({"bistochastic", "scale", "log"})],
|
493 |
+
"n_components": [Interval(Integral, 1, None, closed="left")],
|
494 |
+
"n_best": [Interval(Integral, 1, None, closed="left")],
|
495 |
+
}
|
496 |
+
|
497 |
+
def __init__(
|
498 |
+
self,
|
499 |
+
n_clusters=3,
|
500 |
+
*,
|
501 |
+
method="bistochastic",
|
502 |
+
n_components=6,
|
503 |
+
n_best=3,
|
504 |
+
svd_method="randomized",
|
505 |
+
n_svd_vecs=None,
|
506 |
+
mini_batch=False,
|
507 |
+
init="k-means++",
|
508 |
+
n_init=10,
|
509 |
+
random_state=None,
|
510 |
+
):
|
511 |
+
super().__init__(
|
512 |
+
n_clusters, svd_method, n_svd_vecs, mini_batch, init, n_init, random_state
|
513 |
+
)
|
514 |
+
self.method = method
|
515 |
+
self.n_components = n_components
|
516 |
+
self.n_best = n_best
|
517 |
+
|
518 |
+
def _check_parameters(self, n_samples):
|
519 |
+
if isinstance(self.n_clusters, Integral):
|
520 |
+
if self.n_clusters > n_samples:
|
521 |
+
raise ValueError(
|
522 |
+
f"n_clusters should be <= n_samples={n_samples}. Got"
|
523 |
+
f" {self.n_clusters} instead."
|
524 |
+
)
|
525 |
+
else: # tuple
|
526 |
+
try:
|
527 |
+
n_row_clusters, n_column_clusters = self.n_clusters
|
528 |
+
check_scalar(
|
529 |
+
n_row_clusters,
|
530 |
+
"n_row_clusters",
|
531 |
+
target_type=Integral,
|
532 |
+
min_val=1,
|
533 |
+
max_val=n_samples,
|
534 |
+
)
|
535 |
+
check_scalar(
|
536 |
+
n_column_clusters,
|
537 |
+
"n_column_clusters",
|
538 |
+
target_type=Integral,
|
539 |
+
min_val=1,
|
540 |
+
max_val=n_samples,
|
541 |
+
)
|
542 |
+
except (ValueError, TypeError) as e:
|
543 |
+
raise ValueError(
|
544 |
+
"Incorrect parameter n_clusters has value:"
|
545 |
+
f" {self.n_clusters}. It should either be a single integer"
|
546 |
+
" or an iterable with two integers:"
|
547 |
+
" (n_row_clusters, n_column_clusters)"
|
548 |
+
" And the values are should be in the"
|
549 |
+
" range: (1, n_samples)"
|
550 |
+
) from e
|
551 |
+
|
552 |
+
if self.n_best > self.n_components:
|
553 |
+
raise ValueError(
|
554 |
+
f"n_best={self.n_best} must be <= n_components={self.n_components}."
|
555 |
+
)
|
556 |
+
|
557 |
+
def _fit(self, X):
|
558 |
+
n_sv = self.n_components
|
559 |
+
if self.method == "bistochastic":
|
560 |
+
normalized_data = _bistochastic_normalize(X)
|
561 |
+
n_sv += 1
|
562 |
+
elif self.method == "scale":
|
563 |
+
normalized_data, _, _ = _scale_normalize(X)
|
564 |
+
n_sv += 1
|
565 |
+
elif self.method == "log":
|
566 |
+
normalized_data = _log_normalize(X)
|
567 |
+
n_discard = 0 if self.method == "log" else 1
|
568 |
+
u, v = self._svd(normalized_data, n_sv, n_discard)
|
569 |
+
ut = u.T
|
570 |
+
vt = v.T
|
571 |
+
|
572 |
+
try:
|
573 |
+
n_row_clusters, n_col_clusters = self.n_clusters
|
574 |
+
except TypeError:
|
575 |
+
n_row_clusters = n_col_clusters = self.n_clusters
|
576 |
+
|
577 |
+
best_ut = self._fit_best_piecewise(ut, self.n_best, n_row_clusters)
|
578 |
+
|
579 |
+
best_vt = self._fit_best_piecewise(vt, self.n_best, n_col_clusters)
|
580 |
+
|
581 |
+
self.row_labels_ = self._project_and_cluster(X, best_vt.T, n_row_clusters)
|
582 |
+
|
583 |
+
self.column_labels_ = self._project_and_cluster(X.T, best_ut.T, n_col_clusters)
|
584 |
+
|
585 |
+
self.rows_ = np.vstack(
|
586 |
+
[
|
587 |
+
self.row_labels_ == label
|
588 |
+
for label in range(n_row_clusters)
|
589 |
+
for _ in range(n_col_clusters)
|
590 |
+
]
|
591 |
+
)
|
592 |
+
self.columns_ = np.vstack(
|
593 |
+
[
|
594 |
+
self.column_labels_ == label
|
595 |
+
for _ in range(n_row_clusters)
|
596 |
+
for label in range(n_col_clusters)
|
597 |
+
]
|
598 |
+
)
|
599 |
+
|
600 |
+
def _fit_best_piecewise(self, vectors, n_best, n_clusters):
|
601 |
+
"""Find the ``n_best`` vectors that are best approximated by piecewise
|
602 |
+
constant vectors.
|
603 |
+
|
604 |
+
The piecewise vectors are found by k-means; the best is chosen
|
605 |
+
according to Euclidean distance.
|
606 |
+
|
607 |
+
"""
|
608 |
+
|
609 |
+
def make_piecewise(v):
|
610 |
+
centroid, labels = self._k_means(v.reshape(-1, 1), n_clusters)
|
611 |
+
return centroid[labels].ravel()
|
612 |
+
|
613 |
+
piecewise_vectors = np.apply_along_axis(make_piecewise, axis=1, arr=vectors)
|
614 |
+
dists = np.apply_along_axis(norm, axis=1, arr=(vectors - piecewise_vectors))
|
615 |
+
result = vectors[np.argsort(dists)[:n_best]]
|
616 |
+
return result
|
617 |
+
|
618 |
+
def _project_and_cluster(self, data, vectors, n_clusters):
|
619 |
+
"""Project ``data`` to ``vectors`` and cluster the result."""
|
620 |
+
projected = safe_sparse_dot(data, vectors)
|
621 |
+
_, labels = self._k_means(projected, n_clusters)
|
622 |
+
return labels
|
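
# Illustrative usage sketch (not part of the library file above): with a
# tuple n_clusters=(n_row_clusters, n_column_clusters), SpectralBiclustering
# exposes one boolean mask per bicluster in `rows_` and `columns_`, i.e.
# n_row_clusters * n_col_clusters of them, matching the checkerboard
# structure described in the class docstring.
import numpy as np
from sklearn.cluster import SpectralBiclustering

X_demo = np.random.RandomState(0).rand(20, 10)
model = SpectralBiclustering(n_clusters=(2, 3), random_state=0).fit(X_demo)
assert model.rows_.shape == (2 * 3, 20)  # one row mask per bicluster
assert model.columns_.shape == (2 * 3, 10)  # one column mask per bicluster
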
venv/lib/python3.10/site-packages/sklearn/cluster/_dbscan_inner.cpython-310-x86_64-linux-gnu.so
ADDED
Binary file (221 kB)

venv/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/__init__.py
ADDED
File without changes

venv/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (192 Bytes)

venv/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/__pycache__/hdbscan.cpython-310.pyc
ADDED
Binary file (31 kB)

venv/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/_linkage.cpython-310-x86_64-linux-gnu.so
ADDED
Binary file (258 kB)

venv/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/_reachability.cpython-310-x86_64-linux-gnu.so
ADDED
Binary file (365 kB)

venv/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/_tree.cpython-310-x86_64-linux-gnu.so
ADDED
Binary file (385 kB)

venv/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/_tree.pxd
ADDED
@@ -0,0 +1,49 @@
# Copyright (c) 2015, Leland McInnes
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:

# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.

# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.

# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.

from ...utils._typedefs cimport intp_t, float64_t, uint8_t
cimport numpy as cnp

# This corresponds to the scipy.cluster.hierarchy format
ctypedef packed struct HIERARCHY_t:
    intp_t left_node
    intp_t right_node
    float64_t value
    intp_t cluster_size

# Effectively an edgelist encoding a parent/child pair, along with a value and
# the corresponding cluster_size in each row providing a tree structure.
ctypedef packed struct CONDENSED_t:
    intp_t parent
    intp_t child
    float64_t value
    intp_t cluster_size

cdef extern from "numpy/arrayobject.h":
    intp_t * PyArray_SHAPE(cnp.PyArrayObject *)
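
# For readers less familiar with Cython: the packed HIERARCHY_t struct above
# plausibly corresponds to a numpy structured dtype such as the sketch below
# (the dtype name is illustrative, not from the library), with one record per
# merge in scipy.cluster.hierarchy order.
import numpy as np

hierarchy_like_dtype = np.dtype([
    ("left_node", np.intp),
    ("right_node", np.intp),
    ("value", np.float64),
    ("cluster_size", np.intp),
])

# Toy dendrogram: points 0 and 1 merge at distance 0.5; that new cluster
# (node 3 in scipy numbering) then merges with point 2 at distance 1.2.
toy_tree = np.array([(0, 1, 0.5, 2), (3, 2, 1.2, 3)], dtype=hierarchy_like_dtype)
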
venv/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/hdbscan.py
ADDED
@@ -0,0 +1,1018 @@
"""
HDBSCAN: Hierarchical Density-Based Spatial Clustering
of Applications with Noise
"""
# Authors: Leland McInnes <[email protected]>
#          Steve Astels <[email protected]>
#          John Healy <[email protected]>
#          Meekail Zain <[email protected]>
# Copyright (c) 2015, Leland McInnes
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:

# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.

# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.

# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.

from numbers import Integral, Real
from warnings import warn

import numpy as np
from scipy.sparse import csgraph, issparse

from ...base import BaseEstimator, ClusterMixin, _fit_context
from ...metrics import pairwise_distances
from ...metrics._dist_metrics import DistanceMetric
from ...neighbors import BallTree, KDTree, NearestNeighbors
from ...utils._param_validation import Interval, StrOptions
from ...utils.validation import _allclose_dense_sparse, _assert_all_finite
from ._linkage import (
    MST_edge_dtype,
    make_single_linkage,
    mst_from_data_matrix,
    mst_from_mutual_reachability,
)
from ._reachability import mutual_reachability_graph
from ._tree import HIERARCHY_dtype, labelling_at_cut, tree_to_labels

FAST_METRICS = set(KDTree.valid_metrics + BallTree.valid_metrics)

# Encodings are arbitrary but must be strictly negative.
# The current encodings are chosen as extensions to the -1 noise label.
# Avoided enums so that the end user only deals with simple labels.
_OUTLIER_ENCODING: dict = {
    "infinite": {
        "label": -2,
        # The probability could also be 1, since infinite points are certainly
        # infinite outliers, however 0 is convention from the HDBSCAN library
        # implementation.
        "prob": 0,
    },
    "missing": {
        "label": -3,
        # A nan probability is chosen to emphasize the fact that the
        # corresponding data was not considered in the clustering problem.
        "prob": np.nan,
    },
}
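

# Illustrative sketch (toy helper, not part of the module): how the encoding
# above is applied, mirroring the remapping done at the end of `HDBSCAN.fit`.
def _sketch_apply_outlier_encoding(labels, probs, infinite_index, missing_index):
    labels, probs = labels.copy(), probs.copy()
    labels[infinite_index] = _OUTLIER_ENCODING["infinite"]["label"]  # -2
    probs[infinite_index] = _OUTLIER_ENCODING["infinite"]["prob"]  # 0
    labels[missing_index] = _OUTLIER_ENCODING["missing"]["label"]  # -3
    probs[missing_index] = _OUTLIER_ENCODING["missing"]["prob"]  # np.nan
    return labels, probs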


def _brute_mst(mutual_reachability, min_samples):
    """
    Builds a minimum spanning tree (MST) from the provided mutual-reachability
    values. This function dispatches to a custom Cython implementation for
    dense arrays, and `scipy.sparse.csgraph.minimum_spanning_tree` for sparse
    arrays/matrices.

    Parameters
    ----------
    mutual_reachability : {ndarray, sparse matrix} of shape \
            (n_samples, n_samples)
        Weighted adjacency matrix of the mutual reachability graph.

    min_samples : int
        The number of samples in a neighborhood for a point
        to be considered as a core point. This includes the point itself.

    Returns
    -------
    mst : ndarray of shape (n_samples - 1,), dtype=MST_edge_dtype
        The MST representation of the mutual-reachability graph. The MST is
        represented as a collection of edges.
    """
    if not issparse(mutual_reachability):
        return mst_from_mutual_reachability(mutual_reachability)

    # Check if the mutual reachability matrix has any rows which have
    # fewer than `min_samples` non-zero elements.
    indptr = mutual_reachability.indptr
    num_points = mutual_reachability.shape[0]
    if any((indptr[i + 1] - indptr[i]) < min_samples for i in range(num_points)):
        raise ValueError(
            f"There exist points with fewer than {min_samples} neighbors. Ensure"
            " your distance matrix has non-zero values for at least"
            f" `min_samples`={min_samples} neighbors for each point (i.e. a K-nn"
            " graph), or specify a `max_distance` in `metric_params` to use when"
            " distances are missing."
        )
    # Check connected components of the mutual reachability graph.
    # If more than one connected component is present,
    # it means that the graph is disconnected.
    n_components = csgraph.connected_components(
        mutual_reachability, directed=False, return_labels=False
    )
    if n_components > 1:
        raise ValueError(
            f"Sparse mutual reachability matrix has {n_components} connected"
            " components. HDBSCAN cannot be performed on a disconnected graph. Ensure"
            " that the sparse distance matrix has only one connected component."
        )

    # Compute the minimum spanning tree for the sparse graph
    sparse_min_spanning_tree = csgraph.minimum_spanning_tree(mutual_reachability)
    rows, cols = sparse_min_spanning_tree.nonzero()
    mst = np.rec.fromarrays(
        [rows, cols, sparse_min_spanning_tree.data],
        dtype=MST_edge_dtype,
    )
    return mst
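

# Illustrative sketch (toy data, not part of the module): the sparse branch
# of `_brute_mst` keeps exactly n_samples - 1 edges of the mutual-reachability
# graph, here a fully connected 3-point graph in CSR form.
def _sketch_brute_mst_demo():
    from scipy.sparse import csr_matrix

    graph = csr_matrix(
        [[0.0, 1.0, 2.0],
         [1.0, 0.0, 1.5],
         [2.0, 1.5, 0.0]]
    )
    # Two edges remain, e.g. (0, 1) at weight 1.0 and (1, 2) at weight 1.5.
    return _brute_mst(graph, min_samples=1)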


def _process_mst(min_spanning_tree):
    """
    Builds a single-linkage tree (SLT) from the provided minimum spanning tree
    (MST). The MST is first sorted, then processed by a custom Cython routine.

    Parameters
    ----------
    min_spanning_tree : ndarray of shape (n_samples - 1,), dtype=MST_edge_dtype
        The MST representation of the mutual-reachability graph. The MST is
        represented as a collection of edges.

    Returns
    -------
    single_linkage : ndarray of shape (n_samples - 1,), dtype=HIERARCHY_dtype
        The single-linkage tree (dendrogram) built from the MST.
    """
    # Sort edges of the min_spanning_tree by weight
    row_order = np.argsort(min_spanning_tree["distance"])
    min_spanning_tree = min_spanning_tree[row_order]
    # Convert edge list into standard hierarchical clustering format
    return make_single_linkage(min_spanning_tree)
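

# Illustrative sketch (toy dtype, not the library's MST_edge_dtype): the sort
# in `_process_mst` is an argsort over the "distance" field of a structured
# edge array, putting the shortest edge first.
def _sketch_sort_edges_demo():
    edges = np.array(
        [(0, 1, 2.0), (1, 2, 0.5)],
        dtype=[("a", np.intp), ("b", np.intp), ("distance", np.float64)],
    )
    return edges[np.argsort(edges["distance"])]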


def _hdbscan_brute(
    X,
    min_samples=5,
    alpha=None,
    metric="euclidean",
    n_jobs=None,
    copy=False,
    **metric_params,
):
    """
    Builds a single-linkage tree (SLT) from the input data `X`. If
    `metric="precomputed"` then `X` must be a symmetric array of distances.
    Otherwise, the pairwise distances are calculated directly and passed to
    `mutual_reachability_graph`.

    Parameters
    ----------
    X : ndarray of shape (n_samples, n_features) or (n_samples, n_samples)
        Either the raw data from which to compute the pairwise distances,
        or the precomputed distances.

    min_samples : int, default=5
        The number of samples in a neighborhood for a point
        to be considered as a core point. This includes the point itself.

    alpha : float, default=None
        A distance scaling parameter as used in robust single linkage.

    metric : str or callable, default='euclidean'
        The metric to use when calculating distance between instances in a
        feature array.

        - If metric is a string or callable, it must be one of
          the options allowed by :func:`~sklearn.metrics.pairwise_distances`
          for its metric parameter.

        - If metric is "precomputed", X is assumed to be a distance matrix and
          must be square.

    n_jobs : int, default=None
        The number of jobs to use for computing the pairwise distances. This
        works by breaking down the pairwise matrix into n_jobs even slices and
        computing them in parallel. This parameter is passed directly to
        :func:`~sklearn.metrics.pairwise_distances`.

        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.

    copy : bool, default=False
        If `copy=True` then any time an in-place modification would be made
        that would overwrite `X`, a copy will first be made, guaranteeing that
        the original data will be unchanged. Currently, it only applies when
        `metric="precomputed"`, when passing a dense array or a CSR sparse
        array/matrix.

    metric_params : dict, default=None
        Arguments passed to the distance metric.

    Returns
    -------
    single_linkage : ndarray of shape (n_samples - 1,), dtype=HIERARCHY_dtype
        The single-linkage tree (dendrogram) built from the MST.
    """
    if metric == "precomputed":
        if X.shape[0] != X.shape[1]:
            raise ValueError(
                "The precomputed distance matrix is expected to be symmetric, however"
                f" it has shape {X.shape}. Please verify that the"
                " distance matrix was constructed correctly."
            )
        if not _allclose_dense_sparse(X, X.T):
            raise ValueError(
                "The precomputed distance matrix is expected to be symmetric, however"
                " its values appear to be asymmetric. Please verify that the distance"
                " matrix was constructed correctly."
            )

        distance_matrix = X.copy() if copy else X
    else:
        distance_matrix = pairwise_distances(
            X, metric=metric, n_jobs=n_jobs, **metric_params
        )
    distance_matrix /= alpha

    max_distance = metric_params.get("max_distance", 0.0)
    if issparse(distance_matrix) and distance_matrix.format != "csr":
        # we need CSR format to avoid a conversion in `_brute_mst` when calling
        # `csgraph.connected_components`
        distance_matrix = distance_matrix.tocsr()

    # Note that `distance_matrix` is manipulated in-place, however we do not
    # need it for anything else past this point, hence the operation is safe.
    mutual_reachability_ = mutual_reachability_graph(
        distance_matrix, min_samples=min_samples, max_distance=max_distance
    )
    min_spanning_tree = _brute_mst(mutual_reachability_, min_samples=min_samples)
    # Warn if the MST couldn't be constructed around the missing distances
    if np.isinf(min_spanning_tree["distance"]).any():
        warn(
            (
                "The minimum spanning tree contains edge weights with value "
                "infinity. Potentially, you are missing too many distances "
                "in the initial distance matrix for the given neighborhood "
                "size."
            ),
            UserWarning,
        )
    return _process_mst(min_spanning_tree)
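

# Illustrative sketch of the quantity `mutual_reachability_graph` computes
# from the dense distance matrix above (stated here as an assumption about
# the standard HDBSCAN definition): mr(a, b) = max(core_k(a), core_k(b), d(a, b)).
def _sketch_mutual_reachability(dist, min_samples):
    # Core distance: distance to the min_samples-th nearest neighbour,
    # counting the point itself (the 0 on the diagonal).
    core = np.sort(dist, axis=1)[:, min_samples - 1]
    return np.maximum(np.maximum(core[:, None], core[None, :]), dist)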


def _hdbscan_prims(
    X,
    algo,
    min_samples=5,
    alpha=1.0,
    metric="euclidean",
    leaf_size=40,
    n_jobs=None,
    **metric_params,
):
    """
    Builds a single-linkage tree (SLT) from the input data `X`. Core
    distances are obtained from a tree-based nearest-neighbors query, and
    the mutual-reachability MST is computed implicitly by
    `mst_from_data_matrix`.

    Parameters
    ----------
    X : ndarray of shape (n_samples, n_features)
        The raw data.

    algo : {"kd_tree", "ball_tree"}
        Which tree algorithm to use for the nearest-neighbors queries.

    min_samples : int, default=5
        The number of samples in a neighborhood for a point
        to be considered as a core point. This includes the point itself.

    alpha : float, default=1.0
        A distance scaling parameter as used in robust single linkage.

    metric : str or callable, default='euclidean'
        The metric to use when calculating distance between instances in a
        feature array. `metric` must be one of the options allowed by
        :func:`~sklearn.metrics.pairwise_distances` for its metric
        parameter.

    leaf_size : int, default=40
        Leaf size passed to the KD-tree or Ball-tree used for the
        nearest-neighbors queries.

    n_jobs : int, default=None
        The number of jobs to use for computing the pairwise distances. This
        works by breaking down the pairwise matrix into n_jobs even slices and
        computing them in parallel. This parameter is passed directly to
        :func:`~sklearn.metrics.pairwise_distances`.

        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.

    metric_params : dict, default=None
        Arguments passed to the distance metric.

    Returns
    -------
    single_linkage : ndarray of shape (n_samples - 1,), dtype=HIERARCHY_dtype
        The single-linkage tree (dendrogram) built from the MST.
    """
    # The Cython routines used require contiguous arrays
    X = np.asarray(X, order="C")

    # Get distance to kth nearest neighbour
    nbrs = NearestNeighbors(
        n_neighbors=min_samples,
        algorithm=algo,
        leaf_size=leaf_size,
        metric=metric,
        metric_params=metric_params,
        n_jobs=n_jobs,
        p=None,
    ).fit(X)

    neighbors_distances, _ = nbrs.kneighbors(X, min_samples, return_distance=True)
    core_distances = np.ascontiguousarray(neighbors_distances[:, -1])
    dist_metric = DistanceMetric.get_metric(metric, **metric_params)

    # Mutual reachability distance is implicit in mst_from_data_matrix
    min_spanning_tree = mst_from_data_matrix(X, core_distances, dist_metric, alpha)
    return _process_mst(min_spanning_tree)
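

# Illustrative sketch (toy helper, not part of the module) of the
# core-distance step above: each point's core distance is its distance to
# the `min_samples`-th nearest neighbour, with the point itself counted.
def _sketch_core_distances(X, min_samples=3):
    nbrs = NearestNeighbors(n_neighbors=min_samples).fit(X)
    distances, _ = nbrs.kneighbors(X)
    return distances[:, -1]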


def remap_single_linkage_tree(tree, internal_to_raw, non_finite):
    """
    Takes an internal single_linkage_tree structure, adds back in a set of
    points that were initially detected as non-finite, and returns that new
    tree. These points will all be merged into the final node at np.inf
    distance and considered noise points.

    Parameters
    ----------
    tree : ndarray of shape (n_samples - 1,), dtype=HIERARCHY_dtype
        The single-linkage tree (dendrogram) built from the MST.
    internal_to_raw : dict
        A mapping from internal integer index to the raw integer index.
    non_finite : ndarray
        Boolean array of which entries in the raw data are non-finite.
    """
    finite_count = len(internal_to_raw)

    outlier_count = len(non_finite)
    for i, _ in enumerate(tree):
        left = tree[i]["left_node"]
        right = tree[i]["right_node"]

        if left < finite_count:
            tree[i]["left_node"] = internal_to_raw[left]
        else:
            tree[i]["left_node"] = left + outlier_count
        if right < finite_count:
            tree[i]["right_node"] = internal_to_raw[right]
        else:
            tree[i]["right_node"] = right + outlier_count

    outlier_tree = np.zeros(len(non_finite), dtype=HIERARCHY_dtype)
    last_cluster_id = max(
        tree[tree.shape[0] - 1]["left_node"], tree[tree.shape[0] - 1]["right_node"]
    )
    last_cluster_size = tree[tree.shape[0] - 1]["cluster_size"]
    for i, outlier in enumerate(non_finite):
        outlier_tree[i] = (outlier, last_cluster_id + 1, np.inf, last_cluster_size + 1)
        last_cluster_id += 1
        last_cluster_size += 1
    tree = np.concatenate([tree, outlier_tree])
    return tree
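

# Illustrative sketch (toy numbers) of the index arithmetic above: with raw
# row 1 non-finite, the finite rows map back through `internal_to_raw`, while
# internal cluster ids (>= finite_count) shift up by the outlier count.
def _sketch_remap_demo():
    internal_to_raw = {0: 0, 1: 2}  # raw row 1 was non-finite
    finite_count, outlier_count = len(internal_to_raw), 1
    node = 1  # an internal leaf index
    return internal_to_raw[node] if node < finite_count else node + outlier_count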


def _get_finite_row_indices(matrix):
    """
    Returns the indices of the purely finite rows of a
    sparse matrix or dense ndarray.
    """
    if issparse(matrix):
        row_indices = np.array(
            [i for i, row in enumerate(matrix.tolil().data) if np.all(np.isfinite(row))]
        )
    else:
        (row_indices,) = np.isfinite(matrix.sum(axis=1)).nonzero()
    return row_indices
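

# Illustrative sketch (toy array): in the dense branch above, any nan or inf
# poisons the row sum, so only fully finite rows survive.
def _sketch_finite_rows_demo():
    m = np.array([[1.0, 2.0], [np.nan, 0.0], [3.0, np.inf], [4.0, 5.0]])
    return _get_finite_row_indices(m)  # array([0, 3])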


class HDBSCAN(ClusterMixin, BaseEstimator):
    """Cluster data using hierarchical density-based clustering.

    HDBSCAN - Hierarchical Density-Based Spatial Clustering of Applications
    with Noise. Performs :class:`~sklearn.cluster.DBSCAN` over varying epsilon
    values and integrates the result to find a clustering that gives the best
    stability over epsilon.
    This allows HDBSCAN to find clusters of varying densities (unlike
    :class:`~sklearn.cluster.DBSCAN`), and be more robust to parameter selection.
    Read more in the :ref:`User Guide <hdbscan>`.

    For an example of how to use HDBSCAN, as well as a comparison to
    :class:`~sklearn.cluster.DBSCAN`, please see the :ref:`plotting demo
    <sphx_glr_auto_examples_cluster_plot_hdbscan.py>`.

    .. versionadded:: 1.3

    Parameters
    ----------
    min_cluster_size : int, default=5
        The minimum number of samples in a group for that group to be
        considered a cluster; groupings smaller than this size will be left
        as noise.

    min_samples : int, default=None
        The number of samples in a neighborhood for a point
        to be considered as a core point. This includes the point itself.
        When `None`, defaults to `min_cluster_size`.

    cluster_selection_epsilon : float, default=0.0
        A distance threshold. Clusters below this value will be merged.
        See [5]_ for more information.

    max_cluster_size : int, default=None
        A limit to the size of clusters returned by the `"eom"` cluster
        selection algorithm. There is no limit when `max_cluster_size=None`.
        Has no effect if `cluster_selection_method="leaf"`.

    metric : str or callable, default='euclidean'
        The metric to use when calculating distance between instances in a
        feature array.

        - If metric is a string or callable, it must be one of
          the options allowed by :func:`~sklearn.metrics.pairwise_distances`
          for its metric parameter.

        - If metric is "precomputed", X is assumed to be a distance matrix and
          must be square.

    metric_params : dict, default=None
        Arguments passed to the distance metric.

    alpha : float, default=1.0
        A distance scaling parameter as used in robust single linkage.
        See [3]_ for more information.

    algorithm : {"auto", "brute", "kd_tree", "ball_tree"}, default="auto"
        Exactly which algorithm to use for computing core distances; by default
        this is set to `"auto"` which attempts to use a
        :class:`~sklearn.neighbors.KDTree` tree if possible, otherwise it uses
        a :class:`~sklearn.neighbors.BallTree` tree. Both `"kd_tree"` and
        `"ball_tree"` algorithms use the
        :class:`~sklearn.neighbors.NearestNeighbors` estimator.

        If the `X` passed during `fit` is sparse or `metric` is invalid for
        both :class:`~sklearn.neighbors.KDTree` and
        :class:`~sklearn.neighbors.BallTree`, then it resolves to use the
        `"brute"` algorithm.

        .. deprecated:: 1.4
           The `'kdtree'` option was deprecated in version 1.4,
           and will be renamed to `'kd_tree'` in 1.6.

        .. deprecated:: 1.4
           The `'balltree'` option was deprecated in version 1.4,
           and will be renamed to `'ball_tree'` in 1.6.

    leaf_size : int, default=40
        Leaf size for trees responsible for fast nearest neighbour queries when
        a KDTree or a BallTree are used as core-distance algorithms. A large
        dataset size and small `leaf_size` may induce excessive memory usage.
        If you are running out of memory consider increasing the `leaf_size`
        parameter. Ignored for `algorithm="brute"`.

    n_jobs : int, default=None
        Number of jobs to run in parallel to calculate distances.
        `None` means 1 unless in a :obj:`joblib.parallel_backend` context.
        `-1` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.

    cluster_selection_method : {"eom", "leaf"}, default="eom"
        The method used to select clusters from the condensed tree. The
        standard approach for HDBSCAN* is to use an Excess of Mass (`"eom"`)
        algorithm to find the most persistent clusters. Alternatively you can
        instead select the clusters at the leaves of the tree -- this provides
        the most fine grained and homogeneous clusters.

    allow_single_cluster : bool, default=False
        By default HDBSCAN* will not produce a single cluster; setting this
        to True will override this and allow single cluster results in
        the case that you feel this is a valid result for your dataset.

    store_centers : str, default=None
        Which, if any, cluster centers to compute and store. The options are:

        - `None` which does not compute nor store any centers.
        - `"centroid"` which calculates the center by taking the weighted
          average of their positions. Note that the algorithm uses the
          euclidean metric and does not guarantee that the output will be
          an observed data point.
        - `"medoid"` which calculates the center by taking the point in the
          fitted data which minimizes the distance to all other points in
          the cluster. This is slower than "centroid" since it requires
          computing additional pairwise distances between points of the
          same cluster but guarantees the output is an observed data point.
          The medoid is also well-defined for arbitrary metrics, and does not
          depend on a euclidean metric.
        - `"both"` which computes and stores both forms of centers.

    copy : bool, default=False
        If `copy=True` then any time an in-place modification would be made
        that would overwrite data passed to :term:`fit`, a copy will first be
        made, guaranteeing that the original data will be unchanged.
        Currently, it only applies when `metric="precomputed"`, when passing
        a dense array or a CSR sparse matrix and when `algorithm="brute"`.

    Attributes
    ----------
    labels_ : ndarray of shape (n_samples,)
        Cluster labels for each point in the dataset given to :term:`fit`.
        Outliers are labeled as follows:

        - Noisy samples are given the label -1.
        - Samples with infinite elements (+/- np.inf) are given the label -2.
        - Samples with missing data are given the label -3, even if they
          also have infinite elements.

    probabilities_ : ndarray of shape (n_samples,)
        The strength with which each sample is a member of its assigned
        cluster.

        - Clustered samples have probabilities proportional to the degree that
          they persist as part of the cluster.
        - Noisy samples have probability zero.
        - Samples with infinite elements (+/- np.inf) have probability 0.
        - Samples with missing data have probability `np.nan`.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

    centroids_ : ndarray of shape (n_clusters, n_features)
        A collection containing the centroid of each cluster calculated under
        the standard euclidean metric. The centroids may fall "outside" their
        respective clusters if the clusters themselves are non-convex.

        Note that `n_clusters` only counts non-outlier clusters. That is to
        say, the `-1, -2, -3` labels for the outlier clusters are excluded.

    medoids_ : ndarray of shape (n_clusters, n_features)
        A collection containing the medoid of each cluster calculated under
        whichever metric was passed to the `metric` parameter. The
        medoids are points in the original cluster which minimize the average
        distance to all other points in that cluster under the chosen metric.
        These can be thought of as the result of projecting the `metric`-based
        centroid back onto the cluster.

        Note that `n_clusters` only counts non-outlier clusters. That is to
        say, the `-1, -2, -3` labels for the outlier clusters are excluded.

    See Also
    --------
    DBSCAN : Density-Based Spatial Clustering of Applications
        with Noise.
    OPTICS : Ordering Points To Identify the Clustering Structure.
    Birch : Memory-efficient, online-learning algorithm.

    References
    ----------

    .. [1] :doi:`Campello, R. J., Moulavi, D., & Sander, J. Density-based clustering
       based on hierarchical density estimates.
       <10.1007/978-3-642-37456-2_14>`
    .. [2] :doi:`Campello, R. J., Moulavi, D., Zimek, A., & Sander, J.
       Hierarchical density estimates for data clustering, visualization,
       and outlier detection.<10.1145/2733381>`

    .. [3] `Chaudhuri, K., & Dasgupta, S. Rates of convergence for the
       cluster tree.
       <https://papers.nips.cc/paper/2010/hash/b534ba68236ba543ae44b22bd110a1d6-Abstract.html>`_

    .. [4] `Moulavi, D., Jaskowiak, P.A., Campello, R.J., Zimek, A. and
       Sander, J. Density-Based Clustering Validation.
       <https://www.dbs.ifi.lmu.de/~zimek/publications/SDM2014/DBCV.pdf>`_

    .. [5] :arxiv:`Malzer, C., & Baum, M. "A Hybrid Approach To Hierarchical
       Density-based Cluster Selection."<1911.02282>`.

    Examples
    --------
    >>> from sklearn.cluster import HDBSCAN
    >>> from sklearn.datasets import load_digits
    >>> X, _ = load_digits(return_X_y=True)
    >>> hdb = HDBSCAN(min_cluster_size=20)
    >>> hdb.fit(X)
    HDBSCAN(min_cluster_size=20)
    >>> hdb.labels_
    array([ 2, 6, -1, ..., -1, -1, -1])
    """

    _parameter_constraints = {
        "min_cluster_size": [Interval(Integral, left=2, right=None, closed="left")],
        "min_samples": [Interval(Integral, left=1, right=None, closed="left"), None],
        "cluster_selection_epsilon": [
            Interval(Real, left=0, right=None, closed="left")
        ],
        "max_cluster_size": [
            None,
            Interval(Integral, left=1, right=None, closed="left"),
        ],
        "metric": [StrOptions(FAST_METRICS | {"precomputed"}), callable],
        "metric_params": [dict, None],
        "alpha": [Interval(Real, left=0, right=None, closed="neither")],
        # TODO(1.6): Remove "kdtree" and "balltree" option
        "algorithm": [
            StrOptions(
                {"auto", "brute", "kd_tree", "ball_tree", "kdtree", "balltree"},
                deprecated={"kdtree", "balltree"},
            ),
        ],
        "leaf_size": [Interval(Integral, left=1, right=None, closed="left")],
        "n_jobs": [Integral, None],
        "cluster_selection_method": [StrOptions({"eom", "leaf"})],
        "allow_single_cluster": ["boolean"],
        "store_centers": [None, StrOptions({"centroid", "medoid", "both"})],
        "copy": ["boolean"],
    }

    def __init__(
        self,
        min_cluster_size=5,
        min_samples=None,
        cluster_selection_epsilon=0.0,
        max_cluster_size=None,
        metric="euclidean",
        metric_params=None,
        alpha=1.0,
        algorithm="auto",
        leaf_size=40,
        n_jobs=None,
        cluster_selection_method="eom",
        allow_single_cluster=False,
        store_centers=None,
        copy=False,
    ):
        self.min_cluster_size = min_cluster_size
        self.min_samples = min_samples
        self.alpha = alpha
        self.max_cluster_size = max_cluster_size
        self.cluster_selection_epsilon = cluster_selection_epsilon
        self.metric = metric
        self.metric_params = metric_params
        self.algorithm = algorithm
        self.leaf_size = leaf_size
        self.n_jobs = n_jobs
        self.cluster_selection_method = cluster_selection_method
        self.allow_single_cluster = allow_single_cluster
        self.store_centers = store_centers
        self.copy = copy

    @_fit_context(
        # HDBSCAN.metric is not validated yet
        prefer_skip_nested_validation=False
    )
    def fit(self, X, y=None):
        """Find clusters based on hierarchical density-based clustering.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features), or \
                ndarray of shape (n_samples, n_samples)
            A feature array, or array of distances between samples if
            `metric='precomputed'`.

        y : None
            Ignored.

        Returns
        -------
        self : object
            Returns self.
        """
        if self.metric == "precomputed" and self.store_centers is not None:
            raise ValueError(
                "Cannot store centers when using a precomputed distance matrix."
            )

        self._metric_params = self.metric_params or {}
        if self.metric != "precomputed":
            # Non-precomputed matrices may contain non-finite values.
            X = self._validate_data(
                X,
                accept_sparse=["csr", "lil"],
                force_all_finite=False,
                dtype=np.float64,
            )
            self._raw_data = X
            all_finite = True
            try:
                _assert_all_finite(X.data if issparse(X) else X)
            except ValueError:
                all_finite = False

            if not all_finite:
                # Pass only the purely finite indices into hdbscan
                # We will later assign all non-finite points their
                # corresponding labels, as specified in `_OUTLIER_ENCODING`

                # Reduce X to make the checks for missing/outlier samples more
                # convenient.
                reduced_X = X.sum(axis=1)

                # Samples with missing data are denoted by the presence of
                # `np.nan`
                missing_index = np.isnan(reduced_X).nonzero()[0]

                # Outlier samples are denoted by the presence of `np.inf`
                infinite_index = np.isinf(reduced_X).nonzero()[0]

                # Continue with only finite samples
                finite_index = _get_finite_row_indices(X)
                internal_to_raw = {x: y for x, y in enumerate(finite_index)}
                X = X[finite_index]
        elif issparse(X):
            # Handle sparse precomputed distance matrices separately
            X = self._validate_data(
                X,
                accept_sparse=["csr", "lil"],
                dtype=np.float64,
            )
        else:
            # Only non-sparse, precomputed distance matrices are handled here
            # and thereby allowed to contain numpy.inf for missing distances

            # Perform data validation after removing infinite values (numpy.inf)
            # from the given distance matrix.
            X = self._validate_data(X, force_all_finite=False, dtype=np.float64)
            if np.isnan(X).any():
                # TODO: Support np.nan in Cython implementation for precomputed
                # dense HDBSCAN
                raise ValueError("np.nan values found in precomputed-dense")
        if X.shape[0] == 1:
            raise ValueError("n_samples=1 while HDBSCAN requires more than one sample")
        self._min_samples = (
            self.min_cluster_size if self.min_samples is None else self.min_samples
        )

        if self._min_samples > X.shape[0]:
            raise ValueError(
                f"min_samples ({self._min_samples}) must be at most the number of"
                f" samples in X ({X.shape[0]})"
            )

        # TODO(1.6): Remove
        if self.algorithm == "kdtree":
            warn(
                (
                    "`algorithm='kdtree'` has been deprecated in 1.4 and will be"
                    " renamed to `'kd_tree'` in 1.6. To keep the past behaviour,"
                    " set `algorithm='kd_tree'`."
                ),
                FutureWarning,
            )
            self.algorithm = "kd_tree"

        # TODO(1.6): Remove
        if self.algorithm == "balltree":
            warn(
                (
                    "`algorithm='balltree'` has been deprecated in 1.4 and will be"
                    " renamed to `'ball_tree'` in 1.6. To keep the past behaviour,"
                    " set `algorithm='ball_tree'`."
                ),
                FutureWarning,
            )
            self.algorithm = "ball_tree"

        mst_func = None
        kwargs = dict(
            X=X,
            min_samples=self._min_samples,
            alpha=self.alpha,
            metric=self.metric,
            n_jobs=self.n_jobs,
            **self._metric_params,
        )
        if self.algorithm == "kd_tree" and self.metric not in KDTree.valid_metrics:
            raise ValueError(
                f"{self.metric} is not a valid metric for a KDTree-based algorithm."
                " Please select a different metric."
            )
        elif (
            self.algorithm == "ball_tree" and self.metric not in BallTree.valid_metrics
        ):
            raise ValueError(
                f"{self.metric} is not a valid metric for a BallTree-based algorithm."
                " Please select a different metric."
            )

        if self.algorithm != "auto":
            if (
                self.metric != "precomputed"
                and issparse(X)
                and self.algorithm != "brute"
            ):
                raise ValueError("Sparse data matrices only support algorithm `brute`.")

            if self.algorithm == "brute":
                mst_func = _hdbscan_brute
                kwargs["copy"] = self.copy
            elif self.algorithm == "kd_tree":
                mst_func = _hdbscan_prims
                kwargs["algo"] = "kd_tree"
                kwargs["leaf_size"] = self.leaf_size
            else:
                mst_func = _hdbscan_prims
                kwargs["algo"] = "ball_tree"
                kwargs["leaf_size"] = self.leaf_size
        else:
            if issparse(X) or self.metric not in FAST_METRICS:
                # We can't do much with sparse matrices ...
                mst_func = _hdbscan_brute
                kwargs["copy"] = self.copy
            elif self.metric in KDTree.valid_metrics:
                # TODO: Benchmark KD vs Ball Tree efficiency
                mst_func = _hdbscan_prims
                kwargs["algo"] = "kd_tree"
                kwargs["leaf_size"] = self.leaf_size
            else:
                # Metric is a valid BallTree metric
                mst_func = _hdbscan_prims
                kwargs["algo"] = "ball_tree"
                kwargs["leaf_size"] = self.leaf_size

        self._single_linkage_tree_ = mst_func(**kwargs)

        self.labels_, self.probabilities_ = tree_to_labels(
            self._single_linkage_tree_,
            self.min_cluster_size,
            self.cluster_selection_method,
            self.allow_single_cluster,
            self.cluster_selection_epsilon,
            self.max_cluster_size,
        )
        if self.metric != "precomputed" and not all_finite:
            # Remap indices to align with the original data in the case of
            # non-finite entries. Samples with np.inf get the label -2 and
            # samples with np.nan get the label -3, per `_OUTLIER_ENCODING`.
            self._single_linkage_tree_ = remap_single_linkage_tree(
                self._single_linkage_tree_,
                internal_to_raw,
                # There may be overlap for points w/ both `np.inf` and `np.nan`
                non_finite=set(np.hstack([infinite_index, missing_index])),
            )
            new_labels = np.empty(self._raw_data.shape[0], dtype=np.int32)
            new_labels[finite_index] = self.labels_
            new_labels[infinite_index] = _OUTLIER_ENCODING["infinite"]["label"]
            new_labels[missing_index] = _OUTLIER_ENCODING["missing"]["label"]
            self.labels_ = new_labels

            new_probabilities = np.zeros(self._raw_data.shape[0], dtype=np.float64)
            new_probabilities[finite_index] = self.probabilities_
            # Infinite outliers have probability 0 by convention, though this
            # is arbitrary.
            new_probabilities[infinite_index] = _OUTLIER_ENCODING["infinite"]["prob"]
            new_probabilities[missing_index] = _OUTLIER_ENCODING["missing"]["prob"]
            self.probabilities_ = new_probabilities

        if self.store_centers:
            self._weighted_cluster_center(X)
        return self
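
    # Illustrative sketch (toy data, kept in comments so the class body stays
    # intact): per the `labels_` documentation above, rows containing np.inf
    # come back labelled -2 and rows containing np.nan come back labelled -3.
    #
    #   X = np.array([[0.0], [0.1], [5.0], [np.inf], [np.nan]])
    #   labels = HDBSCAN(min_cluster_size=2).fit(X).labels_
    #   # labels[3] == -2 and labels[4] == -3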

    def fit_predict(self, X, y=None):
        """Cluster X and return the associated cluster labels.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features), or \
                ndarray of shape (n_samples, n_samples)
            A feature array, or array of distances between samples if
            `metric='precomputed'`.

        y : None
            Ignored.

        Returns
        -------
        y : ndarray of shape (n_samples,)
            Cluster labels.
        """
        self.fit(X)
        return self.labels_

    def _weighted_cluster_center(self, X):
        """Calculate and store the centroids/medoids of each cluster.

        This requires `X` to be a raw feature array, not precomputed
        distances. Rather than return outputs directly, this helper method
        instead stores them in the `self.{centroids, medoids}_` attributes.
        The choice of which attributes are calculated and stored is mediated
        by the value of `self.store_centers`.

        Parameters
        ----------
        X : ndarray of shape (n_samples, n_features)
            The feature array that the estimator was fit with.
        """
        # Number of non-noise clusters
        n_clusters = len(set(self.labels_) - {-1, -2})
        mask = np.empty((X.shape[0],), dtype=np.bool_)
        make_centroids = self.store_centers in ("centroid", "both")
        make_medoids = self.store_centers in ("medoid", "both")

        if make_centroids:
            self.centroids_ = np.empty((n_clusters, X.shape[1]), dtype=np.float64)
        if make_medoids:
            self.medoids_ = np.empty((n_clusters, X.shape[1]), dtype=np.float64)

        # Need to handle each cluster iteratively since each may have a
        # different number of samples, hence we can't create a homogeneous
        # 3D array.
        for idx in range(n_clusters):
            mask = self.labels_ == idx
            data = X[mask]
            strength = self.probabilities_[mask]
            if make_centroids:
                self.centroids_[idx] = np.average(data, weights=strength, axis=0)
            if make_medoids:
                # TODO: Implement weighted argmin PWD backend
                dist_mat = pairwise_distances(
                    data, metric=self.metric, **self._metric_params
                )
                dist_mat = dist_mat * strength
                medoid_index = np.argmin(dist_mat.sum(axis=1))
                self.medoids_[idx] = data[medoid_index]
        return
|
967 |
+
|
968 |
+
def dbscan_clustering(self, cut_distance, min_cluster_size=5):
|
969 |
+
"""Return clustering given by DBSCAN without border points.
|
970 |
+
|
971 |
+
Return clustering that would be equivalent to running DBSCAN* for a
|
972 |
+
particular cut_distance (or epsilon) DBSCAN* can be thought of as
|
973 |
+
DBSCAN without the border points. As such these results may differ
|
974 |
+
slightly from `cluster.DBSCAN` due to the difference in implementation
|
975 |
+
over the non-core points.
|
976 |
+
|
977 |
+
This can also be thought of as a flat clustering derived from constant
|
978 |
+
height cut through the single linkage tree.
|
979 |
+
|
980 |
+
This represents the result of selecting a cut value for robust single linkage
|
981 |
+
clustering. The `min_cluster_size` allows the flat clustering to declare noise
|
982 |
+
points (and cluster smaller than `min_cluster_size`).
|
983 |
+
|
984 |
+
Parameters
|
985 |
+
----------
|
986 |
+
cut_distance : float
|
987 |
+
The mutual reachability distance cut value to use to generate a
|
988 |
+
flat clustering.
|
989 |
+
|
990 |
+
min_cluster_size : int, default=5
|
991 |
+
Clusters smaller than this value with be called 'noise' and remain
|
992 |
+
unclustered in the resulting flat clustering.
|
993 |
+
|
994 |
+
Returns
|
995 |
+
-------
|
996 |
+
labels : ndarray of shape (n_samples,)
|
997 |
+
An array of cluster labels, one per datapoint.
|
998 |
+
Outliers are labeled as follows:
|
999 |
+
|
1000 |
+
- Noisy samples are given the label -1.
|
1001 |
+
- Samples with infinite elements (+/- np.inf) are given the label -2.
|
1002 |
+
- Samples with missing data are given the label -3, even if they
|
1003 |
+
also have infinite elements.
|
1004 |
+
"""
|
1005 |
+
labels = labelling_at_cut(
|
1006 |
+
self._single_linkage_tree_, cut_distance, min_cluster_size
|
1007 |
+
)
|
1008 |
+
# Infer indices from labels generated during `fit`
|
1009 |
+
infinite_index = self.labels_ == _OUTLIER_ENCODING["infinite"]["label"]
|
1010 |
+
missing_index = self.labels_ == _OUTLIER_ENCODING["missing"]["label"]
|
1011 |
+
|
1012 |
+
# Overwrite infinite/missing outlier samples (otherwise simple noise)
|
1013 |
+
labels[infinite_index] = _OUTLIER_ENCODING["infinite"]["label"]
|
1014 |
+
labels[missing_index] = _OUTLIER_ENCODING["missing"]["label"]
|
1015 |
+
return labels
|
1016 |
+
|
1017 |
+
def _more_tags(self):
|
1018 |
+
return {"allow_nan": self.metric != "precomputed"}
|
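As a quick orientation for the HDBSCAN code above, here is a minimal usage sketch (an editor's illustration, not part of the vendored diff). It assumes the public `sklearn.cluster.HDBSCAN` entry point that this module backs, plus `make_blobs` for toy data; the `cut_distance=1.5` is an arbitrary illustrative value.

import numpy as np
from sklearn.cluster import HDBSCAN
from sklearn.datasets import make_blobs

# Fit on toy data; store_centers="both" exercises _weighted_cluster_center.
X, _ = make_blobs(n_samples=200, centers=3, random_state=0)
model = HDBSCAN(min_cluster_size=10, store_centers="both").fit(X)

print(np.unique(model.labels_))   # cluster ids; -1 marks noise
print(model.centroids_.shape)     # probability-weighted centroids
# Reuse the stored single-linkage tree for a flat DBSCAN*-style cut.
flat_labels = model.dbscan_clustering(cut_distance=1.5, min_cluster_size=10)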
venv/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/tests/__init__.py
ADDED
File without changes

venv/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/tests/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (198 Bytes)

venv/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/tests/__pycache__/test_reachibility.cpython-310.pyc
ADDED
Binary file (2.3 kB)

venv/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/tests/test_reachibility.py
ADDED
@@ -0,0 +1,63 @@
import numpy as np
import pytest

from sklearn.cluster._hdbscan._reachability import mutual_reachability_graph
from sklearn.utils._testing import (
    _convert_container,
    assert_allclose,
)


def test_mutual_reachability_graph_error_sparse_format():
    """Check that we raise an error if the sparse format is not CSR."""
    rng = np.random.RandomState(0)
    X = rng.randn(10, 10)
    X = X.T @ X
    np.fill_diagonal(X, 0.0)
    X = _convert_container(X, "sparse_csc")

    err_msg = "Only sparse CSR matrices are supported"
    with pytest.raises(ValueError, match=err_msg):
        mutual_reachability_graph(X)


@pytest.mark.parametrize("array_type", ["array", "sparse_csr"])
def test_mutual_reachability_graph_inplace(array_type):
    """Check that the operation is happening inplace."""
    rng = np.random.RandomState(0)
    X = rng.randn(10, 10)
    X = X.T @ X
    np.fill_diagonal(X, 0.0)
    X = _convert_container(X, array_type)

    mr_graph = mutual_reachability_graph(X)

    assert id(mr_graph) == id(X)


def test_mutual_reachability_graph_equivalence_dense_sparse():
    """Check that we get the same results for dense and sparse implementations."""
    rng = np.random.RandomState(0)
    X = rng.randn(5, 5)
    X_dense = X.T @ X
    X_sparse = _convert_container(X_dense, "sparse_csr")

    mr_graph_dense = mutual_reachability_graph(X_dense, min_samples=3)
    mr_graph_sparse = mutual_reachability_graph(X_sparse, min_samples=3)

    assert_allclose(mr_graph_dense, mr_graph_sparse.toarray())


@pytest.mark.parametrize("array_type", ["array", "sparse_csr"])
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_mutual_reachability_graph_preserve_dtype(array_type, dtype):
    """Check that the computation preserves dtype thanks to fused types."""
    rng = np.random.RandomState(0)
    X = rng.randn(10, 10)
    X = (X.T @ X).astype(dtype)
    np.fill_diagonal(X, 0.0)
    X = _convert_container(X, array_type)

    assert X.dtype == dtype
    mr_graph = mutual_reachability_graph(X)
    assert mr_graph.dtype == dtype
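For context on what the tests above exercise: `mutual_reachability_graph` computes the HDBSCAN mutual reachability distance, max(core(a), core(b), d(a, b)), where core(x) is the distance from x to its `min_samples`-th nearest neighbour. A dense NumPy sketch of that definition follows (editor's illustration; the library's Cython version also handles sparse input and fused dtypes, and its exact neighbour-indexing convention may differ by one):

import numpy as np

def mutual_reachability_sketch(dist, min_samples=5):
    # Row-wise k-th smallest entry of a symmetric distance matrix
    # (counting the zero self-distance) approximates the core distance.
    core = np.partition(dist, min_samples, axis=1)[:, min_samples]
    # Pairwise max of the two core distances and the raw distance.
    return np.maximum(np.maximum(core[:, None], core[None, :]), dist)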
venv/lib/python3.10/site-packages/sklearn/cluster/_hierarchical_fast.cpython-310-x86_64-linux-gnu.so
ADDED
Binary file (332 kB)

venv/lib/python3.10/site-packages/sklearn/cluster/_hierarchical_fast.pxd
ADDED
@@ -0,0 +1,9 @@
from ..utils._typedefs cimport intp_t

cdef class UnionFind:
    cdef intp_t next_label
    cdef intp_t[:] parent
    cdef intp_t[:] size

    cdef void union(self, intp_t m, intp_t n) noexcept
    cdef intp_t fast_find(self, intp_t n) noexcept
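The `.pxd` above only declares the Cython interface of the disjoint-set (union-find) structure used when building linkage trees. A pure-Python sketch of the underlying idea, with path compression in `fast_find` and size-aware merging in `union` (editor's illustration; the real class also hands out fresh labels for merged clusters via `next_label`, which is not reproduced here):

class UnionFindSketch:
    def __init__(self, n):
        self.parent = list(range(n))
        self.size = [1] * n

    def fast_find(self, n):
        root = n
        while self.parent[root] != root:  # walk up to the root
            root = self.parent[root]
        while self.parent[n] != root:     # path compression pass
            self.parent[n], n = root, self.parent[n]
        return root

    def union(self, m, n):
        m, n = self.fast_find(m), self.fast_find(n)
        if m == n:
            return
        if self.size[m] < self.size[n]:   # attach smaller tree to larger
            m, n = n, m
        self.parent[n] = m
        self.size[m] += self.size[n]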
venv/lib/python3.10/site-packages/sklearn/cluster/_kmeans.py
ADDED
@@ -0,0 +1,2318 @@
"""K-means clustering."""

# Authors: Gael Varoquaux <[email protected]>
#          Thomas Rueckstiess <[email protected]>
#          James Bergstra <[email protected]>
#          Jan Schlueter <[email protected]>
#          Nelle Varoquaux
#          Peter Prettenhofer <[email protected]>
#          Olivier Grisel <[email protected]>
#          Mathieu Blondel <[email protected]>
#          Robert Layton <[email protected]>
# License: BSD 3 clause

import warnings
from abc import ABC, abstractmethod
from numbers import Integral, Real

import numpy as np
import scipy.sparse as sp

from ..base import (
    BaseEstimator,
    ClassNamePrefixFeaturesOutMixin,
    ClusterMixin,
    TransformerMixin,
    _fit_context,
)
from ..exceptions import ConvergenceWarning
from ..metrics.pairwise import _euclidean_distances, euclidean_distances
from ..utils import check_array, check_random_state
from ..utils._openmp_helpers import _openmp_effective_n_threads
from ..utils._param_validation import Interval, StrOptions, validate_params
from ..utils.extmath import row_norms, stable_cumsum
from ..utils.fixes import threadpool_info, threadpool_limits
from ..utils.sparsefuncs import mean_variance_axis
from ..utils.sparsefuncs_fast import assign_rows_csr
from ..utils.validation import (
    _check_sample_weight,
    _is_arraylike_not_scalar,
    check_is_fitted,
)
from ._k_means_common import (
    CHUNK_SIZE,
    _inertia_dense,
    _inertia_sparse,
    _is_same_clustering,
)
from ._k_means_elkan import (
    elkan_iter_chunked_dense,
    elkan_iter_chunked_sparse,
    init_bounds_dense,
    init_bounds_sparse,
)
from ._k_means_lloyd import lloyd_iter_chunked_dense, lloyd_iter_chunked_sparse
from ._k_means_minibatch import _minibatch_update_dense, _minibatch_update_sparse

###############################################################################
# Initialization heuristic


@validate_params(
    {
        "X": ["array-like", "sparse matrix"],
        "n_clusters": [Interval(Integral, 1, None, closed="left")],
        "sample_weight": ["array-like", None],
        "x_squared_norms": ["array-like", None],
        "random_state": ["random_state"],
        "n_local_trials": [Interval(Integral, 1, None, closed="left"), None],
    },
    prefer_skip_nested_validation=True,
)
def kmeans_plusplus(
    X,
    n_clusters,
    *,
    sample_weight=None,
    x_squared_norms=None,
    random_state=None,
    n_local_trials=None,
):
    """Init n_clusters seeds according to k-means++.

    .. versionadded:: 0.24

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        The data to pick seeds from.

    n_clusters : int
        The number of centroids to initialize.

    sample_weight : array-like of shape (n_samples,), default=None
        The weights for each observation in `X`. If `None`, all observations
        are assigned equal weight. `sample_weight` is ignored if `init`
        is a callable or a user provided array.

        .. versionadded:: 1.3

    x_squared_norms : array-like of shape (n_samples,), default=None
        Squared Euclidean norm of each data point.

    random_state : int or RandomState instance, default=None
        Determines random number generation for centroid initialization. Pass
        an int for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    n_local_trials : int, default=None
        The number of seeding trials for each center (except the first),
        of which the one reducing inertia the most is greedily chosen.
        Set to None to make the number of trials depend logarithmically
        on the number of seeds (2+log(k)), which is the recommended setting.
        Setting it to 1 disables the greedy cluster selection and recovers the
        vanilla k-means++ algorithm, which was empirically shown to work less
        well than its greedy variant.

    Returns
    -------
    centers : ndarray of shape (n_clusters, n_features)
        The initial centers for k-means.

    indices : ndarray of shape (n_clusters,)
        The index location of the chosen centers in the data array X. For a
        given index and center, X[index] = center.

    Notes
    -----
    Selects initial cluster centers for k-means clustering in a smart way
    to speed up convergence. See: Arthur, D. and Vassilvitskii, S.
    "k-means++: the advantages of careful seeding". ACM-SIAM symposium
    on Discrete algorithms. 2007

    Examples
    --------

    >>> from sklearn.cluster import kmeans_plusplus
    >>> import numpy as np
    >>> X = np.array([[1, 2], [1, 4], [1, 0],
    ...               [10, 2], [10, 4], [10, 0]])
    >>> centers, indices = kmeans_plusplus(X, n_clusters=2, random_state=0)
    >>> centers
    array([[10,  2],
           [ 1,  0]])
    >>> indices
    array([3, 2])
    """
    # Check data
    check_array(X, accept_sparse="csr", dtype=[np.float64, np.float32])
    sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)

    if X.shape[0] < n_clusters:
        raise ValueError(
            f"n_samples={X.shape[0]} should be >= n_clusters={n_clusters}."
        )

    # Check parameters
    if x_squared_norms is None:
        x_squared_norms = row_norms(X, squared=True)
    else:
        x_squared_norms = check_array(x_squared_norms, dtype=X.dtype, ensure_2d=False)

    if x_squared_norms.shape[0] != X.shape[0]:
        raise ValueError(
            f"The length of x_squared_norms {x_squared_norms.shape[0]} should "
            f"be equal to the length of n_samples {X.shape[0]}."
        )

    random_state = check_random_state(random_state)

    # Call private k-means++
    centers, indices = _kmeans_plusplus(
        X, n_clusters, x_squared_norms, sample_weight, random_state, n_local_trials
    )

    return centers, indices


def _kmeans_plusplus(
    X, n_clusters, x_squared_norms, sample_weight, random_state, n_local_trials=None
):
    """Computational component for initialization of n_clusters by
    k-means++. Prior validation of data is assumed.

    Parameters
    ----------
    X : {ndarray, sparse matrix} of shape (n_samples, n_features)
        The data to pick seeds for.

    n_clusters : int
        The number of seeds to choose.

    sample_weight : ndarray of shape (n_samples,)
        The weights for each observation in `X`.

    x_squared_norms : ndarray of shape (n_samples,)
        Squared Euclidean norm of each data point.

    random_state : RandomState instance
        The generator used to initialize the centers.
        See :term:`Glossary <random_state>`.

    n_local_trials : int, default=None
        The number of seeding trials for each center (except the first),
        of which the one reducing inertia the most is greedily chosen.
        Set to None to make the number of trials depend logarithmically
        on the number of seeds (2+log(k)); this is the default.

    Returns
    -------
    centers : ndarray of shape (n_clusters, n_features)
        The initial centers for k-means.

    indices : ndarray of shape (n_clusters,)
        The index location of the chosen centers in the data array X. For a
        given index and center, X[index] = center.
    """
    n_samples, n_features = X.shape

    centers = np.empty((n_clusters, n_features), dtype=X.dtype)

    # Set the number of local seeding trials if none is given
    if n_local_trials is None:
        # This is what Arthur/Vassilvitskii tried, but did not report
        # specific results for other than mentioning in the conclusion
        # that it helped.
        n_local_trials = 2 + int(np.log(n_clusters))

    # Pick first center randomly and track index of point
    center_id = random_state.choice(n_samples, p=sample_weight / sample_weight.sum())
    indices = np.full(n_clusters, -1, dtype=int)
    if sp.issparse(X):
        centers[0] = X[[center_id]].toarray()
    else:
        centers[0] = X[center_id]
    indices[0] = center_id

    # Initialize list of closest distances and calculate current potential
    closest_dist_sq = _euclidean_distances(
        centers[0, np.newaxis], X, Y_norm_squared=x_squared_norms, squared=True
    )
    current_pot = closest_dist_sq @ sample_weight

    # Pick the remaining n_clusters-1 points
    for c in range(1, n_clusters):
        # Choose center candidates by sampling with probability proportional
        # to the squared distance to the closest existing center
        rand_vals = random_state.uniform(size=n_local_trials) * current_pot
        candidate_ids = np.searchsorted(
            stable_cumsum(sample_weight * closest_dist_sq), rand_vals
        )
        # XXX: numerical imprecision can result in a candidate_id out of range
        np.clip(candidate_ids, None, closest_dist_sq.size - 1, out=candidate_ids)

        # Compute distances to center candidates
        distance_to_candidates = _euclidean_distances(
            X[candidate_ids], X, Y_norm_squared=x_squared_norms, squared=True
        )

        # update closest distances squared and potential for each candidate
        np.minimum(closest_dist_sq, distance_to_candidates, out=distance_to_candidates)
        candidates_pot = distance_to_candidates @ sample_weight.reshape(-1, 1)

        # Decide which candidate is the best
        best_candidate = np.argmin(candidates_pot)
        current_pot = candidates_pot[best_candidate]
        closest_dist_sq = distance_to_candidates[best_candidate]
        best_candidate = candidate_ids[best_candidate]

        # Permanently add best center candidate found in local tries
        if sp.issparse(X):
            centers[c] = X[[best_candidate]].toarray()
        else:
            centers[c] = X[best_candidate]
        indices[c] = best_candidate

    return centers, indices
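An editor's aside on the sampling step inside the loop above: candidates are drawn with probability proportional to `sample_weight * closest_dist_sq` via the inverse-CDF trick, i.e. uniform draws scaled to the total mass and located with `np.searchsorted` on the cumulative sum. A standalone sketch of just that step, with illustrative values:

import numpy as np

rng = np.random.RandomState(0)
closest_dist_sq = np.array([0.0, 1.0, 4.0, 9.0])  # D^2 to nearest chosen center
weights = np.ones_like(closest_dist_sq)
cdf = np.cumsum(weights * closest_dist_sq)        # unnormalized CDF
draws = rng.uniform(size=3) * cdf[-1]             # uniform on [0, total mass)
candidate_ids = np.searchsorted(cdf, draws)       # larger D^2 -> wider bin -> likelier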


###############################################################################
# K-means batch estimation by EM (expectation maximization)


def _tolerance(X, tol):
    """Return a tolerance which is dependent on the dataset."""
    if tol == 0:
        return 0
    if sp.issparse(X):
        variances = mean_variance_axis(X, axis=0)[1]
    else:
        variances = np.var(X, axis=0)
    return np.mean(variances) * tol


@validate_params(
    {
        "X": ["array-like", "sparse matrix"],
        "sample_weight": ["array-like", None],
        "return_n_iter": [bool],
    },
    prefer_skip_nested_validation=False,
)
def k_means(
    X,
    n_clusters,
    *,
    sample_weight=None,
    init="k-means++",
    n_init="auto",
    max_iter=300,
    verbose=False,
    tol=1e-4,
    random_state=None,
    copy_x=True,
    algorithm="lloyd",
    return_n_iter=False,
):
    """Perform K-means clustering algorithm.

    Read more in the :ref:`User Guide <k_means>`.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        The observations to cluster. It must be noted that the data
        will be converted to C ordering, which will cause a memory copy
        if the given data is not C-contiguous.

    n_clusters : int
        The number of clusters to form as well as the number of
        centroids to generate.

    sample_weight : array-like of shape (n_samples,), default=None
        The weights for each observation in `X`. If `None`, all observations
        are assigned equal weight. `sample_weight` is not used during
        initialization if `init` is a callable or a user provided array.

    init : {'k-means++', 'random'}, callable or array-like of shape \
            (n_clusters, n_features), default='k-means++'
        Method for initialization:

        - `'k-means++'` : selects initial cluster centers for k-means
          clustering in a smart way to speed up convergence. See section
          Notes in k_init for more details.
        - `'random'`: choose `n_clusters` observations (rows) at random from data
          for the initial centroids.
        - If an array is passed, it should be of shape `(n_clusters, n_features)`
          and gives the initial centers.
        - If a callable is passed, it should take arguments `X`, `n_clusters` and a
          random state and return an initialization.

    n_init : 'auto' or int, default="auto"
        Number of times the k-means algorithm will be run with different
        centroid seeds. The final results will be the best output of
        n_init consecutive runs in terms of inertia.

        When `n_init='auto'`, the number of runs depends on the value of init:
        10 if using `init='random'` or `init` is a callable;
        1 if using `init='k-means++'` or `init` is an array-like.

        .. versionadded:: 1.2
           Added 'auto' option for `n_init`.

        .. versionchanged:: 1.4
           Default value for `n_init` changed to `'auto'`.

    max_iter : int, default=300
        Maximum number of iterations of the k-means algorithm to run.

    verbose : bool, default=False
        Verbosity mode.

    tol : float, default=1e-4
        Relative tolerance with regards to Frobenius norm of the difference
        in the cluster centers of two consecutive iterations to declare
        convergence.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for centroid initialization. Use
        an int to make the randomness deterministic.
        See :term:`Glossary <random_state>`.

    copy_x : bool, default=True
        When pre-computing distances it is more numerically accurate to center
        the data first. If `copy_x` is True (default), then the original data is
        not modified. If False, the original data is modified, and put back
        before the function returns, but small numerical differences may be
        introduced by subtracting and then adding the data mean. Note that if
        the original data is not C-contiguous, a copy will be made even if
        `copy_x` is False. If the original data is sparse, but not in CSR format,
        a copy will be made even if `copy_x` is False.

    algorithm : {"lloyd", "elkan"}, default="lloyd"
        K-means algorithm to use. The classical EM-style algorithm is `"lloyd"`.
        The `"elkan"` variation can be more efficient on some datasets with
        well-defined clusters, by using the triangle inequality. However it's
        more memory intensive due to the allocation of an extra array of shape
        `(n_samples, n_clusters)`.

        .. versionchanged:: 0.18
            Added Elkan algorithm

        .. versionchanged:: 1.1
            Renamed "full" to "lloyd", and deprecated "auto" and "full".
            Changed "auto" to use "lloyd" instead of "elkan".

    return_n_iter : bool, default=False
        Whether or not to return the number of iterations.

    Returns
    -------
    centroid : ndarray of shape (n_clusters, n_features)
        Centroids found at the last iteration of k-means.

    label : ndarray of shape (n_samples,)
        The `label[i]` is the code or index of the centroid the
        i'th observation is closest to.

    inertia : float
        The final value of the inertia criterion (sum of squared distances to
        the closest centroid for all observations in the training set).

    best_n_iter : int
        Number of iterations corresponding to the best results.
        Returned only if `return_n_iter` is set to True.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.cluster import k_means
    >>> X = np.array([[1, 2], [1, 4], [1, 0],
    ...               [10, 2], [10, 4], [10, 0]])
    >>> centroid, label, inertia = k_means(
    ...     X, n_clusters=2, n_init="auto", random_state=0
    ... )
    >>> centroid
    array([[10.,  2.],
           [ 1.,  2.]])
    >>> label
    array([1, 1, 1, 0, 0, 0], dtype=int32)
    >>> inertia
    16.0
    """
    est = KMeans(
        n_clusters=n_clusters,
        init=init,
        n_init=n_init,
        max_iter=max_iter,
        verbose=verbose,
        tol=tol,
        random_state=random_state,
        copy_x=copy_x,
        algorithm=algorithm,
    ).fit(X, sample_weight=sample_weight)
    if return_n_iter:
        return est.cluster_centers_, est.labels_, est.inertia_, est.n_iter_
    else:
        return est.cluster_centers_, est.labels_, est.inertia_


def _kmeans_single_elkan(
    X,
    sample_weight,
    centers_init,
    max_iter=300,
    verbose=False,
    tol=1e-4,
    n_threads=1,
):
    """A single run of k-means elkan, assumes preparation completed prior.

    Parameters
    ----------
    X : {ndarray, sparse matrix} of shape (n_samples, n_features)
        The observations to cluster. If sparse matrix, must be in CSR format.

    sample_weight : array-like of shape (n_samples,)
        The weights for each observation in X.

    centers_init : ndarray of shape (n_clusters, n_features)
        The initial centers.

    max_iter : int, default=300
        Maximum number of iterations of the k-means algorithm to run.

    verbose : bool, default=False
        Verbosity mode.

    tol : float, default=1e-4
        Relative tolerance with regards to Frobenius norm of the difference
        in the cluster centers of two consecutive iterations to declare
        convergence.
        It's not advised to set `tol=0` since convergence might never be
        declared due to rounding errors. Use a very small number instead.

    n_threads : int, default=1
        The number of OpenMP threads to use for the computation. Parallelism is
        sample-wise on the main cython loop which assigns each sample to its
        closest center.

    Returns
    -------
    centroid : ndarray of shape (n_clusters, n_features)
        Centroids found at the last iteration of k-means.

    label : ndarray of shape (n_samples,)
        label[i] is the code or index of the centroid the
        i'th observation is closest to.

    inertia : float
        The final value of the inertia criterion (sum of squared distances to
        the closest centroid for all observations in the training set).

    n_iter : int
        Number of iterations run.
    """
    n_samples = X.shape[0]
    n_clusters = centers_init.shape[0]

    # Buffers to avoid new allocations at each iteration.
    centers = centers_init
    centers_new = np.zeros_like(centers)
    weight_in_clusters = np.zeros(n_clusters, dtype=X.dtype)
    labels = np.full(n_samples, -1, dtype=np.int32)
    labels_old = labels.copy()
    center_half_distances = euclidean_distances(centers) / 2
    distance_next_center = np.partition(
        np.asarray(center_half_distances), kth=1, axis=0
    )[1]
    upper_bounds = np.zeros(n_samples, dtype=X.dtype)
    lower_bounds = np.zeros((n_samples, n_clusters), dtype=X.dtype)
    center_shift = np.zeros(n_clusters, dtype=X.dtype)

    if sp.issparse(X):
        init_bounds = init_bounds_sparse
        elkan_iter = elkan_iter_chunked_sparse
        _inertia = _inertia_sparse
    else:
        init_bounds = init_bounds_dense
        elkan_iter = elkan_iter_chunked_dense
        _inertia = _inertia_dense

    init_bounds(
        X,
        centers,
        center_half_distances,
        labels,
        upper_bounds,
        lower_bounds,
        n_threads=n_threads,
    )

    strict_convergence = False

    for i in range(max_iter):
        elkan_iter(
            X,
            sample_weight,
            centers,
            centers_new,
            weight_in_clusters,
            center_half_distances,
            distance_next_center,
            upper_bounds,
            lower_bounds,
            labels,
            center_shift,
            n_threads,
        )

        # compute new pairwise distances between centers and closest other
        # center of each center for next iterations
        center_half_distances = euclidean_distances(centers_new) / 2
        distance_next_center = np.partition(
            np.asarray(center_half_distances), kth=1, axis=0
        )[1]

        if verbose:
            inertia = _inertia(X, sample_weight, centers, labels, n_threads)
            print(f"Iteration {i}, inertia {inertia}")

        centers, centers_new = centers_new, centers

        if np.array_equal(labels, labels_old):
            # First check the labels for strict convergence.
            if verbose:
                print(f"Converged at iteration {i}: strict convergence.")
            strict_convergence = True
            break
        else:
            # No strict convergence, check for tol based convergence.
            center_shift_tot = (center_shift**2).sum()
            if center_shift_tot <= tol:
                if verbose:
                    print(
                        f"Converged at iteration {i}: center shift "
                        f"{center_shift_tot} within tolerance {tol}."
                    )
                break

        labels_old[:] = labels

    if not strict_convergence:
        # rerun E-step so that predicted labels match cluster centers
        elkan_iter(
            X,
            sample_weight,
            centers,
            centers,
            weight_in_clusters,
            center_half_distances,
            distance_next_center,
            upper_bounds,
            lower_bounds,
            labels,
            center_shift,
            n_threads,
            update_centers=False,
        )

    inertia = _inertia(X, sample_weight, centers, labels, n_threads)

    return labels, inertia, centers, i + 1
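An editor's note on why `_kmeans_single_elkan` tracks `distance_next_center`: by the triangle inequality, a sample whose distance to its assigned center is at most half the distance from that center to the nearest other center cannot change assignment, so the per-center distance computations for it can be skipped. A tiny standalone check of that rule (illustrative values only):

import numpy as np

x = np.array([0.1, 0.0])
centers = np.array([[0.0, 0.0], [5.0, 0.0], [0.0, 6.0]])
assigned = 0
upper_bound = np.linalg.norm(x - centers[assigned])           # 0.1
others = np.delete(np.arange(len(centers)), assigned)
half_next = np.linalg.norm(
    centers[others] - centers[assigned], axis=1
).min() / 2                                                   # 2.5
can_skip = upper_bound <= half_next                           # True: assignment is fixed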


def _kmeans_single_lloyd(
    X,
    sample_weight,
    centers_init,
    max_iter=300,
    verbose=False,
    tol=1e-4,
    n_threads=1,
):
    """A single run of k-means lloyd, assumes preparation completed prior.

    Parameters
    ----------
    X : {ndarray, sparse matrix} of shape (n_samples, n_features)
        The observations to cluster. If sparse matrix, must be in CSR format.

    sample_weight : ndarray of shape (n_samples,)
        The weights for each observation in X.

    centers_init : ndarray of shape (n_clusters, n_features)
        The initial centers.

    max_iter : int, default=300
        Maximum number of iterations of the k-means algorithm to run.

    verbose : bool, default=False
        Verbosity mode.

    tol : float, default=1e-4
        Relative tolerance with regards to Frobenius norm of the difference
        in the cluster centers of two consecutive iterations to declare
        convergence.
        It's not advised to set `tol=0` since convergence might never be
        declared due to rounding errors. Use a very small number instead.

    n_threads : int, default=1
        The number of OpenMP threads to use for the computation. Parallelism is
        sample-wise on the main cython loop which assigns each sample to its
        closest center.

    Returns
    -------
    centroid : ndarray of shape (n_clusters, n_features)
        Centroids found at the last iteration of k-means.

    label : ndarray of shape (n_samples,)
        label[i] is the code or index of the centroid the
        i'th observation is closest to.

    inertia : float
        The final value of the inertia criterion (sum of squared distances to
        the closest centroid for all observations in the training set).

    n_iter : int
        Number of iterations run.
    """
    n_clusters = centers_init.shape[0]

    # Buffers to avoid new allocations at each iteration.
    centers = centers_init
    centers_new = np.zeros_like(centers)
    labels = np.full(X.shape[0], -1, dtype=np.int32)
    labels_old = labels.copy()
    weight_in_clusters = np.zeros(n_clusters, dtype=X.dtype)
    center_shift = np.zeros(n_clusters, dtype=X.dtype)

    if sp.issparse(X):
        lloyd_iter = lloyd_iter_chunked_sparse
        _inertia = _inertia_sparse
    else:
        lloyd_iter = lloyd_iter_chunked_dense
        _inertia = _inertia_dense

    strict_convergence = False

    # Threadpoolctl context to limit the number of threads in second level of
    # nested parallelism (i.e. BLAS) to avoid oversubscription.
    with threadpool_limits(limits=1, user_api="blas"):
        for i in range(max_iter):
            lloyd_iter(
                X,
                sample_weight,
                centers,
                centers_new,
                weight_in_clusters,
                labels,
                center_shift,
                n_threads,
            )

            if verbose:
                inertia = _inertia(X, sample_weight, centers, labels, n_threads)
                print(f"Iteration {i}, inertia {inertia}.")

            centers, centers_new = centers_new, centers

            if np.array_equal(labels, labels_old):
                # First check the labels for strict convergence.
                if verbose:
                    print(f"Converged at iteration {i}: strict convergence.")
                strict_convergence = True
                break
            else:
                # No strict convergence, check for tol based convergence.
                center_shift_tot = (center_shift**2).sum()
                if center_shift_tot <= tol:
                    if verbose:
                        print(
                            f"Converged at iteration {i}: center shift "
                            f"{center_shift_tot} within tolerance {tol}."
                        )
                    break

            labels_old[:] = labels

        if not strict_convergence:
            # rerun E-step so that predicted labels match cluster centers
            lloyd_iter(
                X,
                sample_weight,
                centers,
                centers,
                weight_in_clusters,
                labels,
                center_shift,
                n_threads,
                update_centers=False,
            )

    inertia = _inertia(X, sample_weight, centers, labels, n_threads)

    return labels, inertia, centers, i + 1
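For reference against the chunked Cython kernels driven above, here is one full Lloyd iteration in plain NumPy (editor's sketch: dense, unweighted, and with no guard for clusters that end up empty, unlike the real kernels):

import numpy as np

def lloyd_step_sketch(X, centers):
    # E-step: squared distances to every center; the nearest one wins.
    d2 = ((X[:, None, :] - centers[None, :, :]) ** 2).sum(axis=-1)
    labels = d2.argmin(axis=1)
    # M-step: each center moves to the mean of its assigned samples.
    new_centers = np.vstack(
        [X[labels == k].mean(axis=0) for k in range(len(centers))]
    )
    return labels, new_centers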


def _labels_inertia(X, sample_weight, centers, n_threads=1, return_inertia=True):
    """E step of the K-means EM algorithm.

    Compute the labels and the inertia of the given samples and centers.

    Parameters
    ----------
    X : {ndarray, sparse matrix} of shape (n_samples, n_features)
        The input samples to assign to the labels. If sparse matrix, must
        be in CSR format.

    sample_weight : ndarray of shape (n_samples,)
        The weights for each observation in X.

    centers : ndarray of shape (n_clusters, n_features)
        The cluster centers.

    n_threads : int, default=1
        The number of OpenMP threads to use for the computation. Parallelism is
        sample-wise on the main cython loop which assigns each sample to its
        closest center.

    return_inertia : bool, default=True
        Whether to compute and return the inertia.

    Returns
    -------
    labels : ndarray of shape (n_samples,)
        The resulting assignment.

    inertia : float
        Sum of squared distances of samples to their closest cluster center.
        Inertia is only returned if return_inertia is True.
    """
    n_samples = X.shape[0]
    n_clusters = centers.shape[0]

    labels = np.full(n_samples, -1, dtype=np.int32)
    center_shift = np.zeros(n_clusters, dtype=centers.dtype)

    if sp.issparse(X):
        _labels = lloyd_iter_chunked_sparse
        _inertia = _inertia_sparse
    else:
        _labels = lloyd_iter_chunked_dense
        _inertia = _inertia_dense

    _labels(
        X,
        sample_weight,
        centers,
        centers_new=None,
        weight_in_clusters=None,
        labels=labels,
        center_shift=center_shift,
        n_threads=n_threads,
        update_centers=False,
    )

    if return_inertia:
        inertia = _inertia(X, sample_weight, centers, labels, n_threads)
        return labels, inertia

    return labels


def _labels_inertia_threadpool_limit(
    X, sample_weight, centers, n_threads=1, return_inertia=True
):
    """Same as _labels_inertia but in a threadpool_limits context."""
    with threadpool_limits(limits=1, user_api="blas"):
        result = _labels_inertia(X, sample_weight, centers, n_threads, return_inertia)

    return result


class _BaseKMeans(
    ClassNamePrefixFeaturesOutMixin, TransformerMixin, ClusterMixin, BaseEstimator, ABC
):
    """Base class for KMeans and MiniBatchKMeans."""

    _parameter_constraints: dict = {
        "n_clusters": [Interval(Integral, 1, None, closed="left")],
        "init": [StrOptions({"k-means++", "random"}), callable, "array-like"],
        "n_init": [
            StrOptions({"auto"}),
            Interval(Integral, 1, None, closed="left"),
        ],
        "max_iter": [Interval(Integral, 1, None, closed="left")],
        "tol": [Interval(Real, 0, None, closed="left")],
        "verbose": ["verbose"],
        "random_state": ["random_state"],
    }

    def __init__(
        self,
        n_clusters,
        *,
        init,
        n_init,
        max_iter,
        tol,
        verbose,
        random_state,
    ):
        self.n_clusters = n_clusters
        self.init = init
        self.max_iter = max_iter
        self.tol = tol
        self.n_init = n_init
        self.verbose = verbose
        self.random_state = random_state

    def _check_params_vs_input(self, X, default_n_init=None):
        # n_clusters
        if X.shape[0] < self.n_clusters:
            raise ValueError(
                f"n_samples={X.shape[0]} should be >= n_clusters={self.n_clusters}."
            )

        # tol
        self._tol = _tolerance(X, self.tol)

        # n-init
        if self.n_init == "auto":
            if isinstance(self.init, str) and self.init == "k-means++":
                self._n_init = 1
            elif isinstance(self.init, str) and self.init == "random":
                self._n_init = default_n_init
            elif callable(self.init):
                self._n_init = default_n_init
            else:  # array-like
                self._n_init = 1
        else:
            self._n_init = self.n_init

        if _is_arraylike_not_scalar(self.init) and self._n_init != 1:
            warnings.warn(
                (
                    "Explicit initial center position passed: performing only"
                    f" one init in {self.__class__.__name__} instead of "
                    f"n_init={self._n_init}."
                ),
                RuntimeWarning,
                stacklevel=2,
            )
            self._n_init = 1

    @abstractmethod
    def _warn_mkl_vcomp(self, n_active_threads):
        """Issue an estimator specific warning when vcomp and mkl are both present.

        This method is called by `_check_mkl_vcomp`.
        """

    def _check_mkl_vcomp(self, X, n_samples):
        """Check when vcomp and mkl are both present."""
        # The BLAS call inside a prange in lloyd_iter_chunked_dense is known to
        # cause a small memory leak when there are fewer chunks than the number
        # of available threads. It only happens when the OpenMP library is
        # vcomp (microsoft OpenMP) and the BLAS library is MKL. see #18653
        if sp.issparse(X):
            return

        n_active_threads = int(np.ceil(n_samples / CHUNK_SIZE))
        if n_active_threads < self._n_threads:
            modules = threadpool_info()
            has_vcomp = "vcomp" in [module["prefix"] for module in modules]
            has_mkl = ("mkl", "intel") in [
                (module["internal_api"], module.get("threading_layer", None))
                for module in modules
            ]
            if has_vcomp and has_mkl:
                self._warn_mkl_vcomp(n_active_threads)

    def _validate_center_shape(self, X, centers):
        """Check if centers is compatible with X and n_clusters."""
        if centers.shape[0] != self.n_clusters:
            raise ValueError(
                f"The shape of the initial centers {centers.shape} does not "
                f"match the number of clusters {self.n_clusters}."
            )
        if centers.shape[1] != X.shape[1]:
            raise ValueError(
                f"The shape of the initial centers {centers.shape} does not "
                f"match the number of features of the data {X.shape[1]}."
            )

    def _check_test_data(self, X):
        X = self._validate_data(
            X,
            accept_sparse="csr",
            reset=False,
            dtype=[np.float64, np.float32],
            order="C",
            accept_large_sparse=False,
        )
        return X

    def _init_centroids(
        self,
        X,
        x_squared_norms,
        init,
        random_state,
        sample_weight,
        init_size=None,
        n_centroids=None,
    ):
        """Compute the initial centroids.

        Parameters
        ----------
        X : {ndarray, sparse matrix} of shape (n_samples, n_features)
            The input samples.

        x_squared_norms : ndarray of shape (n_samples,)
            Squared euclidean norm of each data point. Pass it if you have it
            at hand already to avoid it being recomputed here.

        init : {'k-means++', 'random'}, callable or ndarray of shape \
                (n_clusters, n_features)
            Method for initialization.

        random_state : RandomState instance
            Determines random number generation for centroid initialization.
            See :term:`Glossary <random_state>`.

        sample_weight : ndarray of shape (n_samples,)
            The weights for each observation in X. `sample_weight` is not used
            during initialization if `init` is a callable or a user provided
            array.

        init_size : int, default=None
            Number of samples to randomly sample for speeding up the
            initialization (sometimes at the expense of accuracy).

        n_centroids : int, default=None
            Number of centroids to initialize.
            If left to 'None' the number of centroids will be equal to the
            number of clusters to form (self.n_clusters).

        Returns
        -------
        centers : ndarray of shape (n_clusters, n_features)
            Initial centroids of clusters.
        """
        n_samples = X.shape[0]
        n_clusters = self.n_clusters if n_centroids is None else n_centroids

        if init_size is not None and init_size < n_samples:
            init_indices = random_state.randint(0, n_samples, init_size)
            X = X[init_indices]
            x_squared_norms = x_squared_norms[init_indices]
            n_samples = X.shape[0]
            sample_weight = sample_weight[init_indices]

        if isinstance(init, str) and init == "k-means++":
            centers, _ = _kmeans_plusplus(
                X,
                n_clusters,
                random_state=random_state,
                x_squared_norms=x_squared_norms,
                sample_weight=sample_weight,
            )
        elif isinstance(init, str) and init == "random":
            seeds = random_state.choice(
                n_samples,
                size=n_clusters,
                replace=False,
                p=sample_weight / sample_weight.sum(),
            )
            centers = X[seeds]
        elif _is_arraylike_not_scalar(self.init):
            centers = init
        elif callable(init):
            centers = init(X, n_clusters, random_state=random_state)
            centers = check_array(centers, dtype=X.dtype, copy=False, order="C")
            self._validate_center_shape(X, centers)

        if sp.issparse(centers):
            centers = centers.toarray()

        return centers

    def fit_predict(self, X, y=None, sample_weight=None):
        """Compute cluster centers and predict cluster index for each sample.

        Convenience method; equivalent to calling fit(X) followed by
        predict(X).

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            New data to transform.

        y : Ignored
            Not used, present here for API consistency by convention.

        sample_weight : array-like of shape (n_samples,), default=None
            The weights for each observation in X. If None, all observations
            are assigned equal weight.

        Returns
        -------
        labels : ndarray of shape (n_samples,)
            Index of the cluster each sample belongs to.
        """
        return self.fit(X, sample_weight=sample_weight).labels_

    def predict(self, X, sample_weight="deprecated"):
        """Predict the closest cluster each sample in X belongs to.

        In the vector quantization literature, `cluster_centers_` is called
        the code book and each value returned by `predict` is the index of
        the closest code in the code book.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            New data to predict.

        sample_weight : array-like of shape (n_samples,), default=None
            The weights for each observation in X. If None, all observations
            are assigned equal weight.

            .. deprecated:: 1.3
               The parameter `sample_weight` is deprecated in version 1.3
               and will be removed in 1.5.

        Returns
        -------
        labels : ndarray of shape (n_samples,)
            Index of the cluster each sample belongs to.
        """
        check_is_fitted(self)

        X = self._check_test_data(X)
        if not (isinstance(sample_weight, str) and sample_weight == "deprecated"):
            warnings.warn(
                (
                    "'sample_weight' was deprecated in version 1.3 and "
                    "will be removed in 1.5."
                ),
                FutureWarning,
            )
            sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
        else:
            sample_weight = _check_sample_weight(None, X, dtype=X.dtype)

        labels = _labels_inertia_threadpool_limit(
            X,
            sample_weight,
            self.cluster_centers_,
            n_threads=self._n_threads,
            return_inertia=False,
        )

        return labels

    def fit_transform(self, X, y=None, sample_weight=None):
        """Compute clustering and transform X to cluster-distance space.

        Equivalent to fit(X).transform(X), but more efficiently implemented.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            New data to transform.

        y : Ignored
|
1134 |
+
Not used, present here for API consistency by convention.
|
1135 |
+
|
1136 |
+
sample_weight : array-like of shape (n_samples,), default=None
|
1137 |
+
The weights for each observation in X. If None, all observations
|
1138 |
+
are assigned equal weight.
|
1139 |
+
|
1140 |
+
Returns
|
1141 |
+
-------
|
1142 |
+
X_new : ndarray of shape (n_samples, n_clusters)
|
1143 |
+
X transformed in the new space.
|
1144 |
+
"""
|
1145 |
+
return self.fit(X, sample_weight=sample_weight)._transform(X)
|
1146 |
+
|
1147 |
+
def transform(self, X):
|
1148 |
+
"""Transform X to a cluster-distance space.
|
1149 |
+
|
1150 |
+
In the new space, each dimension is the distance to the cluster
|
1151 |
+
centers. Note that even if X is sparse, the array returned by
|
1152 |
+
`transform` will typically be dense.
|
1153 |
+
|
1154 |
+
Parameters
|
1155 |
+
----------
|
1156 |
+
X : {array-like, sparse matrix} of shape (n_samples, n_features)
|
1157 |
+
New data to transform.
|
1158 |
+
|
1159 |
+
Returns
|
1160 |
+
-------
|
1161 |
+
X_new : ndarray of shape (n_samples, n_clusters)
|
1162 |
+
X transformed in the new space.
|
1163 |
+
"""
|
1164 |
+
check_is_fitted(self)
|
1165 |
+
|
1166 |
+
X = self._check_test_data(X)
|
1167 |
+
return self._transform(X)
|
1168 |
+
|
1169 |
+
def _transform(self, X):
|
1170 |
+
"""Guts of transform method; no input validation."""
|
1171 |
+
return euclidean_distances(X, self.cluster_centers_)
|
1172 |
+
|
1173 |
+
def score(self, X, y=None, sample_weight=None):
|
1174 |
+
"""Opposite of the value of X on the K-means objective.
|
1175 |
+
|
1176 |
+
Parameters
|
1177 |
+
----------
|
1178 |
+
X : {array-like, sparse matrix} of shape (n_samples, n_features)
|
1179 |
+
New data.
|
1180 |
+
|
1181 |
+
y : Ignored
|
1182 |
+
Not used, present here for API consistency by convention.
|
1183 |
+
|
1184 |
+
sample_weight : array-like of shape (n_samples,), default=None
|
1185 |
+
The weights for each observation in X. If None, all observations
|
1186 |
+
are assigned equal weight.
|
1187 |
+
|
1188 |
+
Returns
|
1189 |
+
-------
|
1190 |
+
score : float
|
1191 |
+
Opposite of the value of X on the K-means objective.
|
1192 |
+
"""
|
1193 |
+
check_is_fitted(self)
|
1194 |
+
|
1195 |
+
X = self._check_test_data(X)
|
1196 |
+
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
|
1197 |
+
|
1198 |
+
_, scores = _labels_inertia_threadpool_limit(
|
1199 |
+
X, sample_weight, self.cluster_centers_, self._n_threads
|
1200 |
+
)
|
1201 |
+
return -scores
|
1202 |
+
|
1203 |
+
def _more_tags(self):
|
1204 |
+
return {
|
1205 |
+
"_xfail_checks": {
|
1206 |
+
"check_sample_weights_invariance": (
|
1207 |
+
"zero sample_weight is not equivalent to removing samples"
|
1208 |
+
),
|
1209 |
+
},
|
1210 |
+
}
|
1211 |
+
|
1212 |
+
|
1213 |
+
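

# --- Illustrative sketch (editor's addition, not part of the library code) --
# The `init` argument handled by `_init_centroids` above may be a callable;
# per its docstring the callable takes `X`, `n_clusters` and a `random_state`
# and returns an array of shape (n_clusters, n_features). A minimal example
# of that contract, using only helpers already imported in this module
# (`check_random_state`, `sp`, `np`); the name `_example_random_rows_init`
# is hypothetical, for illustration only.
def _example_random_rows_init(X, n_clusters, random_state=None):
    """Toy `init` callable: pick `n_clusters` distinct rows of X as centers."""
    rng = check_random_state(random_state)
    indices = rng.choice(X.shape[0], size=n_clusters, replace=False)
    # Return a dense array; `_init_centroids` re-validates the result anyway.
    return X[indices].toarray() if sp.issparse(X) else np.asarray(X[indices])
# Usage sketch: KMeans(n_clusters=3, init=_example_random_rows_init,
# n_init=1).fit(X)

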
class KMeans(_BaseKMeans):
    """K-Means clustering.

    Read more in the :ref:`User Guide <k_means>`.

    Parameters
    ----------

    n_clusters : int, default=8
        The number of clusters to form as well as the number of
        centroids to generate.

        For an example of how to choose an optimal value for `n_clusters` refer to
        :ref:`sphx_glr_auto_examples_cluster_plot_kmeans_silhouette_analysis.py`.

    init : {'k-means++', 'random'}, callable or array-like of shape \
            (n_clusters, n_features), default='k-means++'
        Method for initialization:

        * 'k-means++' : selects initial cluster centroids using sampling \
            based on an empirical probability distribution of the points' \
            contribution to the overall inertia. This technique speeds up \
            convergence. The algorithm implemented is "greedy k-means++". It \
            differs from the vanilla k-means++ by making several trials at \
            each sampling step and choosing the best centroid among them.

        * 'random': choose `n_clusters` observations (rows) at random from \
        data for the initial centroids.

        * If an array is passed, it should be of shape (n_clusters, n_features)\
        and gives the initial centers.

        * If a callable is passed, it should take arguments X, n_clusters and a\
        random state and return an initialization.

        For an example of how to use the different `init` strategies, see the
        example entitled :ref:`sphx_glr_auto_examples_cluster_plot_kmeans_digits.py`.

    n_init : 'auto' or int, default='auto'
        Number of times the k-means algorithm is run with different centroid
        seeds. The final result is the best output of `n_init` consecutive runs
        in terms of inertia. Several runs are recommended for sparse
        high-dimensional problems (see :ref:`kmeans_sparse_high_dim`).

        When `n_init='auto'`, the number of runs depends on the value of init:
        10 if using `init='random'` or `init` is a callable;
        1 if using `init='k-means++'` or `init` is an array-like.

        .. versionadded:: 1.2
            Added 'auto' option for `n_init`.

        .. versionchanged:: 1.4
            Default value for `n_init` changed to `'auto'`.

    max_iter : int, default=300
        Maximum number of iterations of the k-means algorithm for a
        single run.

    tol : float, default=1e-4
        Relative tolerance with regards to Frobenius norm of the difference
        in the cluster centers of two consecutive iterations to declare
        convergence.

    verbose : int, default=0
        Verbosity mode.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for centroid initialization. Use
        an int to make the randomness deterministic.
        See :term:`Glossary <random_state>`.

    copy_x : bool, default=True
        When pre-computing distances it is more numerically accurate to center
        the data first. If copy_x is True (default), then the original data is
        not modified. If False, the original data is modified, and put back
        before the function returns, but small numerical differences may be
        introduced by subtracting and then adding the data mean. Note that if
        the original data is not C-contiguous, a copy will be made even if
        copy_x is False. If the original data is sparse, but not in CSR format,
        a copy will be made even if copy_x is False.

    algorithm : {"lloyd", "elkan"}, default="lloyd"
        K-means algorithm to use. The classical EM-style algorithm is `"lloyd"`.
        The `"elkan"` variation can be more efficient on some datasets with
        well-defined clusters, by using the triangle inequality. However it's
        more memory intensive due to the allocation of an extra array of shape
        `(n_samples, n_clusters)`.

        .. versionchanged:: 0.18
            Added Elkan algorithm

        .. versionchanged:: 1.1
            Renamed "full" to "lloyd", and deprecated "auto" and "full".
            Changed "auto" to use "lloyd" instead of "elkan".

    Attributes
    ----------
    cluster_centers_ : ndarray of shape (n_clusters, n_features)
        Coordinates of cluster centers. If the algorithm stops before fully
        converging (see ``tol`` and ``max_iter``), these will not be
        consistent with ``labels_``.

    labels_ : ndarray of shape (n_samples,)
        Labels of each point

    inertia_ : float
        Sum of squared distances of samples to their closest cluster center,
        weighted by the sample weights if provided.

    n_iter_ : int
        Number of iterations run.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    MiniBatchKMeans : Alternative online implementation that does incremental
        updates of the centers positions using mini-batches.
        For large scale learning (say n_samples > 10k) MiniBatchKMeans is
        probably much faster than the default batch implementation.

    Notes
    -----
    The k-means problem is solved using either Lloyd's or Elkan's algorithm.

    The average complexity is given by O(k n T), where n is the number of
    samples and T is the number of iterations.

    The worst case complexity is given by O(n^(k+2/p)) with
    n = n_samples, p = n_features.
    Refer to :doi:`"How slow is the k-means method?" D. Arthur and S. Vassilvitskii -
    SoCG2006.<10.1145/1137856.1137880>` for more details.

    In practice, the k-means algorithm is very fast (one of the fastest
    clustering algorithms available), but it can fall into local minima. That's
    why it can be useful to restart it several times.

    If the algorithm stops before fully converging (because of ``tol`` or
    ``max_iter``), ``labels_`` and ``cluster_centers_`` will not be consistent,
    i.e. the ``cluster_centers_`` will not be the means of the points in each
    cluster. Also, the estimator will reassign ``labels_`` after the last
    iteration to make ``labels_`` consistent with ``predict`` on the training
    set.

    Examples
    --------

    >>> from sklearn.cluster import KMeans
    >>> import numpy as np
    >>> X = np.array([[1, 2], [1, 4], [1, 0],
    ...               [10, 2], [10, 4], [10, 0]])
    >>> kmeans = KMeans(n_clusters=2, random_state=0, n_init="auto").fit(X)
    >>> kmeans.labels_
    array([1, 1, 1, 0, 0, 0], dtype=int32)
    >>> kmeans.predict([[0, 0], [12, 3]])
    array([1, 0], dtype=int32)
    >>> kmeans.cluster_centers_
    array([[10.,  2.],
           [ 1.,  2.]])

    For a more detailed example of K-Means using the iris dataset see
    :ref:`sphx_glr_auto_examples_cluster_plot_cluster_iris.py`.

    For examples of common problems with K-Means and how to address them see
    :ref:`sphx_glr_auto_examples_cluster_plot_kmeans_assumptions.py`.

    For an example of how to use K-Means to perform color quantization see
    :ref:`sphx_glr_auto_examples_cluster_plot_color_quantization.py`.

    For a demonstration of how K-Means can be used to cluster text documents see
    :ref:`sphx_glr_auto_examples_text_plot_document_clustering.py`.

    For a comparison between K-Means and MiniBatchKMeans refer to example
    :ref:`sphx_glr_auto_examples_cluster_plot_mini_batch_kmeans.py`.
    """

    _parameter_constraints: dict = {
        **_BaseKMeans._parameter_constraints,
        "copy_x": ["boolean"],
        "algorithm": [StrOptions({"lloyd", "elkan"})],
    }

    def __init__(
        self,
        n_clusters=8,
        *,
        init="k-means++",
        n_init="auto",
        max_iter=300,
        tol=1e-4,
        verbose=0,
        random_state=None,
        copy_x=True,
        algorithm="lloyd",
    ):
        super().__init__(
            n_clusters=n_clusters,
            init=init,
            n_init=n_init,
            max_iter=max_iter,
            tol=tol,
            verbose=verbose,
            random_state=random_state,
        )

        self.copy_x = copy_x
        self.algorithm = algorithm

    def _check_params_vs_input(self, X):
        super()._check_params_vs_input(X, default_n_init=10)

        self._algorithm = self.algorithm
        if self._algorithm == "elkan" and self.n_clusters == 1:
            warnings.warn(
                (
                    "algorithm='elkan' doesn't make sense for a single "
                    "cluster. Using 'lloyd' instead."
                ),
                RuntimeWarning,
            )
            self._algorithm = "lloyd"

    def _warn_mkl_vcomp(self, n_active_threads):
        """Warn when vcomp and mkl are both present"""
        warnings.warn(
            "KMeans is known to have a memory leak on Windows "
            "with MKL, when there are less chunks than available "
            "threads. You can avoid it by setting the environment"
            f" variable OMP_NUM_THREADS={n_active_threads}."
        )

    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y=None, sample_weight=None):
        """Compute k-means clustering.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Training instances to cluster. It must be noted that the data
            will be converted to C ordering, which will cause a memory
            copy if the given data is not C-contiguous.
            If a sparse matrix is passed, a copy will be made if it's not in
            CSR format.

        y : Ignored
            Not used, present here for API consistency by convention.

        sample_weight : array-like of shape (n_samples,), default=None
            The weights for each observation in X. If None, all observations
            are assigned equal weight. `sample_weight` is not used during
            initialization if `init` is a callable or a user provided array.

            .. versionadded:: 0.20

        Returns
        -------
        self : object
            Fitted estimator.
        """
        X = self._validate_data(
            X,
            accept_sparse="csr",
            dtype=[np.float64, np.float32],
            order="C",
            copy=self.copy_x,
            accept_large_sparse=False,
        )

        self._check_params_vs_input(X)

        random_state = check_random_state(self.random_state)
        sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
        self._n_threads = _openmp_effective_n_threads()

        # Validate init array
        init = self.init
        init_is_array_like = _is_arraylike_not_scalar(init)
        if init_is_array_like:
            init = check_array(init, dtype=X.dtype, copy=True, order="C")
            self._validate_center_shape(X, init)

        # subtract the mean of X for more accurate distance computations
        if not sp.issparse(X):
            X_mean = X.mean(axis=0)
            # The copy was already done above
            X -= X_mean

            if init_is_array_like:
                init -= X_mean

        # precompute squared norms of data points
        x_squared_norms = row_norms(X, squared=True)

        if self._algorithm == "elkan":
            kmeans_single = _kmeans_single_elkan
        else:
            kmeans_single = _kmeans_single_lloyd
            self._check_mkl_vcomp(X, X.shape[0])

        best_inertia, best_labels = None, None

        for i in range(self._n_init):
            # Initialize centers
            centers_init = self._init_centroids(
                X,
                x_squared_norms=x_squared_norms,
                init=init,
                random_state=random_state,
                sample_weight=sample_weight,
            )
            if self.verbose:
                print("Initialization complete")

            # run a k-means once
            labels, inertia, centers, n_iter_ = kmeans_single(
                X,
                sample_weight,
                centers_init,
                max_iter=self.max_iter,
                verbose=self.verbose,
                tol=self._tol,
                n_threads=self._n_threads,
            )

            # determine if these results are the best so far
            # we choose a new run if it has a better inertia and the clustering is
            # different from the best so far (it's possible that the inertia is
            # slightly better even if the clustering is the same with potentially
            # permuted labels, due to rounding errors)
            if best_inertia is None or (
                inertia < best_inertia
                and not _is_same_clustering(labels, best_labels, self.n_clusters)
            ):
                best_labels = labels
                best_centers = centers
                best_inertia = inertia
                best_n_iter = n_iter_

        if not sp.issparse(X):
            if not self.copy_x:
                X += X_mean
            best_centers += X_mean

        distinct_clusters = len(set(best_labels))
        if distinct_clusters < self.n_clusters:
            warnings.warn(
                "Number of distinct clusters ({}) found smaller than "
                "n_clusters ({}). Possibly due to duplicate points "
                "in X.".format(distinct_clusters, self.n_clusters),
                ConvergenceWarning,
                stacklevel=2,
            )

        self.cluster_centers_ = best_centers
        self._n_features_out = self.cluster_centers_.shape[0]
        self.labels_ = best_labels
        self.inertia_ = best_inertia
        self.n_iter_ = best_n_iter
        return self
def _mini_batch_step(
|
1584 |
+
X,
|
1585 |
+
sample_weight,
|
1586 |
+
centers,
|
1587 |
+
centers_new,
|
1588 |
+
weight_sums,
|
1589 |
+
random_state,
|
1590 |
+
random_reassign=False,
|
1591 |
+
reassignment_ratio=0.01,
|
1592 |
+
verbose=False,
|
1593 |
+
n_threads=1,
|
1594 |
+
):
|
1595 |
+
"""Incremental update of the centers for the Minibatch K-Means algorithm.
|
1596 |
+
|
1597 |
+
Parameters
|
1598 |
+
----------
|
1599 |
+
|
1600 |
+
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
|
1601 |
+
The original data array. If sparse, must be in CSR format.
|
1602 |
+
|
1603 |
+
x_squared_norms : ndarray of shape (n_samples,)
|
1604 |
+
Squared euclidean norm of each data point.
|
1605 |
+
|
1606 |
+
sample_weight : ndarray of shape (n_samples,)
|
1607 |
+
The weights for each observation in `X`.
|
1608 |
+
|
1609 |
+
centers : ndarray of shape (n_clusters, n_features)
|
1610 |
+
The cluster centers before the current iteration
|
1611 |
+
|
1612 |
+
centers_new : ndarray of shape (n_clusters, n_features)
|
1613 |
+
The cluster centers after the current iteration. Modified in-place.
|
1614 |
+
|
1615 |
+
weight_sums : ndarray of shape (n_clusters,)
|
1616 |
+
The vector in which we keep track of the numbers of points in a
|
1617 |
+
cluster. This array is modified in place.
|
1618 |
+
|
1619 |
+
random_state : RandomState instance
|
1620 |
+
Determines random number generation for low count centers reassignment.
|
1621 |
+
See :term:`Glossary <random_state>`.
|
1622 |
+
|
1623 |
+
random_reassign : boolean, default=False
|
1624 |
+
If True, centers with very low counts are randomly reassigned
|
1625 |
+
to observations.
|
1626 |
+
|
1627 |
+
reassignment_ratio : float, default=0.01
|
1628 |
+
Control the fraction of the maximum number of counts for a
|
1629 |
+
center to be reassigned. A higher value means that low count
|
1630 |
+
centers are more likely to be reassigned, which means that the
|
1631 |
+
model will take longer to converge, but should converge in a
|
1632 |
+
better clustering.
|
1633 |
+
|
1634 |
+
verbose : bool, default=False
|
1635 |
+
Controls the verbosity.
|
1636 |
+
|
1637 |
+
n_threads : int, default=1
|
1638 |
+
The number of OpenMP threads to use for the computation.
|
1639 |
+
|
1640 |
+
Returns
|
1641 |
+
-------
|
1642 |
+
inertia : float
|
1643 |
+
Sum of squared distances of samples to their closest cluster center.
|
1644 |
+
The inertia is computed after finding the labels and before updating
|
1645 |
+
the centers.
|
1646 |
+
"""
|
1647 |
+
# Perform label assignment to nearest centers
|
1648 |
+
# For better efficiency, it's better to run _mini_batch_step in a
|
1649 |
+
# threadpool_limit context than using _labels_inertia_threadpool_limit here
|
1650 |
+
labels, inertia = _labels_inertia(X, sample_weight, centers, n_threads=n_threads)
|
1651 |
+
|
1652 |
+
# Update centers according to the labels
|
1653 |
+
if sp.issparse(X):
|
1654 |
+
_minibatch_update_sparse(
|
1655 |
+
X, sample_weight, centers, centers_new, weight_sums, labels, n_threads
|
1656 |
+
)
|
1657 |
+
else:
|
1658 |
+
_minibatch_update_dense(
|
1659 |
+
X,
|
1660 |
+
sample_weight,
|
1661 |
+
centers,
|
1662 |
+
centers_new,
|
1663 |
+
weight_sums,
|
1664 |
+
labels,
|
1665 |
+
n_threads,
|
1666 |
+
)
|
1667 |
+
|
1668 |
+
# Reassign clusters that have very low weight
|
1669 |
+
if random_reassign and reassignment_ratio > 0:
|
1670 |
+
to_reassign = weight_sums < reassignment_ratio * weight_sums.max()
|
1671 |
+
|
1672 |
+
# pick at most .5 * batch_size samples as new centers
|
1673 |
+
if to_reassign.sum() > 0.5 * X.shape[0]:
|
1674 |
+
indices_dont_reassign = np.argsort(weight_sums)[int(0.5 * X.shape[0]) :]
|
1675 |
+
to_reassign[indices_dont_reassign] = False
|
1676 |
+
n_reassigns = to_reassign.sum()
|
1677 |
+
|
1678 |
+
if n_reassigns:
|
1679 |
+
# Pick new clusters amongst observations with uniform probability
|
1680 |
+
new_centers = random_state.choice(
|
1681 |
+
X.shape[0], replace=False, size=n_reassigns
|
1682 |
+
)
|
1683 |
+
if verbose:
|
1684 |
+
print(f"[MiniBatchKMeans] Reassigning {n_reassigns} cluster centers.")
|
1685 |
+
|
1686 |
+
if sp.issparse(X):
|
1687 |
+
assign_rows_csr(
|
1688 |
+
X,
|
1689 |
+
new_centers.astype(np.intp, copy=False),
|
1690 |
+
np.where(to_reassign)[0].astype(np.intp, copy=False),
|
1691 |
+
centers_new,
|
1692 |
+
)
|
1693 |
+
else:
|
1694 |
+
centers_new[to_reassign] = X[new_centers]
|
1695 |
+
|
1696 |
+
# reset counts of reassigned centers, but don't reset them too small
|
1697 |
+
# to avoid instant reassignment. This is a pretty dirty hack as it
|
1698 |
+
# also modifies the learning rates.
|
1699 |
+
weight_sums[to_reassign] = np.min(weight_sums[~to_reassign])
|
1700 |
+
|
1701 |
+
return inertia
|
1702 |
+
|
1703 |
+
|
1704 |
+
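

# --- Illustrative sketch (editor's addition, not part of the library code) --
# The compiled `_minibatch_update_dense` / `_minibatch_update_sparse` kernels
# called above implement, per center, a running weighted mean: the old center
# keeps the weight it has accumulated so far (`weight_sums`), and the batch
# points add their own weights. A simplified NumPy rendering of that idea
# (dense input only, no threading; `_example_minibatch_update` is a
# hypothetical name, not the real kernel):
def _example_minibatch_update(X_batch, w_batch, centers, weight_sums, labels):
    centers_new = centers.copy()
    for j in range(centers.shape[0]):
        mask = labels == j
        batch_w = w_batch[mask].sum()
        if batch_w > 0:
            old_w = weight_sums[j]
            weight_sums[j] = old_w + batch_w
            # Weighted mean of the old center and the new batch points.
            weighted_batch_sum = (X_batch[mask] * w_batch[mask][:, None]).sum(axis=0)
            centers_new[j] = (centers[j] * old_w + weighted_batch_sum) / weight_sums[j]
    return centers_new

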
class MiniBatchKMeans(_BaseKMeans):
    """
    Mini-Batch K-Means clustering.

    Read more in the :ref:`User Guide <mini_batch_kmeans>`.

    Parameters
    ----------

    n_clusters : int, default=8
        The number of clusters to form as well as the number of
        centroids to generate.

    init : {'k-means++', 'random'}, callable or array-like of shape \
            (n_clusters, n_features), default='k-means++'
        Method for initialization:

        'k-means++' : selects initial cluster centroids using sampling based on
        an empirical probability distribution of the points' contribution to the
        overall inertia. This technique speeds up convergence. The algorithm
        implemented is "greedy k-means++". It differs from the vanilla k-means++
        by making several trials at each sampling step and choosing the best centroid
        among them.

        'random': choose `n_clusters` observations (rows) at random from data
        for the initial centroids.

        If an array is passed, it should be of shape (n_clusters, n_features)
        and gives the initial centers.

        If a callable is passed, it should take arguments X, n_clusters and a
        random state and return an initialization.

    max_iter : int, default=100
        Maximum number of iterations over the complete dataset before
        stopping independently of any early stopping criterion heuristics.

    batch_size : int, default=1024
        Size of the mini batches.
        For faster computations, you can set the ``batch_size`` greater than
        256 * number of cores to enable parallelism on all cores.

        .. versionchanged:: 1.0
            `batch_size` default changed from 100 to 1024.

    verbose : int, default=0
        Verbosity mode.

    compute_labels : bool, default=True
        Compute label assignment and inertia for the complete dataset
        once the minibatch optimization has converged in fit.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for centroid initialization and
        random reassignment. Use an int to make the randomness deterministic.
        See :term:`Glossary <random_state>`.

    tol : float, default=0.0
        Control early stopping based on the relative center changes as
        measured by a smoothed, variance-normalized estimate of the mean
        center squared position changes. This early stopping heuristic is
        closer to the one used for the batch variant of the algorithm
        but induces a slight computational and memory overhead over the
        inertia heuristic.

        To disable convergence detection based on normalized center
        change, set tol to 0.0 (default).

    max_no_improvement : int, default=10
        Control early stopping based on the consecutive number of mini
        batches that do not yield an improvement on the smoothed inertia.

        To disable convergence detection based on inertia, set
        max_no_improvement to None.

    init_size : int, default=None
        Number of samples to randomly sample for speeding up the
        initialization (sometimes at the expense of accuracy): the
        algorithm is initialized by running a batch KMeans on a
        random subset of the data. This needs to be larger than n_clusters.

        If `None`, the heuristic is `init_size = 3 * batch_size` if
        `3 * batch_size < n_clusters`, else `init_size = 3 * n_clusters`.

    n_init : 'auto' or int, default="auto"
        Number of random initializations that are tried.
        In contrast to KMeans, the algorithm is only run once, using the best of
        the `n_init` initializations as measured by inertia. Several runs are
        recommended for sparse high-dimensional problems (see
        :ref:`kmeans_sparse_high_dim`).

        When `n_init='auto'`, the number of runs depends on the value of init:
        3 if using `init='random'` or `init` is a callable;
        1 if using `init='k-means++'` or `init` is an array-like.

        .. versionadded:: 1.2
            Added 'auto' option for `n_init`.

        .. versionchanged:: 1.4
            Default value for `n_init` changed to `'auto'`.

    reassignment_ratio : float, default=0.01
        Control the fraction of the maximum number of counts for a center to
        be reassigned. A higher value means that low count centers are more
        easily reassigned, which means that the model will take longer to
        converge, but should converge to a better clustering. However, too high
        a value may cause convergence issues, especially with a small batch
        size.

    Attributes
    ----------

    cluster_centers_ : ndarray of shape (n_clusters, n_features)
        Coordinates of cluster centers.

    labels_ : ndarray of shape (n_samples,)
        Labels of each point (if compute_labels is set to True).

    inertia_ : float
        The value of the inertia criterion associated with the chosen
        partition if compute_labels is set to True. If compute_labels is set to
        False, it's an approximation of the inertia based on an exponentially
        weighted average of the batch inertiae.
        The inertia is defined as the sum of square distances of samples to
        their cluster center, weighted by the sample weights if provided.

    n_iter_ : int
        Number of iterations over the full dataset.

    n_steps_ : int
        Number of minibatches processed.

        .. versionadded:: 1.0

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    KMeans : The classic implementation of the clustering method based on the
        Lloyd's algorithm. It consumes the whole set of input data at each
        iteration.

    Notes
    -----
    See https://www.eecs.tufts.edu/~dsculley/papers/fastkmeans.pdf

    When there are too few points in the dataset, some centers may be
    duplicated, which means that the number of requested clusters and the
    number of returned clusters will not always match. One solution is to set
    `reassignment_ratio=0`, which prevents reassignments of clusters that are
    too small.

    Examples
    --------
    >>> from sklearn.cluster import MiniBatchKMeans
    >>> import numpy as np
    >>> X = np.array([[1, 2], [1, 4], [1, 0],
    ...               [4, 2], [4, 0], [4, 4],
    ...               [4, 5], [0, 1], [2, 2],
    ...               [3, 2], [5, 5], [1, -1]])
    >>> # manually fit on batches
    >>> kmeans = MiniBatchKMeans(n_clusters=2,
    ...                          random_state=0,
    ...                          batch_size=6,
    ...                          n_init="auto")
    >>> kmeans = kmeans.partial_fit(X[0:6,:])
    >>> kmeans = kmeans.partial_fit(X[6:12,:])
    >>> kmeans.cluster_centers_
    array([[3.375, 3.  ],
           [0.75 , 0.5 ]])
    >>> kmeans.predict([[0, 0], [4, 4]])
    array([1, 0], dtype=int32)
    >>> # fit on the whole data
    >>> kmeans = MiniBatchKMeans(n_clusters=2,
    ...                          random_state=0,
    ...                          batch_size=6,
    ...                          max_iter=10,
    ...                          n_init="auto").fit(X)
    >>> kmeans.cluster_centers_
    array([[3.55102041, 2.48979592],
           [1.06896552, 1.        ]])
    >>> kmeans.predict([[0, 0], [4, 4]])
    array([1, 0], dtype=int32)
    """

    _parameter_constraints: dict = {
        **_BaseKMeans._parameter_constraints,
        "batch_size": [Interval(Integral, 1, None, closed="left")],
        "compute_labels": ["boolean"],
        "max_no_improvement": [Interval(Integral, 0, None, closed="left"), None],
        "init_size": [Interval(Integral, 1, None, closed="left"), None],
        "reassignment_ratio": [Interval(Real, 0, None, closed="left")],
    }

    def __init__(
        self,
        n_clusters=8,
        *,
        init="k-means++",
        max_iter=100,
        batch_size=1024,
        verbose=0,
        compute_labels=True,
        random_state=None,
        tol=0.0,
        max_no_improvement=10,
        init_size=None,
        n_init="auto",
        reassignment_ratio=0.01,
    ):
        super().__init__(
            n_clusters=n_clusters,
            init=init,
            max_iter=max_iter,
            verbose=verbose,
            random_state=random_state,
            tol=tol,
            n_init=n_init,
        )

        self.max_no_improvement = max_no_improvement
        self.batch_size = batch_size
        self.compute_labels = compute_labels
        self.init_size = init_size
        self.reassignment_ratio = reassignment_ratio

    def _check_params_vs_input(self, X):
        super()._check_params_vs_input(X, default_n_init=3)

        self._batch_size = min(self.batch_size, X.shape[0])

        # init_size
        self._init_size = self.init_size
        if self._init_size is None:
            self._init_size = 3 * self._batch_size
            if self._init_size < self.n_clusters:
                self._init_size = 3 * self.n_clusters
        elif self._init_size < self.n_clusters:
            warnings.warn(
                (
                    f"init_size={self._init_size} should be larger than "
                    f"n_clusters={self.n_clusters}. Setting it to "
                    "min(3*n_clusters, n_samples)"
                ),
                RuntimeWarning,
                stacklevel=2,
            )
            self._init_size = 3 * self.n_clusters
        self._init_size = min(self._init_size, X.shape[0])

        # reassignment_ratio
        if self.reassignment_ratio < 0:
            raise ValueError(
                "reassignment_ratio should be >= 0, got "
                f"{self.reassignment_ratio} instead."
            )

    def _warn_mkl_vcomp(self, n_active_threads):
        """Warn when vcomp and mkl are both present"""
        warnings.warn(
            "MiniBatchKMeans is known to have a memory leak on "
            "Windows with MKL, when there are less chunks than "
            "available threads. You can prevent it by setting "
            f"batch_size >= {self._n_threads * CHUNK_SIZE} or by "
            "setting the environment variable "
            f"OMP_NUM_THREADS={n_active_threads}"
        )

    def _mini_batch_convergence(
        self, step, n_steps, n_samples, centers_squared_diff, batch_inertia
    ):
        """Helper function to encapsulate the early stopping logic"""
        # Normalize inertia to be able to compare values when
        # batch_size changes
        batch_inertia /= self._batch_size

        # count steps starting from 1 for user friendly verbose mode.
        step = step + 1

        # Ignore first iteration because it's inertia from initialization.
        if step == 1:
            if self.verbose:
                print(
                    f"Minibatch step {step}/{n_steps}: mean batch "
                    f"inertia: {batch_inertia}"
                )
            return False

        # Compute an Exponentially Weighted Average of the inertia to
        # monitor the convergence while discarding minibatch-local stochastic
        # variability: https://en.wikipedia.org/wiki/Moving_average
        if self._ewa_inertia is None:
            self._ewa_inertia = batch_inertia
        else:
            alpha = self._batch_size * 2.0 / (n_samples + 1)
            alpha = min(alpha, 1)
            self._ewa_inertia = self._ewa_inertia * (1 - alpha) + batch_inertia * alpha

        # Log progress to be able to monitor convergence
        if self.verbose:
            print(
                f"Minibatch step {step}/{n_steps}: mean batch inertia: "
                f"{batch_inertia}, ewa inertia: {self._ewa_inertia}"
            )

        # Early stopping based on absolute tolerance on squared change of
        # centers position
        if self._tol > 0.0 and centers_squared_diff <= self._tol:
            if self.verbose:
                print(f"Converged (small centers change) at step {step}/{n_steps}")
            return True

        # Early stopping heuristic due to lack of improvement on smoothed
        # inertia
        if self._ewa_inertia_min is None or self._ewa_inertia < self._ewa_inertia_min:
            self._no_improvement = 0
            self._ewa_inertia_min = self._ewa_inertia
        else:
            self._no_improvement += 1

        if (
            self.max_no_improvement is not None
            and self._no_improvement >= self.max_no_improvement
        ):
            if self.verbose:
                print(
                    "Converged (lack of improvement in inertia) at step "
                    f"{step}/{n_steps}"
                )
            return True

        return False

    def _random_reassign(self):
        """Check if a random reassignment needs to be done.

        Do random reassignments each time 10 * n_clusters samples have been
        processed.

        If there are empty clusters we always want to reassign.
        """
        self._n_since_last_reassign += self._batch_size
        if (self._counts == 0).any() or self._n_since_last_reassign >= (
            10 * self.n_clusters
        ):
            self._n_since_last_reassign = 0
            return True
        return False

    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y=None, sample_weight=None):
        """Compute the centroids on X by chunking it into mini-batches.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Training instances to cluster. It must be noted that the data
            will be converted to C ordering, which will cause a memory copy
            if the given data is not C-contiguous.
            If a sparse matrix is passed, a copy will be made if it's not in
            CSR format.

        y : Ignored
            Not used, present here for API consistency by convention.

        sample_weight : array-like of shape (n_samples,), default=None
            The weights for each observation in X. If None, all observations
            are assigned equal weight. `sample_weight` is not used during
            initialization if `init` is a callable or a user provided array.

            .. versionadded:: 0.20

        Returns
        -------
        self : object
            Fitted estimator.
        """
        X = self._validate_data(
            X,
            accept_sparse="csr",
            dtype=[np.float64, np.float32],
            order="C",
            accept_large_sparse=False,
        )

        self._check_params_vs_input(X)
        random_state = check_random_state(self.random_state)
        sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
        self._n_threads = _openmp_effective_n_threads()
        n_samples, n_features = X.shape

        # Validate init array
        init = self.init
        if _is_arraylike_not_scalar(init):
            init = check_array(init, dtype=X.dtype, copy=True, order="C")
            self._validate_center_shape(X, init)

        self._check_mkl_vcomp(X, self._batch_size)

        # precompute squared norms of data points
        x_squared_norms = row_norms(X, squared=True)

        # Validation set for the init
        validation_indices = random_state.randint(0, n_samples, self._init_size)
        X_valid = X[validation_indices]
        sample_weight_valid = sample_weight[validation_indices]

        # perform several inits with random subsets
        best_inertia = None
        for init_idx in range(self._n_init):
            if self.verbose:
                print(f"Init {init_idx + 1}/{self._n_init} with method {init}")

            # Initialize the centers using only a fraction of the data as we
            # expect n_samples to be very large when using MiniBatchKMeans.
            cluster_centers = self._init_centroids(
                X,
                x_squared_norms=x_squared_norms,
                init=init,
                random_state=random_state,
                init_size=self._init_size,
                sample_weight=sample_weight,
            )

            # Compute inertia on a validation set.
            _, inertia = _labels_inertia_threadpool_limit(
                X_valid,
                sample_weight_valid,
                cluster_centers,
                n_threads=self._n_threads,
            )

            if self.verbose:
                print(f"Inertia for init {init_idx + 1}/{self._n_init}: {inertia}")
            if best_inertia is None or inertia < best_inertia:
                init_centers = cluster_centers
                best_inertia = inertia

        centers = init_centers
        centers_new = np.empty_like(centers)

        # Initialize counts
        self._counts = np.zeros(self.n_clusters, dtype=X.dtype)

        # Attributes to monitor the convergence
        self._ewa_inertia = None
        self._ewa_inertia_min = None
        self._no_improvement = 0

        # Initialize number of samples seen since last reassignment
        self._n_since_last_reassign = 0

        n_steps = (self.max_iter * n_samples) // self._batch_size

        with threadpool_limits(limits=1, user_api="blas"):
            # Perform the iterative optimization until convergence
            for i in range(n_steps):
                # Sample a minibatch from the full dataset
                minibatch_indices = random_state.randint(0, n_samples, self._batch_size)

                # Perform the actual update step on the minibatch data
                batch_inertia = _mini_batch_step(
                    X=X[minibatch_indices],
                    sample_weight=sample_weight[minibatch_indices],
                    centers=centers,
                    centers_new=centers_new,
                    weight_sums=self._counts,
                    random_state=random_state,
                    random_reassign=self._random_reassign(),
                    reassignment_ratio=self.reassignment_ratio,
                    verbose=self.verbose,
                    n_threads=self._n_threads,
                )

                if self._tol > 0.0:
                    centers_squared_diff = np.sum((centers_new - centers) ** 2)
                else:
                    centers_squared_diff = 0

                centers, centers_new = centers_new, centers

                # Monitor convergence and do early stopping if necessary
                if self._mini_batch_convergence(
                    i, n_steps, n_samples, centers_squared_diff, batch_inertia
                ):
                    break

        self.cluster_centers_ = centers
        self._n_features_out = self.cluster_centers_.shape[0]

        self.n_steps_ = i + 1
        self.n_iter_ = int(np.ceil(((i + 1) * self._batch_size) / n_samples))

        if self.compute_labels:
            self.labels_, self.inertia_ = _labels_inertia_threadpool_limit(
                X,
                sample_weight,
                self.cluster_centers_,
                n_threads=self._n_threads,
            )
        else:
            self.inertia_ = self._ewa_inertia * n_samples

        return self

    @_fit_context(prefer_skip_nested_validation=True)
    def partial_fit(self, X, y=None, sample_weight=None):
        """Update k means estimate on a single mini-batch X.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Training instances to cluster. It must be noted that the data
            will be converted to C ordering, which will cause a memory copy
            if the given data is not C-contiguous.
            If a sparse matrix is passed, a copy will be made if it's not in
            CSR format.

        y : Ignored
            Not used, present here for API consistency by convention.

        sample_weight : array-like of shape (n_samples,), default=None
            The weights for each observation in X. If None, all observations
            are assigned equal weight. `sample_weight` is not used during
            initialization if `init` is a callable or a user provided array.

        Returns
        -------
        self : object
            Return updated estimator.
        """
        has_centers = hasattr(self, "cluster_centers_")

        X = self._validate_data(
            X,
            accept_sparse="csr",
            dtype=[np.float64, np.float32],
            order="C",
            accept_large_sparse=False,
            reset=not has_centers,
        )

        self._random_state = getattr(
            self, "_random_state", check_random_state(self.random_state)
        )
        sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
        self.n_steps_ = getattr(self, "n_steps_", 0)

        # precompute squared norms of data points
        x_squared_norms = row_norms(X, squared=True)

        if not has_centers:
            # this instance has not been fitted yet (fit or partial_fit)
            self._check_params_vs_input(X)
            self._n_threads = _openmp_effective_n_threads()

            # Validate init array
            init = self.init
            if _is_arraylike_not_scalar(init):
                init = check_array(init, dtype=X.dtype, copy=True, order="C")
                self._validate_center_shape(X, init)

            self._check_mkl_vcomp(X, X.shape[0])

            # initialize the cluster centers
            self.cluster_centers_ = self._init_centroids(
                X,
                x_squared_norms=x_squared_norms,
                init=init,
                random_state=self._random_state,
                init_size=self._init_size,
                sample_weight=sample_weight,
            )

            # Initialize counts
            self._counts = np.zeros(self.n_clusters, dtype=X.dtype)

            # Initialize number of samples seen since last reassignment
            self._n_since_last_reassign = 0

        with threadpool_limits(limits=1, user_api="blas"):
            _mini_batch_step(
                X,
                sample_weight=sample_weight,
                centers=self.cluster_centers_,
                centers_new=self.cluster_centers_,
                weight_sums=self._counts,
                random_state=self._random_state,
                random_reassign=self._random_reassign(),
                reassignment_ratio=self.reassignment_ratio,
                verbose=self.verbose,
                n_threads=self._n_threads,
            )

        if self.compute_labels:
            self.labels_, self.inertia_ = _labels_inertia_threadpool_limit(
                X,
                sample_weight,
                self.cluster_centers_,
                n_threads=self._n_threads,
            )

        self.n_steps_ += 1
        self._n_features_out = self.cluster_centers_.shape[0]

        return self
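

# --- Illustrative sketch (editor's addition, not part of the library code) --
# The inertia-based early stopping in `_mini_batch_convergence` above boils
# down to an exponentially weighted average (EWA) of per-batch inertia plus a
# patience counter (`max_no_improvement`). A toy, self-contained rendering of
# that rule (hypothetical name; the real method also handles verbose output
# and the tol-based test on center movement):
def _example_should_stop(batch_inertias, batch_size, n_samples, patience=10):
    alpha = min(batch_size * 2.0 / (n_samples + 1), 1.0)
    ewa = ewa_min = None
    no_improvement = 0
    for inertia in batch_inertias:
        # Smooth out minibatch-local stochastic variability.
        ewa = inertia if ewa is None else ewa * (1 - alpha) + inertia * alpha
        if ewa_min is None or ewa < ewa_min:
            ewa_min, no_improvement = ewa, 0
        else:
            no_improvement += 1
        if no_improvement >= patience:
            return True
    return False
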
- venv/lib/python3.10/site-packages/sklearn/cluster/tests/__init__.py ADDED (empty file)
- venv/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/__init__.cpython-310.pyc ADDED (binary, 189 Bytes)
- venv/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/common.cpython-310.pyc ADDED (binary, 826 Bytes)
- venv/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_affinity_propagation.cpython-310.pyc ADDED (binary, 9.65 kB)
- venv/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_bicluster.cpython-310.pyc ADDED (binary, 7.73 kB)
- venv/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_birch.cpython-310.pyc ADDED (binary, 7.2 kB)
- venv/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_bisect_k_means.cpython-310.pyc ADDED (binary, 4.51 kB)
- venv/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_dbscan.cpython-310.pyc ADDED (binary, 10.5 kB)