Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- ckpts/universal/global_step40/zero/10.mlp.dense_4h_to_h.weight/exp_avg.pt +3 -0
- ckpts/universal/global_step40/zero/10.mlp.dense_4h_to_h.weight/exp_avg_sq.pt +3 -0
- ckpts/universal/global_step40/zero/17.post_attention_layernorm.weight/exp_avg_sq.pt +3 -0
- ckpts/universal/global_step40/zero/18.mlp.dense_4h_to_h.weight/exp_avg.pt +3 -0
- ckpts/universal/global_step40/zero/21.attention.query_key_value.weight/fp32.pt +3 -0
- ckpts/universal/global_step40/zero/25.attention.dense.weight/exp_avg_sq.pt +3 -0
- ckpts/universal/global_step40/zero/25.attention.dense.weight/fp32.pt +3 -0
- ckpts/universal/global_step40/zero/26.mlp.dense_4h_to_h.weight/exp_avg.pt +3 -0
- ckpts/universal/global_step40/zero/26.mlp.dense_4h_to_h.weight/exp_avg_sq.pt +3 -0
- ckpts/universal/global_step40/zero/26.mlp.dense_4h_to_h.weight/fp32.pt +3 -0
- venv/lib/python3.10/site-packages/sklearn/neighbors/_base.py +1387 -0
- venv/lib/python3.10/site-packages/sklearn/neighbors/_classification.py +839 -0
- venv/lib/python3.10/site-packages/sklearn/neighbors/_graph.py +719 -0
- venv/lib/python3.10/site-packages/sklearn/neighbors/_nca.py +525 -0
- venv/lib/python3.10/site-packages/sklearn/neighbors/_partition_nodes.cpython-310-x86_64-linux-gnu.so +0 -0
- venv/lib/python3.10/site-packages/sklearn/neighbors/_quad_tree.cpython-310-x86_64-linux-gnu.so +0 -0
- venv/lib/python3.10/site-packages/sklearn/neighbors/_regression.py +510 -0
- venv/lib/python3.10/site-packages/sklearn/neighbors/_unsupervised.py +175 -0
- venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_arpack.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_array_api.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_available_if.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_bunch.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_encode.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_estimator_html_repr.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_joblib.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_mask.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_metadata_requests.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_mocking.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_param_validation.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_plotting.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_pprint.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_response.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_set_output.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_show_versions.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_tags.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_testing.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/class_weight.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/deprecation.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/discovery.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/estimator_checks.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/extmath.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/fixes.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/graph.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/metadata_routing.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/metaestimators.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/multiclass.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/optimize.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/parallel.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/random.cpython-310.pyc +0 -0
ckpts/universal/global_step40/zero/10.mlp.dense_4h_to_h.weight/exp_avg.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b9689eb1c74eddf479143f88377890aaf72609025da1d42e38e1c3341dfdd31b
+size 33555612
ckpts/universal/global_step40/zero/10.mlp.dense_4h_to_h.weight/exp_avg_sq.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fa4244869979212bc62f6130b03a693e182770b2cb2233a3e9b027d769daaefc
+size 33555627
ckpts/universal/global_step40/zero/17.post_attention_layernorm.weight/exp_avg_sq.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:445d543dfa522ec41120f21a75f0773cdb130f313f3a7855b4c98eb1d64e3c7a
+size 9387
ckpts/universal/global_step40/zero/18.mlp.dense_4h_to_h.weight/exp_avg.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:df24ce7c264f4ab09f898921658c8a326f43eda7fa3d449e3254fd6907973604
+size 33555612
ckpts/universal/global_step40/zero/21.attention.query_key_value.weight/fp32.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8ac18b29045cef3697c1b6e55206eb56999cea21767bfd0526373ab86f73271a
+size 50332749
ckpts/universal/global_step40/zero/25.attention.dense.weight/exp_avg_sq.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4dc1337163ce023c2f71e491e66aa73a14dfbd46435f6ae0f03a8ec5c0e9b7d0
+size 16778411
ckpts/universal/global_step40/zero/25.attention.dense.weight/fp32.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d0a311c07cdcd005b77c2a71a818f6d585a7779a10cb045bbe91e6f62c5cd6cb
+size 16778317
ckpts/universal/global_step40/zero/26.mlp.dense_4h_to_h.weight/exp_avg.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0e014baa072b1dc520dca57e239d50b32356e6245697cbba503b3911d071c5bf
+size 33555612
ckpts/universal/global_step40/zero/26.mlp.dense_4h_to_h.weight/exp_avg_sq.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:72bf3dee9375a038147a1b688ed1b6779744207217d8e76a0e09a8f3028212db
+size 33555627
ckpts/universal/global_step40/zero/26.mlp.dense_4h_to_h.weight/fp32.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:34e9e7ca6e93becfe7bd651e1a2265dbc9d6110bb1375aae72f8d1c0f73677b9
+size 33555533
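Each hunk above adds a three-line Git LFS pointer rather than the tensor itself: the actual checkpoint blob is stored out of band and addressed by its SHA-256 digest, so the diff only records the pointer. As a minimal sketch (a hypothetical helper, not part of this repository), the pointer format can be parsed like this:

def parse_lfs_pointer(text: str) -> dict:
    """Parse the "key value" lines of a Git LFS pointer file."""
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    assert fields["version"] == "https://git-lfs.github.com/spec/v1"
    algorithm, _, digest = fields["oid"].partition(":")
    return {"algorithm": algorithm, "digest": digest, "size": int(fields["size"])}

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:b9689eb1c74eddf479143f88377890aaf72609025da1d42e38e1c3341dfdd31b
size 33555612"""
print(parse_lfs_pointer(pointer))  # size 33555612 matches the first hunk above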
venv/lib/python3.10/site-packages/sklearn/neighbors/_base.py
ADDED
@@ -0,0 +1,1387 @@
+"""Base and mixin classes for nearest neighbors."""
+# Authors: Jake Vanderplas <[email protected]>
+#          Fabian Pedregosa <[email protected]>
+#          Alexandre Gramfort <[email protected]>
+#          Sparseness support by Lars Buitinck
+#          Multi-output support by Arnaud Joly <[email protected]>
+#
+# License: BSD 3 clause (C) INRIA, University of Amsterdam
+import itertools
+import numbers
+import warnings
+from abc import ABCMeta, abstractmethod
+from functools import partial
+from numbers import Integral, Real
+
+import numpy as np
+from joblib import effective_n_jobs
+from scipy.sparse import csr_matrix, issparse
+
+from ..base import BaseEstimator, MultiOutputMixin, is_classifier
+from ..exceptions import DataConversionWarning, EfficiencyWarning
+from ..metrics import DistanceMetric, pairwise_distances_chunked
+from ..metrics._pairwise_distances_reduction import (
+    ArgKmin,
+    RadiusNeighbors,
+)
+from ..metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
+from ..utils import (
+    _to_object_array,
+    check_array,
+    gen_even_slices,
+)
+from ..utils._param_validation import Interval, StrOptions, validate_params
+from ..utils.fixes import parse_version, sp_base_version
+from ..utils.multiclass import check_classification_targets
+from ..utils.parallel import Parallel, delayed
+from ..utils.validation import check_is_fitted, check_non_negative
+from ._ball_tree import BallTree
+from ._kd_tree import KDTree
+
+SCIPY_METRICS = [
+    "braycurtis",
+    "canberra",
+    "chebyshev",
+    "correlation",
+    "cosine",
+    "dice",
+    "hamming",
+    "jaccard",
+    "mahalanobis",
+    "minkowski",
+    "rogerstanimoto",
+    "russellrao",
+    "seuclidean",
+    "sokalmichener",
+    "sokalsneath",
+    "sqeuclidean",
+    "yule",
+]
+if sp_base_version < parse_version("1.11"):
+    # Deprecated in SciPy 1.9 and removed in SciPy 1.11
+    SCIPY_METRICS += ["kulsinski"]
+if sp_base_version < parse_version("1.9"):
+    # Deprecated in SciPy 1.0 and removed in SciPy 1.9
+    SCIPY_METRICS += ["matching"]
+
+VALID_METRICS = dict(
+    ball_tree=BallTree.valid_metrics,
+    kd_tree=KDTree.valid_metrics,
+    # The following list comes from the
+    # sklearn.metrics.pairwise doc string
+    brute=sorted(set(PAIRWISE_DISTANCE_FUNCTIONS).union(SCIPY_METRICS)),
+)
+
+VALID_METRICS_SPARSE = dict(
+    ball_tree=[],
+    kd_tree=[],
+    brute=(PAIRWISE_DISTANCE_FUNCTIONS.keys() - {"haversine", "nan_euclidean"}),
+)
+
+
+def _get_weights(dist, weights):
+    """Get the weights from an array of distances and a parameter ``weights``.
+
+    Assume weights have already been validated.
+
+    Parameters
+    ----------
+    dist : ndarray
+        The input distances.
+
+    weights : {'uniform', 'distance'}, callable or None
+        The kind of weighting used.
+
+    Returns
+    -------
+    weights_arr : array of the same shape as ``dist``
+        If ``weights == 'uniform'``, then returns None.
+    """
+    if weights in (None, "uniform"):
+        return None
+
+    if weights == "distance":
+        # if user attempts to classify a point that was zero distance from one
+        # or more training points, those training points are weighted as 1.0
+        # and the other points as 0.0
+        if dist.dtype is np.dtype(object):
+            for point_dist_i, point_dist in enumerate(dist):
+                # check if point_dist is iterable
+                # (ex: RadiusNeighborClassifier.predict may set an element of
+                #  dist to 1e-6 to represent an 'outlier')
+                if hasattr(point_dist, "__contains__") and 0.0 in point_dist:
+                    dist[point_dist_i] = point_dist == 0.0
+                else:
+                    dist[point_dist_i] = 1.0 / point_dist
+        else:
+            with np.errstate(divide="ignore"):
+                dist = 1.0 / dist
+            inf_mask = np.isinf(dist)
+            inf_row = np.any(inf_mask, axis=1)
+            dist[inf_row] = inf_mask[inf_row]
+        return dist
+
+    if callable(weights):
+        return weights(dist)
+
+
+def _is_sorted_by_data(graph):
+    """Return whether the graph's non-zero entries are sorted by data.
+
+    The non-zero entries are stored in graph.data and graph.indices.
+    For each row (or sample), the non-zero entries can be either:
+        - sorted by indices, as after graph.sort_indices();
+        - sorted by data, as after _check_precomputed(graph);
+        - not sorted.
+
+    Parameters
+    ----------
+    graph : sparse matrix of shape (n_samples, n_samples)
+        Neighbors graph as given by `kneighbors_graph` or
+        `radius_neighbors_graph`. Matrix should be of CSR format.
+
+    Returns
+    -------
+    res : bool
+        Whether input graph is sorted by data.
+    """
+    assert graph.format == "csr"
+    out_of_order = graph.data[:-1] > graph.data[1:]
+    line_change = np.unique(graph.indptr[1:-1] - 1)
+    line_change = line_change[line_change < out_of_order.shape[0]]
+    return out_of_order.sum() == out_of_order[line_change].sum()
+
+
+def _check_precomputed(X):
+    """Check precomputed distance matrix.
+
+    If the precomputed distance matrix is sparse, it checks that the non-zero
+    entries are sorted by distances. If not, the matrix is copied and sorted.
+
+    Parameters
+    ----------
+    X : {sparse matrix, array-like}, (n_samples, n_samples)
+        Distance matrix to other samples. X may be a sparse matrix, in which
+        case only non-zero elements may be considered neighbors.
+
+    Returns
+    -------
+    X : {sparse matrix, array-like}, (n_samples, n_samples)
+        Distance matrix to other samples. X may be a sparse matrix, in which
+        case only non-zero elements may be considered neighbors.
+    """
+    if not issparse(X):
+        X = check_array(X)
+        check_non_negative(X, whom="precomputed distance matrix.")
+        return X
+    else:
+        graph = X
+
+    if graph.format not in ("csr", "csc", "coo", "lil"):
+        raise TypeError(
+            "Sparse matrix in {!r} format is not supported due to "
+            "its handling of explicit zeros".format(graph.format)
+        )
+    copied = graph.format != "csr"
+    graph = check_array(graph, accept_sparse="csr")
+    check_non_negative(graph, whom="precomputed distance matrix.")
+    graph = sort_graph_by_row_values(graph, copy=not copied, warn_when_not_sorted=True)
+
+    return graph
+
+
+@validate_params(
+    {
+        "graph": ["sparse matrix"],
+        "copy": ["boolean"],
+        "warn_when_not_sorted": ["boolean"],
+    },
+    prefer_skip_nested_validation=True,
+)
+def sort_graph_by_row_values(graph, copy=False, warn_when_not_sorted=True):
+    """Sort a sparse graph such that each row is stored with increasing values.
+
+    .. versionadded:: 1.2
+
+    Parameters
+    ----------
+    graph : sparse matrix of shape (n_samples, n_samples)
+        Distance matrix to other samples, where only non-zero elements are
+        considered neighbors. Matrix is converted to CSR format if not already.
+
+    copy : bool, default=False
+        If True, the graph is copied before sorting. If False, the sorting is
+        performed inplace. If the graph is not of CSR format, `copy` must be
+        True to allow the conversion to CSR format, otherwise an error is
+        raised.
+
+    warn_when_not_sorted : bool, default=True
+        If True, a :class:`~sklearn.exceptions.EfficiencyWarning` is raised
+        when the input graph is not sorted by row values.
+
+    Returns
+    -------
+    graph : sparse matrix of shape (n_samples, n_samples)
+        Distance matrix to other samples, where only non-zero elements are
+        considered neighbors. Matrix is in CSR format.
+
+    Examples
+    --------
+    >>> from scipy.sparse import csr_matrix
+    >>> from sklearn.neighbors import sort_graph_by_row_values
+    >>> X = csr_matrix(
+    ...     [[0., 3., 1.],
+    ...      [3., 0., 2.],
+    ...      [1., 2., 0.]])
+    >>> X.data
+    array([3., 1., 3., 2., 1., 2.])
+    >>> X_ = sort_graph_by_row_values(X)
+    >>> X_.data
+    array([1., 3., 2., 3., 1., 2.])
+    """
+    if graph.format == "csr" and _is_sorted_by_data(graph):
+        return graph
+
+    if warn_when_not_sorted:
+        warnings.warn(
+            (
+                "Precomputed sparse input was not sorted by row values. Use the"
+                " function sklearn.neighbors.sort_graph_by_row_values to sort the input"
+                " by row values, with warn_when_not_sorted=False to remove this"
+                " warning."
+            ),
+            EfficiencyWarning,
+        )
+
+    if graph.format not in ("csr", "csc", "coo", "lil"):
+        raise TypeError(
+            f"Sparse matrix in {graph.format!r} format is not supported due to "
+            "its handling of explicit zeros"
+        )
+    elif graph.format != "csr":
+        if not copy:
+            raise ValueError(
+                "The input graph is not in CSR format. Use copy=True to allow "
+                "the conversion to CSR format."
+            )
+        graph = graph.asformat("csr")
+    elif copy:  # csr format with copy=True
+        graph = graph.copy()
+
+    row_nnz = np.diff(graph.indptr)
+    if row_nnz.max() == row_nnz.min():
+        # if each sample has the same number of provided neighbors
+        n_samples = graph.shape[0]
+        distances = graph.data.reshape(n_samples, -1)
+
+        order = np.argsort(distances, kind="mergesort")
+        order += np.arange(n_samples)[:, None] * row_nnz[0]
+        order = order.ravel()
+        graph.data = graph.data[order]
+        graph.indices = graph.indices[order]
+
+    else:
+        for start, stop in zip(graph.indptr, graph.indptr[1:]):
+            order = np.argsort(graph.data[start:stop], kind="mergesort")
+            graph.data[start:stop] = graph.data[start:stop][order]
+            graph.indices[start:stop] = graph.indices[start:stop][order]
+
+    return graph
+
+
+def _kneighbors_from_graph(graph, n_neighbors, return_distance):
+    """Decompose a nearest neighbors sparse graph into distances and indices.
+
+    Parameters
+    ----------
+    graph : sparse matrix of shape (n_samples, n_samples)
+        Neighbors graph as given by `kneighbors_graph` or
+        `radius_neighbors_graph`. Matrix should be of CSR format.
+
+    n_neighbors : int
+        Number of neighbors required for each sample.
+
+    return_distance : bool
+        Whether or not to return the distances.
+
+    Returns
+    -------
+    neigh_dist : ndarray of shape (n_samples, n_neighbors)
+        Distances to nearest neighbors. Only present if `return_distance=True`.
+
+    neigh_ind : ndarray of shape (n_samples, n_neighbors)
+        Indices of nearest neighbors.
+    """
+    n_samples = graph.shape[0]
+    assert graph.format == "csr"
+
+    # number of neighbors by samples
+    row_nnz = np.diff(graph.indptr)
+    row_nnz_min = row_nnz.min()
+    if n_neighbors is not None and row_nnz_min < n_neighbors:
+        raise ValueError(
+            "%d neighbors per sample are required, but some samples have only"
+            " %d neighbors in precomputed graph matrix. Decrease number of "
+            "neighbors used or recompute the graph with more neighbors."
+            % (n_neighbors, row_nnz_min)
+        )
+
+    def extract(a):
+        # if each sample has the same number of provided neighbors
+        if row_nnz.max() == row_nnz_min:
+            return a.reshape(n_samples, -1)[:, :n_neighbors]
+        else:
+            idx = np.tile(np.arange(n_neighbors), (n_samples, 1))
+            idx += graph.indptr[:-1, None]
+            return a.take(idx, mode="clip").reshape(n_samples, n_neighbors)
+
+    if return_distance:
+        return extract(graph.data), extract(graph.indices)
+    else:
+        return extract(graph.indices)
+
+
+def _radius_neighbors_from_graph(graph, radius, return_distance):
+    """Decompose a nearest neighbors sparse graph into distances and indices.
+
+    Parameters
+    ----------
+    graph : sparse matrix of shape (n_samples, n_samples)
+        Neighbors graph as given by `kneighbors_graph` or
+        `radius_neighbors_graph`. Matrix should be of CSR format.
+
+    radius : float
+        Radius of neighborhoods which should be strictly positive.
+
+    return_distance : bool
+        Whether or not to return the distances.
+
+    Returns
+    -------
+    neigh_dist : ndarray of shape (n_samples,) of arrays
+        Distances to nearest neighbors. Only present if `return_distance=True`.
+
+    neigh_ind : ndarray of shape (n_samples,) of arrays
+        Indices of nearest neighbors.
+    """
+    assert graph.format == "csr"
+
+    no_filter_needed = bool(graph.data.max() <= radius)
+
+    if no_filter_needed:
+        data, indices, indptr = graph.data, graph.indices, graph.indptr
+    else:
+        mask = graph.data <= radius
+        if return_distance:
+            data = np.compress(mask, graph.data)
+        indices = np.compress(mask, graph.indices)
+        indptr = np.concatenate(([0], np.cumsum(mask)))[graph.indptr]
+
+    indices = indices.astype(np.intp, copy=no_filter_needed)
+
+    if return_distance:
+        neigh_dist = _to_object_array(np.split(data, indptr[1:-1]))
+    neigh_ind = _to_object_array(np.split(indices, indptr[1:-1]))
+
+    if return_distance:
+        return neigh_dist, neigh_ind
+    else:
+        return neigh_ind
+
+
+class NeighborsBase(MultiOutputMixin, BaseEstimator, metaclass=ABCMeta):
+    """Base class for nearest neighbors estimators."""
+
+    _parameter_constraints: dict = {
+        "n_neighbors": [Interval(Integral, 1, None, closed="left"), None],
+        "radius": [Interval(Real, 0, None, closed="both"), None],
+        "algorithm": [StrOptions({"auto", "ball_tree", "kd_tree", "brute"})],
+        "leaf_size": [Interval(Integral, 1, None, closed="left")],
+        "p": [Interval(Real, 0, None, closed="right"), None],
+        "metric": [StrOptions(set(itertools.chain(*VALID_METRICS.values()))), callable],
+        "metric_params": [dict, None],
+        "n_jobs": [Integral, None],
+    }
+
+    @abstractmethod
+    def __init__(
+        self,
+        n_neighbors=None,
+        radius=None,
+        algorithm="auto",
+        leaf_size=30,
+        metric="minkowski",
+        p=2,
+        metric_params=None,
+        n_jobs=None,
+    ):
+        self.n_neighbors = n_neighbors
+        self.radius = radius
+        self.algorithm = algorithm
+        self.leaf_size = leaf_size
+        self.metric = metric
+        self.metric_params = metric_params
+        self.p = p
+        self.n_jobs = n_jobs
+
+    def _check_algorithm_metric(self):
+        if self.algorithm == "auto":
+            if self.metric == "precomputed":
+                alg_check = "brute"
+            elif (
+                callable(self.metric)
+                or self.metric in VALID_METRICS["ball_tree"]
+                or isinstance(self.metric, DistanceMetric)
+            ):
+                alg_check = "ball_tree"
+            else:
+                alg_check = "brute"
+        else:
+            alg_check = self.algorithm
+
+        if callable(self.metric):
+            if self.algorithm == "kd_tree":
+                # callable metric is only valid for brute force and ball_tree
+                raise ValueError(
+                    "kd_tree does not support callable metric '%s'. "
+                    "Function call overhead will result "
+                    "in very poor performance."
+                    % self.metric
+                )
+        elif self.metric not in VALID_METRICS[alg_check] and not isinstance(
+            self.metric, DistanceMetric
+        ):
+            raise ValueError(
+                "Metric '%s' not valid. Use "
+                "sorted(sklearn.neighbors.VALID_METRICS['%s']) "
+                "to get valid options. "
+                "Metric can also be a callable function." % (self.metric, alg_check)
+            )
+
+        if self.metric_params is not None and "p" in self.metric_params:
+            if self.p is not None:
+                warnings.warn(
+                    (
+                        "Parameter p is found in metric_params. "
+                        "The corresponding parameter from __init__ "
+                        "is ignored."
+                    ),
+                    SyntaxWarning,
+                    stacklevel=3,
+                )
+
+    def _fit(self, X, y=None):
+        if self._get_tags()["requires_y"]:
+            if not isinstance(X, (KDTree, BallTree, NeighborsBase)):
+                X, y = self._validate_data(
+                    X, y, accept_sparse="csr", multi_output=True, order="C"
+                )
+
+            if is_classifier(self):
+                # Classification targets require a specific format
+                if y.ndim == 1 or y.ndim == 2 and y.shape[1] == 1:
+                    if y.ndim != 1:
+                        warnings.warn(
+                            (
+                                "A column-vector y was passed when a "
+                                "1d array was expected. Please change "
+                                "the shape of y to (n_samples,), for "
+                                "example using ravel()."
+                            ),
+                            DataConversionWarning,
+                            stacklevel=2,
+                        )
+
+                    self.outputs_2d_ = False
+                    y = y.reshape((-1, 1))
+                else:
+                    self.outputs_2d_ = True
+
+                check_classification_targets(y)
+                self.classes_ = []
+                # Using `dtype=np.intp` is necessary since `np.bincount`
+                # (called in _classification.py) fails when dealing
+                # with a float64 array on 32bit systems.
+                self._y = np.empty(y.shape, dtype=np.intp)
+                for k in range(self._y.shape[1]):
+                    classes, self._y[:, k] = np.unique(y[:, k], return_inverse=True)
+                    self.classes_.append(classes)
+
+                if not self.outputs_2d_:
+                    self.classes_ = self.classes_[0]
+                    self._y = self._y.ravel()
+            else:
+                self._y = y
+
+        else:
+            if not isinstance(X, (KDTree, BallTree, NeighborsBase)):
+                X = self._validate_data(X, accept_sparse="csr", order="C")
+
+        self._check_algorithm_metric()
+        if self.metric_params is None:
+            self.effective_metric_params_ = {}
+        else:
+            self.effective_metric_params_ = self.metric_params.copy()
+
+        effective_p = self.effective_metric_params_.get("p", self.p)
+        if self.metric == "minkowski":
+            self.effective_metric_params_["p"] = effective_p
+
+        self.effective_metric_ = self.metric
+        # For minkowski distance, use more efficient methods where available
+        if self.metric == "minkowski":
+            p = self.effective_metric_params_.pop("p", 2)
+            w = self.effective_metric_params_.pop("w", None)
+
+            if p == 1 and w is None:
+                self.effective_metric_ = "manhattan"
+            elif p == 2 and w is None:
+                self.effective_metric_ = "euclidean"
+            elif p == np.inf and w is None:
+                self.effective_metric_ = "chebyshev"
+            else:
+                # Use the generic minkowski metric, possibly weighted.
+                self.effective_metric_params_["p"] = p
+                self.effective_metric_params_["w"] = w
+
+        if isinstance(X, NeighborsBase):
+            self._fit_X = X._fit_X
+            self._tree = X._tree
+            self._fit_method = X._fit_method
+            self.n_samples_fit_ = X.n_samples_fit_
+            return self
+
+        elif isinstance(X, BallTree):
+            self._fit_X = X.data
+            self._tree = X
+            self._fit_method = "ball_tree"
+            self.n_samples_fit_ = X.data.shape[0]
+            return self
+
+        elif isinstance(X, KDTree):
+            self._fit_X = X.data
+            self._tree = X
+            self._fit_method = "kd_tree"
+            self.n_samples_fit_ = X.data.shape[0]
+            return self
+
+        if self.metric == "precomputed":
+            X = _check_precomputed(X)
+            # Precomputed matrix X must be square
+            if X.shape[0] != X.shape[1]:
+                raise ValueError(
+                    "Precomputed matrix must be square."
+                    " Input is a {}x{} matrix.".format(X.shape[0], X.shape[1])
+                )
+            self.n_features_in_ = X.shape[1]
+
+        n_samples = X.shape[0]
+        if n_samples == 0:
+            raise ValueError("n_samples must be greater than 0")
+
+        if issparse(X):
+            if self.algorithm not in ("auto", "brute"):
+                warnings.warn("cannot use tree with sparse input: using brute force")
+
+            if (
+                self.effective_metric_ not in VALID_METRICS_SPARSE["brute"]
+                and not callable(self.effective_metric_)
+                and not isinstance(self.effective_metric_, DistanceMetric)
+            ):
+                raise ValueError(
+                    "Metric '%s' not valid for sparse input. "
+                    "Use sorted(sklearn.neighbors."
+                    "VALID_METRICS_SPARSE['brute']) "
+                    "to get valid options. "
+                    "Metric can also be a callable function." % (self.effective_metric_)
+                )
+            self._fit_X = X.copy()
+            self._tree = None
+            self._fit_method = "brute"
+            self.n_samples_fit_ = X.shape[0]
+            return self
+
+        self._fit_method = self.algorithm
+        self._fit_X = X
+        self.n_samples_fit_ = X.shape[0]
+
+        if self._fit_method == "auto":
+            # A tree approach is better for small number of neighbors or small
+            # number of features, with KDTree generally faster when available
+            if (
+                self.metric == "precomputed"
+                or self._fit_X.shape[1] > 15
+                or (
+                    self.n_neighbors is not None
+                    and self.n_neighbors >= self._fit_X.shape[0] // 2
+                )
+            ):
+                self._fit_method = "brute"
+            else:
+                if (
+                    self.effective_metric_ == "minkowski"
+                    and self.effective_metric_params_["p"] < 1
+                ):
+                    self._fit_method = "brute"
+                elif (
+                    self.effective_metric_ == "minkowski"
+                    and self.effective_metric_params_.get("w") is not None
+                ):
+                    # 'minkowski' with weights is not supported by KDTree but is
+                    # supported by BallTree.
+                    self._fit_method = "ball_tree"
+                elif self.effective_metric_ in VALID_METRICS["kd_tree"]:
+                    self._fit_method = "kd_tree"
+                elif (
+                    callable(self.effective_metric_)
+                    or self.effective_metric_ in VALID_METRICS["ball_tree"]
+                ):
+                    self._fit_method = "ball_tree"
+                else:
+                    self._fit_method = "brute"
+
+        if (
+            self.effective_metric_ == "minkowski"
+            and self.effective_metric_params_["p"] < 1
+        ):
+            # For 0 < p < 1 Minkowski distances aren't valid distance
+            # metrics as they do not satisfy the triangle inequality:
+            # they are semi-metrics.
+            # algorithm="kd_tree" and algorithm="ball_tree" can't be used because
+            # KDTree and BallTree require a proper distance metric to work properly.
+            # However, the brute-force algorithm supports semi-metrics.
+            if self._fit_method == "brute":
+                warnings.warn(
+                    "Mind that for 0 < p < 1, Minkowski metrics are not distance"
+                    " metrics. Continuing the execution with `algorithm='brute'`."
+                )
+            else:  # self._fit_method in ("kd_tree", "ball_tree")
+                raise ValueError(
+                    f'algorithm="{self._fit_method}" does not support 0 < p < 1 for '
+                    "the Minkowski metric. To resolve this problem either "
+                    'set p >= 1 or algorithm="brute".'
+                )
+
+        if self._fit_method == "ball_tree":
+            self._tree = BallTree(
+                X,
+                self.leaf_size,
+                metric=self.effective_metric_,
+                **self.effective_metric_params_,
+            )
+        elif self._fit_method == "kd_tree":
+            if (
+                self.effective_metric_ == "minkowski"
+                and self.effective_metric_params_.get("w") is not None
+            ):
+                raise ValueError(
+                    "algorithm='kd_tree' is not valid for "
+                    "metric='minkowski' with a weight parameter 'w': "
+                    "try algorithm='ball_tree' "
+                    "or algorithm='brute' instead."
+                )
+            self._tree = KDTree(
+                X,
+                self.leaf_size,
+                metric=self.effective_metric_,
+                **self.effective_metric_params_,
+            )
+        elif self._fit_method == "brute":
+            self._tree = None
+
+        return self
+
+    def _more_tags(self):
+        # For cross-validation routines to split data correctly
+        return {"pairwise": self.metric == "precomputed"}
+
+
+def _tree_query_parallel_helper(tree, *args, **kwargs):
+    """Helper for the Parallel calls in KNeighborsMixin.kneighbors.
+
+    The Cython method tree.query is not directly picklable by cloudpickle
+    under PyPy.
+    """
+    return tree.query(*args, **kwargs)
+
+
+class KNeighborsMixin:
+    """Mixin for k-neighbors searches."""
+
+    def _kneighbors_reduce_func(self, dist, start, n_neighbors, return_distance):
+        """Reduce a chunk of distances to the nearest neighbors.
+
+        Callback to :func:`sklearn.metrics.pairwise.pairwise_distances_chunked`
+
+        Parameters
+        ----------
+        dist : ndarray of shape (n_samples_chunk, n_samples)
+            The distance matrix.
+
+        start : int
+            The index in X which the first row of dist corresponds to.
+
+        n_neighbors : int
+            Number of neighbors required for each sample.
+
+        return_distance : bool
+            Whether or not to return the distances.
+
+        Returns
+        -------
+        dist : array of shape (n_samples_chunk, n_neighbors)
+            Returned only if `return_distance=True`.
+
+        neigh : array of shape (n_samples_chunk, n_neighbors)
+            The neighbors indices.
+        """
+        sample_range = np.arange(dist.shape[0])[:, None]
+        neigh_ind = np.argpartition(dist, n_neighbors - 1, axis=1)
+        neigh_ind = neigh_ind[:, :n_neighbors]
+        # argpartition doesn't guarantee sorted order, so we sort again
+        neigh_ind = neigh_ind[sample_range, np.argsort(dist[sample_range, neigh_ind])]
+        if return_distance:
+            if self.effective_metric_ == "euclidean":
+                result = np.sqrt(dist[sample_range, neigh_ind]), neigh_ind
+            else:
+                result = dist[sample_range, neigh_ind], neigh_ind
+        else:
+            result = neigh_ind
+        return result
+
+    def kneighbors(self, X=None, n_neighbors=None, return_distance=True):
+        """Find the K-neighbors of a point.
+
+        Returns indices of and distances to the neighbors of each point.
+
+        Parameters
+        ----------
+        X : {array-like, sparse matrix}, shape (n_queries, n_features), \
+            or (n_queries, n_indexed) if metric == 'precomputed', default=None
+            The query point or points.
+            If not provided, neighbors of each indexed point are returned.
+            In this case, the query point is not considered its own neighbor.
+
+        n_neighbors : int, default=None
+            Number of neighbors required for each sample. The default is the
+            value passed to the constructor.
+
+        return_distance : bool, default=True
+            Whether or not to return the distances.
+
+        Returns
+        -------
+        neigh_dist : ndarray of shape (n_queries, n_neighbors)
+            Array representing the lengths to points, only present if
+            return_distance=True.
+
+        neigh_ind : ndarray of shape (n_queries, n_neighbors)
+            Indices of the nearest points in the population matrix.
+
+        Examples
+        --------
+        In the following example, we construct a NearestNeighbors
+        class from an array representing our data set and ask who's
+        the closest point to [1,1,1]
+
+        >>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
+        >>> from sklearn.neighbors import NearestNeighbors
+        >>> neigh = NearestNeighbors(n_neighbors=1)
+        >>> neigh.fit(samples)
+        NearestNeighbors(n_neighbors=1)
+        >>> print(neigh.kneighbors([[1., 1., 1.]]))
+        (array([[0.5]]), array([[2]]))
+
+        As you can see, it returns [[0.5]], and [[2]], which means that the
+        element is at distance 0.5 and is the third element of samples
+        (indexes start at 0). You can also query for multiple points:
+
+        >>> X = [[0., 1., 0.], [1., 0., 1.]]
+        >>> neigh.kneighbors(X, return_distance=False)
+        array([[1],
+               [2]]...)
+        """
+        check_is_fitted(self)
+
+        if n_neighbors is None:
+            n_neighbors = self.n_neighbors
+        elif n_neighbors <= 0:
+            raise ValueError("Expected n_neighbors > 0. Got %d" % n_neighbors)
+        elif not isinstance(n_neighbors, numbers.Integral):
+            raise TypeError(
+                "n_neighbors does not take %s value, enter integer value"
+                % type(n_neighbors)
+            )
+
+        query_is_train = X is None
+        if query_is_train:
+            X = self._fit_X
+            # Include an extra neighbor to account for the sample itself being
+            # returned, which is removed later
+            n_neighbors += 1
+        else:
+            if self.metric == "precomputed":
+                X = _check_precomputed(X)
+            else:
+                X = self._validate_data(X, accept_sparse="csr", reset=False, order="C")
+
+        n_samples_fit = self.n_samples_fit_
+        if n_neighbors > n_samples_fit:
+            if query_is_train:
+                n_neighbors -= 1  # ok to modify inplace because an error is raised
+                inequality_str = "n_neighbors < n_samples_fit"
+            else:
+                inequality_str = "n_neighbors <= n_samples_fit"
+            raise ValueError(
+                f"Expected {inequality_str}, but "
+                f"n_neighbors = {n_neighbors}, n_samples_fit = {n_samples_fit}, "
+                f"n_samples = {X.shape[0]}"  # include n_samples for common tests
+            )
+
+        n_jobs = effective_n_jobs(self.n_jobs)
+        chunked_results = None
+        use_pairwise_distances_reductions = (
+            self._fit_method == "brute"
+            and ArgKmin.is_usable_for(
+                X if X is not None else self._fit_X, self._fit_X, self.effective_metric_
+            )
+        )
+        if use_pairwise_distances_reductions:
+            results = ArgKmin.compute(
+                X=X,
+                Y=self._fit_X,
+                k=n_neighbors,
+                metric=self.effective_metric_,
+                metric_kwargs=self.effective_metric_params_,
+                strategy="auto",
+                return_distance=return_distance,
+            )
+
+        elif (
+            self._fit_method == "brute" and self.metric == "precomputed" and issparse(X)
+        ):
+            results = _kneighbors_from_graph(
+                X, n_neighbors=n_neighbors, return_distance=return_distance
+            )
+
+        elif self._fit_method == "brute":
+            # Joblib-based backend, which is used when user-defined callables
+            # are passed for metric.
+
+            # This won't be used in the future once PairwiseDistancesReductions
+            # support:
+            #   - DistanceMetrics which work on supposedly binary data
+            #   - CSR-dense and dense-CSR case if 'euclidean' in metric.
+            reduce_func = partial(
+                self._kneighbors_reduce_func,
+                n_neighbors=n_neighbors,
+                return_distance=return_distance,
+            )
+
+            # for efficiency, use squared euclidean distances
+            if self.effective_metric_ == "euclidean":
+                kwds = {"squared": True}
+            else:
+                kwds = self.effective_metric_params_
+
+            chunked_results = list(
+                pairwise_distances_chunked(
+                    X,
+                    self._fit_X,
+                    reduce_func=reduce_func,
+                    metric=self.effective_metric_,
+                    n_jobs=n_jobs,
+                    **kwds,
+                )
+            )
+
+        elif self._fit_method in ["ball_tree", "kd_tree"]:
+            if issparse(X):
+                raise ValueError(
+                    "%s does not work with sparse matrices. Densify the data, "
+                    "or set algorithm='brute'"
+                    % self._fit_method
+                )
+            chunked_results = Parallel(n_jobs, prefer="threads")(
+                delayed(_tree_query_parallel_helper)(
+                    self._tree, X[s], n_neighbors, return_distance
+                )
+                for s in gen_even_slices(X.shape[0], n_jobs)
+            )
+        else:
+            raise ValueError("internal: _fit_method not recognized")
+
+        if chunked_results is not None:
+            if return_distance:
+                neigh_dist, neigh_ind = zip(*chunked_results)
+                results = np.vstack(neigh_dist), np.vstack(neigh_ind)
+            else:
+                results = np.vstack(chunked_results)
+
+        if not query_is_train:
+            return results
+        else:
+            # If the query data is the same as the indexed data, we would like
+            # to ignore the first nearest neighbor of every sample, i.e.
+            # the sample itself.
+            if return_distance:
+                neigh_dist, neigh_ind = results
+            else:
+                neigh_ind = results
+
+            n_queries, _ = X.shape
+            sample_range = np.arange(n_queries)[:, None]
+            sample_mask = neigh_ind != sample_range
+
+            # Corner case: When the number of duplicates is more
+            # than the number of neighbors, the first NN will not
+            # be the sample, but a duplicate.
+            # In that case mask the first duplicate.
+            dup_gr_nbrs = np.all(sample_mask, axis=1)
+            sample_mask[:, 0][dup_gr_nbrs] = False
+            neigh_ind = np.reshape(neigh_ind[sample_mask], (n_queries, n_neighbors - 1))
+
+            if return_distance:
+                neigh_dist = np.reshape(
+                    neigh_dist[sample_mask], (n_queries, n_neighbors - 1)
+                )
+                return neigh_dist, neigh_ind
+            return neigh_ind
+
+    def kneighbors_graph(self, X=None, n_neighbors=None, mode="connectivity"):
+        """Compute the (weighted) graph of k-Neighbors for points in X.
+
+        Parameters
+        ----------
+        X : {array-like, sparse matrix} of shape (n_queries, n_features), \
+            or (n_queries, n_indexed) if metric == 'precomputed', default=None
+            The query point or points.
+            If not provided, neighbors of each indexed point are returned.
+            In this case, the query point is not considered its own neighbor.
+            For ``metric='precomputed'`` the shape should be
+            (n_queries, n_indexed). Otherwise the shape should be
+            (n_queries, n_features).
+
+        n_neighbors : int, default=None
+            Number of neighbors for each sample. The default is the value
+            passed to the constructor.
+
+        mode : {'connectivity', 'distance'}, default='connectivity'
+            Type of returned matrix: 'connectivity' will return the
+            connectivity matrix with ones and zeros, in 'distance' the
+            edges are distances between points, type of distance
+            depends on the selected metric parameter in
+            NearestNeighbors class.
+
+        Returns
+        -------
+        A : sparse-matrix of shape (n_queries, n_samples_fit)
+            `n_samples_fit` is the number of samples in the fitted data.
+            `A[i, j]` gives the weight of the edge connecting `i` to `j`.
+            The matrix is of CSR format.
+
+        See Also
+        --------
+        NearestNeighbors.radius_neighbors_graph : Compute the (weighted) graph
+            of Neighbors for points in X.
+
+        Examples
+        --------
+        >>> X = [[0], [3], [1]]
+        >>> from sklearn.neighbors import NearestNeighbors
+        >>> neigh = NearestNeighbors(n_neighbors=2)
+        >>> neigh.fit(X)
+        NearestNeighbors(n_neighbors=2)
+        >>> A = neigh.kneighbors_graph(X)
+        >>> A.toarray()
+        array([[1., 0., 1.],
+               [0., 1., 1.],
+               [1., 0., 1.]])
+        """
+        check_is_fitted(self)
+        if n_neighbors is None:
+            n_neighbors = self.n_neighbors
+
+        # check the input only in self.kneighbors
+
+        # construct CSR matrix representation of the k-NN graph
+        if mode == "connectivity":
+            A_ind = self.kneighbors(X, n_neighbors, return_distance=False)
+            n_queries = A_ind.shape[0]
+            A_data = np.ones(n_queries * n_neighbors)
+
+        elif mode == "distance":
+            A_data, A_ind = self.kneighbors(X, n_neighbors, return_distance=True)
+            A_data = np.ravel(A_data)
+
+        else:
+            raise ValueError(
+                'Unsupported mode, must be one of "connectivity", '
+                f'or "distance" but got "{mode}" instead'
+            )
+
+        n_queries = A_ind.shape[0]
+        n_samples_fit = self.n_samples_fit_
+        n_nonzero = n_queries * n_neighbors
+        A_indptr = np.arange(0, n_nonzero + 1, n_neighbors)
+
+        kneighbors_graph = csr_matrix(
+            (A_data, A_ind.ravel(), A_indptr), shape=(n_queries, n_samples_fit)
+        )
+
+        return kneighbors_graph
+
+
+def _tree_query_radius_parallel_helper(tree, *args, **kwargs):
+    """Helper for the Parallel calls in RadiusNeighborsMixin.radius_neighbors.
+
+    The Cython method tree.query_radius is not directly picklable by
+    cloudpickle under PyPy.
+    """
+    return tree.query_radius(*args, **kwargs)
+
+
+class RadiusNeighborsMixin:
+    """Mixin for radius-based neighbors searches."""
+
+    def _radius_neighbors_reduce_func(self, dist, start, radius, return_distance):
+        """Reduce a chunk of distances to the nearest neighbors.
+
+        Callback to :func:`sklearn.metrics.pairwise.pairwise_distances_chunked`
+
+        Parameters
+        ----------
+        dist : ndarray of shape (n_samples_chunk, n_samples)
+            The distance matrix.
+
+        start : int
+            The index in X which the first row of dist corresponds to.
+
+        radius : float
+            The radius considered when making the nearest neighbors search.
+
+        return_distance : bool
+            Whether or not to return the distances.
+
+        Returns
+        -------
+        dist : list of ndarray of shape (n_samples_chunk,)
+            Returned only if `return_distance=True`.
+
+        neigh : list of ndarray of shape (n_samples_chunk,)
+            The neighbors indices.
+        """
+        neigh_ind = [np.where(d <= radius)[0] for d in dist]
+
+        if return_distance:
+            if self.effective_metric_ == "euclidean":
+                dist = [np.sqrt(d[neigh_ind[i]]) for i, d in enumerate(dist)]
+            else:
+                dist = [d[neigh_ind[i]] for i, d in enumerate(dist)]
+            results = dist, neigh_ind
+        else:
+            results = neigh_ind
+        return results
+
+    def radius_neighbors(
+        self, X=None, radius=None, return_distance=True, sort_results=False
+    ):
+        """Find the neighbors within a given radius of a point or points.
+
+        Return the indices and distances of each point from the dataset
+        lying in a ball with size ``radius`` around the points of the query
+        array. Points lying on the boundary are included in the results.
+
+        The result points are *not* necessarily sorted by distance to their
+        query point.
+
+        Parameters
+        ----------
+        X : {array-like, sparse matrix} of (n_samples, n_features), default=None
+            The query point or points.
+            If not provided, neighbors of each indexed point are returned.
+            In this case, the query point is not considered its own neighbor.
+
+        radius : float, default=None
+            Limiting distance of neighbors to return. The default is the value
+            passed to the constructor.
+
+        return_distance : bool, default=True
+            Whether or not to return the distances.
+
+        sort_results : bool, default=False
+            If True, the distances and indices will be sorted by increasing
+            distances before being returned. If False, the results may not
+            be sorted. If `return_distance=False`, setting `sort_results=True`
+            will result in an error.
+
+            .. versionadded:: 0.22
+
+        Returns
+        -------
+        neigh_dist : ndarray of shape (n_samples,) of arrays
+            Array representing the distances to each point, only present if
+            `return_distance=True`. The distance values are computed according
+            to the ``metric`` constructor parameter.
+
+        neigh_ind : ndarray of shape (n_samples,) of arrays
+            An array of arrays of indices of the approximate nearest points
+            from the population matrix that lie within a ball of size
+            ``radius`` around the query points.
+
+        Notes
+        -----
+        Because the number of neighbors of each point is not necessarily
+        equal, the results for multiple query points cannot be fit in a
+        standard data array.
+        For efficiency, `radius_neighbors` returns arrays of objects, where
+        each object is a 1D array of indices or distances.
+
+        Examples
+        --------
+        In the following example, we construct a NeighborsClassifier
+        class from an array representing our data set and ask who's
+        the closest point to [1, 1, 1]:
+
+        >>> import numpy as np
+        >>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
+        >>> from sklearn.neighbors import NearestNeighbors
+        >>> neigh = NearestNeighbors(radius=1.6)
+        >>> neigh.fit(samples)
+        NearestNeighbors(radius=1.6)
+        >>> rng = neigh.radius_neighbors([[1., 1., 1.]])
+        >>> print(np.asarray(rng[0][0]))
+        [1.5 0.5]
+        >>> print(np.asarray(rng[1][0]))
+        [1 2]
+
+        The first array returned contains the distances to all points which
+        are closer than 1.6, while the second array returned contains their
+        indices. In general, multiple points can be queried at the same time.
+        """
+        check_is_fitted(self)
+
+        if sort_results and not return_distance:
+            raise ValueError("return_distance must be True if sort_results is True.")
+
+        query_is_train = X is None
+        if query_is_train:
+            X = self._fit_X
+        else:
+            if self.metric == "precomputed":
+                X = _check_precomputed(X)
+            else:
+                X = self._validate_data(X, accept_sparse="csr", reset=False, order="C")
+
+        if radius is None:
+            radius = self.radius
+
+        use_pairwise_distances_reductions = (
+            self._fit_method == "brute"
+            and RadiusNeighbors.is_usable_for(
+                X if X is not None else self._fit_X, self._fit_X, self.effective_metric_
+            )
+        )
+
+        if use_pairwise_distances_reductions:
+            results = RadiusNeighbors.compute(
+                X=X,
+                Y=self._fit_X,
+                radius=radius,
+                metric=self.effective_metric_,
+                metric_kwargs=self.effective_metric_params_,
+                strategy="auto",
+                return_distance=return_distance,
+                sort_results=sort_results,
+            )
+
+        elif (
+            self._fit_method == "brute" and self.metric == "precomputed" and issparse(X)
+        ):
+            results = _radius_neighbors_from_graph(
+                X, radius=radius, return_distance=return_distance
+            )
+
+        elif self._fit_method == "brute":
+            # Joblib-based backend, which is used when user-defined callables
+            # are passed for metric.
+
+            # This won't be used in the future once PairwiseDistancesReductions
+            # support:
+            #   - DistanceMetrics which work on supposedly binary data
+            #   - CSR-dense and dense-CSR case if 'euclidean' in metric.
+
+            # for efficiency, use squared euclidean distances
+            if self.effective_metric_ == "euclidean":
+                radius *= radius
+                kwds = {"squared": True}
+            else:
+                kwds = self.effective_metric_params_
+
+            reduce_func = partial(
+                self._radius_neighbors_reduce_func,
+                radius=radius,
+                return_distance=return_distance,
+            )
+
+            chunked_results = pairwise_distances_chunked(
+                X,
+                self._fit_X,
+                reduce_func=reduce_func,
+                metric=self.effective_metric_,
+                n_jobs=self.n_jobs,
+                **kwds,
+            )
+            if return_distance:
+                neigh_dist_chunks, neigh_ind_chunks = zip(*chunked_results)
+                neigh_dist_list = sum(neigh_dist_chunks, [])
neigh_dist_list = sum(neigh_dist_chunks, [])
|
1238 |
+
neigh_ind_list = sum(neigh_ind_chunks, [])
|
1239 |
+
neigh_dist = _to_object_array(neigh_dist_list)
|
1240 |
+
neigh_ind = _to_object_array(neigh_ind_list)
|
1241 |
+
results = neigh_dist, neigh_ind
|
1242 |
+
else:
|
1243 |
+
neigh_ind_list = sum(chunked_results, [])
|
1244 |
+
results = _to_object_array(neigh_ind_list)
|
1245 |
+
|
1246 |
+
if sort_results:
|
1247 |
+
for ii in range(len(neigh_dist)):
|
1248 |
+
order = np.argsort(neigh_dist[ii], kind="mergesort")
|
1249 |
+
neigh_ind[ii] = neigh_ind[ii][order]
|
1250 |
+
neigh_dist[ii] = neigh_dist[ii][order]
|
1251 |
+
results = neigh_dist, neigh_ind
|
1252 |
+
|
1253 |
+
elif self._fit_method in ["ball_tree", "kd_tree"]:
|
1254 |
+
if issparse(X):
|
1255 |
+
raise ValueError(
|
1256 |
+
"%s does not work with sparse matrices. Densify the data, "
|
1257 |
+
"or set algorithm='brute'"
|
1258 |
+
% self._fit_method
|
1259 |
+
)
|
1260 |
+
|
1261 |
+
n_jobs = effective_n_jobs(self.n_jobs)
|
1262 |
+
delayed_query = delayed(_tree_query_radius_parallel_helper)
|
1263 |
+
chunked_results = Parallel(n_jobs, prefer="threads")(
|
1264 |
+
delayed_query(
|
1265 |
+
self._tree, X[s], radius, return_distance, sort_results=sort_results
|
1266 |
+
)
|
1267 |
+
for s in gen_even_slices(X.shape[0], n_jobs)
|
1268 |
+
)
|
1269 |
+
if return_distance:
|
1270 |
+
neigh_ind, neigh_dist = tuple(zip(*chunked_results))
|
1271 |
+
results = np.hstack(neigh_dist), np.hstack(neigh_ind)
|
1272 |
+
else:
|
1273 |
+
results = np.hstack(chunked_results)
|
1274 |
+
else:
|
1275 |
+
raise ValueError("internal: _fit_method not recognized")
|
1276 |
+
|
1277 |
+
if not query_is_train:
|
1278 |
+
return results
|
1279 |
+
else:
|
1280 |
+
# If the query data is the same as the indexed data, we would like
|
1281 |
+
# to ignore the first nearest neighbor of every sample, i.e
|
1282 |
+
# the sample itself.
|
1283 |
+
if return_distance:
|
1284 |
+
neigh_dist, neigh_ind = results
|
1285 |
+
else:
|
1286 |
+
neigh_ind = results
|
1287 |
+
|
1288 |
+
for ind, ind_neighbor in enumerate(neigh_ind):
|
1289 |
+
mask = ind_neighbor != ind
|
1290 |
+
|
1291 |
+
neigh_ind[ind] = ind_neighbor[mask]
|
1292 |
+
if return_distance:
|
1293 |
+
neigh_dist[ind] = neigh_dist[ind][mask]
|
1294 |
+
|
1295 |
+
if return_distance:
|
1296 |
+
return neigh_dist, neigh_ind
|
1297 |
+
return neigh_ind
|
1298 |
+
|
1299 |
+
def radius_neighbors_graph(
|
1300 |
+
self, X=None, radius=None, mode="connectivity", sort_results=False
|
1301 |
+
):
|
1302 |
+
"""Compute the (weighted) graph of Neighbors for points in X.
|
1303 |
+
|
1304 |
+
Neighborhoods are restricted the points at a distance lower than
|
1305 |
+
radius.
|
1306 |
+
|
1307 |
+
Parameters
|
1308 |
+
----------
|
1309 |
+
X : {array-like, sparse matrix} of shape (n_samples, n_features), default=None
|
1310 |
+
The query point or points.
|
1311 |
+
If not provided, neighbors of each indexed point are returned.
|
1312 |
+
In this case, the query point is not considered its own neighbor.
|
1313 |
+
|
1314 |
+
radius : float, default=None
|
1315 |
+
Radius of neighborhoods. The default is the value passed to the
|
1316 |
+
constructor.
|
1317 |
+
|
1318 |
+
mode : {'connectivity', 'distance'}, default='connectivity'
|
1319 |
+
Type of returned matrix: 'connectivity' will return the
|
1320 |
+
connectivity matrix with ones and zeros, in 'distance' the
|
1321 |
+
edges are distances between points, type of distance
|
1322 |
+
depends on the selected metric parameter in
|
1323 |
+
NearestNeighbors class.
|
1324 |
+
|
1325 |
+
sort_results : bool, default=False
|
1326 |
+
If True, in each row of the result, the non-zero entries will be
|
1327 |
+
sorted by increasing distances. If False, the non-zero entries may
|
1328 |
+
not be sorted. Only used with mode='distance'.
|
1329 |
+
|
1330 |
+
.. versionadded:: 0.22
|
1331 |
+
|
1332 |
+
Returns
|
1333 |
+
-------
|
1334 |
+
A : sparse-matrix of shape (n_queries, n_samples_fit)
|
1335 |
+
`n_samples_fit` is the number of samples in the fitted data.
|
1336 |
+
`A[i, j]` gives the weight of the edge connecting `i` to `j`.
|
1337 |
+
The matrix is of CSR format.
|
1338 |
+
|
1339 |
+
See Also
|
1340 |
+
--------
|
1341 |
+
kneighbors_graph : Compute the (weighted) graph of k-Neighbors for
|
1342 |
+
points in X.
|
1343 |
+
|
1344 |
+
Examples
|
1345 |
+
--------
|
1346 |
+
>>> X = [[0], [3], [1]]
|
1347 |
+
>>> from sklearn.neighbors import NearestNeighbors
|
1348 |
+
>>> neigh = NearestNeighbors(radius=1.5)
|
1349 |
+
>>> neigh.fit(X)
|
1350 |
+
NearestNeighbors(radius=1.5)
|
1351 |
+
>>> A = neigh.radius_neighbors_graph(X)
|
1352 |
+
>>> A.toarray()
|
1353 |
+
array([[1., 0., 1.],
|
1354 |
+
[0., 1., 0.],
|
1355 |
+
[1., 0., 1.]])
|
1356 |
+
"""
|
1357 |
+
check_is_fitted(self)
|
1358 |
+
|
1359 |
+
# check the input only in self.radius_neighbors
|
1360 |
+
|
1361 |
+
if radius is None:
|
1362 |
+
radius = self.radius
|
1363 |
+
|
1364 |
+
# construct CSR matrix representation of the NN graph
|
1365 |
+
if mode == "connectivity":
|
1366 |
+
A_ind = self.radius_neighbors(X, radius, return_distance=False)
|
1367 |
+
A_data = None
|
1368 |
+
elif mode == "distance":
|
1369 |
+
dist, A_ind = self.radius_neighbors(
|
1370 |
+
X, radius, return_distance=True, sort_results=sort_results
|
1371 |
+
)
|
1372 |
+
A_data = np.concatenate(list(dist))
|
1373 |
+
else:
|
1374 |
+
raise ValueError(
|
1375 |
+
'Unsupported mode, must be one of "connectivity", '
|
1376 |
+
f'or "distance" but got "{mode}" instead'
|
1377 |
+
)
|
1378 |
+
|
1379 |
+
n_queries = A_ind.shape[0]
|
1380 |
+
n_samples_fit = self.n_samples_fit_
|
1381 |
+
n_neighbors = np.array([len(a) for a in A_ind])
|
1382 |
+
A_ind = np.concatenate(list(A_ind))
|
1383 |
+
if A_data is None:
|
1384 |
+
A_data = np.ones(len(A_ind))
|
1385 |
+
A_indptr = np.concatenate((np.zeros(1, dtype=int), np.cumsum(n_neighbors)))
|
1386 |
+
|
1387 |
+
return csr_matrix((A_data, A_ind, A_indptr), shape=(n_queries, n_samples_fit))
|
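Taken together, `radius_neighbors` returns ragged object arrays (one 1D array per query point) while `radius_neighbors_graph` packs the same neighborhoods into a CSR matrix. A minimal usage sketch follows; it is an editorial illustration, not part of the vendored file, and assumes only the public `sklearn.neighbors` API shown above:

import numpy as np
from sklearn.neighbors import NearestNeighbors

X = np.array([[0.0], [1.0], [2.0], [5.0]])
nn = NearestNeighbors(radius=1.5).fit(X)

# Ragged result: one 1D array of indices and one of distances per query.
dist, ind = nn.radius_neighbors([[1.0]], sort_results=True)
print(ind[0], dist[0])  # points 1, 0, 2 lie within radius 1.5 of x=1.0

# Same neighborhoods as a CSR graph; the data/indices/indptr triple mirrors
# the construction at the end of radius_neighbors_graph above.
A = nn.radius_neighbors_graph(X, mode="connectivity")
print(A.toarray())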
venv/lib/python3.10/site-packages/sklearn/neighbors/_classification.py
ADDED
@@ -0,0 +1,839 @@
"""Nearest Neighbor Classification"""

# Authors: Jake Vanderplas <[email protected]>
#          Fabian Pedregosa <[email protected]>
#          Alexandre Gramfort <[email protected]>
#          Sparseness support by Lars Buitinck
#          Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from numbers import Integral

import numpy as np

from sklearn.neighbors._base import _check_precomputed

from ..base import ClassifierMixin, _fit_context
from ..metrics._pairwise_distances_reduction import (
    ArgKminClassMode,
    RadiusNeighborsClassMode,
)
from ..utils._param_validation import StrOptions
from ..utils.arrayfuncs import _all_with_any_reduction_axis_1
from ..utils.extmath import weighted_mode
from ..utils.fixes import _mode
from ..utils.validation import _is_arraylike, _num_samples, check_is_fitted
from ._base import KNeighborsMixin, NeighborsBase, RadiusNeighborsMixin, _get_weights


def _adjusted_metric(metric, metric_kwargs, p=None):
    metric_kwargs = metric_kwargs or {}
    if metric == "minkowski":
        metric_kwargs["p"] = p
        if p == 2:
            metric = "euclidean"
    return metric, metric_kwargs


class KNeighborsClassifier(KNeighborsMixin, ClassifierMixin, NeighborsBase):
    """Classifier implementing the k-nearest neighbors vote.

    Read more in the :ref:`User Guide <classification>`.

    Parameters
    ----------
    n_neighbors : int, default=5
        Number of neighbors to use by default for :meth:`kneighbors` queries.

    weights : {'uniform', 'distance'}, callable or None, default='uniform'
        Weight function used in prediction. Possible values:

        - 'uniform' : uniform weights. All points in each neighborhood
          are weighted equally.
        - 'distance' : weight points by the inverse of their distance.
          In this case, closer neighbors of a query point will have a
          greater influence than neighbors which are further away.
        - [callable] : a user-defined function which accepts an
          array of distances, and returns an array of the same shape
          containing the weights.

        Refer to the example entitled
        :ref:`sphx_glr_auto_examples_neighbors_plot_classification.py`
        showing the impact of the `weights` parameter on the decision
        boundary.

    algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
        Algorithm used to compute the nearest neighbors:

        - 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
        - 'brute' will use a brute-force search.
        - 'auto' will attempt to decide the most appropriate algorithm
          based on the values passed to :meth:`fit` method.

        Note: fitting on sparse input will override the setting of
        this parameter, using brute force.

    leaf_size : int, default=30
        Leaf size passed to BallTree or KDTree. This can affect the
        speed of the construction and query, as well as the memory
        required to store the tree. The optimal value depends on the
        nature of the problem.

    p : float, default=2
        Power parameter for the Minkowski metric. When p = 1, this is equivalent
        to using manhattan_distance (l1), and euclidean_distance (l2) for p = 2.
        For arbitrary p, minkowski_distance (l_p) is used. This parameter is expected
        to be positive.

    metric : str or callable, default='minkowski'
        Metric to use for distance computation. Default is "minkowski", which
        results in the standard Euclidean distance when p = 2. See the
        documentation of `scipy.spatial.distance
        <https://docs.scipy.org/doc/scipy/reference/spatial.distance.html>`_ and
        the metrics listed in
        :class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric
        values.

        If metric is "precomputed", X is assumed to be a distance matrix and
        must be square during fit. X may be a :term:`sparse graph`, in which
        case only "nonzero" elements may be considered neighbors.

        If metric is a callable function, it takes two arrays representing 1D
        vectors as inputs and must return one value indicating the distance
        between those vectors. This works for Scipy's metrics, but is less
        efficient than passing the metric name as a string.

    metric_params : dict, default=None
        Additional keyword arguments for the metric function.

    n_jobs : int, default=None
        The number of parallel jobs to run for neighbors search.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.
        Doesn't affect :meth:`fit` method.

    Attributes
    ----------
    classes_ : array of shape (n_classes,)
        Class labels known to the classifier.

    effective_metric_ : str or callable
        The distance metric used. It will be the same as the `metric` parameter
        or a synonym of it, e.g. 'euclidean' if the `metric` parameter is set to
        'minkowski' and the `p` parameter is set to 2.

    effective_metric_params_ : dict
        Additional keyword arguments for the metric function. For most metrics
        it will be the same as the `metric_params` parameter, but may also contain
        the `p` parameter value if the `effective_metric_` attribute is set to
        'minkowski'.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    n_samples_fit_ : int
        Number of samples in the fitted data.

    outputs_2d_ : bool
        False when `y`'s shape is (n_samples, ) or (n_samples, 1) during fit
        otherwise True.

    See Also
    --------
    RadiusNeighborsClassifier: Classifier based on neighbors within a fixed radius.
    KNeighborsRegressor: Regression based on k-nearest neighbors.
    RadiusNeighborsRegressor: Regression based on neighbors within a fixed radius.
    NearestNeighbors: Unsupervised learner for implementing neighbor searches.

    Notes
    -----
    See :ref:`Nearest Neighbors <neighbors>` in the online documentation
    for a discussion of the choice of ``algorithm`` and ``leaf_size``.

    .. warning::

       Regarding the Nearest Neighbors algorithms, if it is found that two
       neighbors, neighbor `k+1` and `k`, have identical distances
       but different labels, the results will depend on the ordering of the
       training data.

    https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm

    Examples
    --------
    >>> X = [[0], [1], [2], [3]]
    >>> y = [0, 0, 1, 1]
    >>> from sklearn.neighbors import KNeighborsClassifier
    >>> neigh = KNeighborsClassifier(n_neighbors=3)
    >>> neigh.fit(X, y)
    KNeighborsClassifier(...)
    >>> print(neigh.predict([[1.1]]))
    [0]
    >>> print(neigh.predict_proba([[0.9]]))
    [[0.666... 0.333...]]
    """

    _parameter_constraints: dict = {**NeighborsBase._parameter_constraints}
    _parameter_constraints.pop("radius")
    _parameter_constraints.update(
        {"weights": [StrOptions({"uniform", "distance"}), callable, None]}
    )

    def __init__(
        self,
        n_neighbors=5,
        *,
        weights="uniform",
        algorithm="auto",
        leaf_size=30,
        p=2,
        metric="minkowski",
        metric_params=None,
        n_jobs=None,
    ):
        super().__init__(
            n_neighbors=n_neighbors,
            algorithm=algorithm,
            leaf_size=leaf_size,
            metric=metric,
            p=p,
            metric_params=metric_params,
            n_jobs=n_jobs,
        )
        self.weights = weights

    @_fit_context(
        # KNeighborsClassifier.metric is not validated yet
        prefer_skip_nested_validation=False
    )
    def fit(self, X, y):
        """Fit the k-nearest neighbors classifier from the training dataset.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
                (n_samples, n_samples) if metric='precomputed'
            Training data.

        y : {array-like, sparse matrix} of shape (n_samples,) or \
                (n_samples, n_outputs)
            Target values.

        Returns
        -------
        self : KNeighborsClassifier
            The fitted k-nearest neighbors classifier.
        """
        return self._fit(X, y)

    def predict(self, X):
        """Predict the class labels for the provided data.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_queries, n_features), \
                or (n_queries, n_indexed) if metric == 'precomputed'
            Test samples.

        Returns
        -------
        y : ndarray of shape (n_queries,) or (n_queries, n_outputs)
            Class labels for each data sample.
        """
        check_is_fitted(self, "_fit_method")
        if self.weights == "uniform":
            if self._fit_method == "brute" and ArgKminClassMode.is_usable_for(
                X, self._fit_X, self.metric
            ):
                probabilities = self.predict_proba(X)
                if self.outputs_2d_:
                    return np.stack(
                        [
                            self.classes_[idx][np.argmax(probas, axis=1)]
                            for idx, probas in enumerate(probabilities)
                        ],
                        axis=1,
                    )
                return self.classes_[np.argmax(probabilities, axis=1)]
            # In this case, we do not need the distances to perform
            # the weighting, so we do not compute them.
            neigh_ind = self.kneighbors(X, return_distance=False)
            neigh_dist = None
        else:
            neigh_dist, neigh_ind = self.kneighbors(X)

        classes_ = self.classes_
        _y = self._y
        if not self.outputs_2d_:
            _y = self._y.reshape((-1, 1))
            classes_ = [self.classes_]

        n_outputs = len(classes_)
        n_queries = _num_samples(X)
        weights = _get_weights(neigh_dist, self.weights)
        if weights is not None and _all_with_any_reduction_axis_1(weights, value=0):
            raise ValueError(
                "All neighbors of some sample is getting zero weights. "
                "Please modify 'weights' to avoid this case if you are "
                "using a user-defined function."
            )

        y_pred = np.empty((n_queries, n_outputs), dtype=classes_[0].dtype)
        for k, classes_k in enumerate(classes_):
            if weights is None:
                mode, _ = _mode(_y[neigh_ind, k], axis=1)
            else:
                mode, _ = weighted_mode(_y[neigh_ind, k], weights, axis=1)

            mode = np.asarray(mode.ravel(), dtype=np.intp)
            y_pred[:, k] = classes_k.take(mode)

        if not self.outputs_2d_:
            y_pred = y_pred.ravel()

        return y_pred

    def predict_proba(self, X):
        """Return probability estimates for the test data X.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_queries, n_features), \
                or (n_queries, n_indexed) if metric == 'precomputed'
            Test samples.

        Returns
        -------
        p : ndarray of shape (n_queries, n_classes), or a list of n_outputs \
                of such arrays if n_outputs > 1.
            The class probabilities of the input samples. Classes are ordered
            by lexicographic order.
        """
        check_is_fitted(self, "_fit_method")
        if self.weights == "uniform":
            # TODO: systematize this mapping of metric for
            # PairwiseDistancesReductions.
            metric, metric_kwargs = _adjusted_metric(
                metric=self.metric, metric_kwargs=self.metric_params, p=self.p
            )
            if (
                self._fit_method == "brute"
                and ArgKminClassMode.is_usable_for(X, self._fit_X, metric)
                # TODO: Implement efficient multi-output solution
                and not self.outputs_2d_
            ):
                if self.metric == "precomputed":
                    X = _check_precomputed(X)
                else:
                    X = self._validate_data(
                        X, accept_sparse="csr", reset=False, order="C"
                    )

                probabilities = ArgKminClassMode.compute(
                    X,
                    self._fit_X,
                    k=self.n_neighbors,
                    weights=self.weights,
                    Y_labels=self._y,
                    unique_Y_labels=self.classes_,
                    metric=metric,
                    metric_kwargs=metric_kwargs,
                    # `strategy="parallel_on_X"` has in practice been shown
                    # to be more efficient than `strategy="parallel_on_Y"`
                    # on many combinations of datasets.
                    # Hence, we choose to enforce it here.
                    # For more information, see:
                    # https://github.com/scikit-learn/scikit-learn/pull/24076#issuecomment-1445258342 # noqa
                    # TODO: adapt the heuristic for `strategy="auto"` for
                    # `ArgKminClassMode` and use `strategy="auto"`.
                    strategy="parallel_on_X",
                )
                return probabilities

            # In this case, we do not need the distances to perform
            # the weighting, so we do not compute them.
            neigh_ind = self.kneighbors(X, return_distance=False)
            neigh_dist = None
        else:
            neigh_dist, neigh_ind = self.kneighbors(X)

        classes_ = self.classes_
        _y = self._y
        if not self.outputs_2d_:
            _y = self._y.reshape((-1, 1))
            classes_ = [self.classes_]

        n_queries = _num_samples(X)

        weights = _get_weights(neigh_dist, self.weights)
        if weights is None:
            weights = np.ones_like(neigh_ind)
        elif _all_with_any_reduction_axis_1(weights, value=0):
            raise ValueError(
                "All neighbors of some sample is getting zero weights. "
                "Please modify 'weights' to avoid this case if you are "
                "using a user-defined function."
            )

        all_rows = np.arange(n_queries)
        probabilities = []
        for k, classes_k in enumerate(classes_):
            pred_labels = _y[:, k][neigh_ind]
            proba_k = np.zeros((n_queries, classes_k.size))

            # a simple ':' index doesn't work right
            for i, idx in enumerate(pred_labels.T):  # loop is O(n_neighbors)
                proba_k[all_rows, idx] += weights[:, i]

            # normalize 'votes' into real [0,1] probabilities
            normalizer = proba_k.sum(axis=1)[:, np.newaxis]
            proba_k /= normalizer

            probabilities.append(proba_k)

        if not self.outputs_2d_:
            probabilities = probabilities[0]

        return probabilities

    def _more_tags(self):
        return {"multilabel": True}


class RadiusNeighborsClassifier(RadiusNeighborsMixin, ClassifierMixin, NeighborsBase):
    """Classifier implementing a vote among neighbors within a given radius.

    Read more in the :ref:`User Guide <classification>`.

    Parameters
    ----------
    radius : float, default=1.0
        Range of parameter space to use by default for :meth:`radius_neighbors`
        queries.

    weights : {'uniform', 'distance'}, callable or None, default='uniform'
        Weight function used in prediction. Possible values:

        - 'uniform' : uniform weights. All points in each neighborhood
          are weighted equally.
        - 'distance' : weight points by the inverse of their distance.
          In this case, closer neighbors of a query point will have a
          greater influence than neighbors which are further away.
        - [callable] : a user-defined function which accepts an
          array of distances, and returns an array of the same shape
          containing the weights.

        Uniform weights are used by default.

    algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
        Algorithm used to compute the nearest neighbors:

        - 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
        - 'brute' will use a brute-force search.
        - 'auto' will attempt to decide the most appropriate algorithm
          based on the values passed to :meth:`fit` method.

        Note: fitting on sparse input will override the setting of
        this parameter, using brute force.

    leaf_size : int, default=30
        Leaf size passed to BallTree or KDTree. This can affect the
        speed of the construction and query, as well as the memory
        required to store the tree. The optimal value depends on the
        nature of the problem.

    p : float, default=2
        Power parameter for the Minkowski metric. When p = 1, this is
        equivalent to using manhattan_distance (l1), and euclidean_distance
        (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
        This parameter is expected to be positive.

    metric : str or callable, default='minkowski'
        Metric to use for distance computation. Default is "minkowski", which
        results in the standard Euclidean distance when p = 2. See the
        documentation of `scipy.spatial.distance
        <https://docs.scipy.org/doc/scipy/reference/spatial.distance.html>`_ and
        the metrics listed in
        :class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric
        values.

        If metric is "precomputed", X is assumed to be a distance matrix and
        must be square during fit. X may be a :term:`sparse graph`, in which
        case only "nonzero" elements may be considered neighbors.

        If metric is a callable function, it takes two arrays representing 1D
        vectors as inputs and must return one value indicating the distance
        between those vectors. This works for Scipy's metrics, but is less
        efficient than passing the metric name as a string.

    outlier_label : {manual label, 'most_frequent'}, default=None
        Label for outlier samples (samples with no neighbors in given radius).

        - manual label: str or int label (should be the same type as y)
          or list of manual labels if multi-output is used.
        - 'most_frequent' : assign the most frequent label of y to outliers.
        - None : when any outlier is detected, ValueError will be raised.

        The outlier label should be selected from among the unique 'Y' labels.
        If it is specified with a different value, a warning will be raised and
        all class probabilities of outliers will be assigned to be 0.

    metric_params : dict, default=None
        Additional keyword arguments for the metric function.

    n_jobs : int, default=None
        The number of parallel jobs to run for neighbors search.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.

    Attributes
    ----------
    classes_ : ndarray of shape (n_classes,)
        Class labels known to the classifier.

    effective_metric_ : str or callable
        The distance metric used. It will be the same as the `metric` parameter
        or a synonym of it, e.g. 'euclidean' if the `metric` parameter is set to
        'minkowski' and the `p` parameter is set to 2.

    effective_metric_params_ : dict
        Additional keyword arguments for the metric function. For most metrics
        it will be the same as the `metric_params` parameter, but may also contain
        the `p` parameter value if the `effective_metric_` attribute is set to
        'minkowski'.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    n_samples_fit_ : int
        Number of samples in the fitted data.

    outlier_label_ : int or array-like of shape (n_class,)
        Label which is given for outlier samples (samples with no neighbors
        within the given radius).

    outputs_2d_ : bool
        False when `y`'s shape is (n_samples, ) or (n_samples, 1) during fit
        otherwise True.

    See Also
    --------
    KNeighborsClassifier : Classifier implementing the k-nearest neighbors
        vote.
    RadiusNeighborsRegressor : Regression based on neighbors within a
        fixed radius.
    KNeighborsRegressor : Regression based on k-nearest neighbors.
    NearestNeighbors : Unsupervised learner for implementing neighbor
        searches.

    Notes
    -----
    See :ref:`Nearest Neighbors <neighbors>` in the online documentation
    for a discussion of the choice of ``algorithm`` and ``leaf_size``.

    https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm

    Examples
    --------
    >>> X = [[0], [1], [2], [3]]
    >>> y = [0, 0, 1, 1]
    >>> from sklearn.neighbors import RadiusNeighborsClassifier
    >>> neigh = RadiusNeighborsClassifier(radius=1.0)
    >>> neigh.fit(X, y)
    RadiusNeighborsClassifier(...)
    >>> print(neigh.predict([[1.5]]))
    [0]
    >>> print(neigh.predict_proba([[1.0]]))
    [[0.66666667 0.33333333]]
    """

    _parameter_constraints: dict = {
        **NeighborsBase._parameter_constraints,
        "weights": [StrOptions({"uniform", "distance"}), callable, None],
        "outlier_label": [Integral, str, "array-like", None],
    }
    _parameter_constraints.pop("n_neighbors")

    def __init__(
        self,
        radius=1.0,
        *,
        weights="uniform",
        algorithm="auto",
        leaf_size=30,
        p=2,
        metric="minkowski",
        outlier_label=None,
        metric_params=None,
        n_jobs=None,
    ):
        super().__init__(
            radius=radius,
            algorithm=algorithm,
            leaf_size=leaf_size,
            metric=metric,
            p=p,
            metric_params=metric_params,
            n_jobs=n_jobs,
        )
        self.weights = weights
        self.outlier_label = outlier_label

    @_fit_context(
        # RadiusNeighborsClassifier.metric is not validated yet
        prefer_skip_nested_validation=False
    )
    def fit(self, X, y):
        """Fit the radius neighbors classifier from the training dataset.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
                (n_samples, n_samples) if metric='precomputed'
            Training data.

        y : {array-like, sparse matrix} of shape (n_samples,) or \
                (n_samples, n_outputs)
            Target values.

        Returns
        -------
        self : RadiusNeighborsClassifier
            The fitted radius neighbors classifier.
        """
        self._fit(X, y)

        classes_ = self.classes_
        _y = self._y
        if not self.outputs_2d_:
            _y = self._y.reshape((-1, 1))
            classes_ = [self.classes_]

        if self.outlier_label is None:
            outlier_label_ = None

        elif self.outlier_label == "most_frequent":
            outlier_label_ = []
            # iterate over multi-output, get the most frequent label for each
            # output.
            for k, classes_k in enumerate(classes_):
                label_count = np.bincount(_y[:, k])
                outlier_label_.append(classes_k[label_count.argmax()])

        else:
            if _is_arraylike(self.outlier_label) and not isinstance(
                self.outlier_label, str
            ):
                if len(self.outlier_label) != len(classes_):
                    raise ValueError(
                        "The length of outlier_label: {} is "
                        "inconsistent with the output "
                        "length: {}".format(self.outlier_label, len(classes_))
                    )
                outlier_label_ = self.outlier_label
            else:
                outlier_label_ = [self.outlier_label] * len(classes_)

            for classes, label in zip(classes_, outlier_label_):
                if _is_arraylike(label) and not isinstance(label, str):
                    # ensure the outlier label for each output is a scalar.
                    raise TypeError(
                        "The outlier_label of classes {} is "
                        "supposed to be a scalar, got "
                        "{}.".format(classes, label)
                    )
                if np.append(classes, label).dtype != classes.dtype:
                    # ensure the dtype of outlier label is consistent with y.
                    raise TypeError(
                        "The dtype of outlier_label {} is "
                        "inconsistent with classes {} in "
                        "y.".format(label, classes)
                    )

        self.outlier_label_ = outlier_label_

        return self

    def predict(self, X):
        """Predict the class labels for the provided data.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_queries, n_features), \
                or (n_queries, n_indexed) if metric == 'precomputed'
            Test samples.

        Returns
        -------
        y : ndarray of shape (n_queries,) or (n_queries, n_outputs)
            Class labels for each data sample.
        """

        probs = self.predict_proba(X)
        classes_ = self.classes_

        if not self.outputs_2d_:
            probs = [probs]
            classes_ = [self.classes_]

        n_outputs = len(classes_)
        n_queries = probs[0].shape[0]
        y_pred = np.empty((n_queries, n_outputs), dtype=classes_[0].dtype)

        for k, prob in enumerate(probs):
            # iterate over multi-output, assign labels based on probabilities
            # of each output.
            max_prob_index = prob.argmax(axis=1)
            y_pred[:, k] = classes_[k].take(max_prob_index)

            outlier_zero_probs = (prob == 0).all(axis=1)
            if outlier_zero_probs.any():
                zero_prob_index = np.flatnonzero(outlier_zero_probs)
                y_pred[zero_prob_index, k] = self.outlier_label_[k]

        if not self.outputs_2d_:
            y_pred = y_pred.ravel()

        return y_pred

    def predict_proba(self, X):
        """Return probability estimates for the test data X.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_queries, n_features), \
                or (n_queries, n_indexed) if metric == 'precomputed'
            Test samples.

        Returns
        -------
        p : ndarray of shape (n_queries, n_classes), or a list of \
                n_outputs of such arrays if n_outputs > 1.
            The class probabilities of the input samples. Classes are ordered
            by lexicographic order.
        """
        check_is_fitted(self, "_fit_method")
        n_queries = _num_samples(X)

        metric, metric_kwargs = _adjusted_metric(
            metric=self.metric, metric_kwargs=self.metric_params, p=self.p
        )

        if (
            self.weights == "uniform"
            and self._fit_method == "brute"
            and not self.outputs_2d_
            and RadiusNeighborsClassMode.is_usable_for(X, self._fit_X, metric)
        ):
            probabilities = RadiusNeighborsClassMode.compute(
                X=X,
                Y=self._fit_X,
                radius=self.radius,
                weights=self.weights,
                Y_labels=self._y,
                unique_Y_labels=self.classes_,
                outlier_label=self.outlier_label,
                metric=metric,
                metric_kwargs=metric_kwargs,
                strategy="parallel_on_X",
                # `strategy="parallel_on_X"` has in practice been shown
                # to be more efficient than `strategy="parallel_on_Y"`
                # on many combinations of datasets.
                # Hence, we choose to enforce it here.
                # For more information, see:
                # https://github.com/scikit-learn/scikit-learn/pull/26828/files#r1282398471 # noqa
            )
            return probabilities

        neigh_dist, neigh_ind = self.radius_neighbors(X)
        outlier_mask = np.zeros(n_queries, dtype=bool)
        outlier_mask[:] = [len(nind) == 0 for nind in neigh_ind]
        outliers = np.flatnonzero(outlier_mask)
        inliers = np.flatnonzero(~outlier_mask)

        classes_ = self.classes_
        _y = self._y
        if not self.outputs_2d_:
            _y = self._y.reshape((-1, 1))
            classes_ = [self.classes_]

        if self.outlier_label_ is None and outliers.size > 0:
            raise ValueError(
                "No neighbors found for test samples %r, "
                "you can try using larger radius, "
                "giving a label for outliers, "
                "or considering removing them from your dataset." % outliers
            )

        weights = _get_weights(neigh_dist, self.weights)
        if weights is not None:
            weights = weights[inliers]

        probabilities = []
        # iterate over multi-output, measure probabilities of the k-th output.
        for k, classes_k in enumerate(classes_):
            pred_labels = np.zeros(len(neigh_ind), dtype=object)
            pred_labels[:] = [_y[ind, k] for ind in neigh_ind]

            proba_k = np.zeros((n_queries, classes_k.size))
            proba_inl = np.zeros((len(inliers), classes_k.size))

            # samples have different size of neighbors within the same radius
            if weights is None:
                for i, idx in enumerate(pred_labels[inliers]):
                    proba_inl[i, :] = np.bincount(idx, minlength=classes_k.size)
            else:
                for i, idx in enumerate(pred_labels[inliers]):
                    proba_inl[i, :] = np.bincount(
                        idx, weights[i], minlength=classes_k.size
                    )
            proba_k[inliers, :] = proba_inl

            if outliers.size > 0:
                _outlier_label = self.outlier_label_[k]
                label_index = np.flatnonzero(classes_k == _outlier_label)
                if label_index.size == 1:
                    proba_k[outliers, label_index[0]] = 1.0
                else:
                    warnings.warn(
                        "Outlier label {} is not in training "
                        "classes. All class probabilities of "
                        "outliers will be assigned with 0."
                        "".format(self.outlier_label_[k])
                    )

            # normalize 'votes' into real [0,1] probabilities
            normalizer = proba_k.sum(axis=1)[:, np.newaxis]
            normalizer[normalizer == 0.0] = 1.0
            proba_k /= normalizer

            probabilities.append(proba_k)

        if not self.outputs_2d_:
            probabilities = probabilities[0]

        return probabilities

    def _more_tags(self):
        return {"multilabel": True}
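A short usage sketch for the two classifiers defined above (editorial illustration, not part of the vendored file):

import numpy as np
from sklearn.neighbors import KNeighborsClassifier, RadiusNeighborsClassifier

X = np.array([[0.0], [1.0], [2.0], [3.0]])
y = np.array([0, 0, 1, 1])

# weights="distance": closer neighbors dominate the vote.
knn = KNeighborsClassifier(n_neighbors=3, weights="distance").fit(X, y)
print(knn.predict([[1.2]]), knn.predict_proba([[1.2]]))

# outlier_label avoids the ValueError raised when a query point has no
# neighbors within the radius.
rnn = RadiusNeighborsClassifier(radius=0.8, outlier_label="most_frequent").fit(X, y)
print(rnn.predict([[10.0]]))  # falls back to the most frequent training label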
venv/lib/python3.10/site-packages/sklearn/neighbors/_graph.py
ADDED
@@ -0,0 +1,719 @@
1 |
+
"""Nearest Neighbors graph functions"""
|
2 |
+
|
3 |
+
# Author: Jake Vanderplas <[email protected]>
|
4 |
+
# Tom Dupre la Tour
|
5 |
+
#
|
6 |
+
# License: BSD 3 clause (C) INRIA, University of Amsterdam
|
7 |
+
import itertools
|
8 |
+
|
9 |
+
from ..base import ClassNamePrefixFeaturesOutMixin, TransformerMixin, _fit_context
|
10 |
+
from ..utils._param_validation import (
|
11 |
+
Integral,
|
12 |
+
Interval,
|
13 |
+
Real,
|
14 |
+
StrOptions,
|
15 |
+
validate_params,
|
16 |
+
)
|
17 |
+
from ..utils.validation import check_is_fitted
|
18 |
+
from ._base import VALID_METRICS, KNeighborsMixin, NeighborsBase, RadiusNeighborsMixin
|
19 |
+
from ._unsupervised import NearestNeighbors
|
20 |
+
|
21 |
+
|
22 |
+
def _check_params(X, metric, p, metric_params):
|
23 |
+
"""Check the validity of the input parameters"""
|
24 |
+
params = zip(["metric", "p", "metric_params"], [metric, p, metric_params])
|
25 |
+
est_params = X.get_params()
|
26 |
+
for param_name, func_param in params:
|
27 |
+
if func_param != est_params[param_name]:
|
28 |
+
raise ValueError(
|
29 |
+
"Got %s for %s, while the estimator has %s for the same parameter."
|
30 |
+
% (func_param, param_name, est_params[param_name])
|
31 |
+
)
|
32 |
+
|
33 |
+
|
34 |
+
def _query_include_self(X, include_self, mode):
|
35 |
+
"""Return the query based on include_self param"""
|
36 |
+
if include_self == "auto":
|
37 |
+
include_self = mode == "connectivity"
|
38 |
+
|
39 |
+
# it does not include each sample as its own neighbors
|
40 |
+
if not include_self:
|
41 |
+
X = None
|
42 |
+
|
43 |
+
return X
|
44 |
+
|
45 |
+
|
46 |
+
@validate_params(
|
47 |
+
{
|
48 |
+
"X": ["array-like", KNeighborsMixin],
|
49 |
+
"n_neighbors": [Interval(Integral, 1, None, closed="left")],
|
50 |
+
"mode": [StrOptions({"connectivity", "distance"})],
|
51 |
+
"metric": [StrOptions(set(itertools.chain(*VALID_METRICS.values()))), callable],
|
52 |
+
"p": [Interval(Real, 0, None, closed="right"), None],
|
53 |
+
"metric_params": [dict, None],
|
54 |
+
"include_self": ["boolean", StrOptions({"auto"})],
|
55 |
+
"n_jobs": [Integral, None],
|
56 |
+
},
|
57 |
+
prefer_skip_nested_validation=False, # metric is not validated yet
|
58 |
+
)
|
59 |
+
def kneighbors_graph(
|
60 |
+
X,
|
61 |
+
n_neighbors,
|
62 |
+
*,
|
63 |
+
mode="connectivity",
|
64 |
+
metric="minkowski",
|
65 |
+
p=2,
|
66 |
+
metric_params=None,
|
67 |
+
include_self=False,
|
68 |
+
n_jobs=None,
|
69 |
+
):
|
70 |
+
"""Compute the (weighted) graph of k-Neighbors for points in X.
|
71 |
+
|
72 |
+
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
|
73 |
+
|
74 |
+
Parameters
|
75 |
+
----------
|
76 |
+
X : array-like of shape (n_samples, n_features)
|
77 |
+
Sample data.
|
78 |
+
|
79 |
+
n_neighbors : int
|
80 |
+
Number of neighbors for each sample.
|
81 |
+
|
82 |
+
mode : {'connectivity', 'distance'}, default='connectivity'
|
83 |
+
Type of returned matrix: 'connectivity' will return the connectivity
|
84 |
+
matrix with ones and zeros, and 'distance' will return the distances
|
85 |
+
between neighbors according to the given metric.
|
86 |
+
|
87 |
+
metric : str, default='minkowski'
|
88 |
+
Metric to use for distance computation. Default is "minkowski", which
|
89 |
+
results in the standard Euclidean distance when p = 2. See the
|
90 |
+
documentation of `scipy.spatial.distance
|
91 |
+
<https://docs.scipy.org/doc/scipy/reference/spatial.distance.html>`_ and
|
92 |
+
the metrics listed in
|
93 |
+
:class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric
|
94 |
+
values.
|
95 |
+
|
96 |
+
p : float, default=2
|
97 |
+
Power parameter for the Minkowski metric. When p = 1, this is equivalent
|
98 |
+
to using manhattan_distance (l1), and euclidean_distance (l2) for p = 2.
|
99 |
+
For arbitrary p, minkowski_distance (l_p) is used. This parameter is expected
|
100 |
+
to be positive.
|
101 |
+
|
102 |
+
metric_params : dict, default=None
|
103 |
+
Additional keyword arguments for the metric function.
|
104 |
+
|
105 |
+
include_self : bool or 'auto', default=False
|
106 |
+
Whether or not to mark each sample as the first nearest neighbor to
|
107 |
+
itself. If 'auto', then True is used for mode='connectivity' and False
|
108 |
+
for mode='distance'.
|
109 |
+
|
110 |
+
n_jobs : int, default=None
|
111 |
+
The number of parallel jobs to run for neighbors search.
|
112 |
+
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
|
113 |
+
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
|
114 |
+
for more details.
|
115 |
+
|
116 |
+
Returns
|
117 |
+
-------
|
118 |
+
A : sparse matrix of shape (n_samples, n_samples)
|
119 |
+
Graph where A[i, j] is assigned the weight of edge that
|
120 |
+
connects i to j. The matrix is of CSR format.
|
121 |
+
|
122 |
+
See Also
|
123 |
+
--------
|
124 |
+
radius_neighbors_graph: Compute the (weighted) graph of Neighbors for points in X.
|
125 |
+
|
126 |
+
Examples
|
127 |
+
--------
|
128 |
+
>>> X = [[0], [3], [1]]
|
129 |
+
>>> from sklearn.neighbors import kneighbors_graph
|
130 |
+
>>> A = kneighbors_graph(X, 2, mode='connectivity', include_self=True)
|
131 |
+
>>> A.toarray()
|
132 |
+
array([[1., 0., 1.],
|
133 |
+
[0., 1., 1.],
|
134 |
+
[1., 0., 1.]])
|
135 |
+
"""
|
136 |
+
if not isinstance(X, KNeighborsMixin):
|
137 |
+
X = NearestNeighbors(
|
138 |
+
n_neighbors=n_neighbors,
|
139 |
+
metric=metric,
|
140 |
+
p=p,
|
141 |
+
metric_params=metric_params,
|
142 |
+
n_jobs=n_jobs,
|
143 |
+
).fit(X)
|
144 |
+
else:
|
145 |
+
_check_params(X, metric, p, metric_params)
|
146 |
+
|
147 |
+
query = _query_include_self(X._fit_X, include_self, mode)
|
148 |
+
return X.kneighbors_graph(X=query, n_neighbors=n_neighbors, mode=mode)
|
149 |
+
|
150 |
+
|
@validate_params(
    {
        "X": ["array-like", RadiusNeighborsMixin],
        "radius": [Interval(Real, 0, None, closed="both")],
        "mode": [StrOptions({"connectivity", "distance"})],
        "metric": [StrOptions(set(itertools.chain(*VALID_METRICS.values()))), callable],
        "p": [Interval(Real, 0, None, closed="right"), None],
        "metric_params": [dict, None],
        "include_self": ["boolean", StrOptions({"auto"})],
        "n_jobs": [Integral, None],
    },
    prefer_skip_nested_validation=False,  # metric is not validated yet
)
def radius_neighbors_graph(
    X,
    radius,
    *,
    mode="connectivity",
    metric="minkowski",
    p=2,
    metric_params=None,
    include_self=False,
    n_jobs=None,
):
    """Compute the (weighted) graph of Neighbors for points in X.

    Neighborhoods are restricted to points at a distance lower than
    radius.

    Read more in the :ref:`User Guide <unsupervised_neighbors>`.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Sample data.

    radius : float
        Radius of neighborhoods.

    mode : {'connectivity', 'distance'}, default='connectivity'
        Type of returned matrix: 'connectivity' will return the connectivity
        matrix with ones and zeros, and 'distance' will return the distances
        between neighbors according to the given metric.

    metric : str, default='minkowski'
        Metric to use for distance computation. Default is "minkowski", which
        results in the standard Euclidean distance when p = 2. See the
        documentation of `scipy.spatial.distance
        <https://docs.scipy.org/doc/scipy/reference/spatial.distance.html>`_ and
        the metrics listed in
        :class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric
        values.

    p : float, default=2
        Power parameter for the Minkowski metric. When p = 1, this is
        equivalent to using manhattan_distance (l1), and euclidean_distance
        (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.

    metric_params : dict, default=None
        Additional keyword arguments for the metric function.

    include_self : bool or 'auto', default=False
        Whether or not to mark each sample as the first nearest neighbor to
        itself. If 'auto', then True is used for mode='connectivity' and False
        for mode='distance'.

    n_jobs : int, default=None
        The number of parallel jobs to run for neighbors search.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.

    Returns
    -------
    A : sparse matrix of shape (n_samples, n_samples)
        Graph where A[i, j] is assigned the weight of the edge that connects
        i to j. The matrix is of CSR format.

    See Also
    --------
    kneighbors_graph: Compute the weighted graph of k-neighbors for points in X.

    Examples
    --------
    >>> X = [[0], [3], [1]]
    >>> from sklearn.neighbors import radius_neighbors_graph
    >>> A = radius_neighbors_graph(X, 1.5, mode='connectivity',
    ...                            include_self=True)
    >>> A.toarray()
    array([[1., 0., 1.],
           [0., 1., 0.],
           [1., 0., 1.]])
    """
    if not isinstance(X, RadiusNeighborsMixin):
        X = NearestNeighbors(
            radius=radius,
            metric=metric,
            p=p,
            metric_params=metric_params,
            n_jobs=n_jobs,
        ).fit(X)
    else:
        _check_params(X, metric, p, metric_params)

    query = _query_include_self(X._fit_X, include_self, mode)
    return X.radius_neighbors_graph(query, radius, mode)

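With mode='distance' the stored values are metric distances rather than ones, and the default include_self=False leaves the diagonal unmarked. A minimal sketch on the same toy data as the docstring above; the expected output is worked out by hand from the pairwise distances:

from sklearn.neighbors import radius_neighbors_graph

X = [[0], [3], [1]]
A = radius_neighbors_graph(X, 1.5, mode="distance", include_self=False)
print(A.toarray())
# Only the pair (0, 1) lies within radius 1.5 (distance 1.0); the sample
# at 3 has no neighbor in range, so its row stays empty:
# [[0. 0. 1.]
#  [0. 0. 0.]
#  [1. 0. 0.]]
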
class KNeighborsTransformer(
    ClassNamePrefixFeaturesOutMixin, KNeighborsMixin, TransformerMixin, NeighborsBase
):
    """Transform X into a (weighted) graph of k nearest neighbors.

    The transformed data is a sparse graph as returned by kneighbors_graph.

    Read more in the :ref:`User Guide <neighbors_transformer>`.

    .. versionadded:: 0.22

    Parameters
    ----------
    mode : {'distance', 'connectivity'}, default='distance'
        Type of returned matrix: 'connectivity' will return the connectivity
        matrix with ones and zeros, and 'distance' will return the distances
        between neighbors according to the given metric.

    n_neighbors : int, default=5
        Number of neighbors for each sample in the transformed sparse graph.
        For compatibility reasons, as each sample is considered as its own
        neighbor, one extra neighbor will be computed when mode == 'distance'.
        In this case, the sparse graph contains (n_neighbors + 1) neighbors.

    algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
        Algorithm used to compute the nearest neighbors:

        - 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
        - 'brute' will use a brute-force search.
        - 'auto' will attempt to decide the most appropriate algorithm
          based on the values passed to :meth:`fit` method.

        Note: fitting on sparse input will override the setting of
        this parameter, using brute force.

    leaf_size : int, default=30
        Leaf size passed to BallTree or KDTree. This can affect the
        speed of the construction and query, as well as the memory
        required to store the tree. The optimal value depends on the
        nature of the problem.

    metric : str or callable, default='minkowski'
        Metric to use for distance computation. Default is "minkowski", which
        results in the standard Euclidean distance when p = 2. See the
        documentation of `scipy.spatial.distance
        <https://docs.scipy.org/doc/scipy/reference/spatial.distance.html>`_ and
        the metrics listed in
        :class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric
        values.

        If metric is a callable function, it takes two arrays representing 1D
        vectors as inputs and must return one value indicating the distance
        between those vectors. This works for Scipy's metrics, but is less
        efficient than passing the metric name as a string.

        Distance matrices are not supported.

    p : float, default=2
        Parameter for the Minkowski metric from
        sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is
        equivalent to using manhattan_distance (l1), and euclidean_distance
        (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
        This parameter is expected to be positive.

    metric_params : dict, default=None
        Additional keyword arguments for the metric function.

    n_jobs : int, default=None
        The number of parallel jobs to run for neighbors search.
        If ``-1``, then the number of jobs is set to the number of CPU cores.

    Attributes
    ----------
    effective_metric_ : str or callable
        The distance metric used. It will be the same as the `metric`
        parameter or a synonym of it, e.g. 'euclidean' if the `metric`
        parameter is set to 'minkowski' and the `p` parameter to 2.

    effective_metric_params_ : dict
        Additional keyword arguments for the metric function. For most metrics
        this will be the same as the `metric_params` parameter, but it may
        also contain the `p` parameter value if the `effective_metric_`
        attribute is set to 'minkowski'.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    n_samples_fit_ : int
        Number of samples in the fitted data.

    See Also
    --------
    kneighbors_graph : Compute the weighted graph of k-neighbors for
        points in X.
    RadiusNeighborsTransformer : Transform X into a weighted graph of
        neighbors nearer than a radius.

    Notes
    -----
    For an example of using :class:`~sklearn.neighbors.KNeighborsTransformer`
    in combination with :class:`~sklearn.manifold.TSNE` see
    :ref:`sphx_glr_auto_examples_neighbors_approximate_nearest_neighbors.py`.

    Examples
    --------
    >>> from sklearn.datasets import load_wine
    >>> from sklearn.neighbors import KNeighborsTransformer
    >>> X, _ = load_wine(return_X_y=True)
    >>> X.shape
    (178, 13)
    >>> transformer = KNeighborsTransformer(n_neighbors=5, mode='distance')
    >>> X_dist_graph = transformer.fit_transform(X)
    >>> X_dist_graph.shape
    (178, 178)
    """

    _parameter_constraints: dict = {
        **NeighborsBase._parameter_constraints,
        "mode": [StrOptions({"distance", "connectivity"})],
    }
    _parameter_constraints.pop("radius")

    def __init__(
        self,
        *,
        mode="distance",
        n_neighbors=5,
        algorithm="auto",
        leaf_size=30,
        metric="minkowski",
        p=2,
        metric_params=None,
        n_jobs=None,
    ):
        super(KNeighborsTransformer, self).__init__(
            n_neighbors=n_neighbors,
            radius=None,
            algorithm=algorithm,
            leaf_size=leaf_size,
            metric=metric,
            p=p,
            metric_params=metric_params,
            n_jobs=n_jobs,
        )
        self.mode = mode

    @_fit_context(
        # KNeighborsTransformer.metric is not validated yet
        prefer_skip_nested_validation=False
    )
    def fit(self, X, y=None):
        """Fit the k-nearest neighbors transformer from the training dataset.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
                (n_samples, n_samples) if metric='precomputed'
            Training data.

        y : Ignored
            Not used, present for API consistency by convention.

        Returns
        -------
        self : KNeighborsTransformer
            The fitted k-nearest neighbors transformer.
        """
        self._fit(X)
        self._n_features_out = self.n_samples_fit_
        return self

    def transform(self, X):
        """Compute the (weighted) graph of Neighbors for points in X.

        Parameters
        ----------
        X : array-like of shape (n_samples_transform, n_features)
            Sample data.

        Returns
        -------
        Xt : sparse matrix of shape (n_samples_transform, n_samples_fit)
            Xt[i, j] is assigned the weight of the edge that connects i to j.
            Only the neighbors have an explicit value.
            The diagonal is always explicit.
            The matrix is of CSR format.
        """
        check_is_fitted(self)
        add_one = self.mode == "distance"
        return self.kneighbors_graph(
            X, mode=self.mode, n_neighbors=self.n_neighbors + add_one
        )

    def fit_transform(self, X, y=None):
        """Fit to data, then transform it.

        Fits transformer to X and y with optional parameters fit_params
        and returns a transformed version of X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training set.

        y : Ignored
            Not used, present for API consistency by convention.

        Returns
        -------
        Xt : sparse matrix of shape (n_samples, n_samples)
            Xt[i, j] is assigned the weight of the edge that connects i to j.
            Only the neighbors have an explicit value.
            The diagonal is always explicit.
            The matrix is of CSR format.
        """
        return self.fit(X).transform(X)

    def _more_tags(self):
        return {
            "_xfail_checks": {
                "check_methods_sample_order_invariance": "check is not applicable."
            }
        }

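A common use of the transformer above is to precompute the neighbor graph once and feed it to a downstream estimator with metric='precomputed'. Because mode='distance' stores n_neighbors + 1 entries per row (each sample counts as its own first neighbor), the downstream estimator should request at most n_neighbors neighbors. A hedged sketch of that pairing; the parameter values are illustrative:

from sklearn.datasets import load_wine
from sklearn.neighbors import KNeighborsClassifier, KNeighborsTransformer
from sklearn.pipeline import make_pipeline

X, y = load_wine(return_X_y=True)
# The transformer emits a sparse distance graph with 6 explicit entries per
# row (5 neighbors + self); the classifier consumes it via 'precomputed'
# and never recomputes the distances itself.
est = make_pipeline(
    KNeighborsTransformer(n_neighbors=5, mode="distance"),
    KNeighborsClassifier(n_neighbors=5, metric="precomputed"),
)
est.fit(X, y)
print(est.score(X, y))
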
class RadiusNeighborsTransformer(
    ClassNamePrefixFeaturesOutMixin,
    RadiusNeighborsMixin,
    TransformerMixin,
    NeighborsBase,
):
    """Transform X into a (weighted) graph of neighbors nearer than a radius.

    The transformed data is a sparse graph as returned by
    `radius_neighbors_graph`.

    Read more in the :ref:`User Guide <neighbors_transformer>`.

    .. versionadded:: 0.22

    Parameters
    ----------
    mode : {'distance', 'connectivity'}, default='distance'
        Type of returned matrix: 'connectivity' will return the connectivity
        matrix with ones and zeros, and 'distance' will return the distances
        between neighbors according to the given metric.

    radius : float, default=1.0
        Radius of neighborhood in the transformed sparse graph.

    algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
        Algorithm used to compute the nearest neighbors:

        - 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
        - 'brute' will use a brute-force search.
        - 'auto' will attempt to decide the most appropriate algorithm
          based on the values passed to :meth:`fit` method.

        Note: fitting on sparse input will override the setting of
        this parameter, using brute force.

    leaf_size : int, default=30
        Leaf size passed to BallTree or KDTree. This can affect the
        speed of the construction and query, as well as the memory
        required to store the tree. The optimal value depends on the
        nature of the problem.

    metric : str or callable, default='minkowski'
        Metric to use for distance computation. Default is "minkowski", which
        results in the standard Euclidean distance when p = 2. See the
        documentation of `scipy.spatial.distance
        <https://docs.scipy.org/doc/scipy/reference/spatial.distance.html>`_ and
        the metrics listed in
        :class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric
        values.

        If metric is a callable function, it takes two arrays representing 1D
        vectors as inputs and must return one value indicating the distance
        between those vectors. This works for Scipy's metrics, but is less
        efficient than passing the metric name as a string.

        Distance matrices are not supported.

    p : float, default=2
        Parameter for the Minkowski metric from
        sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is
        equivalent to using manhattan_distance (l1), and euclidean_distance
        (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
        This parameter is expected to be positive.

    metric_params : dict, default=None
        Additional keyword arguments for the metric function.

    n_jobs : int, default=None
        The number of parallel jobs to run for neighbors search.
        If ``-1``, then the number of jobs is set to the number of CPU cores.

    Attributes
    ----------
    effective_metric_ : str or callable
        The distance metric used. It will be the same as the `metric`
        parameter or a synonym of it, e.g. 'euclidean' if the `metric`
        parameter is set to 'minkowski' and the `p` parameter to 2.

    effective_metric_params_ : dict
        Additional keyword arguments for the metric function. For most metrics
        this will be the same as the `metric_params` parameter, but it may
        also contain the `p` parameter value if the `effective_metric_`
        attribute is set to 'minkowski'.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    n_samples_fit_ : int
        Number of samples in the fitted data.

    See Also
    --------
    kneighbors_graph : Compute the weighted graph of k-neighbors for
        points in X.
    KNeighborsTransformer : Transform X into a weighted graph of k
        nearest neighbors.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.datasets import load_wine
    >>> from sklearn.cluster import DBSCAN
    >>> from sklearn.neighbors import RadiusNeighborsTransformer
    >>> from sklearn.pipeline import make_pipeline
    >>> X, _ = load_wine(return_X_y=True)
    >>> estimator = make_pipeline(
    ...     RadiusNeighborsTransformer(radius=42.0, mode='distance'),
    ...     DBSCAN(eps=25.0, metric='precomputed'))
    >>> X_clustered = estimator.fit_predict(X)
    >>> clusters, counts = np.unique(X_clustered, return_counts=True)
    >>> print(counts)
    [ 29  15 111  11  12]
    """

    _parameter_constraints: dict = {
        **NeighborsBase._parameter_constraints,
        "mode": [StrOptions({"distance", "connectivity"})],
    }
    _parameter_constraints.pop("n_neighbors")

    def __init__(
        self,
        *,
        mode="distance",
        radius=1.0,
        algorithm="auto",
        leaf_size=30,
        metric="minkowski",
        p=2,
        metric_params=None,
        n_jobs=None,
    ):
        super(RadiusNeighborsTransformer, self).__init__(
            n_neighbors=None,
            radius=radius,
            algorithm=algorithm,
            leaf_size=leaf_size,
            metric=metric,
            p=p,
            metric_params=metric_params,
            n_jobs=n_jobs,
        )
        self.mode = mode

    @_fit_context(
        # RadiusNeighborsTransformer.metric is not validated yet
        prefer_skip_nested_validation=False
    )
    def fit(self, X, y=None):
        """Fit the radius neighbors transformer from the training dataset.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
                (n_samples, n_samples) if metric='precomputed'
            Training data.

        y : Ignored
            Not used, present for API consistency by convention.

        Returns
        -------
        self : RadiusNeighborsTransformer
            The fitted radius neighbors transformer.
        """
        self._fit(X)
        self._n_features_out = self.n_samples_fit_
        return self

    def transform(self, X):
        """Compute the (weighted) graph of Neighbors for points in X.

        Parameters
        ----------
        X : array-like of shape (n_samples_transform, n_features)
            Sample data.

        Returns
        -------
        Xt : sparse matrix of shape (n_samples_transform, n_samples_fit)
            Xt[i, j] is assigned the weight of the edge that connects i to j.
            Only the neighbors have an explicit value.
            The diagonal is always explicit.
            The matrix is of CSR format.
        """
        check_is_fitted(self)
        return self.radius_neighbors_graph(X, mode=self.mode, sort_results=True)

    def fit_transform(self, X, y=None):
        """Fit to data, then transform it.

        Fits transformer to X and y with optional parameters fit_params
        and returns a transformed version of X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training set.

        y : Ignored
            Not used, present for API consistency by convention.

        Returns
        -------
        Xt : sparse matrix of shape (n_samples, n_samples)
            Xt[i, j] is assigned the weight of the edge that connects i to j.
            Only the neighbors have an explicit value.
            The diagonal is always explicit.
            The matrix is of CSR format.
        """
        return self.fit(X).transform(X)

    def _more_tags(self):
        return {
            "_xfail_checks": {
                "check_methods_sample_order_invariance": "check is not applicable."
            }
        }

venv/lib/python3.10/site-packages/sklearn/neighbors/_nca.py
ADDED
@@ -0,0 +1,525 @@
"""
Neighborhood Component Analysis
"""

# Authors: William de Vazelhes <[email protected]>
#          John Chiotellis <[email protected]>
# License: BSD 3 clause

import sys
import time
from numbers import Integral, Real
from warnings import warn

import numpy as np
from scipy.optimize import minimize

from ..base import (
    BaseEstimator,
    ClassNamePrefixFeaturesOutMixin,
    TransformerMixin,
    _fit_context,
)
from ..decomposition import PCA
from ..exceptions import ConvergenceWarning
from ..metrics import pairwise_distances
from ..preprocessing import LabelEncoder
from ..utils._param_validation import Interval, StrOptions
from ..utils.extmath import softmax
from ..utils.multiclass import check_classification_targets
from ..utils.random import check_random_state
from ..utils.validation import check_array, check_is_fitted


class NeighborhoodComponentsAnalysis(
    ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator
):
    """Neighborhood Components Analysis.

    Neighborhood Component Analysis (NCA) is a machine learning algorithm for
    metric learning. It learns a linear transformation in a supervised fashion
    to improve the classification accuracy of a stochastic nearest neighbors
    rule in the transformed space.

    Read more in the :ref:`User Guide <nca>`.

    Parameters
    ----------
    n_components : int, default=None
        Preferred dimensionality of the projected space.
        If None it will be set to `n_features`.

    init : {'auto', 'pca', 'lda', 'identity', 'random'} or ndarray of shape \
            (n_features_a, n_features_b), default='auto'
        Initialization of the linear transformation. Possible options are
        `'auto'`, `'pca'`, `'lda'`, `'identity'`, `'random'`, and a numpy
        array of shape `(n_features_a, n_features_b)`.

        - `'auto'`
            Depending on `n_components`, the most reasonable initialization
            will be chosen. If `n_components <= n_classes` we use `'lda'`, as
            it uses labels information. If not, but
            `n_components < min(n_features, n_samples)`, we use `'pca'`, as
            it projects data in meaningful directions (those of higher
            variance). Otherwise, we just use `'identity'`.

        - `'pca'`
            `n_components` principal components of the inputs passed
            to :meth:`fit` will be used to initialize the transformation.
            (See :class:`~sklearn.decomposition.PCA`)

        - `'lda'`
            `min(n_components, n_classes)` most discriminative
            components of the inputs passed to :meth:`fit` will be used to
            initialize the transformation. (If `n_components > n_classes`,
            the rest of the components will be zero.) (See
            :class:`~sklearn.discriminant_analysis.LinearDiscriminantAnalysis`)

        - `'identity'`
            If `n_components` is strictly smaller than the
            dimensionality of the inputs passed to :meth:`fit`, the identity
            matrix will be truncated to the first `n_components` rows.

        - `'random'`
            The initial transformation will be a random array of shape
            `(n_components, n_features)`. Each value is sampled from the
            standard normal distribution.

        - numpy array
            `n_features_b` must match the dimensionality of the inputs passed
            to :meth:`fit` and n_features_a must be less than or equal to that.
            If `n_components` is not `None`, `n_features_a` must match it.

    warm_start : bool, default=False
        If `True` and :meth:`fit` has been called before, the solution of the
        previous call to :meth:`fit` is used as the initial linear
        transformation (`n_components` and `init` will be ignored).

    max_iter : int, default=50
        Maximum number of iterations in the optimization.

    tol : float, default=1e-5
        Convergence tolerance for the optimization.

    callback : callable, default=None
        If not `None`, this function is called after every iteration of the
        optimizer, taking as arguments the current solution (flattened
        transformation matrix) and the number of iterations. This might be
        useful in case one wants to examine or store the transformation
        found after each iteration.

    verbose : int, default=0
        If 0, no progress messages will be printed.
        If 1, progress messages will be printed to stdout.
        If > 1, progress messages will be printed and the `disp`
        parameter of :func:`scipy.optimize.minimize` will be set to
        `verbose - 2`.

    random_state : int or numpy.RandomState, default=None
        A pseudo random number generator object or a seed for it if int. If
        `init='random'`, `random_state` is used to initialize the random
        transformation. If `init='pca'`, `random_state` is passed as an
        argument to PCA when initializing the transformation. Pass an int
        for reproducible results across multiple function calls.
        See :term:`Glossary <random_state>`.

    Attributes
    ----------
    components_ : ndarray of shape (n_components, n_features)
        The linear transformation learned during fitting.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    n_iter_ : int
        Counts the number of iterations performed by the optimizer.

    random_state_ : numpy.RandomState
        Pseudo random number generator object used during initialization.

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    sklearn.discriminant_analysis.LinearDiscriminantAnalysis : Linear
        Discriminant Analysis.
    sklearn.decomposition.PCA : Principal component analysis (PCA).

    References
    ----------
    .. [1] J. Goldberger, G. Hinton, S. Roweis, R. Salakhutdinov.
           "Neighbourhood Components Analysis". Advances in Neural Information
           Processing Systems. 17, 513-520, 2005.
           http://www.cs.nyu.edu/~roweis/papers/ncanips.pdf

    .. [2] Wikipedia entry on Neighborhood Components Analysis
           https://en.wikipedia.org/wiki/Neighbourhood_components_analysis

    Examples
    --------
    >>> from sklearn.neighbors import NeighborhoodComponentsAnalysis
    >>> from sklearn.neighbors import KNeighborsClassifier
    >>> from sklearn.datasets import load_iris
    >>> from sklearn.model_selection import train_test_split
    >>> X, y = load_iris(return_X_y=True)
    >>> X_train, X_test, y_train, y_test = train_test_split(X, y,
    ...     stratify=y, test_size=0.7, random_state=42)
    >>> nca = NeighborhoodComponentsAnalysis(random_state=42)
    >>> nca.fit(X_train, y_train)
    NeighborhoodComponentsAnalysis(...)
    >>> knn = KNeighborsClassifier(n_neighbors=3)
    >>> knn.fit(X_train, y_train)
    KNeighborsClassifier(...)
    >>> print(knn.score(X_test, y_test))
    0.933333...
    >>> knn.fit(nca.transform(X_train), y_train)
    KNeighborsClassifier(...)
    >>> print(knn.score(nca.transform(X_test), y_test))
    0.961904...
    """

    _parameter_constraints: dict = {
        "n_components": [
            Interval(Integral, 1, None, closed="left"),
            None,
        ],
        "init": [
            StrOptions({"auto", "pca", "lda", "identity", "random"}),
            np.ndarray,
        ],
        "warm_start": ["boolean"],
        "max_iter": [Interval(Integral, 1, None, closed="left")],
        "tol": [Interval(Real, 0, None, closed="left")],
        "callback": [callable, None],
        "verbose": ["verbose"],
        "random_state": ["random_state"],
    }

    def __init__(
        self,
        n_components=None,
        *,
        init="auto",
        warm_start=False,
        max_iter=50,
        tol=1e-5,
        callback=None,
        verbose=0,
        random_state=None,
    ):
        self.n_components = n_components
        self.init = init
        self.warm_start = warm_start
        self.max_iter = max_iter
        self.tol = tol
        self.callback = callback
        self.verbose = verbose
        self.random_state = random_state

    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y):
        """Fit the model according to the given training data.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The training samples.

        y : array-like of shape (n_samples,)
            The corresponding training labels.

        Returns
        -------
        self : object
            Fitted estimator.
        """
        # Validate the inputs X and y, and convert y to numerical classes.
        X, y = self._validate_data(X, y, ensure_min_samples=2)
        check_classification_targets(y)
        y = LabelEncoder().fit_transform(y)

        # Check the preferred dimensionality of the projected space
        if self.n_components is not None and self.n_components > X.shape[1]:
            raise ValueError(
                "The preferred dimensionality of the "
                f"projected space `n_components` ({self.n_components}) cannot "
                "be greater than the given data "
                f"dimensionality ({X.shape[1]})!"
            )
        # If warm_start is enabled, check that the inputs are consistent
        if (
            self.warm_start
            and hasattr(self, "components_")
            and self.components_.shape[1] != X.shape[1]
        ):
            raise ValueError(
                f"The new inputs dimensionality ({X.shape[1]}) does not "
                "match the input dimensionality of the "
                f"previously learned transformation ({self.components_.shape[1]})."
            )
        # Check how the linear transformation should be initialized
        init = self.init
        if isinstance(init, np.ndarray):
            init = check_array(init)
            # Assert that init.shape[1] = X.shape[1]
            if init.shape[1] != X.shape[1]:
                raise ValueError(
                    f"The input dimensionality ({init.shape[1]}) of the given "
                    "linear transformation `init` must match the "
                    f"dimensionality of the given inputs `X` ({X.shape[1]})."
                )
            # Assert that init.shape[0] <= init.shape[1]
            if init.shape[0] > init.shape[1]:
                raise ValueError(
                    f"The output dimensionality ({init.shape[0]}) of the given "
                    "linear transformation `init` cannot be "
                    f"greater than its input dimensionality ({init.shape[1]})."
                )
            # Assert that self.n_components = init.shape[0]
            if self.n_components is not None and self.n_components != init.shape[0]:
                raise ValueError(
                    "The preferred dimensionality of the "
                    f"projected space `n_components` ({self.n_components}) does"
                    " not match the output dimensionality of "
                    "the given linear transformation "
                    f"`init` ({init.shape[0]})!"
                )

        # Initialize the random generator
        self.random_state_ = check_random_state(self.random_state)

        # Measure the total training time
        t_train = time.time()

        # Compute a mask that stays fixed during optimization:
        same_class_mask = y[:, np.newaxis] == y[np.newaxis, :]
        # (n_samples, n_samples)

        # Initialize the transformation
        transformation = np.ravel(self._initialize(X, y, init))

        # Create a dictionary of parameters to be passed to the optimizer
        disp = self.verbose - 2 if self.verbose > 1 else -1
        optimizer_params = {
            "method": "L-BFGS-B",
            "fun": self._loss_grad_lbfgs,
            "args": (X, same_class_mask, -1.0),
            "jac": True,
            "x0": transformation,
            "tol": self.tol,
            "options": dict(maxiter=self.max_iter, disp=disp),
            "callback": self._callback,
        }

        # Call the optimizer
        self.n_iter_ = 0
        opt_result = minimize(**optimizer_params)

        # Reshape the solution found by the optimizer
        self.components_ = opt_result.x.reshape(-1, X.shape[1])
        self._n_features_out = self.components_.shape[1]

        # Stop timer
        t_train = time.time() - t_train
        if self.verbose:
            cls_name = self.__class__.__name__

            # Warn the user if the algorithm did not converge
            if not opt_result.success:
                warn(
                    "[{}] NCA did not converge: {}".format(
                        cls_name, opt_result.message
                    ),
                    ConvergenceWarning,
                )

            print("[{}] Training took {:8.2f}s.".format(cls_name, t_train))

        return self

    def transform(self, X):
        """Apply the learned transformation to the given data.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Data samples.

        Returns
        -------
        X_embedded: ndarray of shape (n_samples, n_components)
            The data samples transformed.

        Raises
        ------
        NotFittedError
            If :meth:`fit` has not been called before.
        """
        check_is_fitted(self)
        X = self._validate_data(X, reset=False)

        return np.dot(X, self.components_.T)

    def _initialize(self, X, y, init):
        """Initialize the transformation.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The training samples.

        y : array-like of shape (n_samples,)
            The training labels.

        init : str or ndarray of shape (n_features_a, n_features_b)
            The validated initialization of the linear transformation.

        Returns
        -------
        transformation : ndarray of shape (n_components, n_features)
            The initialized linear transformation.

        """
        transformation = init
        if self.warm_start and hasattr(self, "components_"):
            transformation = self.components_
        elif isinstance(init, np.ndarray):
            pass
        else:
            n_samples, n_features = X.shape
            n_components = self.n_components or n_features
            if init == "auto":
                n_classes = len(np.unique(y))
                if n_components <= min(n_features, n_classes - 1):
                    init = "lda"
                elif n_components < min(n_features, n_samples):
                    init = "pca"
                else:
                    init = "identity"
            if init == "identity":
                transformation = np.eye(n_components, X.shape[1])
            elif init == "random":
                transformation = self.random_state_.standard_normal(
                    size=(n_components, X.shape[1])
                )
            elif init in {"pca", "lda"}:
                init_time = time.time()
                if init == "pca":
                    pca = PCA(
                        n_components=n_components, random_state=self.random_state_
                    )
                    if self.verbose:
                        print("Finding principal components... ", end="")
                        sys.stdout.flush()
                    pca.fit(X)
                    transformation = pca.components_
                elif init == "lda":
                    from ..discriminant_analysis import LinearDiscriminantAnalysis

                    lda = LinearDiscriminantAnalysis(n_components=n_components)
                    if self.verbose:
                        print("Finding most discriminative components... ", end="")
                        sys.stdout.flush()
                    lda.fit(X, y)
                    transformation = lda.scalings_.T[:n_components]
                if self.verbose:
                    print("done in {:5.2f}s".format(time.time() - init_time))
        return transformation

    def _callback(self, transformation):
        """Called after each iteration of the optimizer.

        Parameters
        ----------
        transformation : ndarray of shape (n_components * n_features,)
            The solution computed by the optimizer in this iteration.
        """
        if self.callback is not None:
            self.callback(transformation, self.n_iter_)

        self.n_iter_ += 1

    def _loss_grad_lbfgs(self, transformation, X, same_class_mask, sign=1.0):
        """Compute the loss and the loss gradient w.r.t. `transformation`.

        Parameters
        ----------
        transformation : ndarray of shape (n_components * n_features,)
            The raveled linear transformation on which to compute loss and
            evaluate gradient.

        X : ndarray of shape (n_samples, n_features)
            The training samples.

        same_class_mask : ndarray of shape (n_samples, n_samples)
            A mask where `mask[i, j] == 1` if `X[i]` and `X[j]` belong
            to the same class, and `0` otherwise.

        Returns
        -------
        loss : float
            The loss computed for the given transformation.

        gradient : ndarray of shape (n_components * n_features,)
            The new (flattened) gradient of the loss.
        """
        if self.n_iter_ == 0:
            self.n_iter_ += 1
            if self.verbose:
                header_fields = ["Iteration", "Objective Value", "Time(s)"]
                header_fmt = "{:>10} {:>20} {:>10}"
                header = header_fmt.format(*header_fields)
                cls_name = self.__class__.__name__
                print("[{}]".format(cls_name))
                print(
                    "[{}] {}\n[{}] {}".format(
                        cls_name, header, cls_name, "-" * len(header)
                    )
                )

        t_funcall = time.time()

        transformation = transformation.reshape(-1, X.shape[1])
        X_embedded = np.dot(X, transformation.T)  # (n_samples, n_components)

        # Compute softmax distances
        p_ij = pairwise_distances(X_embedded, squared=True)
        np.fill_diagonal(p_ij, np.inf)
        p_ij = softmax(-p_ij)  # (n_samples, n_samples)

        # Compute loss
        masked_p_ij = p_ij * same_class_mask
        p = np.sum(masked_p_ij, axis=1, keepdims=True)  # (n_samples, 1)
        loss = np.sum(p)

        # Compute gradient of loss w.r.t. `transform`
        weighted_p_ij = masked_p_ij - p_ij * p
        weighted_p_ij_sym = weighted_p_ij + weighted_p_ij.T
        np.fill_diagonal(weighted_p_ij_sym, -weighted_p_ij.sum(axis=0))
        gradient = 2 * X_embedded.T.dot(weighted_p_ij_sym).dot(X)
        # time complexity of the gradient: O(n_components x n_samples x (
        # n_samples + n_features))

        if self.verbose:
            t_funcall = time.time() - t_funcall
            values_fmt = "[{}] {:>10} {:>20.6e} {:>10.2f}"
            print(
                values_fmt.format(
                    self.__class__.__name__, self.n_iter_, loss, t_funcall
                )
            )
            sys.stdout.flush()

        return sign * loss, sign * gradient.ravel()

    def _more_tags(self):
        return {"requires_y": True}

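The objective optimized in `_loss_grad_lbfgs` above can be reproduced standalone: `p_ij` is a row-wise softmax over negative squared distances in the embedded space (self-distance excluded), and the loss is the expected number of samples whose stochastic nearest neighbor shares their class. A minimal numpy sketch under those definitions; it substitutes `scipy.special.softmax` for the internal helper and uses toy data, so it is illustrative only:

import numpy as np
from scipy.special import softmax

rng = np.random.RandomState(0)
X = rng.randn(6, 3)               # toy samples
y = np.array([0, 0, 0, 1, 1, 1])  # toy labels
L = np.eye(2, 3)                  # some 2x3 linear transformation

X_embedded = X @ L.T
# Squared Euclidean distances in the embedded space; exclude self-distance.
d2 = ((X_embedded[:, None, :] - X_embedded[None, :, :]) ** 2).sum(-1)
np.fill_diagonal(d2, np.inf)
p_ij = softmax(-d2, axis=1)       # stochastic neighbor probabilities
same_class = y[:, None] == y[None, :]
# NCA score: total probability mass falling on same-class neighbors.
print((p_ij * same_class).sum())
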
venv/lib/python3.10/site-packages/sklearn/neighbors/_partition_nodes.cpython-310-x86_64-linux-gnu.so
ADDED
Binary file (45.3 kB)

venv/lib/python3.10/site-packages/sklearn/neighbors/_quad_tree.cpython-310-x86_64-linux-gnu.so
ADDED
Binary file (315 kB)

venv/lib/python3.10/site-packages/sklearn/neighbors/_regression.py
ADDED
@@ -0,0 +1,510 @@
"""Nearest Neighbor Regression."""

# Authors: Jake Vanderplas <[email protected]>
#          Fabian Pedregosa <[email protected]>
#          Alexandre Gramfort <[email protected]>
#          Sparseness support by Lars Buitinck
#          Multi-output support by Arnaud Joly <[email protected]>
#          Empty radius support by Andreas Bjerre-Nielsen
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam,
#                           University of Copenhagen

import warnings

import numpy as np

from ..base import RegressorMixin, _fit_context
from ..metrics import DistanceMetric
from ..utils._param_validation import StrOptions
from ._base import KNeighborsMixin, NeighborsBase, RadiusNeighborsMixin, _get_weights


class KNeighborsRegressor(KNeighborsMixin, RegressorMixin, NeighborsBase):
    """Regression based on k-nearest neighbors.

    The target is predicted by local interpolation of the targets
    associated with the nearest neighbors in the training set.

    Read more in the :ref:`User Guide <regression>`.

    .. versionadded:: 0.9

    Parameters
    ----------
    n_neighbors : int, default=5
        Number of neighbors to use by default for :meth:`kneighbors` queries.

    weights : {'uniform', 'distance'}, callable or None, default='uniform'
        Weight function used in prediction. Possible values:

        - 'uniform' : uniform weights. All points in each neighborhood
          are weighted equally.
        - 'distance' : weight points by the inverse of their distance.
          In this case, closer neighbors of a query point will have a
          greater influence than neighbors which are further away.
        - [callable] : a user-defined function which accepts an
          array of distances, and returns an array of the same shape
          containing the weights.

        Uniform weights are used by default.

    algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
        Algorithm used to compute the nearest neighbors:

        - 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
        - 'brute' will use a brute-force search.
        - 'auto' will attempt to decide the most appropriate algorithm
          based on the values passed to :meth:`fit` method.

        Note: fitting on sparse input will override the setting of
        this parameter, using brute force.

    leaf_size : int, default=30
        Leaf size passed to BallTree or KDTree. This can affect the
        speed of the construction and query, as well as the memory
        required to store the tree. The optimal value depends on the
        nature of the problem.

    p : float, default=2
        Power parameter for the Minkowski metric. When p = 1, this is
        equivalent to using manhattan_distance (l1), and euclidean_distance
        (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.

    metric : str, DistanceMetric object or callable, default='minkowski'
        Metric to use for distance computation. Default is "minkowski", which
        results in the standard Euclidean distance when p = 2. See the
        documentation of `scipy.spatial.distance
        <https://docs.scipy.org/doc/scipy/reference/spatial.distance.html>`_ and
        the metrics listed in
        :class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric
        values.

        If metric is "precomputed", X is assumed to be a distance matrix and
        must be square during fit. X may be a :term:`sparse graph`, in which
        case only "nonzero" elements may be considered neighbors.

        If metric is a callable function, it takes two arrays representing 1D
        vectors as inputs and must return one value indicating the distance
        between those vectors. This works for Scipy's metrics, but is less
        efficient than passing the metric name as a string.

        If metric is a DistanceMetric object, it will be passed directly to
        the underlying computation routines.

    metric_params : dict, default=None
        Additional keyword arguments for the metric function.

    n_jobs : int, default=None
        The number of parallel jobs to run for neighbors search.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.
        Doesn't affect :meth:`fit` method.

    Attributes
    ----------
    effective_metric_ : str or callable
        The distance metric to use. It will be the same as the `metric`
        parameter or a synonym of it, e.g. 'euclidean' if the `metric`
        parameter is set to 'minkowski' and the `p` parameter to 2.

    effective_metric_params_ : dict
        Additional keyword arguments for the metric function. For most metrics
        this will be the same as the `metric_params` parameter, but it may
        also contain the `p` parameter value if the `effective_metric_`
        attribute is set to 'minkowski'.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    n_samples_fit_ : int
        Number of samples in the fitted data.

    See Also
    --------
    NearestNeighbors : Unsupervised learner for implementing neighbor searches.
    RadiusNeighborsRegressor : Regression based on neighbors within a fixed radius.
    KNeighborsClassifier : Classifier implementing the k-nearest neighbors vote.
    RadiusNeighborsClassifier : Classifier implementing
        a vote among neighbors within a given radius.

    Notes
    -----
    See :ref:`Nearest Neighbors <neighbors>` in the online documentation
    for a discussion of the choice of ``algorithm`` and ``leaf_size``.

    .. warning::

       Regarding the Nearest Neighbors algorithms, if it is found that two
       neighbors, neighbor `k+1` and `k`, have identical distances but
       different labels, the results will depend on the ordering of the
       training data.

    https://en.wikipedia.org/wiki/K-nearest_neighbors_algorithm

    Examples
    --------
    >>> X = [[0], [1], [2], [3]]
    >>> y = [0, 0, 1, 1]
    >>> from sklearn.neighbors import KNeighborsRegressor
    >>> neigh = KNeighborsRegressor(n_neighbors=2)
    >>> neigh.fit(X, y)
    KNeighborsRegressor(...)
    >>> print(neigh.predict([[1.5]]))
    [0.5]
    """

    _parameter_constraints: dict = {
        **NeighborsBase._parameter_constraints,
        "weights": [StrOptions({"uniform", "distance"}), callable, None],
    }
    _parameter_constraints["metric"].append(DistanceMetric)
    _parameter_constraints.pop("radius")

    def __init__(
        self,
        n_neighbors=5,
        *,
        weights="uniform",
        algorithm="auto",
        leaf_size=30,
        p=2,
        metric="minkowski",
        metric_params=None,
        n_jobs=None,
    ):
        super().__init__(
            n_neighbors=n_neighbors,
            algorithm=algorithm,
            leaf_size=leaf_size,
            metric=metric,
            p=p,
            metric_params=metric_params,
            n_jobs=n_jobs,
        )
        self.weights = weights

    def _more_tags(self):
        # For cross-validation routines to split data correctly
        return {"pairwise": self.metric == "precomputed"}

    @_fit_context(
        # KNeighborsRegressor.metric is not validated yet
        prefer_skip_nested_validation=False
    )
    def fit(self, X, y):
        """Fit the k-nearest neighbors regressor from the training dataset.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
                (n_samples, n_samples) if metric='precomputed'
            Training data.

        y : {array-like, sparse matrix} of shape (n_samples,) or \
                (n_samples, n_outputs)
            Target values.

        Returns
        -------
        self : KNeighborsRegressor
            The fitted k-nearest neighbors regressor.
        """
        return self._fit(X, y)

    def predict(self, X):
        """Predict the target for the provided data.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_queries, n_features), \
                or (n_queries, n_indexed) if metric == 'precomputed'
            Test samples.

        Returns
        -------
        y : ndarray of shape (n_queries,) or (n_queries, n_outputs), dtype=double
            Target values.
        """
        if self.weights == "uniform":
            # In that case, we do not need the distances to perform
            # the weighting so we do not compute them.
            neigh_ind = self.kneighbors(X, return_distance=False)
            neigh_dist = None
        else:
            neigh_dist, neigh_ind = self.kneighbors(X)

        weights = _get_weights(neigh_dist, self.weights)

        _y = self._y
        if _y.ndim == 1:
            _y = _y.reshape((-1, 1))

        if weights is None:
            y_pred = np.mean(_y[neigh_ind], axis=1)
        else:
            y_pred = np.empty((neigh_dist.shape[0], _y.shape[1]), dtype=np.float64)
            denom = np.sum(weights, axis=1)

            for j in range(_y.shape[1]):
                num = np.sum(_y[neigh_ind, j] * weights, axis=1)
                y_pred[:, j] = num / denom

        if self._y.ndim == 1:
            y_pred = y_pred.ravel()

        return y_pred

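The 'distance' branch of `predict` above reduces to an inverse-distance weighted average, y_hat = sum_k (y_k / d_k) / sum_k (1 / d_k). A minimal sketch checking it by hand on toy data; the numbers are chosen so the arithmetic works out exactly:

from sklearn.neighbors import KNeighborsRegressor

X = [[0.0], [1.0], [2.0], [3.0]]
y = [0.0, 0.0, 1.0, 1.0]
reg = KNeighborsRegressor(n_neighbors=2, weights="distance").fit(X, y)
print(reg.predict([[1.6]]))  # approx. [0.6]

# By hand: the two nearest neighbors of 1.6 are 2.0 (d=0.4, y=1.0) and
# 1.0 (d=0.6, y=0.0), so the prediction is
# (1.0/0.4 + 0.0/0.6) / (1/0.4 + 1/0.6) = 2.5 / (2.5 + 5/3) = 0.6.
w1, w2 = 1 / 0.4, 1 / 0.6
print((w1 * 1.0 + w2 * 0.0) / (w1 + w2))
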
class RadiusNeighborsRegressor(RadiusNeighborsMixin, RegressorMixin, NeighborsBase):
    """Regression based on neighbors within a fixed radius.

    The target is predicted by local interpolation of the targets
    associated with the nearest neighbors in the training set.

    Read more in the :ref:`User Guide <regression>`.

    .. versionadded:: 0.9

    Parameters
    ----------
    radius : float, default=1.0
        Range of parameter space to use by default for :meth:`radius_neighbors`
        queries.

    weights : {'uniform', 'distance'}, callable or None, default='uniform'
        Weight function used in prediction. Possible values:

        - 'uniform' : uniform weights. All points in each neighborhood
          are weighted equally.
        - 'distance' : weight points by the inverse of their distance.
          In this case, closer neighbors of a query point will have a
          greater influence than neighbors which are further away.
        - [callable] : a user-defined function which accepts an
          array of distances, and returns an array of the same shape
          containing the weights.

        Uniform weights are used by default.

    algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
        Algorithm used to compute the nearest neighbors:

        - 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
        - 'brute' will use a brute-force search.
        - 'auto' will attempt to decide the most appropriate algorithm
          based on the values passed to :meth:`fit` method.

        Note: fitting on sparse input will override the setting of
        this parameter, using brute force.

    leaf_size : int, default=30
        Leaf size passed to BallTree or KDTree. This can affect the
        speed of the construction and query, as well as the memory
        required to store the tree. The optimal value depends on the
        nature of the problem.

    p : float, default=2
        Power parameter for the Minkowski metric. When p = 1, this is
        equivalent to using manhattan_distance (l1), and euclidean_distance
        (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.

    metric : str or callable, default='minkowski'
        Metric to use for distance computation. Default is "minkowski", which
        results in the standard Euclidean distance when p = 2. See the
        documentation of `scipy.spatial.distance
        <https://docs.scipy.org/doc/scipy/reference/spatial.distance.html>`_ and
        the metrics listed in
        :class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric
        values.

        If metric is "precomputed", X is assumed to be a distance matrix and
        must be square during fit. X may be a :term:`sparse graph`, in which
        case only "nonzero" elements may be considered neighbors.

        If metric is a callable function, it takes two arrays representing 1D
        vectors as inputs and must return one value indicating the distance
        between those vectors. This works for Scipy's metrics, but is less
        efficient than passing the metric name as a string.

    metric_params : dict, default=None
        Additional keyword arguments for the metric function.

    n_jobs : int, default=None
        The number of parallel jobs to run for neighbors search.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.

    Attributes
    ----------
    effective_metric_ : str or callable
        The distance metric to use. It will be the same as the `metric`
        parameter or a synonym of it, e.g. 'euclidean' if the `metric`
        parameter is set to 'minkowski' and the `p` parameter to 2.

    effective_metric_params_ : dict
        Additional keyword arguments for the metric function. For most metrics
        this will be the same as the `metric_params` parameter, but it may
        also contain the `p` parameter value if the `effective_metric_`
        attribute is set to 'minkowski'.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.
|
370 |
+
|
371 |
+
.. versionadded:: 1.0
|
372 |
+
|
373 |
+
n_samples_fit_ : int
|
374 |
+
Number of samples in the fitted data.
|
375 |
+
|
376 |
+
See Also
|
377 |
+
--------
|
378 |
+
NearestNeighbors : Unsupervised learner for implementing neighbor searches.
|
379 |
+
KNeighborsRegressor : Regression based on k-nearest neighbors.
|
380 |
+
KNeighborsClassifier : Classifier based on the k-nearest neighbors.
|
381 |
+
RadiusNeighborsClassifier : Classifier based on neighbors within a given radius.
|
382 |
+
|
383 |
+
Notes
|
384 |
+
-----
|
385 |
+
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
|
386 |
+
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
|
387 |
+
|
388 |
+
https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
|
389 |
+
|
390 |
+
Examples
|
391 |
+
--------
|
392 |
+
>>> X = [[0], [1], [2], [3]]
|
393 |
+
>>> y = [0, 0, 1, 1]
|
394 |
+
>>> from sklearn.neighbors import RadiusNeighborsRegressor
|
395 |
+
>>> neigh = RadiusNeighborsRegressor(radius=1.0)
|
396 |
+
>>> neigh.fit(X, y)
|
397 |
+
RadiusNeighborsRegressor(...)
|
398 |
+
>>> print(neigh.predict([[1.5]]))
|
399 |
+
[0.5]
|
400 |
+
"""

    _parameter_constraints: dict = {
        **NeighborsBase._parameter_constraints,
        "weights": [StrOptions({"uniform", "distance"}), callable, None],
    }
    _parameter_constraints.pop("n_neighbors")

    def __init__(
        self,
        radius=1.0,
        *,
        weights="uniform",
        algorithm="auto",
        leaf_size=30,
        p=2,
        metric="minkowski",
        metric_params=None,
        n_jobs=None,
    ):
        super().__init__(
            radius=radius,
            algorithm=algorithm,
            leaf_size=leaf_size,
            p=p,
            metric=metric,
            metric_params=metric_params,
            n_jobs=n_jobs,
        )
        self.weights = weights

    @_fit_context(
        # RadiusNeighborsRegressor.metric is not validated yet
        prefer_skip_nested_validation=False
    )
    def fit(self, X, y):
        """Fit the radius neighbors regressor from the training dataset.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
                (n_samples, n_samples) if metric='precomputed'
            Training data.

        y : {array-like, sparse matrix} of shape (n_samples,) or \
                (n_samples, n_outputs)
            Target values.

        Returns
        -------
        self : RadiusNeighborsRegressor
            The fitted radius neighbors regressor.
        """
        return self._fit(X, y)

    def predict(self, X):
        """Predict the target for the provided data.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_queries, n_features), \
                or (n_queries, n_indexed) if metric == 'precomputed'
            Test samples.

        Returns
        -------
        y : ndarray of shape (n_queries,) or (n_queries, n_outputs), \
                dtype=double
            Target values.
        """
        neigh_dist, neigh_ind = self.radius_neighbors(X)

        weights = _get_weights(neigh_dist, self.weights)

        _y = self._y
        if _y.ndim == 1:
            _y = _y.reshape((-1, 1))

        empty_obs = np.full_like(_y[0], np.nan)

        if weights is None:
            y_pred = np.array(
                [
                    np.mean(_y[ind, :], axis=0) if len(ind) else empty_obs
                    for (i, ind) in enumerate(neigh_ind)
                ]
            )

        else:
            y_pred = np.array(
                [
                    (
                        np.average(_y[ind, :], axis=0, weights=weights[i])
                        if len(ind)
                        else empty_obs
                    )
                    for (i, ind) in enumerate(neigh_ind)
                ]
            )

        if np.any(np.isnan(y_pred)):
            empty_warning_msg = (
                "One or more samples have no neighbors "
                "within specified radius; predicting NaN."
            )
            warnings.warn(empty_warning_msg)

        if self._y.ndim == 1:
            y_pred = y_pred.ravel()

        return y_pred
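
# A minimal sketch, assuming scikit-learn is installed, of the empty-
# neighborhood behavior documented above: a query with no training sample
# within ``radius`` is predicted as NaN, and ``predict`` warns about it.
# The data below is illustrative only, not part of the upstream module.
if __name__ == "__main__":
    import numpy as np
    from sklearn.neighbors import RadiusNeighborsRegressor

    reg = RadiusNeighborsRegressor(radius=0.5)
    reg.fit([[0.0], [1.0]], [0.0, 1.0])

    # [0.25] lies within 0.5 of [0.0]; [10.0] has no neighbor in range.
    pred = reg.predict([[0.25], [10.0]])  # emits the "no neighbors" warning
    assert np.isclose(pred[0], 0.0) and np.isnan(pred[1])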
venv/lib/python3.10/site-packages/sklearn/neighbors/_unsupervised.py
ADDED
@@ -0,0 +1,175 @@
"""Unsupervised nearest neighbors learner"""
from ..base import _fit_context
from ._base import KNeighborsMixin, NeighborsBase, RadiusNeighborsMixin


class NearestNeighbors(KNeighborsMixin, RadiusNeighborsMixin, NeighborsBase):
    """Unsupervised learner for implementing neighbor searches.

    Read more in the :ref:`User Guide <unsupervised_neighbors>`.

    .. versionadded:: 0.9

    Parameters
    ----------
    n_neighbors : int, default=5
        Number of neighbors to use by default for :meth:`kneighbors` queries.

    radius : float, default=1.0
        Range of parameter space to use by default for :meth:`radius_neighbors`
        queries.

    algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
        Algorithm used to compute the nearest neighbors:

        - 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
        - 'brute' will use a brute-force search.
        - 'auto' will attempt to decide the most appropriate algorithm
          based on the values passed to the :meth:`fit` method.

        Note: fitting on sparse input will override the setting of
        this parameter, using brute force.

    leaf_size : int, default=30
        Leaf size passed to BallTree or KDTree. This can affect the
        speed of the construction and query, as well as the memory
        required to store the tree. The optimal value depends on the
        nature of the problem.

    metric : str or callable, default='minkowski'
        Metric to use for distance computation. Default is "minkowski", which
        results in the standard Euclidean distance when p = 2. See the
        documentation of `scipy.spatial.distance
        <https://docs.scipy.org/doc/scipy/reference/spatial.distance.html>`_ and
        the metrics listed in
        :func:`~sklearn.metrics.pairwise.distance_metrics` for valid metric
        values.

        If metric is "precomputed", X is assumed to be a distance matrix and
        must be square during fit. X may be a :term:`sparse graph`, in which
        case only "nonzero" elements may be considered neighbors.

        If metric is a callable function, it takes two arrays representing 1D
        vectors as inputs and must return one value indicating the distance
        between those vectors. This works for Scipy's metrics, but is less
        efficient than passing the metric name as a string.

    p : float (positive), default=2
        Parameter for the Minkowski metric from
        sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is
        equivalent to using manhattan_distance (l1), and euclidean_distance
        (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.

    metric_params : dict, default=None
        Additional keyword arguments for the metric function.

    n_jobs : int, default=None
        The number of parallel jobs to run for neighbors search.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.

    Attributes
    ----------
    effective_metric_ : str
        Metric used to compute distances to neighbors.

    effective_metric_params_ : dict
        Parameters for the metric used to compute distances to neighbors.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    n_samples_fit_ : int
        Number of samples in the fitted data.

    See Also
    --------
    KNeighborsClassifier : Classifier implementing the k-nearest neighbors
        vote.
    RadiusNeighborsClassifier : Classifier implementing a vote among neighbors
        within a given radius.
    KNeighborsRegressor : Regression based on k-nearest neighbors.
    RadiusNeighborsRegressor : Regression based on neighbors within a fixed
        radius.
    BallTree : Space partitioning data structure for organizing points in a
        multi-dimensional space, used for nearest neighbor search.

    Notes
    -----
    See :ref:`Nearest Neighbors <neighbors>` in the online documentation
    for a discussion of the choice of ``algorithm`` and ``leaf_size``.

    https://en.wikipedia.org/wiki/K-nearest_neighbors_algorithm

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.neighbors import NearestNeighbors
    >>> samples = [[0, 0, 2], [1, 0, 0], [0, 0, 1]]
    >>> neigh = NearestNeighbors(n_neighbors=2, radius=0.4)
    >>> neigh.fit(samples)
    NearestNeighbors(...)
    >>> neigh.kneighbors([[0, 0, 1.3]], 2, return_distance=False)
    array([[2, 0]]...)
    >>> nbrs = neigh.radius_neighbors(
    ...     [[0, 0, 1.3]], 0.4, return_distance=False
    ... )
    >>> np.asarray(nbrs[0][0])
    array(2)
    """

    def __init__(
        self,
        *,
        n_neighbors=5,
        radius=1.0,
        algorithm="auto",
        leaf_size=30,
        metric="minkowski",
        p=2,
        metric_params=None,
        n_jobs=None,
    ):
        super().__init__(
            n_neighbors=n_neighbors,
            radius=radius,
            algorithm=algorithm,
            leaf_size=leaf_size,
            metric=metric,
            p=p,
            metric_params=metric_params,
            n_jobs=n_jobs,
        )

    @_fit_context(
        # NearestNeighbors.metric is not validated yet
        prefer_skip_nested_validation=False
    )
    def fit(self, X, y=None):
        """Fit the nearest neighbors estimator from the training dataset.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
                (n_samples, n_samples) if metric='precomputed'
            Training data.

        y : Ignored
            Not used, present for API consistency by convention.

        Returns
        -------
        self : NearestNeighbors
            The fitted nearest neighbors estimator.
        """
        return self._fit(X)
venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (34.3 kB)

venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_arpack.cpython-310.pyc
ADDED
Binary file (1.34 kB)

venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_array_api.cpython-310.pyc
ADDED
Binary file (15.9 kB)

venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_available_if.cpython-310.pyc
ADDED
Binary file (3.18 kB)

venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_bunch.cpython-310.pyc
ADDED
Binary file (2.18 kB)

venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_encode.cpython-310.pyc
ADDED
Binary file (10.2 kB)

venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_estimator_html_repr.cpython-310.pyc
ADDED
Binary file (15 kB)

venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_joblib.cpython-310.pyc
ADDED
Binary file (669 Bytes)

venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_mask.cpython-310.pyc
ADDED
Binary file (1.66 kB)

venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_metadata_requests.cpython-310.pyc
ADDED
Binary file (47.5 kB)

venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_mocking.cpython-310.pyc
ADDED
Binary file (13.5 kB)

venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_param_validation.cpython-310.pyc
ADDED
Binary file (28.5 kB)

venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_plotting.cpython-310.pyc
ADDED
Binary file (3.49 kB)

venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_pprint.cpython-310.pyc
ADDED
Binary file (11.5 kB)

venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_response.cpython-310.pyc
ADDED
Binary file (10 kB)

venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_set_output.cpython-310.pyc
ADDED
Binary file (12.7 kB)

venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_show_versions.cpython-310.pyc
ADDED
Binary file (2.39 kB)

venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_tags.cpython-310.pyc
ADDED
Binary file (2.01 kB)

venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_testing.cpython-310.pyc
ADDED
Binary file (33.2 kB)

venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/class_weight.cpython-310.pyc
ADDED
Binary file (6.23 kB)

venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/deprecation.cpython-310.pyc
ADDED
Binary file (3.44 kB)

venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/discovery.cpython-310.pyc
ADDED
Binary file (7.99 kB)

venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/estimator_checks.cpython-310.pyc
ADDED
Binary file (104 kB)

venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/extmath.cpython-310.pyc
ADDED
Binary file (37.4 kB)

venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/fixes.cpython-310.pyc
ADDED
Binary file (9.13 kB)

venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/graph.cpython-310.pyc
ADDED
Binary file (4.87 kB)

venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/metadata_routing.cpython-310.pyc
ADDED
Binary file (824 Bytes)

venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/metaestimators.cpython-310.pyc
ADDED
Binary file (5.12 kB)

venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/multiclass.cpython-310.pyc
ADDED
Binary file (14.4 kB)

venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/optimize.cpython-310.pyc
ADDED
Binary file (6.48 kB)

venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/parallel.cpython-310.pyc
ADDED
Binary file (4.48 kB)

venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/random.cpython-310.pyc
ADDED
Binary file (2.69 kB)