applied-ai-018 committed
Commit c520ca7 · verified · 1 Parent(s): 194e272

Add files using upload-large-folder tool
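For reference, the "upload-large-folder" tool named in the commit message is huggingface_hub's resumable large-folder uploader, which splits a big directory into multiple commits like this one. A minimal sketch of how such a commit can be produced, assuming a recent huggingface_hub and a configured token (the repo id below is a hypothetical placeholder, not the actual target of this commit):

    # Sketch only: pip install huggingface_hub, then authenticate first.
    from huggingface_hub import HfApi

    api = HfApi()
    api.upload_large_folder(
        repo_id="your-org/your-repo",   # hypothetical placeholder
        repo_type="model",
        folder_path="env-llmeval",      # local folder mirrored in this commit
    )

The equivalent CLI form is `huggingface-cli upload-large-folder`; both resume interrupted uploads and commit files in batches, which is why a single logical upload can appear as several commits.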

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. env-llmeval/lib/python3.10/site-packages/sklearn/datasets/data/__init__.py +0 -0
  2. env-llmeval/lib/python3.10/site-packages/sklearn/datasets/data/__pycache__/__init__.cpython-310.pyc +0 -0
  3. env-llmeval/lib/python3.10/site-packages/sklearn/datasets/data/boston_house_prices.csv +508 -0
  4. env-llmeval/lib/python3.10/site-packages/sklearn/datasets/data/breast_cancer.csv +0 -0
  5. env-llmeval/lib/python3.10/site-packages/sklearn/datasets/data/iris.csv +151 -0
  6. env-llmeval/lib/python3.10/site-packages/sklearn/datasets/data/linnerud_exercise.csv +21 -0
  7. env-llmeval/lib/python3.10/site-packages/sklearn/datasets/data/wine_data.csv +179 -0
  8. env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1119/api-v1-jdf-1119.json.gz +3 -0
  9. env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1119/data-v1-dl-54002.arff.gz +3 -0
  10. env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40589/__pycache__/__init__.cpython-310.pyc +0 -0
  11. env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40945/__pycache__/__init__.cpython-310.pyc +0 -0
  12. env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40945/api-v1-jd-40945.json.gz +3 -0
  13. env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40945/api-v1-jdq-40945.json.gz +3 -0
  14. env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40945/data-v1-dl-16826755.arff.gz +3 -0
  15. env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_42074/__init__.py +0 -0
  16. env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_42074/__pycache__/__init__.cpython-310.pyc +0 -0
  17. env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_42585/api-v1-jd-42585.json.gz +3 -0
  18. env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_42585/api-v1-jdf-42585.json.gz +3 -0
  19. env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_42585/api-v1-jdq-42585.json.gz +3 -0
  20. env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_42585/data-v1-dl-21854866.arff.gz +3 -0
  21. env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_61/api-v1-jd-61.json.gz +3 -0
  22. env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_61/api-v1-jdf-61.json.gz +3 -0
  23. env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_61/api-v1-jdl-dn-iris-l-2-dv-1.json.gz +3 -0
  24. env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_61/api-v1-jdl-dn-iris-l-2-s-act-.json.gz +3 -0
  25. env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_61/api-v1-jdq-61.json.gz +3 -0
  26. env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_61/data-v1-dl-61.arff.gz +3 -0
  27. env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_62/__init__.py +0 -0
  28. env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_62/__pycache__/__init__.cpython-310.pyc +0 -0
  29. env-llmeval/lib/python3.10/site-packages/sklearn/manifold/__init__.py +21 -0
  30. env-llmeval/lib/python3.10/site-packages/sklearn/manifold/__pycache__/__init__.cpython-310.pyc +0 -0
  31. env-llmeval/lib/python3.10/site-packages/sklearn/manifold/__pycache__/_isomap.cpython-310.pyc +0 -0
  32. env-llmeval/lib/python3.10/site-packages/sklearn/manifold/__pycache__/_locally_linear.cpython-310.pyc +0 -0
  33. env-llmeval/lib/python3.10/site-packages/sklearn/manifold/__pycache__/_mds.cpython-310.pyc +0 -0
  34. env-llmeval/lib/python3.10/site-packages/sklearn/manifold/__pycache__/_spectral_embedding.cpython-310.pyc +0 -0
  35. env-llmeval/lib/python3.10/site-packages/sklearn/manifold/__pycache__/_t_sne.cpython-310.pyc +0 -0
  36. env-llmeval/lib/python3.10/site-packages/sklearn/manifold/_barnes_hut_tsne.cpython-310-x86_64-linux-gnu.so +0 -0
  37. env-llmeval/lib/python3.10/site-packages/sklearn/manifold/_isomap.py +438 -0
  38. env-llmeval/lib/python3.10/site-packages/sklearn/manifold/_locally_linear.py +841 -0
  39. env-llmeval/lib/python3.10/site-packages/sklearn/manifold/_mds.py +653 -0
  40. env-llmeval/lib/python3.10/site-packages/sklearn/manifold/_spectral_embedding.py +749 -0
  41. env-llmeval/lib/python3.10/site-packages/sklearn/manifold/_t_sne.py +1174 -0
  42. env-llmeval/lib/python3.10/site-packages/sklearn/manifold/_utils.cpython-310-x86_64-linux-gnu.so +0 -0
  43. env-llmeval/lib/python3.10/site-packages/sklearn/manifold/tests/__init__.py +0 -0
  44. env-llmeval/lib/python3.10/site-packages/sklearn/manifold/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  45. env-llmeval/lib/python3.10/site-packages/sklearn/manifold/tests/__pycache__/test_isomap.cpython-310.pyc +0 -0
  46. env-llmeval/lib/python3.10/site-packages/sklearn/manifold/tests/__pycache__/test_locally_linear.cpython-310.pyc +0 -0
  47. env-llmeval/lib/python3.10/site-packages/sklearn/manifold/tests/__pycache__/test_mds.cpython-310.pyc +0 -0
  48. env-llmeval/lib/python3.10/site-packages/sklearn/manifold/tests/__pycache__/test_spectral_embedding.cpython-310.pyc +0 -0
  49. env-llmeval/lib/python3.10/site-packages/sklearn/manifold/tests/__pycache__/test_t_sne.cpython-310.pyc +0 -0
  50. env-llmeval/lib/python3.10/site-packages/sklearn/manifold/tests/test_isomap.py +348 -0
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/data/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/data/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (186 Bytes).
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/data/boston_house_prices.csv ADDED
@@ -0,0 +1,508 @@
+ 506,13,,,,,,,,,,,,
+ "CRIM","ZN","INDUS","CHAS","NOX","RM","AGE","DIS","RAD","TAX","PTRATIO","B","LSTAT","MEDV"
+ 0.00632,18,2.31,0,0.538,6.575,65.2,4.09,1,296,15.3,396.9,4.98,24
+ 0.02731,0,7.07,0,0.469,6.421,78.9,4.9671,2,242,17.8,396.9,9.14,21.6
+ 0.02729,0,7.07,0,0.469,7.185,61.1,4.9671,2,242,17.8,392.83,4.03,34.7
+ 0.03237,0,2.18,0,0.458,6.998,45.8,6.0622,3,222,18.7,394.63,2.94,33.4
+ 0.06905,0,2.18,0,0.458,7.147,54.2,6.0622,3,222,18.7,396.9,5.33,36.2
+ 0.02985,0,2.18,0,0.458,6.43,58.7,6.0622,3,222,18.7,394.12,5.21,28.7
+ 0.08829,12.5,7.87,0,0.524,6.012,66.6,5.5605,5,311,15.2,395.6,12.43,22.9
+ 0.14455,12.5,7.87,0,0.524,6.172,96.1,5.9505,5,311,15.2,396.9,19.15,27.1
+ 0.21124,12.5,7.87,0,0.524,5.631,100,6.0821,5,311,15.2,386.63,29.93,16.5
+ 0.17004,12.5,7.87,0,0.524,6.004,85.9,6.5921,5,311,15.2,386.71,17.1,18.9
+ 0.22489,12.5,7.87,0,0.524,6.377,94.3,6.3467,5,311,15.2,392.52,20.45,15
+ 0.11747,12.5,7.87,0,0.524,6.009,82.9,6.2267,5,311,15.2,396.9,13.27,18.9
+ 0.09378,12.5,7.87,0,0.524,5.889,39,5.4509,5,311,15.2,390.5,15.71,21.7
+ 0.62976,0,8.14,0,0.538,5.949,61.8,4.7075,4,307,21,396.9,8.26,20.4
+ 0.63796,0,8.14,0,0.538,6.096,84.5,4.4619,4,307,21,380.02,10.26,18.2
+ 0.62739,0,8.14,0,0.538,5.834,56.5,4.4986,4,307,21,395.62,8.47,19.9
+ 1.05393,0,8.14,0,0.538,5.935,29.3,4.4986,4,307,21,386.85,6.58,23.1
+ 0.7842,0,8.14,0,0.538,5.99,81.7,4.2579,4,307,21,386.75,14.67,17.5
+ 0.80271,0,8.14,0,0.538,5.456,36.6,3.7965,4,307,21,288.99,11.69,20.2
+ 0.7258,0,8.14,0,0.538,5.727,69.5,3.7965,4,307,21,390.95,11.28,18.2
+ 1.25179,0,8.14,0,0.538,5.57,98.1,3.7979,4,307,21,376.57,21.02,13.6
+ 0.85204,0,8.14,0,0.538,5.965,89.2,4.0123,4,307,21,392.53,13.83,19.6
+ 1.23247,0,8.14,0,0.538,6.142,91.7,3.9769,4,307,21,396.9,18.72,15.2
+ 0.98843,0,8.14,0,0.538,5.813,100,4.0952,4,307,21,394.54,19.88,14.5
+ 0.75026,0,8.14,0,0.538,5.924,94.1,4.3996,4,307,21,394.33,16.3,15.6
+ 0.84054,0,8.14,0,0.538,5.599,85.7,4.4546,4,307,21,303.42,16.51,13.9
+ 0.67191,0,8.14,0,0.538,5.813,90.3,4.682,4,307,21,376.88,14.81,16.6
+ 0.95577,0,8.14,0,0.538,6.047,88.8,4.4534,4,307,21,306.38,17.28,14.8
+ 0.77299,0,8.14,0,0.538,6.495,94.4,4.4547,4,307,21,387.94,12.8,18.4
+ 1.00245,0,8.14,0,0.538,6.674,87.3,4.239,4,307,21,380.23,11.98,21
+ 1.13081,0,8.14,0,0.538,5.713,94.1,4.233,4,307,21,360.17,22.6,12.7
+ 1.35472,0,8.14,0,0.538,6.072,100,4.175,4,307,21,376.73,13.04,14.5
+ 1.38799,0,8.14,0,0.538,5.95,82,3.99,4,307,21,232.6,27.71,13.2
+ 1.15172,0,8.14,0,0.538,5.701,95,3.7872,4,307,21,358.77,18.35,13.1
+ 1.61282,0,8.14,0,0.538,6.096,96.9,3.7598,4,307,21,248.31,20.34,13.5
+ 0.06417,0,5.96,0,0.499,5.933,68.2,3.3603,5,279,19.2,396.9,9.68,18.9
+ 0.09744,0,5.96,0,0.499,5.841,61.4,3.3779,5,279,19.2,377.56,11.41,20
+ 0.08014,0,5.96,0,0.499,5.85,41.5,3.9342,5,279,19.2,396.9,8.77,21
+ 0.17505,0,5.96,0,0.499,5.966,30.2,3.8473,5,279,19.2,393.43,10.13,24.7
+ 0.02763,75,2.95,0,0.428,6.595,21.8,5.4011,3,252,18.3,395.63,4.32,30.8
+ 0.03359,75,2.95,0,0.428,7.024,15.8,5.4011,3,252,18.3,395.62,1.98,34.9
+ 0.12744,0,6.91,0,0.448,6.77,2.9,5.7209,3,233,17.9,385.41,4.84,26.6
+ 0.1415,0,6.91,0,0.448,6.169,6.6,5.7209,3,233,17.9,383.37,5.81,25.3
+ 0.15936,0,6.91,0,0.448,6.211,6.5,5.7209,3,233,17.9,394.46,7.44,24.7
+ 0.12269,0,6.91,0,0.448,6.069,40,5.7209,3,233,17.9,389.39,9.55,21.2
+ 0.17142,0,6.91,0,0.448,5.682,33.8,5.1004,3,233,17.9,396.9,10.21,19.3
+ 0.18836,0,6.91,0,0.448,5.786,33.3,5.1004,3,233,17.9,396.9,14.15,20
+ 0.22927,0,6.91,0,0.448,6.03,85.5,5.6894,3,233,17.9,392.74,18.8,16.6
+ 0.25387,0,6.91,0,0.448,5.399,95.3,5.87,3,233,17.9,396.9,30.81,14.4
+ 0.21977,0,6.91,0,0.448,5.602,62,6.0877,3,233,17.9,396.9,16.2,19.4
+ 0.08873,21,5.64,0,0.439,5.963,45.7,6.8147,4,243,16.8,395.56,13.45,19.7
+ 0.04337,21,5.64,0,0.439,6.115,63,6.8147,4,243,16.8,393.97,9.43,20.5
+ 0.0536,21,5.64,0,0.439,6.511,21.1,6.8147,4,243,16.8,396.9,5.28,25
+ 0.04981,21,5.64,0,0.439,5.998,21.4,6.8147,4,243,16.8,396.9,8.43,23.4
+ 0.0136,75,4,0,0.41,5.888,47.6,7.3197,3,469,21.1,396.9,14.8,18.9
+ 0.01311,90,1.22,0,0.403,7.249,21.9,8.6966,5,226,17.9,395.93,4.81,35.4
+ 0.02055,85,0.74,0,0.41,6.383,35.7,9.1876,2,313,17.3,396.9,5.77,24.7
+ 0.01432,100,1.32,0,0.411,6.816,40.5,8.3248,5,256,15.1,392.9,3.95,31.6
+ 0.15445,25,5.13,0,0.453,6.145,29.2,7.8148,8,284,19.7,390.68,6.86,23.3
+ 0.10328,25,5.13,0,0.453,5.927,47.2,6.932,8,284,19.7,396.9,9.22,19.6
+ 0.14932,25,5.13,0,0.453,5.741,66.2,7.2254,8,284,19.7,395.11,13.15,18.7
+ 0.17171,25,5.13,0,0.453,5.966,93.4,6.8185,8,284,19.7,378.08,14.44,16
+ 0.11027,25,5.13,0,0.453,6.456,67.8,7.2255,8,284,19.7,396.9,6.73,22.2
+ 0.1265,25,5.13,0,0.453,6.762,43.4,7.9809,8,284,19.7,395.58,9.5,25
+ 0.01951,17.5,1.38,0,0.4161,7.104,59.5,9.2229,3,216,18.6,393.24,8.05,33
+ 0.03584,80,3.37,0,0.398,6.29,17.8,6.6115,4,337,16.1,396.9,4.67,23.5
+ 0.04379,80,3.37,0,0.398,5.787,31.1,6.6115,4,337,16.1,396.9,10.24,19.4
+ 0.05789,12.5,6.07,0,0.409,5.878,21.4,6.498,4,345,18.9,396.21,8.1,22
+ 0.13554,12.5,6.07,0,0.409,5.594,36.8,6.498,4,345,18.9,396.9,13.09,17.4
+ 0.12816,12.5,6.07,0,0.409,5.885,33,6.498,4,345,18.9,396.9,8.79,20.9
+ 0.08826,0,10.81,0,0.413,6.417,6.6,5.2873,4,305,19.2,383.73,6.72,24.2
+ 0.15876,0,10.81,0,0.413,5.961,17.5,5.2873,4,305,19.2,376.94,9.88,21.7
+ 0.09164,0,10.81,0,0.413,6.065,7.8,5.2873,4,305,19.2,390.91,5.52,22.8
+ 0.19539,0,10.81,0,0.413,6.245,6.2,5.2873,4,305,19.2,377.17,7.54,23.4
+ 0.07896,0,12.83,0,0.437,6.273,6,4.2515,5,398,18.7,394.92,6.78,24.1
+ 0.09512,0,12.83,0,0.437,6.286,45,4.5026,5,398,18.7,383.23,8.94,21.4
+ 0.10153,0,12.83,0,0.437,6.279,74.5,4.0522,5,398,18.7,373.66,11.97,20
+ 0.08707,0,12.83,0,0.437,6.14,45.8,4.0905,5,398,18.7,386.96,10.27,20.8
+ 0.05646,0,12.83,0,0.437,6.232,53.7,5.0141,5,398,18.7,386.4,12.34,21.2
+ 0.08387,0,12.83,0,0.437,5.874,36.6,4.5026,5,398,18.7,396.06,9.1,20.3
+ 0.04113,25,4.86,0,0.426,6.727,33.5,5.4007,4,281,19,396.9,5.29,28
+ 0.04462,25,4.86,0,0.426,6.619,70.4,5.4007,4,281,19,395.63,7.22,23.9
+ 0.03659,25,4.86,0,0.426,6.302,32.2,5.4007,4,281,19,396.9,6.72,24.8
+ 0.03551,25,4.86,0,0.426,6.167,46.7,5.4007,4,281,19,390.64,7.51,22.9
+ 0.05059,0,4.49,0,0.449,6.389,48,4.7794,3,247,18.5,396.9,9.62,23.9
+ 0.05735,0,4.49,0,0.449,6.63,56.1,4.4377,3,247,18.5,392.3,6.53,26.6
+ 0.05188,0,4.49,0,0.449,6.015,45.1,4.4272,3,247,18.5,395.99,12.86,22.5
+ 0.07151,0,4.49,0,0.449,6.121,56.8,3.7476,3,247,18.5,395.15,8.44,22.2
+ 0.0566,0,3.41,0,0.489,7.007,86.3,3.4217,2,270,17.8,396.9,5.5,23.6
+ 0.05302,0,3.41,0,0.489,7.079,63.1,3.4145,2,270,17.8,396.06,5.7,28.7
+ 0.04684,0,3.41,0,0.489,6.417,66.1,3.0923,2,270,17.8,392.18,8.81,22.6
+ 0.03932,0,3.41,0,0.489,6.405,73.9,3.0921,2,270,17.8,393.55,8.2,22
+ 0.04203,28,15.04,0,0.464,6.442,53.6,3.6659,4,270,18.2,395.01,8.16,22.9
+ 0.02875,28,15.04,0,0.464,6.211,28.9,3.6659,4,270,18.2,396.33,6.21,25
+ 0.04294,28,15.04,0,0.464,6.249,77.3,3.615,4,270,18.2,396.9,10.59,20.6
+ 0.12204,0,2.89,0,0.445,6.625,57.8,3.4952,2,276,18,357.98,6.65,28.4
+ 0.11504,0,2.89,0,0.445,6.163,69.6,3.4952,2,276,18,391.83,11.34,21.4
+ 0.12083,0,2.89,0,0.445,8.069,76,3.4952,2,276,18,396.9,4.21,38.7
+ 0.08187,0,2.89,0,0.445,7.82,36.9,3.4952,2,276,18,393.53,3.57,43.8
+ 0.0686,0,2.89,0,0.445,7.416,62.5,3.4952,2,276,18,396.9,6.19,33.2
+ 0.14866,0,8.56,0,0.52,6.727,79.9,2.7778,5,384,20.9,394.76,9.42,27.5
+ 0.11432,0,8.56,0,0.52,6.781,71.3,2.8561,5,384,20.9,395.58,7.67,26.5
+ 0.22876,0,8.56,0,0.52,6.405,85.4,2.7147,5,384,20.9,70.8,10.63,18.6
+ 0.21161,0,8.56,0,0.52,6.137,87.4,2.7147,5,384,20.9,394.47,13.44,19.3
+ 0.1396,0,8.56,0,0.52,6.167,90,2.421,5,384,20.9,392.69,12.33,20.1
+ 0.13262,0,8.56,0,0.52,5.851,96.7,2.1069,5,384,20.9,394.05,16.47,19.5
+ 0.1712,0,8.56,0,0.52,5.836,91.9,2.211,5,384,20.9,395.67,18.66,19.5
+ 0.13117,0,8.56,0,0.52,6.127,85.2,2.1224,5,384,20.9,387.69,14.09,20.4
+ 0.12802,0,8.56,0,0.52,6.474,97.1,2.4329,5,384,20.9,395.24,12.27,19.8
+ 0.26363,0,8.56,0,0.52,6.229,91.2,2.5451,5,384,20.9,391.23,15.55,19.4
+ 0.10793,0,8.56,0,0.52,6.195,54.4,2.7778,5,384,20.9,393.49,13,21.7
+ 0.10084,0,10.01,0,0.547,6.715,81.6,2.6775,6,432,17.8,395.59,10.16,22.8
+ 0.12329,0,10.01,0,0.547,5.913,92.9,2.3534,6,432,17.8,394.95,16.21,18.8
+ 0.22212,0,10.01,0,0.547,6.092,95.4,2.548,6,432,17.8,396.9,17.09,18.7
+ 0.14231,0,10.01,0,0.547,6.254,84.2,2.2565,6,432,17.8,388.74,10.45,18.5
+ 0.17134,0,10.01,0,0.547,5.928,88.2,2.4631,6,432,17.8,344.91,15.76,18.3
+ 0.13158,0,10.01,0,0.547,6.176,72.5,2.7301,6,432,17.8,393.3,12.04,21.2
+ 0.15098,0,10.01,0,0.547,6.021,82.6,2.7474,6,432,17.8,394.51,10.3,19.2
+ 0.13058,0,10.01,0,0.547,5.872,73.1,2.4775,6,432,17.8,338.63,15.37,20.4
+ 0.14476,0,10.01,0,0.547,5.731,65.2,2.7592,6,432,17.8,391.5,13.61,19.3
+ 0.06899,0,25.65,0,0.581,5.87,69.7,2.2577,2,188,19.1,389.15,14.37,22
+ 0.07165,0,25.65,0,0.581,6.004,84.1,2.1974,2,188,19.1,377.67,14.27,20.3
+ 0.09299,0,25.65,0,0.581,5.961,92.9,2.0869,2,188,19.1,378.09,17.93,20.5
+ 0.15038,0,25.65,0,0.581,5.856,97,1.9444,2,188,19.1,370.31,25.41,17.3
+ 0.09849,0,25.65,0,0.581,5.879,95.8,2.0063,2,188,19.1,379.38,17.58,18.8
+ 0.16902,0,25.65,0,0.581,5.986,88.4,1.9929,2,188,19.1,385.02,14.81,21.4
+ 0.38735,0,25.65,0,0.581,5.613,95.6,1.7572,2,188,19.1,359.29,27.26,15.7
+ 0.25915,0,21.89,0,0.624,5.693,96,1.7883,4,437,21.2,392.11,17.19,16.2
+ 0.32543,0,21.89,0,0.624,6.431,98.8,1.8125,4,437,21.2,396.9,15.39,18
+ 0.88125,0,21.89,0,0.624,5.637,94.7,1.9799,4,437,21.2,396.9,18.34,14.3
+ 0.34006,0,21.89,0,0.624,6.458,98.9,2.1185,4,437,21.2,395.04,12.6,19.2
+ 1.19294,0,21.89,0,0.624,6.326,97.7,2.271,4,437,21.2,396.9,12.26,19.6
+ 0.59005,0,21.89,0,0.624,6.372,97.9,2.3274,4,437,21.2,385.76,11.12,23
+ 0.32982,0,21.89,0,0.624,5.822,95.4,2.4699,4,437,21.2,388.69,15.03,18.4
+ 0.97617,0,21.89,0,0.624,5.757,98.4,2.346,4,437,21.2,262.76,17.31,15.6
+ 0.55778,0,21.89,0,0.624,6.335,98.2,2.1107,4,437,21.2,394.67,16.96,18.1
+ 0.32264,0,21.89,0,0.624,5.942,93.5,1.9669,4,437,21.2,378.25,16.9,17.4
+ 0.35233,0,21.89,0,0.624,6.454,98.4,1.8498,4,437,21.2,394.08,14.59,17.1
+ 0.2498,0,21.89,0,0.624,5.857,98.2,1.6686,4,437,21.2,392.04,21.32,13.3
+ 0.54452,0,21.89,0,0.624,6.151,97.9,1.6687,4,437,21.2,396.9,18.46,17.8
+ 0.2909,0,21.89,0,0.624,6.174,93.6,1.6119,4,437,21.2,388.08,24.16,14
+ 1.62864,0,21.89,0,0.624,5.019,100,1.4394,4,437,21.2,396.9,34.41,14.4
+ 3.32105,0,19.58,1,0.871,5.403,100,1.3216,5,403,14.7,396.9,26.82,13.4
+ 4.0974,0,19.58,0,0.871,5.468,100,1.4118,5,403,14.7,396.9,26.42,15.6
+ 2.77974,0,19.58,0,0.871,4.903,97.8,1.3459,5,403,14.7,396.9,29.29,11.8
+ 2.37934,0,19.58,0,0.871,6.13,100,1.4191,5,403,14.7,172.91,27.8,13.8
+ 2.15505,0,19.58,0,0.871,5.628,100,1.5166,5,403,14.7,169.27,16.65,15.6
+ 2.36862,0,19.58,0,0.871,4.926,95.7,1.4608,5,403,14.7,391.71,29.53,14.6
+ 2.33099,0,19.58,0,0.871,5.186,93.8,1.5296,5,403,14.7,356.99,28.32,17.8
+ 2.73397,0,19.58,0,0.871,5.597,94.9,1.5257,5,403,14.7,351.85,21.45,15.4
+ 1.6566,0,19.58,0,0.871,6.122,97.3,1.618,5,403,14.7,372.8,14.1,21.5
+ 1.49632,0,19.58,0,0.871,5.404,100,1.5916,5,403,14.7,341.6,13.28,19.6
+ 1.12658,0,19.58,1,0.871,5.012,88,1.6102,5,403,14.7,343.28,12.12,15.3
+ 2.14918,0,19.58,0,0.871,5.709,98.5,1.6232,5,403,14.7,261.95,15.79,19.4
+ 1.41385,0,19.58,1,0.871,6.129,96,1.7494,5,403,14.7,321.02,15.12,17
+ 3.53501,0,19.58,1,0.871,6.152,82.6,1.7455,5,403,14.7,88.01,15.02,15.6
+ 2.44668,0,19.58,0,0.871,5.272,94,1.7364,5,403,14.7,88.63,16.14,13.1
+ 1.22358,0,19.58,0,0.605,6.943,97.4,1.8773,5,403,14.7,363.43,4.59,41.3
+ 1.34284,0,19.58,0,0.605,6.066,100,1.7573,5,403,14.7,353.89,6.43,24.3
+ 1.42502,0,19.58,0,0.871,6.51,100,1.7659,5,403,14.7,364.31,7.39,23.3
+ 1.27346,0,19.58,1,0.605,6.25,92.6,1.7984,5,403,14.7,338.92,5.5,27
+ 1.46336,0,19.58,0,0.605,7.489,90.8,1.9709,5,403,14.7,374.43,1.73,50
+ 1.83377,0,19.58,1,0.605,7.802,98.2,2.0407,5,403,14.7,389.61,1.92,50
+ 1.51902,0,19.58,1,0.605,8.375,93.9,2.162,5,403,14.7,388.45,3.32,50
+ 2.24236,0,19.58,0,0.605,5.854,91.8,2.422,5,403,14.7,395.11,11.64,22.7
+ 2.924,0,19.58,0,0.605,6.101,93,2.2834,5,403,14.7,240.16,9.81,25
+ 2.01019,0,19.58,0,0.605,7.929,96.2,2.0459,5,403,14.7,369.3,3.7,50
+ 1.80028,0,19.58,0,0.605,5.877,79.2,2.4259,5,403,14.7,227.61,12.14,23.8
+ 2.3004,0,19.58,0,0.605,6.319,96.1,2.1,5,403,14.7,297.09,11.1,23.8
+ 2.44953,0,19.58,0,0.605,6.402,95.2,2.2625,5,403,14.7,330.04,11.32,22.3
+ 1.20742,0,19.58,0,0.605,5.875,94.6,2.4259,5,403,14.7,292.29,14.43,17.4
+ 2.3139,0,19.58,0,0.605,5.88,97.3,2.3887,5,403,14.7,348.13,12.03,19.1
+ 0.13914,0,4.05,0,0.51,5.572,88.5,2.5961,5,296,16.6,396.9,14.69,23.1
+ 0.09178,0,4.05,0,0.51,6.416,84.1,2.6463,5,296,16.6,395.5,9.04,23.6
+ 0.08447,0,4.05,0,0.51,5.859,68.7,2.7019,5,296,16.6,393.23,9.64,22.6
+ 0.06664,0,4.05,0,0.51,6.546,33.1,3.1323,5,296,16.6,390.96,5.33,29.4
+ 0.07022,0,4.05,0,0.51,6.02,47.2,3.5549,5,296,16.6,393.23,10.11,23.2
+ 0.05425,0,4.05,0,0.51,6.315,73.4,3.3175,5,296,16.6,395.6,6.29,24.6
+ 0.06642,0,4.05,0,0.51,6.86,74.4,2.9153,5,296,16.6,391.27,6.92,29.9
+ 0.0578,0,2.46,0,0.488,6.98,58.4,2.829,3,193,17.8,396.9,5.04,37.2
+ 0.06588,0,2.46,0,0.488,7.765,83.3,2.741,3,193,17.8,395.56,7.56,39.8
+ 0.06888,0,2.46,0,0.488,6.144,62.2,2.5979,3,193,17.8,396.9,9.45,36.2
+ 0.09103,0,2.46,0,0.488,7.155,92.2,2.7006,3,193,17.8,394.12,4.82,37.9
+ 0.10008,0,2.46,0,0.488,6.563,95.6,2.847,3,193,17.8,396.9,5.68,32.5
+ 0.08308,0,2.46,0,0.488,5.604,89.8,2.9879,3,193,17.8,391,13.98,26.4
+ 0.06047,0,2.46,0,0.488,6.153,68.8,3.2797,3,193,17.8,387.11,13.15,29.6
+ 0.05602,0,2.46,0,0.488,7.831,53.6,3.1992,3,193,17.8,392.63,4.45,50
+ 0.07875,45,3.44,0,0.437,6.782,41.1,3.7886,5,398,15.2,393.87,6.68,32
+ 0.12579,45,3.44,0,0.437,6.556,29.1,4.5667,5,398,15.2,382.84,4.56,29.8
+ 0.0837,45,3.44,0,0.437,7.185,38.9,4.5667,5,398,15.2,396.9,5.39,34.9
+ 0.09068,45,3.44,0,0.437,6.951,21.5,6.4798,5,398,15.2,377.68,5.1,37
+ 0.06911,45,3.44,0,0.437,6.739,30.8,6.4798,5,398,15.2,389.71,4.69,30.5
+ 0.08664,45,3.44,0,0.437,7.178,26.3,6.4798,5,398,15.2,390.49,2.87,36.4
+ 0.02187,60,2.93,0,0.401,6.8,9.9,6.2196,1,265,15.6,393.37,5.03,31.1
+ 0.01439,60,2.93,0,0.401,6.604,18.8,6.2196,1,265,15.6,376.7,4.38,29.1
+ 0.01381,80,0.46,0,0.422,7.875,32,5.6484,4,255,14.4,394.23,2.97,50
+ 0.04011,80,1.52,0,0.404,7.287,34.1,7.309,2,329,12.6,396.9,4.08,33.3
+ 0.04666,80,1.52,0,0.404,7.107,36.6,7.309,2,329,12.6,354.31,8.61,30.3
+ 0.03768,80,1.52,0,0.404,7.274,38.3,7.309,2,329,12.6,392.2,6.62,34.6
+ 0.0315,95,1.47,0,0.403,6.975,15.3,7.6534,3,402,17,396.9,4.56,34.9
+ 0.01778,95,1.47,0,0.403,7.135,13.9,7.6534,3,402,17,384.3,4.45,32.9
+ 0.03445,82.5,2.03,0,0.415,6.162,38.4,6.27,2,348,14.7,393.77,7.43,24.1
+ 0.02177,82.5,2.03,0,0.415,7.61,15.7,6.27,2,348,14.7,395.38,3.11,42.3
+ 0.0351,95,2.68,0,0.4161,7.853,33.2,5.118,4,224,14.7,392.78,3.81,48.5
+ 0.02009,95,2.68,0,0.4161,8.034,31.9,5.118,4,224,14.7,390.55,2.88,50
+ 0.13642,0,10.59,0,0.489,5.891,22.3,3.9454,4,277,18.6,396.9,10.87,22.6
+ 0.22969,0,10.59,0,0.489,6.326,52.5,4.3549,4,277,18.6,394.87,10.97,24.4
+ 0.25199,0,10.59,0,0.489,5.783,72.7,4.3549,4,277,18.6,389.43,18.06,22.5
+ 0.13587,0,10.59,1,0.489,6.064,59.1,4.2392,4,277,18.6,381.32,14.66,24.4
+ 0.43571,0,10.59,1,0.489,5.344,100,3.875,4,277,18.6,396.9,23.09,20
+ 0.17446,0,10.59,1,0.489,5.96,92.1,3.8771,4,277,18.6,393.25,17.27,21.7
+ 0.37578,0,10.59,1,0.489,5.404,88.6,3.665,4,277,18.6,395.24,23.98,19.3
+ 0.21719,0,10.59,1,0.489,5.807,53.8,3.6526,4,277,18.6,390.94,16.03,22.4
+ 0.14052,0,10.59,0,0.489,6.375,32.3,3.9454,4,277,18.6,385.81,9.38,28.1
+ 0.28955,0,10.59,0,0.489,5.412,9.8,3.5875,4,277,18.6,348.93,29.55,23.7
+ 0.19802,0,10.59,0,0.489,6.182,42.4,3.9454,4,277,18.6,393.63,9.47,25
+ 0.0456,0,13.89,1,0.55,5.888,56,3.1121,5,276,16.4,392.8,13.51,23.3
+ 0.07013,0,13.89,0,0.55,6.642,85.1,3.4211,5,276,16.4,392.78,9.69,28.7
+ 0.11069,0,13.89,1,0.55,5.951,93.8,2.8893,5,276,16.4,396.9,17.92,21.5
+ 0.11425,0,13.89,1,0.55,6.373,92.4,3.3633,5,276,16.4,393.74,10.5,23
+ 0.35809,0,6.2,1,0.507,6.951,88.5,2.8617,8,307,17.4,391.7,9.71,26.7
+ 0.40771,0,6.2,1,0.507,6.164,91.3,3.048,8,307,17.4,395.24,21.46,21.7
+ 0.62356,0,6.2,1,0.507,6.879,77.7,3.2721,8,307,17.4,390.39,9.93,27.5
+ 0.6147,0,6.2,0,0.507,6.618,80.8,3.2721,8,307,17.4,396.9,7.6,30.1
+ 0.31533,0,6.2,0,0.504,8.266,78.3,2.8944,8,307,17.4,385.05,4.14,44.8
+ 0.52693,0,6.2,0,0.504,8.725,83,2.8944,8,307,17.4,382,4.63,50
+ 0.38214,0,6.2,0,0.504,8.04,86.5,3.2157,8,307,17.4,387.38,3.13,37.6
+ 0.41238,0,6.2,0,0.504,7.163,79.9,3.2157,8,307,17.4,372.08,6.36,31.6
+ 0.29819,0,6.2,0,0.504,7.686,17,3.3751,8,307,17.4,377.51,3.92,46.7
+ 0.44178,0,6.2,0,0.504,6.552,21.4,3.3751,8,307,17.4,380.34,3.76,31.5
+ 0.537,0,6.2,0,0.504,5.981,68.1,3.6715,8,307,17.4,378.35,11.65,24.3
+ 0.46296,0,6.2,0,0.504,7.412,76.9,3.6715,8,307,17.4,376.14,5.25,31.7
+ 0.57529,0,6.2,0,0.507,8.337,73.3,3.8384,8,307,17.4,385.91,2.47,41.7
+ 0.33147,0,6.2,0,0.507,8.247,70.4,3.6519,8,307,17.4,378.95,3.95,48.3
+ 0.44791,0,6.2,1,0.507,6.726,66.5,3.6519,8,307,17.4,360.2,8.05,29
+ 0.33045,0,6.2,0,0.507,6.086,61.5,3.6519,8,307,17.4,376.75,10.88,24
+ 0.52058,0,6.2,1,0.507,6.631,76.5,4.148,8,307,17.4,388.45,9.54,25.1
+ 0.51183,0,6.2,0,0.507,7.358,71.6,4.148,8,307,17.4,390.07,4.73,31.5
+ 0.08244,30,4.93,0,0.428,6.481,18.5,6.1899,6,300,16.6,379.41,6.36,23.7
+ 0.09252,30,4.93,0,0.428,6.606,42.2,6.1899,6,300,16.6,383.78,7.37,23.3
+ 0.11329,30,4.93,0,0.428,6.897,54.3,6.3361,6,300,16.6,391.25,11.38,22
+ 0.10612,30,4.93,0,0.428,6.095,65.1,6.3361,6,300,16.6,394.62,12.4,20.1
+ 0.1029,30,4.93,0,0.428,6.358,52.9,7.0355,6,300,16.6,372.75,11.22,22.2
+ 0.12757,30,4.93,0,0.428,6.393,7.8,7.0355,6,300,16.6,374.71,5.19,23.7
+ 0.20608,22,5.86,0,0.431,5.593,76.5,7.9549,7,330,19.1,372.49,12.5,17.6
+ 0.19133,22,5.86,0,0.431,5.605,70.2,7.9549,7,330,19.1,389.13,18.46,18.5
+ 0.33983,22,5.86,0,0.431,6.108,34.9,8.0555,7,330,19.1,390.18,9.16,24.3
+ 0.19657,22,5.86,0,0.431,6.226,79.2,8.0555,7,330,19.1,376.14,10.15,20.5
+ 0.16439,22,5.86,0,0.431,6.433,49.1,7.8265,7,330,19.1,374.71,9.52,24.5
+ 0.19073,22,5.86,0,0.431,6.718,17.5,7.8265,7,330,19.1,393.74,6.56,26.2
+ 0.1403,22,5.86,0,0.431,6.487,13,7.3967,7,330,19.1,396.28,5.9,24.4
+ 0.21409,22,5.86,0,0.431,6.438,8.9,7.3967,7,330,19.1,377.07,3.59,24.8
+ 0.08221,22,5.86,0,0.431,6.957,6.8,8.9067,7,330,19.1,386.09,3.53,29.6
+ 0.36894,22,5.86,0,0.431,8.259,8.4,8.9067,7,330,19.1,396.9,3.54,42.8
+ 0.04819,80,3.64,0,0.392,6.108,32,9.2203,1,315,16.4,392.89,6.57,21.9
+ 0.03548,80,3.64,0,0.392,5.876,19.1,9.2203,1,315,16.4,395.18,9.25,20.9
+ 0.01538,90,3.75,0,0.394,7.454,34.2,6.3361,3,244,15.9,386.34,3.11,44
+ 0.61154,20,3.97,0,0.647,8.704,86.9,1.801,5,264,13,389.7,5.12,50
+ 0.66351,20,3.97,0,0.647,7.333,100,1.8946,5,264,13,383.29,7.79,36
+ 0.65665,20,3.97,0,0.647,6.842,100,2.0107,5,264,13,391.93,6.9,30.1
+ 0.54011,20,3.97,0,0.647,7.203,81.8,2.1121,5,264,13,392.8,9.59,33.8
+ 0.53412,20,3.97,0,0.647,7.52,89.4,2.1398,5,264,13,388.37,7.26,43.1
+ 0.52014,20,3.97,0,0.647,8.398,91.5,2.2885,5,264,13,386.86,5.91,48.8
+ 0.82526,20,3.97,0,0.647,7.327,94.5,2.0788,5,264,13,393.42,11.25,31
+ 0.55007,20,3.97,0,0.647,7.206,91.6,1.9301,5,264,13,387.89,8.1,36.5
+ 0.76162,20,3.97,0,0.647,5.56,62.8,1.9865,5,264,13,392.4,10.45,22.8
+ 0.7857,20,3.97,0,0.647,7.014,84.6,2.1329,5,264,13,384.07,14.79,30.7
+ 0.57834,20,3.97,0,0.575,8.297,67,2.4216,5,264,13,384.54,7.44,50
+ 0.5405,20,3.97,0,0.575,7.47,52.6,2.872,5,264,13,390.3,3.16,43.5
+ 0.09065,20,6.96,1,0.464,5.92,61.5,3.9175,3,223,18.6,391.34,13.65,20.7
+ 0.29916,20,6.96,0,0.464,5.856,42.1,4.429,3,223,18.6,388.65,13,21.1
+ 0.16211,20,6.96,0,0.464,6.24,16.3,4.429,3,223,18.6,396.9,6.59,25.2
+ 0.1146,20,6.96,0,0.464,6.538,58.7,3.9175,3,223,18.6,394.96,7.73,24.4
+ 0.22188,20,6.96,1,0.464,7.691,51.8,4.3665,3,223,18.6,390.77,6.58,35.2
+ 0.05644,40,6.41,1,0.447,6.758,32.9,4.0776,4,254,17.6,396.9,3.53,32.4
+ 0.09604,40,6.41,0,0.447,6.854,42.8,4.2673,4,254,17.6,396.9,2.98,32
+ 0.10469,40,6.41,1,0.447,7.267,49,4.7872,4,254,17.6,389.25,6.05,33.2
+ 0.06127,40,6.41,1,0.447,6.826,27.6,4.8628,4,254,17.6,393.45,4.16,33.1
+ 0.07978,40,6.41,0,0.447,6.482,32.1,4.1403,4,254,17.6,396.9,7.19,29.1
+ 0.21038,20,3.33,0,0.4429,6.812,32.2,4.1007,5,216,14.9,396.9,4.85,35.1
+ 0.03578,20,3.33,0,0.4429,7.82,64.5,4.6947,5,216,14.9,387.31,3.76,45.4
+ 0.03705,20,3.33,0,0.4429,6.968,37.2,5.2447,5,216,14.9,392.23,4.59,35.4
+ 0.06129,20,3.33,1,0.4429,7.645,49.7,5.2119,5,216,14.9,377.07,3.01,46
+ 0.01501,90,1.21,1,0.401,7.923,24.8,5.885,1,198,13.6,395.52,3.16,50
+ 0.00906,90,2.97,0,0.4,7.088,20.8,7.3073,1,285,15.3,394.72,7.85,32.2
+ 0.01096,55,2.25,0,0.389,6.453,31.9,7.3073,1,300,15.3,394.72,8.23,22
+ 0.01965,80,1.76,0,0.385,6.23,31.5,9.0892,1,241,18.2,341.6,12.93,20.1
+ 0.03871,52.5,5.32,0,0.405,6.209,31.3,7.3172,6,293,16.6,396.9,7.14,23.2
+ 0.0459,52.5,5.32,0,0.405,6.315,45.6,7.3172,6,293,16.6,396.9,7.6,22.3
+ 0.04297,52.5,5.32,0,0.405,6.565,22.9,7.3172,6,293,16.6,371.72,9.51,24.8
+ 0.03502,80,4.95,0,0.411,6.861,27.9,5.1167,4,245,19.2,396.9,3.33,28.5
+ 0.07886,80,4.95,0,0.411,7.148,27.7,5.1167,4,245,19.2,396.9,3.56,37.3
+ 0.03615,80,4.95,0,0.411,6.63,23.4,5.1167,4,245,19.2,396.9,4.7,27.9
+ 0.08265,0,13.92,0,0.437,6.127,18.4,5.5027,4,289,16,396.9,8.58,23.9
+ 0.08199,0,13.92,0,0.437,6.009,42.3,5.5027,4,289,16,396.9,10.4,21.7
+ 0.12932,0,13.92,0,0.437,6.678,31.1,5.9604,4,289,16,396.9,6.27,28.6
+ 0.05372,0,13.92,0,0.437,6.549,51,5.9604,4,289,16,392.85,7.39,27.1
+ 0.14103,0,13.92,0,0.437,5.79,58,6.32,4,289,16,396.9,15.84,20.3
+ 0.06466,70,2.24,0,0.4,6.345,20.1,7.8278,5,358,14.8,368.24,4.97,22.5
+ 0.05561,70,2.24,0,0.4,7.041,10,7.8278,5,358,14.8,371.58,4.74,29
+ 0.04417,70,2.24,0,0.4,6.871,47.4,7.8278,5,358,14.8,390.86,6.07,24.8
+ 0.03537,34,6.09,0,0.433,6.59,40.4,5.4917,7,329,16.1,395.75,9.5,22
+ 0.09266,34,6.09,0,0.433,6.495,18.4,5.4917,7,329,16.1,383.61,8.67,26.4
+ 0.1,34,6.09,0,0.433,6.982,17.7,5.4917,7,329,16.1,390.43,4.86,33.1
+ 0.05515,33,2.18,0,0.472,7.236,41.1,4.022,7,222,18.4,393.68,6.93,36.1
+ 0.05479,33,2.18,0,0.472,6.616,58.1,3.37,7,222,18.4,393.36,8.93,28.4
+ 0.07503,33,2.18,0,0.472,7.42,71.9,3.0992,7,222,18.4,396.9,6.47,33.4
+ 0.04932,33,2.18,0,0.472,6.849,70.3,3.1827,7,222,18.4,396.9,7.53,28.2
+ 0.49298,0,9.9,0,0.544,6.635,82.5,3.3175,4,304,18.4,396.9,4.54,22.8
+ 0.3494,0,9.9,0,0.544,5.972,76.7,3.1025,4,304,18.4,396.24,9.97,20.3
+ 2.63548,0,9.9,0,0.544,4.973,37.8,2.5194,4,304,18.4,350.45,12.64,16.1
+ 0.79041,0,9.9,0,0.544,6.122,52.8,2.6403,4,304,18.4,396.9,5.98,22.1
+ 0.26169,0,9.9,0,0.544,6.023,90.4,2.834,4,304,18.4,396.3,11.72,19.4
+ 0.26938,0,9.9,0,0.544,6.266,82.8,3.2628,4,304,18.4,393.39,7.9,21.6
+ 0.3692,0,9.9,0,0.544,6.567,87.3,3.6023,4,304,18.4,395.69,9.28,23.8
+ 0.25356,0,9.9,0,0.544,5.705,77.7,3.945,4,304,18.4,396.42,11.5,16.2
+ 0.31827,0,9.9,0,0.544,5.914,83.2,3.9986,4,304,18.4,390.7,18.33,17.8
+ 0.24522,0,9.9,0,0.544,5.782,71.7,4.0317,4,304,18.4,396.9,15.94,19.8
+ 0.40202,0,9.9,0,0.544,6.382,67.2,3.5325,4,304,18.4,395.21,10.36,23.1
+ 0.47547,0,9.9,0,0.544,6.113,58.8,4.0019,4,304,18.4,396.23,12.73,21
+ 0.1676,0,7.38,0,0.493,6.426,52.3,4.5404,5,287,19.6,396.9,7.2,23.8
+ 0.18159,0,7.38,0,0.493,6.376,54.3,4.5404,5,287,19.6,396.9,6.87,23.1
+ 0.35114,0,7.38,0,0.493,6.041,49.9,4.7211,5,287,19.6,396.9,7.7,20.4
+ 0.28392,0,7.38,0,0.493,5.708,74.3,4.7211,5,287,19.6,391.13,11.74,18.5
+ 0.34109,0,7.38,0,0.493,6.415,40.1,4.7211,5,287,19.6,396.9,6.12,25
+ 0.19186,0,7.38,0,0.493,6.431,14.7,5.4159,5,287,19.6,393.68,5.08,24.6
+ 0.30347,0,7.38,0,0.493,6.312,28.9,5.4159,5,287,19.6,396.9,6.15,23
+ 0.24103,0,7.38,0,0.493,6.083,43.7,5.4159,5,287,19.6,396.9,12.79,22.2
+ 0.06617,0,3.24,0,0.46,5.868,25.8,5.2146,4,430,16.9,382.44,9.97,19.3
+ 0.06724,0,3.24,0,0.46,6.333,17.2,5.2146,4,430,16.9,375.21,7.34,22.6
+ 0.04544,0,3.24,0,0.46,6.144,32.2,5.8736,4,430,16.9,368.57,9.09,19.8
+ 0.05023,35,6.06,0,0.4379,5.706,28.4,6.6407,1,304,16.9,394.02,12.43,17.1
+ 0.03466,35,6.06,0,0.4379,6.031,23.3,6.6407,1,304,16.9,362.25,7.83,19.4
+ 0.05083,0,5.19,0,0.515,6.316,38.1,6.4584,5,224,20.2,389.71,5.68,22.2
+ 0.03738,0,5.19,0,0.515,6.31,38.5,6.4584,5,224,20.2,389.4,6.75,20.7
+ 0.03961,0,5.19,0,0.515,6.037,34.5,5.9853,5,224,20.2,396.9,8.01,21.1
+ 0.03427,0,5.19,0,0.515,5.869,46.3,5.2311,5,224,20.2,396.9,9.8,19.5
+ 0.03041,0,5.19,0,0.515,5.895,59.6,5.615,5,224,20.2,394.81,10.56,18.5
+ 0.03306,0,5.19,0,0.515,6.059,37.3,4.8122,5,224,20.2,396.14,8.51,20.6
+ 0.05497,0,5.19,0,0.515,5.985,45.4,4.8122,5,224,20.2,396.9,9.74,19
+ 0.06151,0,5.19,0,0.515,5.968,58.5,4.8122,5,224,20.2,396.9,9.29,18.7
+ 0.01301,35,1.52,0,0.442,7.241,49.3,7.0379,1,284,15.5,394.74,5.49,32.7
+ 0.02498,0,1.89,0,0.518,6.54,59.7,6.2669,1,422,15.9,389.96,8.65,16.5
+ 0.02543,55,3.78,0,0.484,6.696,56.4,5.7321,5,370,17.6,396.9,7.18,23.9
+ 0.03049,55,3.78,0,0.484,6.874,28.1,6.4654,5,370,17.6,387.97,4.61,31.2
+ 0.03113,0,4.39,0,0.442,6.014,48.5,8.0136,3,352,18.8,385.64,10.53,17.5
+ 0.06162,0,4.39,0,0.442,5.898,52.3,8.0136,3,352,18.8,364.61,12.67,17.2
+ 0.0187,85,4.15,0,0.429,6.516,27.7,8.5353,4,351,17.9,392.43,6.36,23.1
+ 0.01501,80,2.01,0,0.435,6.635,29.7,8.344,4,280,17,390.94,5.99,24.5
+ 0.02899,40,1.25,0,0.429,6.939,34.5,8.7921,1,335,19.7,389.85,5.89,26.6
+ 0.06211,40,1.25,0,0.429,6.49,44.4,8.7921,1,335,19.7,396.9,5.98,22.9
+ 0.0795,60,1.69,0,0.411,6.579,35.9,10.7103,4,411,18.3,370.78,5.49,24.1
+ 0.07244,60,1.69,0,0.411,5.884,18.5,10.7103,4,411,18.3,392.33,7.79,18.6
+ 0.01709,90,2.02,0,0.41,6.728,36.1,12.1265,5,187,17,384.46,4.5,30.1
+ 0.04301,80,1.91,0,0.413,5.663,21.9,10.5857,4,334,22,382.8,8.05,18.2
+ 0.10659,80,1.91,0,0.413,5.936,19.5,10.5857,4,334,22,376.04,5.57,20.6
+ 8.98296,0,18.1,1,0.77,6.212,97.4,2.1222,24,666,20.2,377.73,17.6,17.8
+ 3.8497,0,18.1,1,0.77,6.395,91,2.5052,24,666,20.2,391.34,13.27,21.7
+ 5.20177,0,18.1,1,0.77,6.127,83.4,2.7227,24,666,20.2,395.43,11.48,22.7
+ 4.26131,0,18.1,0,0.77,6.112,81.3,2.5091,24,666,20.2,390.74,12.67,22.6
+ 4.54192,0,18.1,0,0.77,6.398,88,2.5182,24,666,20.2,374.56,7.79,25
+ 3.83684,0,18.1,0,0.77,6.251,91.1,2.2955,24,666,20.2,350.65,14.19,19.9
+ 3.67822,0,18.1,0,0.77,5.362,96.2,2.1036,24,666,20.2,380.79,10.19,20.8
+ 4.22239,0,18.1,1,0.77,5.803,89,1.9047,24,666,20.2,353.04,14.64,16.8
+ 3.47428,0,18.1,1,0.718,8.78,82.9,1.9047,24,666,20.2,354.55,5.29,21.9
+ 4.55587,0,18.1,0,0.718,3.561,87.9,1.6132,24,666,20.2,354.7,7.12,27.5
+ 3.69695,0,18.1,0,0.718,4.963,91.4,1.7523,24,666,20.2,316.03,14,21.9
+ 13.5222,0,18.1,0,0.631,3.863,100,1.5106,24,666,20.2,131.42,13.33,23.1
+ 4.89822,0,18.1,0,0.631,4.97,100,1.3325,24,666,20.2,375.52,3.26,50
+ 5.66998,0,18.1,1,0.631,6.683,96.8,1.3567,24,666,20.2,375.33,3.73,50
+ 6.53876,0,18.1,1,0.631,7.016,97.5,1.2024,24,666,20.2,392.05,2.96,50
+ 9.2323,0,18.1,0,0.631,6.216,100,1.1691,24,666,20.2,366.15,9.53,50
+ 8.26725,0,18.1,1,0.668,5.875,89.6,1.1296,24,666,20.2,347.88,8.88,50
+ 11.1081,0,18.1,0,0.668,4.906,100,1.1742,24,666,20.2,396.9,34.77,13.8
+ 18.4982,0,18.1,0,0.668,4.138,100,1.137,24,666,20.2,396.9,37.97,13.8
+ 19.6091,0,18.1,0,0.671,7.313,97.9,1.3163,24,666,20.2,396.9,13.44,15
+ 15.288,0,18.1,0,0.671,6.649,93.3,1.3449,24,666,20.2,363.02,23.24,13.9
+ 9.82349,0,18.1,0,0.671,6.794,98.8,1.358,24,666,20.2,396.9,21.24,13.3
+ 23.6482,0,18.1,0,0.671,6.38,96.2,1.3861,24,666,20.2,396.9,23.69,13.1
+ 17.8667,0,18.1,0,0.671,6.223,100,1.3861,24,666,20.2,393.74,21.78,10.2
+ 88.9762,0,18.1,0,0.671,6.968,91.9,1.4165,24,666,20.2,396.9,17.21,10.4
+ 15.8744,0,18.1,0,0.671,6.545,99.1,1.5192,24,666,20.2,396.9,21.08,10.9
+ 9.18702,0,18.1,0,0.7,5.536,100,1.5804,24,666,20.2,396.9,23.6,11.3
+ 7.99248,0,18.1,0,0.7,5.52,100,1.5331,24,666,20.2,396.9,24.56,12.3
+ 20.0849,0,18.1,0,0.7,4.368,91.2,1.4395,24,666,20.2,285.83,30.63,8.8
+ 16.8118,0,18.1,0,0.7,5.277,98.1,1.4261,24,666,20.2,396.9,30.81,7.2
+ 24.3938,0,18.1,0,0.7,4.652,100,1.4672,24,666,20.2,396.9,28.28,10.5
+ 22.5971,0,18.1,0,0.7,5,89.5,1.5184,24,666,20.2,396.9,31.99,7.4
+ 14.3337,0,18.1,0,0.7,4.88,100,1.5895,24,666,20.2,372.92,30.62,10.2
+ 8.15174,0,18.1,0,0.7,5.39,98.9,1.7281,24,666,20.2,396.9,20.85,11.5
+ 6.96215,0,18.1,0,0.7,5.713,97,1.9265,24,666,20.2,394.43,17.11,15.1
+ 5.29305,0,18.1,0,0.7,6.051,82.5,2.1678,24,666,20.2,378.38,18.76,23.2
+ 11.5779,0,18.1,0,0.7,5.036,97,1.77,24,666,20.2,396.9,25.68,9.7
+ 8.64476,0,18.1,0,0.693,6.193,92.6,1.7912,24,666,20.2,396.9,15.17,13.8
+ 13.3598,0,18.1,0,0.693,5.887,94.7,1.7821,24,666,20.2,396.9,16.35,12.7
+ 8.71675,0,18.1,0,0.693,6.471,98.8,1.7257,24,666,20.2,391.98,17.12,13.1
+ 5.87205,0,18.1,0,0.693,6.405,96,1.6768,24,666,20.2,396.9,19.37,12.5
+ 7.67202,0,18.1,0,0.693,5.747,98.9,1.6334,24,666,20.2,393.1,19.92,8.5
+ 38.3518,0,18.1,0,0.693,5.453,100,1.4896,24,666,20.2,396.9,30.59,5
+ 9.91655,0,18.1,0,0.693,5.852,77.8,1.5004,24,666,20.2,338.16,29.97,6.3
+ 25.0461,0,18.1,0,0.693,5.987,100,1.5888,24,666,20.2,396.9,26.77,5.6
+ 14.2362,0,18.1,0,0.693,6.343,100,1.5741,24,666,20.2,396.9,20.32,7.2
+ 9.59571,0,18.1,0,0.693,6.404,100,1.639,24,666,20.2,376.11,20.31,12.1
+ 24.8017,0,18.1,0,0.693,5.349,96,1.7028,24,666,20.2,396.9,19.77,8.3
+ 41.5292,0,18.1,0,0.693,5.531,85.4,1.6074,24,666,20.2,329.46,27.38,8.5
+ 67.9208,0,18.1,0,0.693,5.683,100,1.4254,24,666,20.2,384.97,22.98,5
+ 20.7162,0,18.1,0,0.659,4.138,100,1.1781,24,666,20.2,370.22,23.34,11.9
+ 11.9511,0,18.1,0,0.659,5.608,100,1.2852,24,666,20.2,332.09,12.13,27.9
+ 7.40389,0,18.1,0,0.597,5.617,97.9,1.4547,24,666,20.2,314.64,26.4,17.2
+ 14.4383,0,18.1,0,0.597,6.852,100,1.4655,24,666,20.2,179.36,19.78,27.5
+ 51.1358,0,18.1,0,0.597,5.757,100,1.413,24,666,20.2,2.6,10.11,15
+ 14.0507,0,18.1,0,0.597,6.657,100,1.5275,24,666,20.2,35.05,21.22,17.2
+ 18.811,0,18.1,0,0.597,4.628,100,1.5539,24,666,20.2,28.79,34.37,17.9
+ 28.6558,0,18.1,0,0.597,5.155,100,1.5894,24,666,20.2,210.97,20.08,16.3
+ 45.7461,0,18.1,0,0.693,4.519,100,1.6582,24,666,20.2,88.27,36.98,7
+ 18.0846,0,18.1,0,0.679,6.434,100,1.8347,24,666,20.2,27.25,29.05,7.2
+ 10.8342,0,18.1,0,0.679,6.782,90.8,1.8195,24,666,20.2,21.57,25.79,7.5
+ 25.9406,0,18.1,0,0.679,5.304,89.1,1.6475,24,666,20.2,127.36,26.64,10.4
+ 73.5341,0,18.1,0,0.679,5.957,100,1.8026,24,666,20.2,16.45,20.62,8.8
+ 11.8123,0,18.1,0,0.718,6.824,76.5,1.794,24,666,20.2,48.45,22.74,8.4
+ 11.0874,0,18.1,0,0.718,6.411,100,1.8589,24,666,20.2,318.75,15.02,16.7
+ 7.02259,0,18.1,0,0.718,6.006,95.3,1.8746,24,666,20.2,319.98,15.7,14.2
+ 12.0482,0,18.1,0,0.614,5.648,87.6,1.9512,24,666,20.2,291.55,14.1,20.8
+ 7.05042,0,18.1,0,0.614,6.103,85.1,2.0218,24,666,20.2,2.52,23.29,13.4
+ 8.79212,0,18.1,0,0.584,5.565,70.6,2.0635,24,666,20.2,3.65,17.16,11.7
+ 15.8603,0,18.1,0,0.679,5.896,95.4,1.9096,24,666,20.2,7.68,24.39,8.3
+ 12.2472,0,18.1,0,0.584,5.837,59.7,1.9976,24,666,20.2,24.65,15.69,10.2
+ 37.6619,0,18.1,0,0.679,6.202,78.7,1.8629,24,666,20.2,18.82,14.52,10.9
+ 7.36711,0,18.1,0,0.679,6.193,78.1,1.9356,24,666,20.2,96.73,21.52,11
+ 9.33889,0,18.1,0,0.679,6.38,95.6,1.9682,24,666,20.2,60.72,24.08,9.5
+ 8.49213,0,18.1,0,0.584,6.348,86.1,2.0527,24,666,20.2,83.45,17.64,14.5
+ 10.0623,0,18.1,0,0.584,6.833,94.3,2.0882,24,666,20.2,81.33,19.69,14.1
+ 6.44405,0,18.1,0,0.584,6.425,74.8,2.2004,24,666,20.2,97.95,12.03,16.1
+ 5.58107,0,18.1,0,0.713,6.436,87.9,2.3158,24,666,20.2,100.19,16.22,14.3
+ 13.9134,0,18.1,0,0.713,6.208,95,2.2222,24,666,20.2,100.63,15.17,11.7
+ 11.1604,0,18.1,0,0.74,6.629,94.6,2.1247,24,666,20.2,109.85,23.27,13.4
+ 14.4208,0,18.1,0,0.74,6.461,93.3,2.0026,24,666,20.2,27.49,18.05,9.6
+ 15.1772,0,18.1,0,0.74,6.152,100,1.9142,24,666,20.2,9.32,26.45,8.7
+ 13.6781,0,18.1,0,0.74,5.935,87.9,1.8206,24,666,20.2,68.95,34.02,8.4
+ 9.39063,0,18.1,0,0.74,5.627,93.9,1.8172,24,666,20.2,396.9,22.88,12.8
+ 22.0511,0,18.1,0,0.74,5.818,92.4,1.8662,24,666,20.2,391.45,22.11,10.5
+ 9.72418,0,18.1,0,0.74,6.406,97.2,2.0651,24,666,20.2,385.96,19.52,17.1
+ 5.66637,0,18.1,0,0.74,6.219,100,2.0048,24,666,20.2,395.69,16.59,18.4
+ 9.96654,0,18.1,0,0.74,6.485,100,1.9784,24,666,20.2,386.73,18.85,15.4
+ 12.8023,0,18.1,0,0.74,5.854,96.6,1.8956,24,666,20.2,240.52,23.79,10.8
+ 10.6718,0,18.1,0,0.74,6.459,94.8,1.9879,24,666,20.2,43.06,23.98,11.8
+ 6.28807,0,18.1,0,0.74,6.341,96.4,2.072,24,666,20.2,318.01,17.79,14.9
+ 9.92485,0,18.1,0,0.74,6.251,96.6,2.198,24,666,20.2,388.52,16.44,12.6
+ 9.32909,0,18.1,0,0.713,6.185,98.7,2.2616,24,666,20.2,396.9,18.13,14.1
+ 7.52601,0,18.1,0,0.713,6.417,98.3,2.185,24,666,20.2,304.21,19.31,13
+ 6.71772,0,18.1,0,0.713,6.749,92.6,2.3236,24,666,20.2,0.32,17.44,13.4
+ 5.44114,0,18.1,0,0.713,6.655,98.2,2.3552,24,666,20.2,355.29,17.73,15.2
+ 5.09017,0,18.1,0,0.713,6.297,91.8,2.3682,24,666,20.2,385.09,17.27,16.1
+ 8.24809,0,18.1,0,0.713,7.393,99.3,2.4527,24,666,20.2,375.87,16.74,17.8
+ 9.51363,0,18.1,0,0.713,6.728,94.1,2.4961,24,666,20.2,6.68,18.71,14.9
+ 4.75237,0,18.1,0,0.713,6.525,86.5,2.4358,24,666,20.2,50.92,18.13,14.1
+ 4.66883,0,18.1,0,0.713,5.976,87.9,2.5806,24,666,20.2,10.48,19.01,12.7
+ 8.20058,0,18.1,0,0.713,5.936,80.3,2.7792,24,666,20.2,3.5,16.94,13.5
+ 7.75223,0,18.1,0,0.713,6.301,83.7,2.7831,24,666,20.2,272.21,16.23,14.9
+ 6.80117,0,18.1,0,0.713,6.081,84.4,2.7175,24,666,20.2,396.9,14.7,20
+ 4.81213,0,18.1,0,0.713,6.701,90,2.5975,24,666,20.2,255.23,16.42,16.4
+ 3.69311,0,18.1,0,0.713,6.376,88.4,2.5671,24,666,20.2,391.43,14.65,17.7
+ 6.65492,0,18.1,0,0.713,6.317,83,2.7344,24,666,20.2,396.9,13.99,19.5
+ 5.82115,0,18.1,0,0.713,6.513,89.9,2.8016,24,666,20.2,393.82,10.29,20.2
+ 7.83932,0,18.1,0,0.655,6.209,65.4,2.9634,24,666,20.2,396.9,13.22,21.4
+ 3.1636,0,18.1,0,0.655,5.759,48.2,3.0665,24,666,20.2,334.4,14.13,19.9
+ 3.77498,0,18.1,0,0.655,5.952,84.7,2.8715,24,666,20.2,22.01,17.15,19
+ 4.42228,0,18.1,0,0.584,6.003,94.5,2.5403,24,666,20.2,331.29,21.32,19.1
+ 15.5757,0,18.1,0,0.58,5.926,71,2.9084,24,666,20.2,368.74,18.13,19.1
+ 13.0751,0,18.1,0,0.58,5.713,56.7,2.8237,24,666,20.2,396.9,14.76,20.1
+ 4.34879,0,18.1,0,0.58,6.167,84,3.0334,24,666,20.2,396.9,16.29,19.9
+ 4.03841,0,18.1,0,0.532,6.229,90.7,3.0993,24,666,20.2,395.33,12.87,19.6
+ 3.56868,0,18.1,0,0.58,6.437,75,2.8965,24,666,20.2,393.37,14.36,23.2
+ 4.64689,0,18.1,0,0.614,6.98,67.6,2.5329,24,666,20.2,374.68,11.66,29.8
+ 8.05579,0,18.1,0,0.584,5.427,95.4,2.4298,24,666,20.2,352.58,18.14,13.8
+ 6.39312,0,18.1,0,0.584,6.162,97.4,2.206,24,666,20.2,302.76,24.1,13.3
+ 4.87141,0,18.1,0,0.614,6.484,93.6,2.3053,24,666,20.2,396.21,18.68,16.7
+ 15.0234,0,18.1,0,0.614,5.304,97.3,2.1007,24,666,20.2,349.48,24.91,12
+ 10.233,0,18.1,0,0.614,6.185,96.7,2.1705,24,666,20.2,379.7,18.03,14.6
+ 14.3337,0,18.1,0,0.614,6.229,88,1.9512,24,666,20.2,383.32,13.11,21.4
+ 5.82401,0,18.1,0,0.532,6.242,64.7,3.4242,24,666,20.2,396.9,10.74,23
+ 5.70818,0,18.1,0,0.532,6.75,74.9,3.3317,24,666,20.2,393.07,7.74,23.7
+ 5.73116,0,18.1,0,0.532,7.061,77,3.4106,24,666,20.2,395.28,7.01,25
+ 2.81838,0,18.1,0,0.532,5.762,40.3,4.0983,24,666,20.2,392.92,10.42,21.8
+ 2.37857,0,18.1,0,0.583,5.871,41.9,3.724,24,666,20.2,370.73,13.34,20.6
+ 3.67367,0,18.1,0,0.583,6.312,51.9,3.9917,24,666,20.2,388.62,10.58,21.2
+ 5.69175,0,18.1,0,0.583,6.114,79.8,3.5459,24,666,20.2,392.68,14.98,19.1
+ 4.83567,0,18.1,0,0.583,5.905,53.2,3.1523,24,666,20.2,388.22,11.45,20.6
+ 0.15086,0,27.74,0,0.609,5.454,92.7,1.8209,4,711,20.1,395.09,18.06,15.2
+ 0.18337,0,27.74,0,0.609,5.414,98.3,1.7554,4,711,20.1,344.05,23.97,7
+ 0.20746,0,27.74,0,0.609,5.093,98,1.8226,4,711,20.1,318.43,29.68,8.1
+ 0.10574,0,27.74,0,0.609,5.983,98.8,1.8681,4,711,20.1,390.11,18.07,13.6
+ 0.11132,0,27.74,0,0.609,5.983,83.5,2.1099,4,711,20.1,396.9,13.35,20.1
+ 0.17331,0,9.69,0,0.585,5.707,54,2.3817,6,391,19.2,396.9,12.01,21.8
+ 0.27957,0,9.69,0,0.585,5.926,42.6,2.3817,6,391,19.2,396.9,13.59,24.5
+ 0.17899,0,9.69,0,0.585,5.67,28.8,2.7986,6,391,19.2,393.29,17.6,23.1
+ 0.2896,0,9.69,0,0.585,5.39,72.9,2.7986,6,391,19.2,396.9,21.14,19.7
+ 0.26838,0,9.69,0,0.585,5.794,70.6,2.8927,6,391,19.2,396.9,14.1,18.3
+ 0.23912,0,9.69,0,0.585,6.019,65.3,2.4091,6,391,19.2,396.9,12.92,21.2
+ 0.17783,0,9.69,0,0.585,5.569,73.5,2.3999,6,391,19.2,395.77,15.1,17.5
+ 0.22438,0,9.69,0,0.585,6.027,79.7,2.4982,6,391,19.2,396.9,14.33,16.8
+ 0.06263,0,11.93,0,0.573,6.593,69.1,2.4786,1,273,21,391.99,9.67,22.4
+ 0.04527,0,11.93,0,0.573,6.12,76.7,2.2875,1,273,21,396.9,9.08,20.6
+ 0.06076,0,11.93,0,0.573,6.976,91,2.1675,1,273,21,396.9,5.64,23.9
+ 0.10959,0,11.93,0,0.573,6.794,89.3,2.3889,1,273,21,393.45,6.48,22
+ 0.04741,0,11.93,0,0.573,6.03,80.8,2.505,1,273,21,396.9,7.88,11.9
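Note that the first row of boston_house_prices.csv above ("506,13,...") is not data: scikit-learn's bundled CSVs open with a count row (n_samples, n_features, then optionally the target/class names), followed by a column-name row, with the target as the last column of each data row (MEDV here). A minimal sketch of a parser for that convention, assuming only the layout visible in the diff (the function name is ours, and load_boston itself has since been removed from scikit-learn in version 1.2):

    # Sketch of the count-row header convention used by sklearn's bundled CSVs.
    import csv
    import numpy as np

    def read_count_prefixed_csv(path):
        with open(path, newline="") as f:
            reader = csv.reader(f)
            # Row 1: n_samples, n_features (extra trailing fields are ignored).
            n_samples, n_features = (int(v) for v in next(reader)[:2])
            columns = next(reader)  # row 2: feature names + target name
            data = np.empty((n_samples, n_features))
            target = np.empty(n_samples)
            for i, row in enumerate(reader):
                data[i] = [float(v) for v in row[:n_features]]
                target[i] = float(row[n_features])  # last column is the target
        return data, target, columns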
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/data/breast_cancer.csv ADDED
The diff for this file is too large to render. See raw diff
 
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/data/iris.csv ADDED
@@ -0,0 +1,151 @@
+ 150,4,setosa,versicolor,virginica
+ 5.1,3.5,1.4,0.2,0
+ 4.9,3.0,1.4,0.2,0
+ 4.7,3.2,1.3,0.2,0
+ 4.6,3.1,1.5,0.2,0
+ 5.0,3.6,1.4,0.2,0
+ 5.4,3.9,1.7,0.4,0
+ 4.6,3.4,1.4,0.3,0
+ 5.0,3.4,1.5,0.2,0
+ 4.4,2.9,1.4,0.2,0
+ 4.9,3.1,1.5,0.1,0
+ 5.4,3.7,1.5,0.2,0
+ 4.8,3.4,1.6,0.2,0
+ 4.8,3.0,1.4,0.1,0
+ 4.3,3.0,1.1,0.1,0
+ 5.8,4.0,1.2,0.2,0
+ 5.7,4.4,1.5,0.4,0
+ 5.4,3.9,1.3,0.4,0
+ 5.1,3.5,1.4,0.3,0
+ 5.7,3.8,1.7,0.3,0
+ 5.1,3.8,1.5,0.3,0
+ 5.4,3.4,1.7,0.2,0
+ 5.1,3.7,1.5,0.4,0
+ 4.6,3.6,1.0,0.2,0
+ 5.1,3.3,1.7,0.5,0
+ 4.8,3.4,1.9,0.2,0
+ 5.0,3.0,1.6,0.2,0
+ 5.0,3.4,1.6,0.4,0
+ 5.2,3.5,1.5,0.2,0
+ 5.2,3.4,1.4,0.2,0
+ 4.7,3.2,1.6,0.2,0
+ 4.8,3.1,1.6,0.2,0
+ 5.4,3.4,1.5,0.4,0
+ 5.2,4.1,1.5,0.1,0
+ 5.5,4.2,1.4,0.2,0
+ 4.9,3.1,1.5,0.2,0
+ 5.0,3.2,1.2,0.2,0
+ 5.5,3.5,1.3,0.2,0
+ 4.9,3.6,1.4,0.1,0
+ 4.4,3.0,1.3,0.2,0
+ 5.1,3.4,1.5,0.2,0
+ 5.0,3.5,1.3,0.3,0
+ 4.5,2.3,1.3,0.3,0
+ 4.4,3.2,1.3,0.2,0
+ 5.0,3.5,1.6,0.6,0
+ 5.1,3.8,1.9,0.4,0
+ 4.8,3.0,1.4,0.3,0
+ 5.1,3.8,1.6,0.2,0
+ 4.6,3.2,1.4,0.2,0
+ 5.3,3.7,1.5,0.2,0
+ 5.0,3.3,1.4,0.2,0
+ 7.0,3.2,4.7,1.4,1
+ 6.4,3.2,4.5,1.5,1
+ 6.9,3.1,4.9,1.5,1
+ 5.5,2.3,4.0,1.3,1
+ 6.5,2.8,4.6,1.5,1
+ 5.7,2.8,4.5,1.3,1
+ 6.3,3.3,4.7,1.6,1
+ 4.9,2.4,3.3,1.0,1
+ 6.6,2.9,4.6,1.3,1
+ 5.2,2.7,3.9,1.4,1
+ 5.0,2.0,3.5,1.0,1
+ 5.9,3.0,4.2,1.5,1
+ 6.0,2.2,4.0,1.0,1
+ 6.1,2.9,4.7,1.4,1
+ 5.6,2.9,3.6,1.3,1
+ 6.7,3.1,4.4,1.4,1
+ 5.6,3.0,4.5,1.5,1
+ 5.8,2.7,4.1,1.0,1
+ 6.2,2.2,4.5,1.5,1
+ 5.6,2.5,3.9,1.1,1
+ 5.9,3.2,4.8,1.8,1
+ 6.1,2.8,4.0,1.3,1
+ 6.3,2.5,4.9,1.5,1
+ 6.1,2.8,4.7,1.2,1
+ 6.4,2.9,4.3,1.3,1
+ 6.6,3.0,4.4,1.4,1
+ 6.8,2.8,4.8,1.4,1
+ 6.7,3.0,5.0,1.7,1
+ 6.0,2.9,4.5,1.5,1
+ 5.7,2.6,3.5,1.0,1
+ 5.5,2.4,3.8,1.1,1
+ 5.5,2.4,3.7,1.0,1
+ 5.8,2.7,3.9,1.2,1
+ 6.0,2.7,5.1,1.6,1
+ 5.4,3.0,4.5,1.5,1
+ 6.0,3.4,4.5,1.6,1
+ 6.7,3.1,4.7,1.5,1
+ 6.3,2.3,4.4,1.3,1
+ 5.6,3.0,4.1,1.3,1
+ 5.5,2.5,4.0,1.3,1
+ 5.5,2.6,4.4,1.2,1
+ 6.1,3.0,4.6,1.4,1
+ 5.8,2.6,4.0,1.2,1
+ 5.0,2.3,3.3,1.0,1
+ 5.6,2.7,4.2,1.3,1
+ 5.7,3.0,4.2,1.2,1
+ 5.7,2.9,4.2,1.3,1
+ 6.2,2.9,4.3,1.3,1
+ 5.1,2.5,3.0,1.1,1
+ 5.7,2.8,4.1,1.3,1
+ 6.3,3.3,6.0,2.5,2
+ 5.8,2.7,5.1,1.9,2
+ 7.1,3.0,5.9,2.1,2
+ 6.3,2.9,5.6,1.8,2
+ 6.5,3.0,5.8,2.2,2
+ 7.6,3.0,6.6,2.1,2
+ 4.9,2.5,4.5,1.7,2
+ 7.3,2.9,6.3,1.8,2
+ 6.7,2.5,5.8,1.8,2
+ 7.2,3.6,6.1,2.5,2
+ 6.5,3.2,5.1,2.0,2
+ 6.4,2.7,5.3,1.9,2
+ 6.8,3.0,5.5,2.1,2
+ 5.7,2.5,5.0,2.0,2
+ 5.8,2.8,5.1,2.4,2
+ 6.4,3.2,5.3,2.3,2
+ 6.5,3.0,5.5,1.8,2
+ 7.7,3.8,6.7,2.2,2
+ 7.7,2.6,6.9,2.3,2
+ 6.0,2.2,5.0,1.5,2
+ 6.9,3.2,5.7,2.3,2
+ 5.6,2.8,4.9,2.0,2
+ 7.7,2.8,6.7,2.0,2
+ 6.3,2.7,4.9,1.8,2
+ 6.7,3.3,5.7,2.1,2
+ 7.2,3.2,6.0,1.8,2
+ 6.2,2.8,4.8,1.8,2
+ 6.1,3.0,4.9,1.8,2
+ 6.4,2.8,5.6,2.1,2
+ 7.2,3.0,5.8,1.6,2
+ 7.4,2.8,6.1,1.9,2
+ 7.9,3.8,6.4,2.0,2
+ 6.4,2.8,5.6,2.2,2
+ 6.3,2.8,5.1,1.5,2
+ 6.1,2.6,5.6,1.4,2
+ 7.7,3.0,6.1,2.3,2
+ 6.3,3.4,5.6,2.4,2
+ 6.4,3.1,5.5,1.8,2
+ 6.0,3.0,4.8,1.8,2
+ 6.9,3.1,5.4,2.1,2
+ 6.7,3.1,5.6,2.4,2
+ 6.9,3.1,5.1,2.3,2
+ 5.8,2.7,5.1,1.9,2
+ 6.8,3.2,5.9,2.3,2
+ 6.7,3.3,5.7,2.5,2
+ 6.7,3.0,5.2,2.3,2
+ 6.3,2.5,5.0,1.9,2
+ 6.5,3.0,5.2,2.0,2
+ 6.2,3.4,5.4,2.3,2
+ 5.9,3.0,5.1,1.8,2
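iris.csv follows the same convention: the count row "150,4,setosa,versicolor,virginica" carries the class names, and the integer class index rides in the last column of each data row. This is exactly what the public loader exposes:

    from sklearn.datasets import load_iris

    iris = load_iris()
    print(iris.data.shape)    # (150, 4)
    print(iris.target_names)  # ['setosa' 'versicolor' 'virginica']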
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/data/linnerud_exercise.csv ADDED
@@ -0,0 +1,21 @@
+ Chins Situps Jumps
+ 5 162 60
+ 2 110 60
+ 12 101 101
+ 12 105 37
+ 13 155 58
+ 4 101 42
+ 8 101 38
+ 6 125 40
+ 15 200 40
+ 17 251 250
+ 17 120 38
+ 13 210 115
+ 14 215 105
+ 1 50 50
+ 6 70 31
+ 12 210 120
+ 4 60 25
+ 11 230 80
+ 15 225 73
+ 2 110 43
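linnerud_exercise.csv uses a different layout from the two files above: whitespace-separated values under a plain header row, with no count row, since load_linnerud pairs it with a physiological-measurements file for a multi-output regression task (to our recollection, the exercise columns serve as the features and the physiological ones as the targets):

    from sklearn.datasets import load_linnerud

    linnerud = load_linnerud()
    print(linnerud.feature_names)                      # ['Chins', 'Situps', 'Jumps']
    print(linnerud.data.shape, linnerud.target.shape)  # (20, 3) (20, 3)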
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/data/wine_data.csv ADDED
@@ -0,0 +1,179 @@
+ 178,13,class_0,class_1,class_2
+ 14.23,1.71,2.43,15.6,127,2.8,3.06,0.28,2.29,5.64,1.04,3.92,1065,0
+ 13.2,1.78,2.14,11.2,100,2.65,2.76,0.26,1.28,4.38,1.05,3.4,1050,0
+ 13.16,2.36,2.67,18.6,101,2.8,3.24,0.3,2.81,5.68,1.03,3.17,1185,0
+ 14.37,1.95,2.5,16.8,113,3.85,3.49,0.24,2.18,7.8,0.86,3.45,1480,0
+ 13.24,2.59,2.87,21,118,2.8,2.69,0.39,1.82,4.32,1.04,2.93,735,0
+ 14.2,1.76,2.45,15.2,112,3.27,3.39,0.34,1.97,6.75,1.05,2.85,1450,0
+ 14.39,1.87,2.45,14.6,96,2.5,2.52,0.3,1.98,5.25,1.02,3.58,1290,0
+ 14.06,2.15,2.61,17.6,121,2.6,2.51,0.31,1.25,5.05,1.06,3.58,1295,0
+ 14.83,1.64,2.17,14,97,2.8,2.98,0.29,1.98,5.2,1.08,2.85,1045,0
+ 13.86,1.35,2.27,16,98,2.98,3.15,0.22,1.85,7.22,1.01,3.55,1045,0
+ 14.1,2.16,2.3,18,105,2.95,3.32,0.22,2.38,5.75,1.25,3.17,1510,0
+ 14.12,1.48,2.32,16.8,95,2.2,2.43,0.26,1.57,5,1.17,2.82,1280,0
+ 13.75,1.73,2.41,16,89,2.6,2.76,0.29,1.81,5.6,1.15,2.9,1320,0
+ 14.75,1.73,2.39,11.4,91,3.1,3.69,0.43,2.81,5.4,1.25,2.73,1150,0
+ 14.38,1.87,2.38,12,102,3.3,3.64,0.29,2.96,7.5,1.2,3,1547,0
+ 13.63,1.81,2.7,17.2,112,2.85,2.91,0.3,1.46,7.3,1.28,2.88,1310,0
+ 14.3,1.92,2.72,20,120,2.8,3.14,0.33,1.97,6.2,1.07,2.65,1280,0
+ 13.83,1.57,2.62,20,115,2.95,3.4,0.4,1.72,6.6,1.13,2.57,1130,0
+ 14.19,1.59,2.48,16.5,108,3.3,3.93,0.32,1.86,8.7,1.23,2.82,1680,0
+ 13.64,3.1,2.56,15.2,116,2.7,3.03,0.17,1.66,5.1,0.96,3.36,845,0
+ 14.06,1.63,2.28,16,126,3,3.17,0.24,2.1,5.65,1.09,3.71,780,0
+ 12.93,3.8,2.65,18.6,102,2.41,2.41,0.25,1.98,4.5,1.03,3.52,770,0
+ 13.71,1.86,2.36,16.6,101,2.61,2.88,0.27,1.69,3.8,1.11,4,1035,0
+ 12.85,1.6,2.52,17.8,95,2.48,2.37,0.26,1.46,3.93,1.09,3.63,1015,0
+ 13.5,1.81,2.61,20,96,2.53,2.61,0.28,1.66,3.52,1.12,3.82,845,0
+ 13.05,2.05,3.22,25,124,2.63,2.68,0.47,1.92,3.58,1.13,3.2,830,0
+ 13.39,1.77,2.62,16.1,93,2.85,2.94,0.34,1.45,4.8,0.92,3.22,1195,0
+ 13.3,1.72,2.14,17,94,2.4,2.19,0.27,1.35,3.95,1.02,2.77,1285,0
+ 13.87,1.9,2.8,19.4,107,2.95,2.97,0.37,1.76,4.5,1.25,3.4,915,0
+ 14.02,1.68,2.21,16,96,2.65,2.33,0.26,1.98,4.7,1.04,3.59,1035,0
+ 13.73,1.5,2.7,22.5,101,3,3.25,0.29,2.38,5.7,1.19,2.71,1285,0
+ 13.58,1.66,2.36,19.1,106,2.86,3.19,0.22,1.95,6.9,1.09,2.88,1515,0
+ 13.68,1.83,2.36,17.2,104,2.42,2.69,0.42,1.97,3.84,1.23,2.87,990,0
+ 13.76,1.53,2.7,19.5,132,2.95,2.74,0.5,1.35,5.4,1.25,3,1235,0
+ 13.51,1.8,2.65,19,110,2.35,2.53,0.29,1.54,4.2,1.1,2.87,1095,0
+ 13.48,1.81,2.41,20.5,100,2.7,2.98,0.26,1.86,5.1,1.04,3.47,920,0
+ 13.28,1.64,2.84,15.5,110,2.6,2.68,0.34,1.36,4.6,1.09,2.78,880,0
+ 13.05,1.65,2.55,18,98,2.45,2.43,0.29,1.44,4.25,1.12,2.51,1105,0
+ 13.07,1.5,2.1,15.5,98,2.4,2.64,0.28,1.37,3.7,1.18,2.69,1020,0
+ 14.22,3.99,2.51,13.2,128,3,3.04,0.2,2.08,5.1,0.89,3.53,760,0
+ 13.56,1.71,2.31,16.2,117,3.15,3.29,0.34,2.34,6.13,0.95,3.38,795,0
+ 13.41,3.84,2.12,18.8,90,2.45,2.68,0.27,1.48,4.28,0.91,3,1035,0
+ 13.88,1.89,2.59,15,101,3.25,3.56,0.17,1.7,5.43,0.88,3.56,1095,0
+ 13.24,3.98,2.29,17.5,103,2.64,2.63,0.32,1.66,4.36,0.82,3,680,0
+ 13.05,1.77,2.1,17,107,3,3,0.28,2.03,5.04,0.88,3.35,885,0
+ 14.21,4.04,2.44,18.9,111,2.85,2.65,0.3,1.25,5.24,0.87,3.33,1080,0
+ 14.38,3.59,2.28,16,102,3.25,3.17,0.27,2.19,4.9,1.04,3.44,1065,0
+ 13.9,1.68,2.12,16,101,3.1,3.39,0.21,2.14,6.1,0.91,3.33,985,0
+ 14.1,2.02,2.4,18.8,103,2.75,2.92,0.32,2.38,6.2,1.07,2.75,1060,0
+ 13.94,1.73,2.27,17.4,108,2.88,3.54,0.32,2.08,8.9,1.12,3.1,1260,0
+ 13.05,1.73,2.04,12.4,92,2.72,3.27,0.17,2.91,7.2,1.12,2.91,1150,0
+ 13.83,1.65,2.6,17.2,94,2.45,2.99,0.22,2.29,5.6,1.24,3.37,1265,0
+ 13.82,1.75,2.42,14,111,3.88,3.74,0.32,1.87,7.05,1.01,3.26,1190,0
+ 13.77,1.9,2.68,17.1,115,3,2.79,0.39,1.68,6.3,1.13,2.93,1375,0
+ 13.74,1.67,2.25,16.4,118,2.6,2.9,0.21,1.62,5.85,0.92,3.2,1060,0
+ 13.56,1.73,2.46,20.5,116,2.96,2.78,0.2,2.45,6.25,0.98,3.03,1120,0
+ 14.22,1.7,2.3,16.3,118,3.2,3,0.26,2.03,6.38,0.94,3.31,970,0
+ 13.29,1.97,2.68,16.8,102,3,3.23,0.31,1.66,6,1.07,2.84,1270,0
+ 13.72,1.43,2.5,16.7,108,3.4,3.67,0.19,2.04,6.8,0.89,2.87,1285,0
+ 12.37,0.94,1.36,10.6,88,1.98,0.57,0.28,0.42,1.95,1.05,1.82,520,1
+ 12.33,1.1,2.28,16,101,2.05,1.09,0.63,0.41,3.27,1.25,1.67,680,1
+ 12.64,1.36,2.02,16.8,100,2.02,1.41,0.53,0.62,5.75,0.98,1.59,450,1
+ 13.67,1.25,1.92,18,94,2.1,1.79,0.32,0.73,3.8,1.23,2.46,630,1
+ 12.37,1.13,2.16,19,87,3.5,3.1,0.19,1.87,4.45,1.22,2.87,420,1
+ 12.17,1.45,2.53,19,104,1.89,1.75,0.45,1.03,2.95,1.45,2.23,355,1
+ 12.37,1.21,2.56,18.1,98,2.42,2.65,0.37,2.08,4.6,1.19,2.3,678,1
+ 13.11,1.01,1.7,15,78,2.98,3.18,0.26,2.28,5.3,1.12,3.18,502,1
+ 12.37,1.17,1.92,19.6,78,2.11,2,0.27,1.04,4.68,1.12,3.48,510,1
+ 13.34,0.94,2.36,17,110,2.53,1.3,0.55,0.42,3.17,1.02,1.93,750,1
+ 12.21,1.19,1.75,16.8,151,1.85,1.28,0.14,2.5,2.85,1.28,3.07,718,1
+ 12.29,1.61,2.21,20.4,103,1.1,1.02,0.37,1.46,3.05,0.906,1.82,870,1
+ 13.86,1.51,2.67,25,86,2.95,2.86,0.21,1.87,3.38,1.36,3.16,410,1
+ 13.49,1.66,2.24,24,87,1.88,1.84,0.27,1.03,3.74,0.98,2.78,472,1
+ 12.99,1.67,2.6,30,139,3.3,2.89,0.21,1.96,3.35,1.31,3.5,985,1
+ 11.96,1.09,2.3,21,101,3.38,2.14,0.13,1.65,3.21,0.99,3.13,886,1
+ 11.66,1.88,1.92,16,97,1.61,1.57,0.34,1.15,3.8,1.23,2.14,428,1
+ 13.03,0.9,1.71,16,86,1.95,2.03,0.24,1.46,4.6,1.19,2.48,392,1
+ 11.84,2.89,2.23,18,112,1.72,1.32,0.43,0.95,2.65,0.96,2.52,500,1
+ 12.33,0.99,1.95,14.8,136,1.9,1.85,0.35,2.76,3.4,1.06,2.31,750,1
+ 12.7,3.87,2.4,23,101,2.83,2.55,0.43,1.95,2.57,1.19,3.13,463,1
+ 12,0.92,2,19,86,2.42,2.26,0.3,1.43,2.5,1.38,3.12,278,1
+ 12.72,1.81,2.2,18.8,86,2.2,2.53,0.26,1.77,3.9,1.16,3.14,714,1
+ 12.08,1.13,2.51,24,78,2,1.58,0.4,1.4,2.2,1.31,2.72,630,1
+ 13.05,3.86,2.32,22.5,85,1.65,1.59,0.61,1.62,4.8,0.84,2.01,515,1
+ 11.84,0.89,2.58,18,94,2.2,2.21,0.22,2.35,3.05,0.79,3.08,520,1
+ 12.67,0.98,2.24,18,99,2.2,1.94,0.3,1.46,2.62,1.23,3.16,450,1
+ 12.16,1.61,2.31,22.8,90,1.78,1.69,0.43,1.56,2.45,1.33,2.26,495,1
+ 11.65,1.67,2.62,26,88,1.92,1.61,0.4,1.34,2.6,1.36,3.21,562,1
+ 11.64,2.06,2.46,21.6,84,1.95,1.69,0.48,1.35,2.8,1,2.75,680,1
+ 12.08,1.33,2.3,23.6,70,2.2,1.59,0.42,1.38,1.74,1.07,3.21,625,1
+ 12.08,1.83,2.32,18.5,81,1.6,1.5,0.52,1.64,2.4,1.08,2.27,480,1
+ 12,1.51,2.42,22,86,1.45,1.25,0.5,1.63,3.6,1.05,2.65,450,1
+ 12.69,1.53,2.26,20.7,80,1.38,1.46,0.58,1.62,3.05,0.96,2.06,495,1
+ 12.29,2.83,2.22,18,88,2.45,2.25,0.25,1.99,2.15,1.15,3.3,290,1
+ 11.62,1.99,2.28,18,98,3.02,2.26,0.17,1.35,3.25,1.16,2.96,345,1
+ 12.47,1.52,2.2,19,162,2.5,2.27,0.32,3.28,2.6,1.16,2.63,937,1
+ 11.81,2.12,2.74,21.5,134,1.6,0.99,0.14,1.56,2.5,0.95,2.26,625,1
+ 12.29,1.41,1.98,16,85,2.55,2.5,0.29,1.77,2.9,1.23,2.74,428,1
+ 12.37,1.07,2.1,18.5,88,3.52,3.75,0.24,1.95,4.5,1.04,2.77,660,1
+ 12.29,3.17,2.21,18,88,2.85,2.99,0.45,2.81,2.3,1.42,2.83,406,1
+ 12.08,2.08,1.7,17.5,97,2.23,2.17,0.26,1.4,3.3,1.27,2.96,710,1
+ 12.6,1.34,1.9,18.5,88,1.45,1.36,0.29,1.35,2.45,1.04,2.77,562,1
+ 12.34,2.45,2.46,21,98,2.56,2.11,0.34,1.31,2.8,0.8,3.38,438,1
+ 11.82,1.72,1.88,19.5,86,2.5,1.64,0.37,1.42,2.06,0.94,2.44,415,1
+ 12.51,1.73,1.98,20.5,85,2.2,1.92,0.32,1.48,2.94,1.04,3.57,672,1
+ 12.42,2.55,2.27,22,90,1.68,1.84,0.66,1.42,2.7,0.86,3.3,315,1
+ 12.25,1.73,2.12,19,80,1.65,2.03,0.37,1.63,3.4,1,3.17,510,1
+ 12.72,1.75,2.28,22.5,84,1.38,1.76,0.48,1.63,3.3,0.88,2.42,488,1
+ 12.22,1.29,1.94,19,92,2.36,2.04,0.39,2.08,2.7,0.86,3.02,312,1
+ 11.61,1.35,2.7,20,94,2.74,2.92,0.29,2.49,2.65,0.96,3.26,680,1
+ 11.46,3.74,1.82,19.5,107,3.18,2.58,0.24,3.58,2.9,0.75,2.81,562,1
+ 12.52,2.43,2.17,21,88,2.55,2.27,0.26,1.22,2,0.9,2.78,325,1
+ 11.76,2.68,2.92,20,103,1.75,2.03,0.6,1.05,3.8,1.23,2.5,607,1
+ 11.41,0.74,2.5,21,88,2.48,2.01,0.42,1.44,3.08,1.1,2.31,434,1
+ 12.08,1.39,2.5,22.5,84,2.56,2.29,0.43,1.04,2.9,0.93,3.19,385,1
+ 11.03,1.51,2.2,21.5,85,2.46,2.17,0.52,2.01,1.9,1.71,2.87,407,1
+ 11.82,1.47,1.99,20.8,86,1.98,1.6,0.3,1.53,1.95,0.95,3.33,495,1
+ 12.42,1.61,2.19,22.5,108,2,2.09,0.34,1.61,2.06,1.06,2.96,345,1
+ 12.77,3.43,1.98,16,80,1.63,1.25,0.43,0.83,3.4,0.7,2.12,372,1
+ 12,3.43,2,19,87,2,1.64,0.37,1.87,1.28,0.93,3.05,564,1
+ 11.45,2.4,2.42,20,96,2.9,2.79,0.32,1.83,3.25,0.8,3.39,625,1
+ 11.56,2.05,3.23,28.5,119,3.18,5.08,0.47,1.87,6,0.93,3.69,465,1
+ 12.42,4.43,2.73,26.5,102,2.2,2.13,0.43,1.71,2.08,0.92,3.12,365,1
+ 13.05,5.8,2.13,21.5,86,2.62,2.65,0.3,2.01,2.6,0.73,3.1,380,1
+ 11.87,4.31,2.39,21,82,2.86,3.03,0.21,2.91,2.8,0.75,3.64,380,1
+ 12.07,2.16,2.17,21,85,2.6,2.65,0.37,1.35,2.76,0.86,3.28,378,1
+ 12.43,1.53,2.29,21.5,86,2.74,3.15,0.39,1.77,3.94,0.69,2.84,352,1
+ 11.79,2.13,2.78,28.5,92,2.13,2.24,0.58,1.76,3,0.97,2.44,466,1
+ 12.37,1.63,2.3,24.5,88,2.22,2.45,0.4,1.9,2.12,0.89,2.78,342,1
+ 12.04,4.3,2.38,22,80,2.1,1.75,0.42,1.35,2.6,0.79,2.57,580,1
+ 12.86,1.35,2.32,18,122,1.51,1.25,0.21,0.94,4.1,0.76,1.29,630,2
+ 12.88,2.99,2.4,20,104,1.3,1.22,0.24,0.83,5.4,0.74,1.42,530,2
+ 12.81,2.31,2.4,24,98,1.15,1.09,0.27,0.83,5.7,0.66,1.36,560,2
+ 12.7,3.55,2.36,21.5,106,1.7,1.2,0.17,0.84,5,0.78,1.29,600,2
+ 12.51,1.24,2.25,17.5,85,2,0.58,0.6,1.25,5.45,0.75,1.51,650,2
+ 12.6,2.46,2.2,18.5,94,1.62,0.66,0.63,0.94,7.1,0.73,1.58,695,2
+ 12.25,4.72,2.54,21,89,1.38,0.47,0.53,0.8,3.85,0.75,1.27,720,2
+ 12.53,5.51,2.64,25,96,1.79,0.6,0.63,1.1,5,0.82,1.69,515,2
+ 13.49,3.59,2.19,19.5,88,1.62,0.48,0.58,0.88,5.7,0.81,1.82,580,2
+ 12.84,2.96,2.61,24,101,2.32,0.6,0.53,0.81,4.92,0.89,2.15,590,2
+ 12.93,2.81,2.7,21,96,1.54,0.5,0.53,0.75,4.6,0.77,2.31,600,2
+ 13.36,2.56,2.35,20,89,1.4,0.5,0.37,0.64,5.6,0.7,2.47,780,2
+ 13.52,3.17,2.72,23.5,97,1.55,0.52,0.5,0.55,4.35,0.89,2.06,520,2
+ 13.62,4.95,2.35,20,92,2,0.8,0.47,1.02,4.4,0.91,2.05,550,2
+ 12.25,3.88,2.2,18.5,112,1.38,0.78,0.29,1.14,8.21,0.65,2,855,2
+ 13.16,3.57,2.15,21,102,1.5,0.55,0.43,1.3,4,0.6,1.68,830,2
+ 13.88,5.04,2.23,20,80,0.98,0.34,0.4,0.68,4.9,0.58,1.33,415,2
+ 12.87,4.61,2.48,21.5,86,1.7,0.65,0.47,0.86,7.65,0.54,1.86,625,2
+ 13.32,3.24,2.38,21.5,92,1.93,0.76,0.45,1.25,8.42,0.55,1.62,650,2
+ 13.08,3.9,2.36,21.5,113,1.41,1.39,0.34,1.14,9.4,0.57,1.33,550,2
+ 13.5,3.12,2.62,24,123,1.4,1.57,0.22,1.25,8.6,0.59,1.3,500,2
+ 12.79,2.67,2.48,22,112,1.48,1.36,0.24,1.26,10.8,0.48,1.47,480,2
+ 13.11,1.9,2.75,25.5,116,2.2,1.28,0.26,1.56,7.1,0.61,1.33,425,2
+ 13.23,3.3,2.28,18.5,98,1.8,0.83,0.61,1.87,10.52,0.56,1.51,675,2
+ 12.58,1.29,2.1,20,103,1.48,0.58,0.53,1.4,7.6,0.58,1.55,640,2
+ 13.17,5.19,2.32,22,93,1.74,0.63,0.61,1.55,7.9,0.6,1.48,725,2
+ 13.84,4.12,2.38,19.5,89,1.8,0.83,0.48,1.56,9.01,0.57,1.64,480,2
159
+ 12.45,3.03,2.64,27,97,1.9,0.58,0.63,1.14,7.5,0.67,1.73,880,2
160
+ 14.34,1.68,2.7,25,98,2.8,1.31,0.53,2.7,13,0.57,1.96,660,2
161
+ 13.48,1.67,2.64,22.5,89,2.6,1.1,0.52,2.29,11.75,0.57,1.78,620,2
162
+ 12.36,3.83,2.38,21,88,2.3,0.92,0.5,1.04,7.65,0.56,1.58,520,2
163
+ 13.69,3.26,2.54,20,107,1.83,0.56,0.5,0.8,5.88,0.96,1.82,680,2
164
+ 12.85,3.27,2.58,22,106,1.65,0.6,0.6,0.96,5.58,0.87,2.11,570,2
165
+ 12.96,3.45,2.35,18.5,106,1.39,0.7,0.4,0.94,5.28,0.68,1.75,675,2
166
+ 13.78,2.76,2.3,22,90,1.35,0.68,0.41,1.03,9.58,0.7,1.68,615,2
167
+ 13.73,4.36,2.26,22.5,88,1.28,0.47,0.52,1.15,6.62,0.78,1.75,520,2
168
+ 13.45,3.7,2.6,23,111,1.7,0.92,0.43,1.46,10.68,0.85,1.56,695,2
169
+ 12.82,3.37,2.3,19.5,88,1.48,0.66,0.4,0.97,10.26,0.72,1.75,685,2
170
+ 13.58,2.58,2.69,24.5,105,1.55,0.84,0.39,1.54,8.66,0.74,1.8,750,2
171
+ 13.4,4.6,2.86,25,112,1.98,0.96,0.27,1.11,8.5,0.67,1.92,630,2
172
+ 12.2,3.03,2.32,19,96,1.25,0.49,0.4,0.73,5.5,0.66,1.83,510,2
173
+ 12.77,2.39,2.28,19.5,86,1.39,0.51,0.48,0.64,9.899999,0.57,1.63,470,2
174
+ 14.16,2.51,2.48,20,91,1.68,0.7,0.44,1.24,9.7,0.62,1.71,660,2
175
+ 13.71,5.65,2.45,20.5,95,1.68,0.61,0.52,1.06,7.7,0.64,1.74,740,2
176
+ 13.4,3.91,2.48,23,102,1.8,0.75,0.43,1.41,7.3,0.7,1.56,750,2
177
+ 13.27,4.28,2.26,20,120,1.59,0.69,0.43,1.35,10.2,0.59,1.56,835,2
178
+ 13.17,2.59,2.37,20,120,1.65,0.68,0.53,1.46,9.3,0.6,1.62,840,2
179
+ 14.13,4.1,2.74,24.5,96,2.05,0.76,0.56,1.35,9.2,0.61,1.6,560,2
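Each row above is one sample from the Wine recognition dataset: 13 numeric features followed by an integer class label (0, 1, or 2) in the last column. A minimal sketch of loading such a file with NumPy, assuming the single header line used by scikit-learn's bundled CSVs (the path below is illustrative):

import numpy as np

# "wine_data.csv" is an illustrative path; skiprows=1 assumes a
# one-line header, as in scikit-learn's bundled CSV data files.
data = np.loadtxt("wine_data.csv", delimiter=",", skiprows=1)
X, y = data[:, :-1], data[:, -1].astype(int)  # 13 features, class label
print(X.shape, np.unique(y))  # e.g. (178, 13) [0 1 2]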
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1119/api-v1-jdf-1119.json.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:82f899edc59cb41fdd671b256a228e5e06dfc5e24c92712e75005b251b000865
+ size 1108
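Each of the `.gz` entries in this commit is stored as a Git LFS pointer stub like the one above rather than as the compressed file itself: three lines giving the pointer spec version, the SHA-256 of the real content, and its size in bytes. A minimal sketch of reading such a stub (the helper name is ours, not part of the repository):

import pathlib

def parse_lfs_pointer(path):
    # A pointer file is "key value" lines: version, oid, size.
    fields = dict(
        line.split(" ", 1)
        for line in pathlib.Path(path).read_text().splitlines()
        if line.strip()
    )
    return {
        "version": fields["version"],
        "oid": fields["oid"].removeprefix("sha256:"),
        "size": int(fields["size"]),
    }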
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1119/data-v1-dl-54002.arff.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6931af256195fcdd2e47dd8b0f9edf16fbf03b198e77b70e3dfd9877cdf09515
+ size 1190
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40589/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (208 Bytes).
 
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40945/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (208 Bytes).
 
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40945/api-v1-jd-40945.json.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:02882c6b02c4e068ef2b16f37f33ae3d5e9dd17ca29d01662c6924e16427eb5d
+ size 437
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40945/api-v1-jdq-40945.json.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9c7e5a46554ab6a8121832dc0cd9f7a60f5034cef1a5a7d61346bbd912516b54
+ size 1042
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40945/data-v1-dl-16826755.arff.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:516e961f519876e5f89b339a0364a08dd64160ac3a4d76d5ec62955bfd6d6ce5
+ size 32243
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_42074/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_42074/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (208 Bytes).
 
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_42585/api-v1-jd-42585.json.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7ccbf138e0663895f9cf511136bc6395c153f6238af2eacb6a367e86e15d1a71
+ size 1492
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_42585/api-v1-jdf-42585.json.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0985045a454c8186b4e690ebefb6cea1ef7c13292c98d50abda470a0ff3ad425
+ size 312
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_42585/api-v1-jdq-42585.json.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3736e7feb7ad30c68675c2c4e48a9fb262e80308c9083b100ddd0339da1fc282
+ size 348
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_42585/data-v1-dl-21854866.arff.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c8d00c6690576a9ec39e1cb77054e13296be0fdebab0fb35a64a0e8627b6e6f3
+ size 4519
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_61/api-v1-jd-61.json.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a5c7e79aa41ef580838fb9fc1906280f076c47be1741fddd5004ddb500eb57fe
+ size 898
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_61/api-v1-jdf-61.json.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:33cbd6ae945ba04969370ab35604e9363c87256393493382b5118a89d59386d6
+ size 268
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_61/api-v1-jdl-dn-iris-l-2-dv-1.json.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0bce20aae7fd903796d96d5b3a3677b7058fbc5f3fe0996ee9d491e4ee23d132
+ size 293
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_61/api-v1-jdl-dn-iris-l-2-s-act-.json.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a9f4b9317997df63ed8d2bb073a3906344c0e0be017fd384eaec36ced8b94bae
+ size 330
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_61/api-v1-jdq-61.json.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:424cd47c12a51c7bb8d8169fac80fb5601f152bd78468b241d4b115bf7d22f20
+ size 1121
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_61/data-v1-dl-61.arff.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:afe4736924606638984e573235191025d419c545d31dc8874c96b72f5ec5db73
+ size 2342
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_62/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_62/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (205 Bytes).
 
env-llmeval/lib/python3.10/site-packages/sklearn/manifold/__init__.py ADDED
@@ -0,0 +1,21 @@
+ """
+ The :mod:`sklearn.manifold` module implements data embedding techniques.
+ """
+
+ from ._isomap import Isomap
+ from ._locally_linear import LocallyLinearEmbedding, locally_linear_embedding
+ from ._mds import MDS, smacof
+ from ._spectral_embedding import SpectralEmbedding, spectral_embedding
+ from ._t_sne import TSNE, trustworthiness
+
+ __all__ = [
+     "locally_linear_embedding",
+     "LocallyLinearEmbedding",
+     "Isomap",
+     "MDS",
+     "smacof",
+     "SpectralEmbedding",
+     "spectral_embedding",
+     "TSNE",
+     "trustworthiness",
+ ]
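As a quick orientation, a minimal sketch of using two of the estimators re-exported above (the toy data is illustrative; any array of shape (n_samples, n_features) works):

import numpy as np
from sklearn.manifold import Isomap, TSNE

rng = np.random.RandomState(0)
X = rng.rand(100, 10)  # illustrative data

# Both estimators follow the usual fit_transform convention.
X_iso = Isomap(n_components=2).fit_transform(X)
X_tsne = TSNE(n_components=2, perplexity=30, init="pca").fit_transform(X)
print(X_iso.shape, X_tsne.shape)  # (100, 2) (100, 2)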
env-llmeval/lib/python3.10/site-packages/sklearn/manifold/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (673 Bytes).
 
env-llmeval/lib/python3.10/site-packages/sklearn/manifold/__pycache__/_isomap.cpython-310.pyc ADDED
Binary file (12.9 kB).
 
env-llmeval/lib/python3.10/site-packages/sklearn/manifold/__pycache__/_locally_linear.cpython-310.pyc ADDED
Binary file (23.7 kB).
 
env-llmeval/lib/python3.10/site-packages/sklearn/manifold/__pycache__/_mds.cpython-310.pyc ADDED
Binary file (20.5 kB).
 
env-llmeval/lib/python3.10/site-packages/sklearn/manifold/__pycache__/_spectral_embedding.cpython-310.pyc ADDED
Binary file (20.8 kB).
 
env-llmeval/lib/python3.10/site-packages/sklearn/manifold/__pycache__/_t_sne.cpython-310.pyc ADDED
Binary file (33.1 kB).
 
env-llmeval/lib/python3.10/site-packages/sklearn/manifold/_barnes_hut_tsne.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (250 kB).
 
env-llmeval/lib/python3.10/site-packages/sklearn/manifold/_isomap.py ADDED
@@ -0,0 +1,438 @@
+ """Isomap for manifold learning"""
+
+ # Author: Jake Vanderplas -- <[email protected]>
+ # License: BSD 3 clause (C) 2011
+ import warnings
+ from numbers import Integral, Real
+
+ import numpy as np
+ from scipy.sparse import issparse
+ from scipy.sparse.csgraph import connected_components, shortest_path
+
+ from ..base import (
+     BaseEstimator,
+     ClassNamePrefixFeaturesOutMixin,
+     TransformerMixin,
+     _fit_context,
+ )
+ from ..decomposition import KernelPCA
+ from ..metrics.pairwise import _VALID_METRICS
+ from ..neighbors import NearestNeighbors, kneighbors_graph, radius_neighbors_graph
+ from ..preprocessing import KernelCenterer
+ from ..utils._param_validation import Interval, StrOptions
+ from ..utils.graph import _fix_connected_components
+ from ..utils.validation import check_is_fitted
+
+
+ class Isomap(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator):
+     """Isomap Embedding.
+
+     Non-linear dimensionality reduction through Isometric Mapping.
+
+     Read more in the :ref:`User Guide <isomap>`.
+
+     Parameters
+     ----------
+     n_neighbors : int or None, default=5
+         Number of neighbors to consider for each point. If `n_neighbors` is an int,
+         then `radius` must be `None`.
+
+     radius : float or None, default=None
+         Limiting distance of neighbors to return. If `radius` is a float,
+         then `n_neighbors` must be set to `None`.
+
+         .. versionadded:: 1.1
+
+     n_components : int, default=2
+         Number of coordinates for the manifold.
+
+     eigen_solver : {'auto', 'arpack', 'dense'}, default='auto'
+         'auto' : Attempt to choose the most efficient solver
+         for the given problem.
+
+         'arpack' : Use Arnoldi decomposition to find the eigenvalues
+         and eigenvectors.
+
+         'dense' : Use a direct solver (i.e. LAPACK)
+         for the eigenvalue decomposition.
+
+     tol : float, default=0
+         Convergence tolerance passed to arpack or lobpcg.
+         Not used if eigen_solver == 'dense'.
+
+     max_iter : int, default=None
+         Maximum number of iterations for the arpack solver.
+         Not used if eigen_solver == 'dense'.
+
+     path_method : {'auto', 'FW', 'D'}, default='auto'
+         Method to use in finding shortest path.
+
+         'auto' : attempt to choose the best algorithm automatically.
+
+         'FW' : Floyd-Warshall algorithm.
+
+         'D' : Dijkstra's algorithm.
+
+     neighbors_algorithm : {'auto', 'brute', 'kd_tree', 'ball_tree'}, \
+             default='auto'
+         Algorithm to use for nearest neighbors search,
+         passed to neighbors.NearestNeighbors instance.
+
+     n_jobs : int or None, default=None
+         The number of parallel jobs to run.
+         ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
+         ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
+         for more details.
+
+     metric : str, or callable, default="minkowski"
+         The metric to use when calculating distance between instances in a
+         feature array. If metric is a string or callable, it must be one of
+         the options allowed by :func:`sklearn.metrics.pairwise_distances` for
+         its metric parameter.
+         If metric is "precomputed", X is assumed to be a distance matrix and
+         must be square. X may be a :term:`sparse graph`.
+
+         .. versionadded:: 0.22
+
+     p : float, default=2
+         Parameter for the Minkowski metric from
+         sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is
+         equivalent to using manhattan_distance (l1), and euclidean_distance
+         (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
+
+         .. versionadded:: 0.22
+
+     metric_params : dict, default=None
+         Additional keyword arguments for the metric function.
+
+         .. versionadded:: 0.22
+
+     Attributes
+     ----------
+     embedding_ : array-like, shape (n_samples, n_components)
+         Stores the embedding vectors.
+
+     kernel_pca_ : object
+         :class:`~sklearn.decomposition.KernelPCA` object used to implement the
+         embedding.
+
+     nbrs_ : sklearn.neighbors.NearestNeighbors instance
+         Stores nearest neighbors instance, including BallTree or KDtree
+         if applicable.
+
+     dist_matrix_ : array-like, shape (n_samples, n_samples)
+         Stores the geodesic distance matrix of training data.
+
+     n_features_in_ : int
+         Number of features seen during :term:`fit`.
+
+         .. versionadded:: 0.24
+
+     feature_names_in_ : ndarray of shape (`n_features_in_`,)
+         Names of features seen during :term:`fit`. Defined only when `X`
+         has feature names that are all strings.
+
+         .. versionadded:: 1.0
+
+     See Also
+     --------
+     sklearn.decomposition.PCA : Principal component analysis that is a linear
+         dimensionality reduction method.
+     sklearn.decomposition.KernelPCA : Non-linear dimensionality reduction using
+         kernels and PCA.
+     MDS : Manifold learning using multidimensional scaling.
+     TSNE : T-distributed Stochastic Neighbor Embedding.
+     LocallyLinearEmbedding : Manifold learning using Locally Linear Embedding.
+     SpectralEmbedding : Spectral embedding for non-linear dimensionality.
+
+     References
+     ----------
+
+     .. [1] Tenenbaum, J.B.; De Silva, V.; & Langford, J.C. A global geometric
+            framework for nonlinear dimensionality reduction. Science 290 (5500)
+
+     Examples
+     --------
+     >>> from sklearn.datasets import load_digits
+     >>> from sklearn.manifold import Isomap
+     >>> X, _ = load_digits(return_X_y=True)
+     >>> X.shape
+     (1797, 64)
+     >>> embedding = Isomap(n_components=2)
+     >>> X_transformed = embedding.fit_transform(X[:100])
+     >>> X_transformed.shape
+     (100, 2)
+     """
+
+     _parameter_constraints: dict = {
+         "n_neighbors": [Interval(Integral, 1, None, closed="left"), None],
+         "radius": [Interval(Real, 0, None, closed="both"), None],
+         "n_components": [Interval(Integral, 1, None, closed="left")],
+         "eigen_solver": [StrOptions({"auto", "arpack", "dense"})],
+         "tol": [Interval(Real, 0, None, closed="left")],
+         "max_iter": [Interval(Integral, 1, None, closed="left"), None],
+         "path_method": [StrOptions({"auto", "FW", "D"})],
+         "neighbors_algorithm": [StrOptions({"auto", "brute", "kd_tree", "ball_tree"})],
+         "n_jobs": [Integral, None],
+         "p": [Interval(Real, 1, None, closed="left")],
+         "metric": [StrOptions(set(_VALID_METRICS) | {"precomputed"}), callable],
+         "metric_params": [dict, None],
+     }
+
+     def __init__(
+         self,
+         *,
+         n_neighbors=5,
+         radius=None,
+         n_components=2,
+         eigen_solver="auto",
+         tol=0,
+         max_iter=None,
+         path_method="auto",
+         neighbors_algorithm="auto",
+         n_jobs=None,
+         metric="minkowski",
+         p=2,
+         metric_params=None,
+     ):
+         self.n_neighbors = n_neighbors
+         self.radius = radius
+         self.n_components = n_components
+         self.eigen_solver = eigen_solver
+         self.tol = tol
+         self.max_iter = max_iter
+         self.path_method = path_method
+         self.neighbors_algorithm = neighbors_algorithm
+         self.n_jobs = n_jobs
+         self.metric = metric
+         self.p = p
+         self.metric_params = metric_params
+
+     def _fit_transform(self, X):
+         if self.n_neighbors is not None and self.radius is not None:
+             raise ValueError(
+                 "Both n_neighbors and radius are provided. Use"
+                 f" Isomap(radius={self.radius}, n_neighbors=None) if intended to use"
+                 " radius-based neighbors"
+             )
+
+         self.nbrs_ = NearestNeighbors(
+             n_neighbors=self.n_neighbors,
+             radius=self.radius,
+             algorithm=self.neighbors_algorithm,
+             metric=self.metric,
+             p=self.p,
+             metric_params=self.metric_params,
+             n_jobs=self.n_jobs,
+         )
+         self.nbrs_.fit(X)
+         self.n_features_in_ = self.nbrs_.n_features_in_
+         if hasattr(self.nbrs_, "feature_names_in_"):
+             self.feature_names_in_ = self.nbrs_.feature_names_in_
+
+         self.kernel_pca_ = KernelPCA(
+             n_components=self.n_components,
+             kernel="precomputed",
+             eigen_solver=self.eigen_solver,
+             tol=self.tol,
+             max_iter=self.max_iter,
+             n_jobs=self.n_jobs,
+         ).set_output(transform="default")
+
+         if self.n_neighbors is not None:
+             nbg = kneighbors_graph(
+                 self.nbrs_,
+                 self.n_neighbors,
+                 metric=self.metric,
+                 p=self.p,
+                 metric_params=self.metric_params,
+                 mode="distance",
+                 n_jobs=self.n_jobs,
+             )
+         else:
+             nbg = radius_neighbors_graph(
+                 self.nbrs_,
+                 radius=self.radius,
+                 metric=self.metric,
+                 p=self.p,
+                 metric_params=self.metric_params,
+                 mode="distance",
+                 n_jobs=self.n_jobs,
+             )
+
+         # Compute the number of connected components, and connect the different
+         # components to be able to compute a shortest path between all pairs
+         # of samples in the graph.
+         # Similar fix to cluster._agglomerative._fix_connectivity.
+         n_connected_components, labels = connected_components(nbg)
+         if n_connected_components > 1:
+             if self.metric == "precomputed" and issparse(X):
+                 raise RuntimeError(
+                     "The number of connected components of the neighbors graph"
+                     f" is {n_connected_components} > 1. The graph cannot be "
+                     "completed with metric='precomputed', and Isomap cannot be "
+                     "fitted. Increase the number of neighbors to avoid this "
+                     "issue, or precompute the full distance matrix instead "
+                     "of passing a sparse neighbors graph."
+                 )
+             warnings.warn(
+                 (
+                     "The number of connected components of the neighbors graph "
+                     f"is {n_connected_components} > 1. Completing the graph to fit"
+                     " Isomap might be slow. Increase the number of neighbors to "
+                     "avoid this issue."
+                 ),
+                 stacklevel=2,
+             )
+
+             # use array validated by NearestNeighbors
+             nbg = _fix_connected_components(
+                 X=self.nbrs_._fit_X,
+                 graph=nbg,
+                 n_connected_components=n_connected_components,
+                 component_labels=labels,
+                 mode="distance",
+                 metric=self.nbrs_.effective_metric_,
+                 **self.nbrs_.effective_metric_params_,
+             )
+
+         self.dist_matrix_ = shortest_path(nbg, method=self.path_method, directed=False)
+
+         if self.nbrs_._fit_X.dtype == np.float32:
+             self.dist_matrix_ = self.dist_matrix_.astype(
+                 self.nbrs_._fit_X.dtype, copy=False
+             )
+
+         G = self.dist_matrix_**2
+         G *= -0.5
+
+         self.embedding_ = self.kernel_pca_.fit_transform(G)
+         self._n_features_out = self.embedding_.shape[1]
+
+     def reconstruction_error(self):
+         """Compute the reconstruction error for the embedding.
+
+         Returns
+         -------
+         reconstruction_error : float
+             Reconstruction error.
+
+         Notes
+         -----
+         The cost function of an isomap embedding is
+
+         ``E = frobenius_norm[K(D) - K(D_fit)] / n_samples``
+
+         Where D is the matrix of distances for the input data X,
+         D_fit is the matrix of distances for the output embedding X_fit,
+         and K is the isomap kernel:
+
+         ``K(D) = -0.5 * (I - 1/n_samples) * D^2 * (I - 1/n_samples)``
+         """
+         G = -0.5 * self.dist_matrix_**2
+         G_center = KernelCenterer().fit_transform(G)
+         evals = self.kernel_pca_.eigenvalues_
+         return np.sqrt(np.sum(G_center**2) - np.sum(evals**2)) / G.shape[0]
+
+     @_fit_context(
+         # Isomap.metric is not validated yet
+         prefer_skip_nested_validation=False
+     )
+     def fit(self, X, y=None):
+         """Compute the embedding vectors for data X.
+
+         Parameters
+         ----------
+         X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
+             Sample data, shape = (n_samples, n_features), in the form of a
+             numpy array, sparse matrix, precomputed tree, or NearestNeighbors
+             object.
+
+         y : Ignored
+             Not used, present for API consistency by convention.
+
+         Returns
+         -------
+         self : object
+             Returns a fitted instance of self.
+         """
+         self._fit_transform(X)
+         return self
+
+     @_fit_context(
+         # Isomap.metric is not validated yet
+         prefer_skip_nested_validation=False
+     )
+     def fit_transform(self, X, y=None):
+         """Fit the model from data in X and transform X.
+
+         Parameters
+         ----------
+         X : {array-like, sparse matrix, BallTree, KDTree}
+             Training vector, where `n_samples` is the number of samples
+             and `n_features` is the number of features.
+
+         y : Ignored
+             Not used, present for API consistency by convention.
+
+         Returns
+         -------
+         X_new : array-like, shape (n_samples, n_components)
+             X transformed in the new space.
+         """
+         self._fit_transform(X)
+         return self.embedding_
+
+     def transform(self, X):
+         """Transform X.
+
+         This is implemented by linking the points X into the graph of geodesic
+         distances of the training data. First the `n_neighbors` nearest
+         neighbors of X are found in the training data, and from these the
+         shortest geodesic distances from each point in X to each point in
+         the training data are computed in order to construct the kernel.
+         The embedding of X is the projection of this kernel onto the
+         embedding vectors of the training set.
+
+         Parameters
+         ----------
+         X : {array-like, sparse matrix}, shape (n_queries, n_features)
+             If neighbors_algorithm='precomputed', X is assumed to be a
+             distance matrix or a sparse graph of shape
+             (n_queries, n_samples_fit).
+
+         Returns
+         -------
+         X_new : array-like, shape (n_queries, n_components)
+             X transformed in the new space.
+         """
+         check_is_fitted(self)
+         if self.n_neighbors is not None:
+             distances, indices = self.nbrs_.kneighbors(X, return_distance=True)
+         else:
+             distances, indices = self.nbrs_.radius_neighbors(X, return_distance=True)
+
+         # Create the graph of shortest distances from X to
+         # training data via the nearest neighbors of X.
+         # This can be done as a single array operation, but it potentially
+         # takes a lot of memory. To avoid that, use a loop:
+
+         n_samples_fit = self.nbrs_.n_samples_fit_
+         n_queries = distances.shape[0]
+
+         if hasattr(X, "dtype") and X.dtype == np.float32:
+             dtype = np.float32
+         else:
+             dtype = np.float64
+
+         G_X = np.zeros((n_queries, n_samples_fit), dtype)
+         for i in range(n_queries):
+             G_X[i] = np.min(self.dist_matrix_[indices[i]] + distances[i][:, None], 0)
+
+         G_X **= 2
+         G_X *= -0.5
+
+         return self.kernel_pca_.transform(G_X)
+
+     def _more_tags(self):
+         return {"preserves_dtype": [np.float64, np.float32]}
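The three steps of `_fit_transform` above (neighborhood graph, geodesic distances via `shortest_path`, KernelPCA on the `-0.5 * D**2` kernel) can be reproduced standalone. A sketch under the assumption that the neighborhood graph comes out connected (otherwise the `_fix_connected_components` repair step above is needed):

import numpy as np
from scipy.sparse.csgraph import shortest_path
from sklearn.datasets import make_s_curve
from sklearn.decomposition import KernelPCA
from sklearn.neighbors import kneighbors_graph

X, _ = make_s_curve(n_samples=200, random_state=0)

# Step 1: k-nearest-neighbors graph with edge weights = distances.
graph = kneighbors_graph(X, n_neighbors=8, mode="distance")
# Step 2: geodesic (shortest-path) distances over the graph.
D = shortest_path(graph, method="D", directed=False)
# Step 3: KernelPCA on the Isomap kernel -0.5 * D**2.
emb = KernelPCA(n_components=2, kernel="precomputed").fit_transform(-0.5 * D**2)
print(emb.shape)  # (200, 2)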
env-llmeval/lib/python3.10/site-packages/sklearn/manifold/_locally_linear.py ADDED
@@ -0,0 +1,841 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Locally Linear Embedding"""
2
+
3
+ # Author: Fabian Pedregosa -- <[email protected]>
4
+ # Jake Vanderplas -- <[email protected]>
5
+ # License: BSD 3 clause (C) INRIA 2011
6
+
7
+ from numbers import Integral, Real
8
+
9
+ import numpy as np
10
+ from scipy.linalg import eigh, qr, solve, svd
11
+ from scipy.sparse import csr_matrix, eye
12
+ from scipy.sparse.linalg import eigsh
13
+
14
+ from ..base import (
15
+ BaseEstimator,
16
+ ClassNamePrefixFeaturesOutMixin,
17
+ TransformerMixin,
18
+ _fit_context,
19
+ _UnstableArchMixin,
20
+ )
21
+ from ..neighbors import NearestNeighbors
22
+ from ..utils import check_array, check_random_state
23
+ from ..utils._arpack import _init_arpack_v0
24
+ from ..utils._param_validation import Interval, StrOptions
25
+ from ..utils.extmath import stable_cumsum
26
+ from ..utils.validation import FLOAT_DTYPES, check_is_fitted
27
+
28
+
29
+ def barycenter_weights(X, Y, indices, reg=1e-3):
30
+ """Compute barycenter weights of X from Y along the first axis
31
+
32
+ We estimate the weights to assign to each point in Y[indices] to recover
33
+ the point X[i]. The barycenter weights sum to 1.
34
+
35
+ Parameters
36
+ ----------
37
+ X : array-like, shape (n_samples, n_dim)
38
+
39
+ Y : array-like, shape (n_samples, n_dim)
40
+
41
+ indices : array-like, shape (n_samples, n_dim)
42
+ Indices of the points in Y used to compute the barycenter
43
+
44
+ reg : float, default=1e-3
45
+ Amount of regularization to add for the problem to be
46
+ well-posed in the case of n_neighbors > n_dim
47
+
48
+ Returns
49
+ -------
50
+ B : array-like, shape (n_samples, n_neighbors)
51
+
52
+ Notes
53
+ -----
54
+ See developers note for more information.
55
+ """
56
+ X = check_array(X, dtype=FLOAT_DTYPES)
57
+ Y = check_array(Y, dtype=FLOAT_DTYPES)
58
+ indices = check_array(indices, dtype=int)
59
+
60
+ n_samples, n_neighbors = indices.shape
61
+ assert X.shape[0] == n_samples
62
+
63
+ B = np.empty((n_samples, n_neighbors), dtype=X.dtype)
64
+ v = np.ones(n_neighbors, dtype=X.dtype)
65
+
66
+ # this might raise a LinalgError if G is singular and has trace
67
+ # zero
68
+ for i, ind in enumerate(indices):
69
+ A = Y[ind]
70
+ C = A - X[i] # broadcasting
71
+ G = np.dot(C, C.T)
72
+ trace = np.trace(G)
73
+ if trace > 0:
74
+ R = reg * trace
75
+ else:
76
+ R = reg
77
+ G.flat[:: n_neighbors + 1] += R
78
+ w = solve(G, v, assume_a="pos")
79
+ B[i, :] = w / np.sum(w)
80
+ return B
81
+
82
+
83
+ def barycenter_kneighbors_graph(X, n_neighbors, reg=1e-3, n_jobs=None):
84
+ """Computes the barycenter weighted graph of k-Neighbors for points in X
85
+
86
+ Parameters
87
+ ----------
88
+ X : {array-like, NearestNeighbors}
89
+ Sample data, shape = (n_samples, n_features), in the form of a
90
+ numpy array or a NearestNeighbors object.
91
+
92
+ n_neighbors : int
93
+ Number of neighbors for each sample.
94
+
95
+ reg : float, default=1e-3
96
+ Amount of regularization when solving the least-squares
97
+ problem. Only relevant if mode='barycenter'. If None, use the
98
+ default.
99
+
100
+ n_jobs : int or None, default=None
101
+ The number of parallel jobs to run for neighbors search.
102
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
103
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
104
+ for more details.
105
+
106
+ Returns
107
+ -------
108
+ A : sparse matrix in CSR format, shape = [n_samples, n_samples]
109
+ A[i, j] is assigned the weight of edge that connects i to j.
110
+
111
+ See Also
112
+ --------
113
+ sklearn.neighbors.kneighbors_graph
114
+ sklearn.neighbors.radius_neighbors_graph
115
+ """
116
+ knn = NearestNeighbors(n_neighbors=n_neighbors + 1, n_jobs=n_jobs).fit(X)
117
+ X = knn._fit_X
118
+ n_samples = knn.n_samples_fit_
119
+ ind = knn.kneighbors(X, return_distance=False)[:, 1:]
120
+ data = barycenter_weights(X, X, ind, reg=reg)
121
+ indptr = np.arange(0, n_samples * n_neighbors + 1, n_neighbors)
122
+ return csr_matrix((data.ravel(), ind.ravel(), indptr), shape=(n_samples, n_samples))
123
+
124
+
125
+ def null_space(
126
+ M, k, k_skip=1, eigen_solver="arpack", tol=1e-6, max_iter=100, random_state=None
127
+ ):
128
+ """
129
+ Find the null space of a matrix M.
130
+
131
+ Parameters
132
+ ----------
133
+ M : {array, matrix, sparse matrix, LinearOperator}
134
+ Input covariance matrix: should be symmetric positive semi-definite
135
+
136
+ k : int
137
+ Number of eigenvalues/vectors to return
138
+
139
+ k_skip : int, default=1
140
+ Number of low eigenvalues to skip.
141
+
142
+ eigen_solver : {'auto', 'arpack', 'dense'}, default='arpack'
143
+ auto : algorithm will attempt to choose the best method for input data
144
+ arpack : use arnoldi iteration in shift-invert mode.
145
+ For this method, M may be a dense matrix, sparse matrix,
146
+ or general linear operator.
147
+ Warning: ARPACK can be unstable for some problems. It is
148
+ best to try several random seeds in order to check results.
149
+ dense : use standard dense matrix operations for the eigenvalue
150
+ decomposition. For this method, M must be an array
151
+ or matrix type. This method should be avoided for
152
+ large problems.
153
+
154
+ tol : float, default=1e-6
155
+ Tolerance for 'arpack' method.
156
+ Not used if eigen_solver=='dense'.
157
+
158
+ max_iter : int, default=100
159
+ Maximum number of iterations for 'arpack' method.
160
+ Not used if eigen_solver=='dense'
161
+
162
+ random_state : int, RandomState instance, default=None
163
+ Determines the random number generator when ``solver`` == 'arpack'.
164
+ Pass an int for reproducible results across multiple function calls.
165
+ See :term:`Glossary <random_state>`.
166
+ """
167
+ if eigen_solver == "auto":
168
+ if M.shape[0] > 200 and k + k_skip < 10:
169
+ eigen_solver = "arpack"
170
+ else:
171
+ eigen_solver = "dense"
172
+
173
+ if eigen_solver == "arpack":
174
+ v0 = _init_arpack_v0(M.shape[0], random_state)
175
+ try:
176
+ eigen_values, eigen_vectors = eigsh(
177
+ M, k + k_skip, sigma=0.0, tol=tol, maxiter=max_iter, v0=v0
178
+ )
179
+ except RuntimeError as e:
180
+ raise ValueError(
181
+ "Error in determining null-space with ARPACK. Error message: "
182
+ "'%s'. Note that eigen_solver='arpack' can fail when the "
183
+ "weight matrix is singular or otherwise ill-behaved. In that "
184
+ "case, eigen_solver='dense' is recommended. See online "
185
+ "documentation for more information." % e
186
+ ) from e
187
+
188
+ return eigen_vectors[:, k_skip:], np.sum(eigen_values[k_skip:])
189
+ elif eigen_solver == "dense":
190
+ if hasattr(M, "toarray"):
191
+ M = M.toarray()
192
+ eigen_values, eigen_vectors = eigh(
193
+ M, subset_by_index=(k_skip, k + k_skip - 1), overwrite_a=True
194
+ )
195
+ index = np.argsort(np.abs(eigen_values))
196
+ return eigen_vectors[:, index], np.sum(eigen_values)
197
+ else:
198
+ raise ValueError("Unrecognized eigen_solver '%s'" % eigen_solver)
199
+
200
+
201
+ def locally_linear_embedding(
202
+ X,
203
+ *,
204
+ n_neighbors,
205
+ n_components,
206
+ reg=1e-3,
207
+ eigen_solver="auto",
208
+ tol=1e-6,
209
+ max_iter=100,
210
+ method="standard",
211
+ hessian_tol=1e-4,
212
+ modified_tol=1e-12,
213
+ random_state=None,
214
+ n_jobs=None,
215
+ ):
216
+ """Perform a Locally Linear Embedding analysis on the data.
217
+
218
+ Read more in the :ref:`User Guide <locally_linear_embedding>`.
219
+
220
+ Parameters
221
+ ----------
222
+ X : {array-like, NearestNeighbors}
223
+ Sample data, shape = (n_samples, n_features), in the form of a
224
+ numpy array or a NearestNeighbors object.
225
+
226
+ n_neighbors : int
227
+ Number of neighbors to consider for each point.
228
+
229
+ n_components : int
230
+ Number of coordinates for the manifold.
231
+
232
+ reg : float, default=1e-3
233
+ Regularization constant, multiplies the trace of the local covariance
234
+ matrix of the distances.
235
+
236
+ eigen_solver : {'auto', 'arpack', 'dense'}, default='auto'
237
+ auto : algorithm will attempt to choose the best method for input data
238
+
239
+ arpack : use arnoldi iteration in shift-invert mode.
240
+ For this method, M may be a dense matrix, sparse matrix,
241
+ or general linear operator.
242
+ Warning: ARPACK can be unstable for some problems. It is
243
+ best to try several random seeds in order to check results.
244
+
245
+ dense : use standard dense matrix operations for the eigenvalue
246
+ decomposition. For this method, M must be an array
247
+ or matrix type. This method should be avoided for
248
+ large problems.
249
+
250
+ tol : float, default=1e-6
251
+ Tolerance for 'arpack' method
252
+ Not used if eigen_solver=='dense'.
253
+
254
+ max_iter : int, default=100
255
+ Maximum number of iterations for the arpack solver.
256
+
257
+ method : {'standard', 'hessian', 'modified', 'ltsa'}, default='standard'
258
+ standard : use the standard locally linear embedding algorithm.
259
+ see reference [1]_
260
+ hessian : use the Hessian eigenmap method. This method requires
261
+ n_neighbors > n_components * (1 + (n_components + 1) / 2.
262
+ see reference [2]_
263
+ modified : use the modified locally linear embedding algorithm.
264
+ see reference [3]_
265
+ ltsa : use local tangent space alignment algorithm
266
+ see reference [4]_
267
+
268
+ hessian_tol : float, default=1e-4
269
+ Tolerance for Hessian eigenmapping method.
270
+ Only used if method == 'hessian'.
271
+
272
+ modified_tol : float, default=1e-12
273
+ Tolerance for modified LLE method.
274
+ Only used if method == 'modified'.
275
+
276
+ random_state : int, RandomState instance, default=None
277
+ Determines the random number generator when ``solver`` == 'arpack'.
278
+ Pass an int for reproducible results across multiple function calls.
279
+ See :term:`Glossary <random_state>`.
280
+
281
+ n_jobs : int or None, default=None
282
+ The number of parallel jobs to run for neighbors search.
283
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
284
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
285
+ for more details.
286
+
287
+ Returns
288
+ -------
289
+ Y : array-like, shape [n_samples, n_components]
290
+ Embedding vectors.
291
+
292
+ squared_error : float
293
+ Reconstruction error for the embedding vectors. Equivalent to
294
+ ``norm(Y - W Y, 'fro')**2``, where W are the reconstruction weights.
295
+
296
+ References
297
+ ----------
298
+
299
+ .. [1] Roweis, S. & Saul, L. Nonlinear dimensionality reduction
300
+ by locally linear embedding. Science 290:2323 (2000).
301
+ .. [2] Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
302
+ linear embedding techniques for high-dimensional data.
303
+ Proc Natl Acad Sci U S A. 100:5591 (2003).
304
+ .. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
305
+ Embedding Using Multiple Weights.
306
+ <https://citeseerx.ist.psu.edu/doc_view/pid/0b060fdbd92cbcc66b383bcaa9ba5e5e624d7ee3>`_
307
+ .. [4] Zhang, Z. & Zha, H. Principal manifolds and nonlinear
308
+ dimensionality reduction via tangent space alignment.
309
+ Journal of Shanghai Univ. 8:406 (2004)
310
+
311
+ Examples
312
+ --------
313
+ >>> from sklearn.datasets import load_digits
314
+ >>> from sklearn.manifold import locally_linear_embedding
315
+ >>> X, _ = load_digits(return_X_y=True)
316
+ >>> X.shape
317
+ (1797, 64)
318
+ >>> embedding, _ = locally_linear_embedding(X[:100],n_neighbors=5, n_components=2)
319
+ >>> embedding.shape
320
+ (100, 2)
321
+ """
322
+ if eigen_solver not in ("auto", "arpack", "dense"):
323
+ raise ValueError("unrecognized eigen_solver '%s'" % eigen_solver)
324
+
325
+ if method not in ("standard", "hessian", "modified", "ltsa"):
326
+ raise ValueError("unrecognized method '%s'" % method)
327
+
328
+ nbrs = NearestNeighbors(n_neighbors=n_neighbors + 1, n_jobs=n_jobs)
329
+ nbrs.fit(X)
330
+ X = nbrs._fit_X
331
+
332
+ N, d_in = X.shape
333
+
334
+ if n_components > d_in:
335
+ raise ValueError(
336
+ "output dimension must be less than or equal to input dimension"
337
+ )
338
+ if n_neighbors >= N:
339
+ raise ValueError(
340
+ "Expected n_neighbors <= n_samples, but n_samples = %d, n_neighbors = %d"
341
+ % (N, n_neighbors)
342
+ )
343
+
344
+ if n_neighbors <= 0:
345
+ raise ValueError("n_neighbors must be positive")
346
+
347
+ M_sparse = eigen_solver != "dense"
348
+
349
+ if method == "standard":
350
+ W = barycenter_kneighbors_graph(
351
+ nbrs, n_neighbors=n_neighbors, reg=reg, n_jobs=n_jobs
352
+ )
353
+
354
+ # we'll compute M = (I-W)'(I-W)
355
+ # depending on the solver, we'll do this differently
356
+ if M_sparse:
357
+ M = eye(*W.shape, format=W.format) - W
358
+ M = (M.T * M).tocsr()
359
+ else:
360
+ M = (W.T * W - W.T - W).toarray()
361
+ M.flat[:: M.shape[0] + 1] += 1 # W = W - I = W - I
362
+
363
+ elif method == "hessian":
364
+ dp = n_components * (n_components + 1) // 2
365
+
366
+ if n_neighbors <= n_components + dp:
367
+ raise ValueError(
368
+ "for method='hessian', n_neighbors must be "
369
+ "greater than "
370
+ "[n_components * (n_components + 3) / 2]"
371
+ )
372
+
373
+ neighbors = nbrs.kneighbors(
374
+ X, n_neighbors=n_neighbors + 1, return_distance=False
375
+ )
376
+ neighbors = neighbors[:, 1:]
377
+
378
+ Yi = np.empty((n_neighbors, 1 + n_components + dp), dtype=np.float64)
379
+ Yi[:, 0] = 1
380
+
381
+ M = np.zeros((N, N), dtype=np.float64)
382
+
383
+ use_svd = n_neighbors > d_in
384
+
385
+ for i in range(N):
386
+ Gi = X[neighbors[i]]
387
+ Gi -= Gi.mean(0)
388
+
389
+ # build Hessian estimator
390
+ if use_svd:
391
+ U = svd(Gi, full_matrices=0)[0]
392
+ else:
393
+ Ci = np.dot(Gi, Gi.T)
394
+ U = eigh(Ci)[1][:, ::-1]
395
+
396
+ Yi[:, 1 : 1 + n_components] = U[:, :n_components]
397
+
398
+ j = 1 + n_components
399
+ for k in range(n_components):
400
+ Yi[:, j : j + n_components - k] = U[:, k : k + 1] * U[:, k:n_components]
401
+ j += n_components - k
402
+
403
+ Q, R = qr(Yi)
404
+
405
+ w = Q[:, n_components + 1 :]
406
+ S = w.sum(0)
407
+
408
+ S[np.where(abs(S) < hessian_tol)] = 1
409
+ w /= S
410
+
411
+ nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
412
+ M[nbrs_x, nbrs_y] += np.dot(w, w.T)
413
+
414
+ if M_sparse:
415
+ M = csr_matrix(M)
416
+
417
+ elif method == "modified":
418
+ if n_neighbors < n_components:
419
+ raise ValueError("modified LLE requires n_neighbors >= n_components")
420
+
421
+ neighbors = nbrs.kneighbors(
422
+ X, n_neighbors=n_neighbors + 1, return_distance=False
423
+ )
424
+ neighbors = neighbors[:, 1:]
425
+
426
+ # find the eigenvectors and eigenvalues of each local covariance
427
+ # matrix. We want V[i] to be a [n_neighbors x n_neighbors] matrix,
428
+ # where the columns are eigenvectors
429
+ V = np.zeros((N, n_neighbors, n_neighbors))
430
+ nev = min(d_in, n_neighbors)
431
+ evals = np.zeros([N, nev])
432
+
433
+ # choose the most efficient way to find the eigenvectors
434
+ use_svd = n_neighbors > d_in
435
+
436
+ if use_svd:
437
+ for i in range(N):
438
+ X_nbrs = X[neighbors[i]] - X[i]
439
+ V[i], evals[i], _ = svd(X_nbrs, full_matrices=True)
440
+ evals **= 2
441
+ else:
442
+ for i in range(N):
443
+ X_nbrs = X[neighbors[i]] - X[i]
444
+ C_nbrs = np.dot(X_nbrs, X_nbrs.T)
445
+ evi, vi = eigh(C_nbrs)
446
+ evals[i] = evi[::-1]
447
+ V[i] = vi[:, ::-1]
448
+
449
+ # find regularized weights: this is like normal LLE.
450
+ # because we've already computed the SVD of each covariance matrix,
451
+ # it's faster to use this rather than np.linalg.solve
452
+ reg = 1e-3 * evals.sum(1)
453
+
454
+ tmp = np.dot(V.transpose(0, 2, 1), np.ones(n_neighbors))
455
+ tmp[:, :nev] /= evals + reg[:, None]
456
+ tmp[:, nev:] /= reg[:, None]
457
+
458
+ w_reg = np.zeros((N, n_neighbors))
459
+ for i in range(N):
460
+ w_reg[i] = np.dot(V[i], tmp[i])
461
+ w_reg /= w_reg.sum(1)[:, None]
462
+
463
+ # calculate eta: the median of the ratio of small to large eigenvalues
464
+ # across the points. This is used to determine s_i, below
465
+ rho = evals[:, n_components:].sum(1) / evals[:, :n_components].sum(1)
466
+ eta = np.median(rho)
467
+
468
+ # find s_i, the size of the "almost null space" for each point:
469
+ # this is the size of the largest set of eigenvalues
470
+ # such that Sum[v; v in set]/Sum[v; v not in set] < eta
471
+ s_range = np.zeros(N, dtype=int)
472
+ evals_cumsum = stable_cumsum(evals, 1)
473
+ eta_range = evals_cumsum[:, -1:] / evals_cumsum[:, :-1] - 1
474
+ for i in range(N):
475
+ s_range[i] = np.searchsorted(eta_range[i, ::-1], eta)
476
+ s_range += n_neighbors - nev # number of zero eigenvalues
477
+
478
+ # Now calculate M.
479
+ # This is the [N x N] matrix whose null space is the desired embedding
480
+ M = np.zeros((N, N), dtype=np.float64)
481
+ for i in range(N):
482
+ s_i = s_range[i]
483
+
484
+ # select bottom s_i eigenvectors and calculate alpha
485
+ Vi = V[i, :, n_neighbors - s_i :]
486
+ alpha_i = np.linalg.norm(Vi.sum(0)) / np.sqrt(s_i)
487
+
488
+ # compute Householder matrix which satisfies
489
+ # Hi*Vi.T*ones(n_neighbors) = alpha_i*ones(s)
490
+ # using prescription from paper
491
+ h = np.full(s_i, alpha_i) - np.dot(Vi.T, np.ones(n_neighbors))
492
+
493
+ norm_h = np.linalg.norm(h)
494
+ if norm_h < modified_tol:
495
+ h *= 0
496
+ else:
497
+ h /= norm_h
498
+
499
+ # Householder matrix is
500
+ # >> Hi = np.identity(s_i) - 2*np.outer(h,h)
501
+ # Then the weight matrix is
502
+ # >> Wi = np.dot(Vi,Hi) + (1-alpha_i) * w_reg[i,:,None]
503
+ # We do this much more efficiently:
504
+ Wi = Vi - 2 * np.outer(np.dot(Vi, h), h) + (1 - alpha_i) * w_reg[i, :, None]
505
+
506
+ # Update M as follows:
507
+ # >> W_hat = np.zeros( (N,s_i) )
508
+ # >> W_hat[neighbors[i],:] = Wi
509
+ # >> W_hat[i] -= 1
510
+ # >> M += np.dot(W_hat,W_hat.T)
511
+ # We can do this much more efficiently:
512
+ nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
513
+ M[nbrs_x, nbrs_y] += np.dot(Wi, Wi.T)
514
+ Wi_sum1 = Wi.sum(1)
515
+ M[i, neighbors[i]] -= Wi_sum1
516
+ M[neighbors[i], i] -= Wi_sum1
517
+ M[i, i] += s_i
518
+
519
+ if M_sparse:
520
+ M = csr_matrix(M)
521
+
522
+ elif method == "ltsa":
523
+ neighbors = nbrs.kneighbors(
524
+ X, n_neighbors=n_neighbors + 1, return_distance=False
525
+ )
526
+ neighbors = neighbors[:, 1:]
527
+
528
+ M = np.zeros((N, N))
529
+
530
+ use_svd = n_neighbors > d_in
531
+
532
+ for i in range(N):
533
+ Xi = X[neighbors[i]]
534
+ Xi -= Xi.mean(0)
535
+
536
+ # compute n_components largest eigenvalues of Xi * Xi^T
537
+ if use_svd:
538
+ v = svd(Xi, full_matrices=True)[0]
539
+ else:
540
+ Ci = np.dot(Xi, Xi.T)
541
+ v = eigh(Ci)[1][:, ::-1]
542
+
543
+ Gi = np.zeros((n_neighbors, n_components + 1))
544
+ Gi[:, 1:] = v[:, :n_components]
545
+ Gi[:, 0] = 1.0 / np.sqrt(n_neighbors)
546
+
547
+ GiGiT = np.dot(Gi, Gi.T)
548
+
549
+ nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
550
+ M[nbrs_x, nbrs_y] -= GiGiT
551
+ M[neighbors[i], neighbors[i]] += 1
552
+
553
+ return null_space(
554
+ M,
555
+ n_components,
556
+ k_skip=1,
557
+ eigen_solver=eigen_solver,
558
+ tol=tol,
559
+ max_iter=max_iter,
560
+ random_state=random_state,
561
+ )
562
+
563
+
564
+ class LocallyLinearEmbedding(
565
+ ClassNamePrefixFeaturesOutMixin,
566
+ TransformerMixin,
567
+ _UnstableArchMixin,
568
+ BaseEstimator,
569
+ ):
570
+ """Locally Linear Embedding.
571
+
572
+ Read more in the :ref:`User Guide <locally_linear_embedding>`.
573
+
574
+ Parameters
575
+ ----------
576
+ n_neighbors : int, default=5
577
+ Number of neighbors to consider for each point.
578
+
579
+ n_components : int, default=2
580
+ Number of coordinates for the manifold.
581
+
582
+ reg : float, default=1e-3
583
+ Regularization constant, multiplies the trace of the local covariance
584
+ matrix of the distances.
585
+
586
+ eigen_solver : {'auto', 'arpack', 'dense'}, default='auto'
587
+ The solver used to compute the eigenvectors. The available options are:
588
+
589
+ - `'auto'` : algorithm will attempt to choose the best method for input
590
+ data.
591
+ - `'arpack'` : use arnoldi iteration in shift-invert mode. For this
592
+ method, M may be a dense matrix, sparse matrix, or general linear
593
+ operator.
594
+ - `'dense'` : use standard dense matrix operations for the eigenvalue
595
+ decomposition. For this method, M must be an array or matrix type.
596
+ This method should be avoided for large problems.
597
+
598
+ .. warning::
599
+ ARPACK can be unstable for some problems. It is best to try several
600
+ random seeds in order to check results.
601
+
602
+ tol : float, default=1e-6
603
+ Tolerance for 'arpack' method
604
+ Not used if eigen_solver=='dense'.
605
+
606
+ max_iter : int, default=100
607
+ Maximum number of iterations for the arpack solver.
608
+ Not used if eigen_solver=='dense'.
609
+
610
+ method : {'standard', 'hessian', 'modified', 'ltsa'}, default='standard'
611
+ - `standard`: use the standard locally linear embedding algorithm. see
612
+ reference [1]_
613
+ - `hessian`: use the Hessian eigenmap method. This method requires
614
+ ``n_neighbors > n_components * (1 + (n_components + 1) / 2``. see
615
+ reference [2]_
616
+ - `modified`: use the modified locally linear embedding algorithm.
617
+ see reference [3]_
618
+ - `ltsa`: use local tangent space alignment algorithm. see
619
+ reference [4]_
620
+
621
+ hessian_tol : float, default=1e-4
622
+ Tolerance for Hessian eigenmapping method.
623
+ Only used if ``method == 'hessian'``.
624
+
625
+ modified_tol : float, default=1e-12
626
+ Tolerance for modified LLE method.
627
+ Only used if ``method == 'modified'``.
628
+
629
+ neighbors_algorithm : {'auto', 'brute', 'kd_tree', 'ball_tree'}, \
630
+ default='auto'
631
+ Algorithm to use for nearest neighbors search, passed to
632
+ :class:`~sklearn.neighbors.NearestNeighbors` instance.
633
+
634
+ random_state : int, RandomState instance, default=None
635
+ Determines the random number generator when
636
+ ``eigen_solver`` == 'arpack'. Pass an int for reproducible results
637
+ across multiple function calls. See :term:`Glossary <random_state>`.
638
+
639
+ n_jobs : int or None, default=None
640
+ The number of parallel jobs to run.
641
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
642
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
643
+ for more details.
644
+
645
+ Attributes
646
+ ----------
647
+ embedding_ : array-like, shape [n_samples, n_components]
648
+ Stores the embedding vectors
649
+
650
+ reconstruction_error_ : float
651
+ Reconstruction error associated with `embedding_`
652
+
653
+ n_features_in_ : int
654
+ Number of features seen during :term:`fit`.
655
+
656
+ .. versionadded:: 0.24
657
+
658
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
659
+ Names of features seen during :term:`fit`. Defined only when `X`
660
+ has feature names that are all strings.
661
+
662
+ .. versionadded:: 1.0
663
+
664
+ nbrs_ : NearestNeighbors object
665
+ Stores nearest neighbors instance, including BallTree or KDtree
666
+ if applicable.
667
+
668
+ See Also
669
+ --------
670
+ SpectralEmbedding : Spectral embedding for non-linear dimensionality
671
+ reduction.
672
+ TSNE : Distributed Stochastic Neighbor Embedding.
673
+
674
+ References
675
+ ----------
676
+
677
+ .. [1] Roweis, S. & Saul, L. Nonlinear dimensionality reduction
678
+ by locally linear embedding. Science 290:2323 (2000).
679
+ .. [2] Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
680
+ linear embedding techniques for high-dimensional data.
681
+ Proc Natl Acad Sci U S A. 100:5591 (2003).
682
+ .. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
683
+ Embedding Using Multiple Weights.
684
+ <https://citeseerx.ist.psu.edu/doc_view/pid/0b060fdbd92cbcc66b383bcaa9ba5e5e624d7ee3>`_
685
+ .. [4] Zhang, Z. & Zha, H. Principal manifolds and nonlinear
686
+ dimensionality reduction via tangent space alignment.
687
+ Journal of Shanghai Univ. 8:406 (2004)
688
+
689
+ Examples
690
+ --------
691
+ >>> from sklearn.datasets import load_digits
692
+ >>> from sklearn.manifold import LocallyLinearEmbedding
693
+ >>> X, _ = load_digits(return_X_y=True)
694
+ >>> X.shape
695
+ (1797, 64)
696
+ >>> embedding = LocallyLinearEmbedding(n_components=2)
697
+ >>> X_transformed = embedding.fit_transform(X[:100])
698
+ >>> X_transformed.shape
699
+ (100, 2)
700
+ """
701
+
702
+ _parameter_constraints: dict = {
703
+ "n_neighbors": [Interval(Integral, 1, None, closed="left")],
704
+ "n_components": [Interval(Integral, 1, None, closed="left")],
705
+ "reg": [Interval(Real, 0, None, closed="left")],
706
+ "eigen_solver": [StrOptions({"auto", "arpack", "dense"})],
707
+ "tol": [Interval(Real, 0, None, closed="left")],
708
+ "max_iter": [Interval(Integral, 1, None, closed="left")],
709
+ "method": [StrOptions({"standard", "hessian", "modified", "ltsa"})],
710
+ "hessian_tol": [Interval(Real, 0, None, closed="left")],
711
+ "modified_tol": [Interval(Real, 0, None, closed="left")],
712
+ "neighbors_algorithm": [StrOptions({"auto", "brute", "kd_tree", "ball_tree"})],
713
+ "random_state": ["random_state"],
714
+ "n_jobs": [None, Integral],
715
+ }
716
+
717
+ def __init__(
718
+ self,
719
+ *,
720
+ n_neighbors=5,
721
+ n_components=2,
722
+ reg=1e-3,
723
+ eigen_solver="auto",
724
+ tol=1e-6,
725
+ max_iter=100,
726
+ method="standard",
727
+ hessian_tol=1e-4,
728
+ modified_tol=1e-12,
729
+ neighbors_algorithm="auto",
730
+ random_state=None,
731
+ n_jobs=None,
732
+ ):
733
+ self.n_neighbors = n_neighbors
734
+ self.n_components = n_components
735
+ self.reg = reg
736
+ self.eigen_solver = eigen_solver
737
+ self.tol = tol
738
+ self.max_iter = max_iter
739
+ self.method = method
740
+ self.hessian_tol = hessian_tol
741
+ self.modified_tol = modified_tol
742
+ self.random_state = random_state
743
+ self.neighbors_algorithm = neighbors_algorithm
744
+ self.n_jobs = n_jobs
745
+
746
+ def _fit_transform(self, X):
747
+ self.nbrs_ = NearestNeighbors(
748
+ n_neighbors=self.n_neighbors,
749
+ algorithm=self.neighbors_algorithm,
750
+ n_jobs=self.n_jobs,
751
+ )
752
+
753
+ random_state = check_random_state(self.random_state)
754
+ X = self._validate_data(X, dtype=float)
755
+ self.nbrs_.fit(X)
756
+ self.embedding_, self.reconstruction_error_ = locally_linear_embedding(
757
+ X=self.nbrs_,
758
+ n_neighbors=self.n_neighbors,
759
+ n_components=self.n_components,
760
+ eigen_solver=self.eigen_solver,
761
+ tol=self.tol,
762
+ max_iter=self.max_iter,
763
+ method=self.method,
764
+ hessian_tol=self.hessian_tol,
765
+ modified_tol=self.modified_tol,
766
+ random_state=random_state,
767
+ reg=self.reg,
768
+ n_jobs=self.n_jobs,
769
+ )
770
+ self._n_features_out = self.embedding_.shape[1]
771
+
772
+ @_fit_context(prefer_skip_nested_validation=True)
773
+ def fit(self, X, y=None):
774
+ """Compute the embedding vectors for data X.
775
+
776
+ Parameters
777
+ ----------
778
+ X : array-like of shape (n_samples, n_features)
779
+ Training set.
780
+
781
+ y : Ignored
782
+ Not used, present here for API consistency by convention.
783
+
784
+ Returns
785
+ -------
786
+ self : object
787
+ Fitted `LocallyLinearEmbedding` class instance.
788
+ """
789
+ self._fit_transform(X)
790
+ return self
791
+
792
+ @_fit_context(prefer_skip_nested_validation=True)
793
+ def fit_transform(self, X, y=None):
794
+ """Compute the embedding vectors for data X and transform X.
795
+
796
+ Parameters
797
+ ----------
798
+ X : array-like of shape (n_samples, n_features)
799
+ Training set.
800
+
801
+ y : Ignored
802
+ Not used, present here for API consistency by convention.
803
+
804
+ Returns
805
+ -------
806
+ X_new : ndarray of shape (n_samples, n_components)
807
+ Embedding of the training data in the low-dimensional space.
808
+ """
809
+ self._fit_transform(X)
810
+ return self.embedding_
811
+
812
+ def transform(self, X):
813
+ """
814
+ Transform new points into embedding space.
815
+
816
+ Parameters
817
+ ----------
818
+ X : array-like of shape (n_samples, n_features)
819
+ Points to transform into the embedding space.
820
+
821
+ Returns
822
+ -------
823
+ X_new : ndarray of shape (n_samples, n_components)
824
+ Embedding of the new points in the low-dimensional space.
825
+
826
+ Notes
827
+ -----
828
+ Because of the scaling performed by this method, it is discouraged to
829
+ use it together with methods that are not scale-invariant (like SVMs).
830
+ """
831
+ check_is_fitted(self)
832
+
833
+ X = self._validate_data(X, reset=False)
834
+ ind = self.nbrs_.kneighbors(
835
+ X, n_neighbors=self.n_neighbors, return_distance=False
836
+ )
837
+ weights = barycenter_weights(X, self.nbrs_._fit_X, ind, reg=self.reg)
838
+ X_new = np.empty((X.shape[0], self.n_components))
839
+ for i in range(X.shape[0]):
840
+ X_new[i] = np.dot(self.embedding_[ind[i]].T, weights[i])
841
+ return X_new
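
The `transform` method above maps unseen points by expressing each one as a barycentric combination of its nearest training points and reusing those weights on the stored embedding. A minimal doctest-style sketch of that out-of-sample path (using the digits data from the class example; only shapes are shown, since the coordinates depend on the eigensolver):

>>> from sklearn.datasets import load_digits
>>> from sklearn.manifold import LocallyLinearEmbedding
>>> X, _ = load_digits(return_X_y=True)
>>> lle = LocallyLinearEmbedding(n_components=2).fit(X[:100])
>>> # unseen points are mapped through the fitted neighbors and weights
>>> lle.transform(X[100:110]).shape
(10, 2)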
env-llmeval/lib/python3.10/site-packages/sklearn/manifold/_mds.py ADDED
@@ -0,0 +1,653 @@
1
+ """
2
+ Multi-dimensional Scaling (MDS).
3
+ """
4
+
5
+ # author: Nelle Varoquaux <[email protected]>
6
+ # License: BSD
7
+
8
+ import warnings
9
+ from numbers import Integral, Real
10
+
11
+ import numpy as np
12
+ from joblib import effective_n_jobs
13
+
14
+ from ..base import BaseEstimator, _fit_context
15
+ from ..isotonic import IsotonicRegression
16
+ from ..metrics import euclidean_distances
17
+ from ..utils import check_array, check_random_state, check_symmetric
18
+ from ..utils._param_validation import Interval, StrOptions, validate_params
19
+ from ..utils.parallel import Parallel, delayed
20
+
21
+
22
+ def _smacof_single(
23
+ dissimilarities,
24
+ metric=True,
25
+ n_components=2,
26
+ init=None,
27
+ max_iter=300,
28
+ verbose=0,
29
+ eps=1e-3,
30
+ random_state=None,
31
+ normalized_stress=False,
32
+ ):
33
+ """Computes multidimensional scaling using SMACOF algorithm.
34
+
35
+ Parameters
36
+ ----------
37
+ dissimilarities : ndarray of shape (n_samples, n_samples)
38
+ Pairwise dissimilarities between the points. Must be symmetric.
39
+
40
+ metric : bool, default=True
41
+ Compute metric or nonmetric SMACOF algorithm.
42
+ When ``False`` (i.e. non-metric MDS), dissimilarities with 0 are considered as
43
+ missing values.
44
+
45
+ n_components : int, default=2
46
+ Number of dimensions in which to immerse the dissimilarities. If an
47
+ ``init`` array is provided, this option is overridden and the shape of
48
+ ``init`` is used to determine the dimensionality of the embedding
49
+ space.
50
+
51
+ init : ndarray of shape (n_samples, n_components), default=None
52
+ Starting configuration of the embedding to initialize the algorithm. By
53
+ default, the algorithm is initialized with a randomly chosen array.
54
+
55
+ max_iter : int, default=300
56
+ Maximum number of iterations of the SMACOF algorithm for a single run.
57
+
58
+ verbose : int, default=0
59
+ Level of verbosity.
60
+
61
+ eps : float, default=1e-3
62
+ Relative tolerance with respect to stress at which to declare
63
+ convergence. The value of `eps` should be tuned separately depending
64
+ on whether or not `normalized_stress` is being used.
65
+
66
+ random_state : int, RandomState instance or None, default=None
67
+ Determines the random number generator used to initialize the centers.
68
+ Pass an int for reproducible results across multiple function calls.
69
+ See :term:`Glossary <random_state>`.
70
+
71
+ normalized_stress : bool, default=False
72
+ Whether to use and return the normalized stress value (Stress-1)
74
+ instead of the raw stress. Only supported in non-metric MDS. The
75
+ caller must ensure that if `normalized_stress=True` then `metric=False`.
75
+
76
+ .. versionadded:: 1.2
77
+
78
+ Returns
79
+ -------
80
+ X : ndarray of shape (n_samples, n_components)
81
+ Coordinates of the points in a ``n_components``-space.
82
+
83
+ stress : float
84
+ The final value of the stress (the sum of squared differences between
86
+ the disparities and the distances, over all constrained points).
87
+ If `normalized_stress=True` and `metric=False`, returns Stress-1.
87
+ A value of 0 indicates "perfect" fit, 0.025 excellent, 0.05 good,
88
+ 0.1 fair, and 0.2 poor [1]_.
89
+
90
+ n_iter : int
91
+ The number of iterations corresponding to the best stress.
92
+
93
+ References
94
+ ----------
95
+ .. [1] "Nonmetric multidimensional scaling: a numerical method" Kruskal, J.
96
+ Psychometrika, 29 (1964)
97
+
98
+ .. [2] "Multidimensional scaling by optimizing goodness of fit to a nonmetric
99
+ hypothesis" Kruskal, J. Psychometrika, 29, (1964)
100
+
101
+ .. [3] "Modern Multidimensional Scaling - Theory and Applications" Borg, I.;
102
+ Groenen P. Springer Series in Statistics (1997)
103
+ """
104
+ dissimilarities = check_symmetric(dissimilarities, raise_exception=True)
105
+
106
+ n_samples = dissimilarities.shape[0]
107
+ random_state = check_random_state(random_state)
108
+
109
+ sim_flat = ((1 - np.tri(n_samples)) * dissimilarities).ravel()
110
+ sim_flat_w = sim_flat[sim_flat != 0]
111
+ if init is None:
112
+ # Randomly choose initial configuration
113
+ X = random_state.uniform(size=n_samples * n_components)
114
+ X = X.reshape((n_samples, n_components))
115
+ else:
116
+ # the shape of init overrides the n_components parameter
117
+ n_components = init.shape[1]
118
+ if n_samples != init.shape[0]:
119
+ raise ValueError(
120
+ "init matrix should be of shape (%d, %d)" % (n_samples, n_components)
121
+ )
122
+ X = init
123
+
124
+ old_stress = None
125
+ ir = IsotonicRegression()
126
+ for it in range(max_iter):
127
+ # Compute distance and monotonic regression
128
+ dis = euclidean_distances(X)
129
+
130
+ if metric:
131
+ disparities = dissimilarities
132
+ else:
133
+ dis_flat = dis.ravel()
134
+ # dissimilarities with 0 are considered as missing values
135
+ dis_flat_w = dis_flat[sim_flat != 0]
136
+
137
+ # Compute the disparities using a monotonic regression
138
+ disparities_flat = ir.fit_transform(sim_flat_w, dis_flat_w)
139
+ disparities = dis_flat.copy()
140
+ disparities[sim_flat != 0] = disparities_flat
141
+ disparities = disparities.reshape((n_samples, n_samples))
142
+ disparities *= np.sqrt(
143
+ (n_samples * (n_samples - 1) / 2) / (disparities**2).sum()
144
+ )
145
+
146
+ # Compute stress
147
+ stress = ((dis.ravel() - disparities.ravel()) ** 2).sum() / 2
148
+ if normalized_stress:
149
+ stress = np.sqrt(stress / ((disparities.ravel() ** 2).sum() / 2))
150
+ # Update X using the Guttman transform
151
+ dis[dis == 0] = 1e-5
152
+ ratio = disparities / dis
153
+ B = -ratio
154
+ B[np.arange(len(B)), np.arange(len(B))] += ratio.sum(axis=1)
155
+ X = 1.0 / n_samples * np.dot(B, X)
156
+
157
+ dis = np.sqrt((X**2).sum(axis=1)).sum()
158
+ if verbose >= 2:
159
+ print("it: %d, stress %s" % (it, stress))
160
+ if old_stress is not None:
161
+ if (old_stress - stress / dis) < eps:
162
+ if verbose:
163
+ print("breaking at iteration %d with stress %s" % (it, stress))
164
+ break
165
+ old_stress = stress / dis
166
+
167
+ return X, stress, it + 1
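
For intuition, the Guttman update inside the loop above can be reproduced directly in NumPy. This is an illustrative sketch of a single metric-MDS step under the same conventions as the code (disparities equal the dissimilarities, zero distances clipped to 1e-5); it is not a replacement for `_smacof_single`:

>>> import numpy as np
>>> from sklearn.metrics import euclidean_distances
>>> rng = np.random.RandomState(0)
>>> D = euclidean_distances(rng.rand(6, 3))   # target dissimilarities
>>> X = rng.rand(6, 2)                        # current 2-D configuration
>>> dis = euclidean_distances(X)
>>> dis[dis == 0] = 1e-5                      # avoid division by zero
>>> ratio = D / dis                           # metric MDS: disparities == D
>>> B = -ratio
>>> B[np.arange(6), np.arange(6)] += ratio.sum(axis=1)
>>> X_next = np.dot(B, X) / 6                 # one Guttman transform step
>>> X_next.shape
(6, 2)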
168
+
169
+
170
+ @validate_params(
171
+ {
172
+ "dissimilarities": ["array-like"],
173
+ "metric": ["boolean"],
174
+ "n_components": [Interval(Integral, 1, None, closed="left")],
175
+ "init": ["array-like", None],
176
+ "n_init": [Interval(Integral, 1, None, closed="left")],
177
+ "n_jobs": [Integral, None],
178
+ "max_iter": [Interval(Integral, 1, None, closed="left")],
179
+ "verbose": ["verbose"],
180
+ "eps": [Interval(Real, 0, None, closed="left")],
181
+ "random_state": ["random_state"],
182
+ "return_n_iter": ["boolean"],
183
+ "normalized_stress": ["boolean", StrOptions({"auto"})],
184
+ },
185
+ prefer_skip_nested_validation=True,
186
+ )
187
+ def smacof(
188
+ dissimilarities,
189
+ *,
190
+ metric=True,
191
+ n_components=2,
192
+ init=None,
193
+ n_init=8,
194
+ n_jobs=None,
195
+ max_iter=300,
196
+ verbose=0,
197
+ eps=1e-3,
198
+ random_state=None,
199
+ return_n_iter=False,
200
+ normalized_stress="auto",
201
+ ):
202
+ """Compute multidimensional scaling using the SMACOF algorithm.
203
+
204
+ The SMACOF (Scaling by MAjorizing a COmplicated Function) algorithm is a
205
+ multidimensional scaling algorithm which minimizes an objective function
206
+ (the *stress*) using a majorization technique. Stress majorization, also
207
+ known as the Guttman Transform, guarantees a monotone convergence of
208
+ stress, and is more powerful than traditional techniques such as gradient
209
+ descent.
210
+
211
+ The SMACOF algorithm for metric MDS can be summarized by the following
212
+ steps:
213
+
214
+ 1. Set an initial start configuration, randomly or not.
215
+ 2. Compute the stress.
216
+ 3. Compute the Guttman Transform.
217
+ 4. Iterate 2 and 3 until convergence.
218
+
219
+ The nonmetric algorithm adds a monotonic regression step before computing
220
+ the stress.
221
+
222
+ Parameters
223
+ ----------
224
+ dissimilarities : array-like of shape (n_samples, n_samples)
225
+ Pairwise dissimilarities between the points. Must be symmetric.
226
+
227
+ metric : bool, default=True
228
+ Compute metric or nonmetric SMACOF algorithm.
229
+ When ``False`` (i.e. non-metric MDS), dissimilarities with 0 are considered as
230
+ missing values.
231
+
232
+ n_components : int, default=2
233
+ Number of dimensions in which to immerse the dissimilarities. If an
234
+ ``init`` array is provided, this option is overridden and the shape of
235
+ ``init`` is used to determine the dimensionality of the embedding
236
+ space.
237
+
238
+ init : array-like of shape (n_samples, n_components), default=None
239
+ Starting configuration of the embedding to initialize the algorithm. By
240
+ default, the algorithm is initialized with a randomly chosen array.
241
+
242
+ n_init : int, default=8
243
+ Number of times the SMACOF algorithm will be run with different
244
+ initializations. The final results will be the best output of the runs,
245
+ determined by the run with the smallest final stress. If ``init`` is
246
+ provided, this option is overridden and a single run is performed.
247
+
248
+ n_jobs : int, default=None
249
+ The number of jobs to use for the computation. If multiple
250
+ initializations are used (``n_init``), each run of the algorithm is
251
+ computed in parallel.
252
+
253
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
254
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
255
+ for more details.
256
+
257
+ max_iter : int, default=300
258
+ Maximum number of iterations of the SMACOF algorithm for a single run.
259
+
260
+ verbose : int, default=0
261
+ Level of verbosity.
262
+
263
+ eps : float, default=1e-3
264
+ Relative tolerance with respect to stress at which to declare
265
+ convergence. The value of `eps` should be tuned separately depending
266
+ on whether or not `normalized_stress` is being used.
267
+
268
+ random_state : int, RandomState instance or None, default=None
269
+ Determines the random number generator used to initialize the centers.
270
+ Pass an int for reproducible results across multiple function calls.
271
+ See :term:`Glossary <random_state>`.
272
+
273
+ return_n_iter : bool, default=False
274
+ Whether or not to return the number of iterations.
275
+
276
+ normalized_stress : bool or "auto", default="auto"
277
+ Whether to use and return the normalized stress value (Stress-1)
278
+ instead of the raw stress. Only supported in non-metric MDS.
279
+
280
+ .. versionadded:: 1.2
281
+
282
+ .. versionchanged:: 1.4
283
+ The default value changed from `False` to `"auto"`.
284
+
285
+ Returns
286
+ -------
287
+ X : ndarray of shape (n_samples, n_components)
288
+ Coordinates of the points in a ``n_components``-space.
289
+
290
+ stress : float
291
+ The final value of the stress (the sum of squared differences between
292
+ the disparities and the distances, over all constrained points).
293
+ If `normalized_stress=True` and `metric=False`, returns Stress-1.
294
+ A value of 0 indicates "perfect" fit, 0.025 excellent, 0.05 good,
295
+ 0.1 fair, and 0.2 poor [1]_.
296
+
297
+ n_iter : int
298
+ The number of iterations corresponding to the best stress. Returned
299
+ only if ``return_n_iter`` is set to ``True``.
300
+
301
+ References
302
+ ----------
303
+ .. [1] "Nonmetric multidimensional scaling: a numerical method" Kruskal, J.
304
+ Psychometrika, 29 (1964)
305
+
306
+ .. [2] "Multidimensional scaling by optimizing goodness of fit to a nonmetric
307
+ hypothesis" Kruskal, J. Psychometrika, 29, (1964)
308
+
309
+ .. [3] "Modern Multidimensional Scaling - Theory and Applications" Borg, I.;
310
+ Groenen P. Springer Series in Statistics (1997)
311
+
312
+ Examples
313
+ --------
314
+ >>> import numpy as np
315
+ >>> from sklearn.manifold import smacof
316
+ >>> from sklearn.metrics import euclidean_distances
317
+ >>> X = np.array([[0, 1, 2], [1, 0, 3], [2, 3, 0]])
318
+ >>> dissimilarities = euclidean_distances(X)
319
+ >>> mds_result, stress = smacof(dissimilarities, n_components=2, random_state=42)
320
+ >>> mds_result
321
+ array([[ 0.05..., -1.07...],
322
+ [ 1.74..., -0.75...],
323
+ [-1.79..., 1.83...]])
324
+ >>> stress
325
+ 0.0012...
326
+ """
327
+
328
+ dissimilarities = check_array(dissimilarities)
329
+ random_state = check_random_state(random_state)
330
+
331
+ if normalized_stress == "auto":
332
+ normalized_stress = not metric
333
+
334
+ if normalized_stress and metric:
335
+ raise ValueError(
336
+ "Normalized stress is not supported for metric MDS. Either set"
337
+ " `normalized_stress=False` or use `metric=False`."
338
+ )
339
+ if hasattr(init, "__array__"):
340
+ init = np.asarray(init).copy()
341
+ if not n_init == 1:
342
+ warnings.warn(
343
+ "Explicit initial positions passed: "
344
+ "performing only one init of the MDS instead of %d" % n_init
345
+ )
346
+ n_init = 1
347
+
348
+ best_pos, best_stress = None, None
349
+
350
+ if effective_n_jobs(n_jobs) == 1:
351
+ for it in range(n_init):
352
+ pos, stress, n_iter_ = _smacof_single(
353
+ dissimilarities,
354
+ metric=metric,
355
+ n_components=n_components,
356
+ init=init,
357
+ max_iter=max_iter,
358
+ verbose=verbose,
359
+ eps=eps,
360
+ random_state=random_state,
361
+ normalized_stress=normalized_stress,
362
+ )
363
+ if best_stress is None or stress < best_stress:
364
+ best_stress = stress
365
+ best_pos = pos.copy()
366
+ best_iter = n_iter_
367
+ else:
368
+ seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init)
369
+ results = Parallel(n_jobs=n_jobs, verbose=max(verbose - 1, 0))(
370
+ delayed(_smacof_single)(
371
+ dissimilarities,
372
+ metric=metric,
373
+ n_components=n_components,
374
+ init=init,
375
+ max_iter=max_iter,
376
+ verbose=verbose,
377
+ eps=eps,
378
+ random_state=seed,
379
+ normalized_stress=normalized_stress,
380
+ )
381
+ for seed in seeds
382
+ )
383
+ positions, stress, n_iters = zip(*results)
384
+ best = np.argmin(stress)
385
+ best_stress = stress[best]
386
+ best_pos = positions[best]
387
+ best_iter = n_iters[best]
388
+
389
+ if return_n_iter:
390
+ return best_pos, best_stress, best_iter
391
+ else:
392
+ return best_pos, best_stress
393
+
394
+
395
+ class MDS(BaseEstimator):
396
+ """Multidimensional scaling.
397
+
398
+ Read more in the :ref:`User Guide <multidimensional_scaling>`.
399
+
400
+ Parameters
401
+ ----------
402
+ n_components : int, default=2
403
+ Number of dimensions in which to immerse the dissimilarities.
404
+
405
+ metric : bool, default=True
406
+ If ``True``, perform metric MDS; otherwise, perform nonmetric MDS.
407
+ When ``False`` (i.e. non-metric MDS), dissimilarities with 0 are considered as
408
+ missing values.
409
+
410
+ n_init : int, default=4
411
+ Number of times the SMACOF algorithm will be run with different
412
+ initializations. The final results will be the best output of the runs,
413
+ determined by the run with the smallest final stress.
414
+
415
+ max_iter : int, default=300
416
+ Maximum number of iterations of the SMACOF algorithm for a single run.
417
+
418
+ verbose : int, default=0
419
+ Level of verbosity.
420
+
421
+ eps : float, default=1e-3
422
+ Relative tolerance with respect to stress at which to declare
423
+ convergence. The value of `eps` should be tuned separately depending
424
+ on whether or not `normalized_stress` is being used.
425
+
426
+ n_jobs : int, default=None
427
+ The number of jobs to use for the computation. If multiple
428
+ initializations are used (``n_init``), each run of the algorithm is
429
+ computed in parallel.
430
+
431
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
432
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
433
+ for more details.
434
+
435
+ random_state : int, RandomState instance or None, default=None
436
+ Determines the random number generator used to initialize the centers.
437
+ Pass an int for reproducible results across multiple function calls.
438
+ See :term:`Glossary <random_state>`.
439
+
440
+ dissimilarity : {'euclidean', 'precomputed'}, default='euclidean'
441
+ Dissimilarity measure to use:
442
+
443
+ - 'euclidean':
444
+ Pairwise Euclidean distances between points in the dataset.
445
+
446
+ - 'precomputed':
447
+ Pre-computed dissimilarities are passed directly to ``fit`` and
448
+ ``fit_transform``.
449
+
450
+ normalized_stress : bool or "auto", default="auto"
452
+ Whether to use and return the normalized stress value (Stress-1)
453
+ instead of the raw stress. Only supported in non-metric MDS.
453
+
454
+ .. versionadded:: 1.2
455
+
456
+ .. versionchanged:: 1.4
457
+ The default value changed from `False` to `"auto"`.
458
+
459
+ Attributes
460
+ ----------
461
+ embedding_ : ndarray of shape (n_samples, n_components)
462
+ Stores the position of the dataset in the embedding space.
463
+
464
+ stress_ : float
465
+ The final value of the stress (the sum of squared differences between
466
+ the disparities and the distances, over all constrained points).
467
+ If `normalized_stress=True` and `metric=False`, returns Stress-1.
468
+ A value of 0 indicates "perfect" fit, 0.025 excellent, 0.05 good,
469
+ 0.1 fair, and 0.2 poor [1]_.
470
+
471
+ dissimilarity_matrix_ : ndarray of shape (n_samples, n_samples)
472
+ Pairwise dissimilarities between the points. Symmetric matrix that:
473
+
474
+ - either uses a custom dissimilarity matrix by setting `dissimilarity`
475
+ to 'precomputed';
476
+ - or constructs a dissimilarity matrix from data using
477
+ Euclidean distances.
478
+
479
+ n_features_in_ : int
480
+ Number of features seen during :term:`fit`.
481
+
482
+ .. versionadded:: 0.24
483
+
484
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
485
+ Names of features seen during :term:`fit`. Defined only when `X`
486
+ has feature names that are all strings.
487
+
488
+ .. versionadded:: 1.0
489
+
490
+ n_iter_ : int
491
+ The number of iterations corresponding to the best stress.
492
+
493
+ See Also
494
+ --------
495
+ sklearn.decomposition.PCA : Principal component analysis that is a linear
496
+ dimensionality reduction method.
497
+ sklearn.decomposition.KernelPCA : Non-linear dimensionality reduction using
498
+ kernels and PCA.
499
+ TSNE : T-distributed Stochastic Neighbor Embedding.
500
+ Isomap : Manifold learning based on Isometric Mapping.
501
+ LocallyLinearEmbedding : Manifold learning using Locally Linear Embedding.
502
+ SpectralEmbedding : Spectral embedding for non-linear dimensionality.
503
+
504
+ References
505
+ ----------
506
+ .. [1] "Nonmetric multidimensional scaling: a numerical method" Kruskal, J.
507
+ Psychometrika, 29 (1964)
508
+
509
+ .. [2] "Multidimensional scaling by optimizing goodness of fit to a nonmetric
510
+ hypothesis" Kruskal, J. Psychometrika, 29, (1964)
511
+
512
+ .. [3] "Modern Multidimensional Scaling - Theory and Applications" Borg, I.;
513
+ Groenen P. Springer Series in Statistics (1997)
514
+
515
+ Examples
516
+ --------
517
+ >>> from sklearn.datasets import load_digits
518
+ >>> from sklearn.manifold import MDS
519
+ >>> X, _ = load_digits(return_X_y=True)
520
+ >>> X.shape
521
+ (1797, 64)
522
+ >>> embedding = MDS(n_components=2, normalized_stress='auto')
523
+ >>> X_transformed = embedding.fit_transform(X[:100])
524
+ >>> X_transformed.shape
525
+ (100, 2)
526
+
527
+ For a more detailed example of usage, see:
528
+ :ref:`sphx_glr_auto_examples_manifold_plot_mds.py`
529
+ """
530
+
531
+ _parameter_constraints: dict = {
532
+ "n_components": [Interval(Integral, 1, None, closed="left")],
533
+ "metric": ["boolean"],
534
+ "n_init": [Interval(Integral, 1, None, closed="left")],
535
+ "max_iter": [Interval(Integral, 1, None, closed="left")],
536
+ "verbose": ["verbose"],
537
+ "eps": [Interval(Real, 0.0, None, closed="left")],
538
+ "n_jobs": [None, Integral],
539
+ "random_state": ["random_state"],
540
+ "dissimilarity": [StrOptions({"euclidean", "precomputed"})],
541
+ "normalized_stress": ["boolean", StrOptions({"auto"})],
542
+ }
543
+
544
+ def __init__(
545
+ self,
546
+ n_components=2,
547
+ *,
548
+ metric=True,
549
+ n_init=4,
550
+ max_iter=300,
551
+ verbose=0,
552
+ eps=1e-3,
553
+ n_jobs=None,
554
+ random_state=None,
555
+ dissimilarity="euclidean",
556
+ normalized_stress="auto",
557
+ ):
558
+ self.n_components = n_components
559
+ self.dissimilarity = dissimilarity
560
+ self.metric = metric
561
+ self.n_init = n_init
562
+ self.max_iter = max_iter
563
+ self.eps = eps
564
+ self.verbose = verbose
565
+ self.n_jobs = n_jobs
566
+ self.random_state = random_state
567
+ self.normalized_stress = normalized_stress
568
+
569
+ def _more_tags(self):
570
+ return {"pairwise": self.dissimilarity == "precomputed"}
571
+
572
+ def fit(self, X, y=None, init=None):
573
+ """
574
+ Compute the position of the points in the embedding space.
575
+
576
+ Parameters
577
+ ----------
578
+ X : array-like of shape (n_samples, n_features) or \
579
+ (n_samples, n_samples)
580
+ Input data. If ``dissimilarity=='precomputed'``, the input should
581
+ be the dissimilarity matrix.
582
+
583
+ y : Ignored
584
+ Not used, present for API consistency by convention.
585
+
586
+ init : ndarray of shape (n_samples, n_components), default=None
587
+ Starting configuration of the embedding to initialize the SMACOF
588
+ algorithm. By default, the algorithm is initialized with a randomly
589
+ chosen array.
590
+
591
+ Returns
592
+ -------
593
+ self : object
594
+ Fitted estimator.
595
+ """
596
+ self.fit_transform(X, init=init)
597
+ return self
598
+
599
+ @_fit_context(prefer_skip_nested_validation=True)
600
+ def fit_transform(self, X, y=None, init=None):
601
+ """
602
+ Fit the data from `X` and return the embedded coordinates.
603
+
604
+ Parameters
605
+ ----------
606
+ X : array-like of shape (n_samples, n_features) or \
607
+ (n_samples, n_samples)
608
+ Input data. If ``dissimilarity=='precomputed'``, the input should
609
+ be the dissimilarity matrix.
610
+
611
+ y : Ignored
612
+ Not used, present for API consistency by convention.
613
+
614
+ init : ndarray of shape (n_samples, n_components), default=None
615
+ Starting configuration of the embedding to initialize the SMACOF
616
+ algorithm. By default, the algorithm is initialized with a randomly
617
+ chosen array.
618
+
619
+ Returns
620
+ -------
621
+ X_new : ndarray of shape (n_samples, n_components)
622
+ X transformed in the new space.
623
+ """
624
+ X = self._validate_data(X)
625
+ if X.shape[0] == X.shape[1] and self.dissimilarity != "precomputed":
626
+ warnings.warn(
627
+ "The MDS API has changed. ``fit`` now constructs an"
628
+ " dissimilarity matrix from data. To use a custom "
629
+ "dissimilarity matrix, set "
630
+ "``dissimilarity='precomputed'``."
631
+ )
632
+
633
+ if self.dissimilarity == "precomputed":
634
+ self.dissimilarity_matrix_ = X
635
+ elif self.dissimilarity == "euclidean":
636
+ self.dissimilarity_matrix_ = euclidean_distances(X)
637
+
638
+ self.embedding_, self.stress_, self.n_iter_ = smacof(
639
+ self.dissimilarity_matrix_,
640
+ metric=self.metric,
641
+ n_components=self.n_components,
642
+ init=init,
643
+ n_init=self.n_init,
644
+ n_jobs=self.n_jobs,
645
+ max_iter=self.max_iter,
646
+ verbose=self.verbose,
647
+ eps=self.eps,
648
+ random_state=self.random_state,
649
+ return_n_iter=True,
650
+ normalized_stress=self.normalized_stress,
651
+ )
652
+
653
+ return self.embedding_
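
A short usage sketch of the `dissimilarity='precomputed'` branch handled in `fit_transform` above, which skips the internal Euclidean-distance construction and avoids the square-matrix warning (shapes only, as the embedding itself depends on the random initialization):

>>> import numpy as np
>>> from sklearn.manifold import MDS
>>> from sklearn.metrics import euclidean_distances
>>> X = np.random.RandomState(0).rand(20, 4)
>>> D = euclidean_distances(X)                # symmetric dissimilarity matrix
>>> mds = MDS(n_components=2, dissimilarity='precomputed', random_state=0)
>>> mds.fit_transform(D).shape
(20, 2)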
env-llmeval/lib/python3.10/site-packages/sklearn/manifold/_spectral_embedding.py ADDED
@@ -0,0 +1,749 @@
1
+ """Spectral Embedding."""
2
+
3
+ # Author: Gael Varoquaux <[email protected]>
4
+ # Wei LI <[email protected]>
5
+ # License: BSD 3 clause
6
+
7
+
8
+ import warnings
9
+ from numbers import Integral, Real
10
+
11
+ import numpy as np
12
+ from scipy import sparse
13
+ from scipy.linalg import eigh
14
+ from scipy.sparse.csgraph import connected_components
15
+ from scipy.sparse.linalg import eigsh, lobpcg
16
+
17
+ from ..base import BaseEstimator, _fit_context
18
+ from ..metrics.pairwise import rbf_kernel
19
+ from ..neighbors import NearestNeighbors, kneighbors_graph
20
+ from ..utils import (
21
+ check_array,
22
+ check_random_state,
23
+ check_symmetric,
24
+ )
25
+ from ..utils._arpack import _init_arpack_v0
26
+ from ..utils._param_validation import Interval, StrOptions
27
+ from ..utils.extmath import _deterministic_vector_sign_flip
28
+ from ..utils.fixes import laplacian as csgraph_laplacian
29
+ from ..utils.fixes import parse_version, sp_version
30
+
31
+
32
+ def _graph_connected_component(graph, node_id):
33
+ """Find the largest graph connected components that contains one
34
+ given node.
35
+
36
+ Parameters
37
+ ----------
38
+ graph : array-like of shape (n_samples, n_samples)
39
+ Adjacency matrix of the graph, non-zero weight means an edge
40
+ between the nodes.
41
+
42
+ node_id : int
43
+ The index of the query node of the graph.
44
+
45
+ Returns
46
+ -------
47
+ connected_components_matrix : array-like of shape (n_samples,)
48
+ An array of bool values indicating the indices of the nodes
49
+ belonging to the connected component of the given query
50
+ node.
51
+ """
52
+ n_node = graph.shape[0]
53
+ if sparse.issparse(graph):
54
+ # speed up row-wise access to boolean connection mask
55
+ graph = graph.tocsr()
56
+ connected_nodes = np.zeros(n_node, dtype=bool)
57
+ nodes_to_explore = np.zeros(n_node, dtype=bool)
58
+ nodes_to_explore[node_id] = True
59
+ for _ in range(n_node):
60
+ last_num_component = connected_nodes.sum()
61
+ np.logical_or(connected_nodes, nodes_to_explore, out=connected_nodes)
62
+ if last_num_component >= connected_nodes.sum():
63
+ break
64
+ indices = np.where(nodes_to_explore)[0]
65
+ nodes_to_explore.fill(False)
66
+ for i in indices:
67
+ if sparse.issparse(graph):
68
+ # scipy has not yet implemented 1D sparse slices; can be changed back to
69
+ # `neighbors = graph[i].toarray().ravel()` once implemented
70
+ neighbors = graph[[i], :].toarray().ravel()
71
+ else:
72
+ neighbors = graph[i]
73
+ np.logical_or(nodes_to_explore, neighbors, out=nodes_to_explore)
74
+ return connected_nodes
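
An illustrative trace of the private helper above on a tiny two-component graph (nodes 0 and 1 connected, node 2 isolated); this is for exposition only, since `_graph_connected_component` is not public API:

>>> import numpy as np
>>> adjacency = np.array([[1, 1, 0],
...                       [1, 1, 0],
...                       [0, 0, 1]])
>>> _graph_connected_component(adjacency, 0)
array([ True,  True, False])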
75
+
76
+
77
+ def _graph_is_connected(graph):
78
+ """Return whether the graph is connected (True) or Not (False).
79
+
80
+ Parameters
81
+ ----------
82
+ graph : {array-like, sparse matrix} of shape (n_samples, n_samples)
83
+ Adjacency matrix of the graph, non-zero weight means an edge
84
+ between the nodes.
85
+
86
+ Returns
87
+ -------
88
+ is_connected : bool
89
+ True if the graph is fully connected, False otherwise.
90
+ """
91
+ if sparse.issparse(graph):
92
+ # Before Scipy 1.11.3, `connected_components` only supports 32-bit indices.
93
+ # PR: https://github.com/scipy/scipy/pull/18913
94
+ # First integration in 1.11.3: https://github.com/scipy/scipy/pull/19279
95
+ # TODO(jjerphan): Once SciPy 1.11.3 is the minimum supported version, use
96
+ # `accept_large_sparse=True`.
97
+ accept_large_sparse = sp_version >= parse_version("1.11.3")
98
+ graph = check_array(
99
+ graph, accept_sparse=True, accept_large_sparse=accept_large_sparse
100
+ )
101
+ # sparse graph, find all the connected components
102
+ n_connected_components, _ = connected_components(graph)
103
+ return n_connected_components == 1
104
+ else:
105
+ # dense graph, find all connected components start from node 0
106
+ return _graph_connected_component(graph, 0).sum() == graph.shape[0]
107
+
108
+
109
+ def _set_diag(laplacian, value, norm_laplacian):
110
+ """Set the diagonal of the laplacian matrix and convert it to a
111
+ sparse format well suited for eigenvalue decomposition.
112
+
113
+ Parameters
114
+ ----------
115
+ laplacian : {ndarray, sparse matrix}
116
+ The graph laplacian.
117
+
118
+ value : float
119
+ The value of the diagonal.
120
+
121
+ norm_laplacian : bool
122
+ Whether the value of the diagonal should be changed or not.
123
+
124
+ Returns
125
+ -------
126
+ laplacian : {array, sparse matrix}
127
+ An array or matrix in a form that is well suited to fast
128
+ eigenvalue decomposition, depending on the bandwidth of the
129
+ matrix.
130
+ """
131
+ n_nodes = laplacian.shape[0]
132
+ # We need to set all diagonal entries to the given value
133
+ if not sparse.issparse(laplacian):
134
+ if norm_laplacian:
135
+ laplacian.flat[:: n_nodes + 1] = value
136
+ else:
137
+ laplacian = laplacian.tocoo()
138
+ if norm_laplacian:
139
+ diag_idx = laplacian.row == laplacian.col
140
+ laplacian.data[diag_idx] = value
141
+ # If the matrix has a small number of diagonals (as in the
142
+ # case of structured matrices coming from images), the
143
+ # dia format might be best suited for matvec products:
144
+ n_diags = np.unique(laplacian.row - laplacian.col).size
145
+ if n_diags <= 7:
146
+ # 3 or less outer diagonals on each side
147
+ laplacian = laplacian.todia()
148
+ else:
149
+ # csr has the fastest matvec and is thus best suited to
150
+ # arpack
151
+ laplacian = laplacian.tocsr()
152
+ return laplacian
153
+
154
+
155
+ def spectral_embedding(
156
+ adjacency,
157
+ *,
158
+ n_components=8,
159
+ eigen_solver=None,
160
+ random_state=None,
161
+ eigen_tol="auto",
162
+ norm_laplacian=True,
163
+ drop_first=True,
164
+ ):
165
+ """Project the sample on the first eigenvectors of the graph Laplacian.
166
+
167
+ The adjacency matrix is used to compute a normalized graph Laplacian
168
+ whose spectrum (especially the eigenvectors associated to the
169
+ smallest eigenvalues) has an interpretation in terms of minimal
170
+ number of cuts necessary to split the graph into comparably sized
171
+ components.
172
+
173
+ This embedding can also 'work' even if the ``adjacency`` variable is
174
+ not strictly the adjacency matrix of a graph but more generally
175
+ an affinity or similarity matrix between samples (for instance the
176
+ heat kernel of a Euclidean distance matrix or a k-NN matrix).
177
+
178
+ However, care must be taken to always make the affinity matrix symmetric
179
+ so that the eigenvector decomposition works as expected.
180
+
181
+ Note: Laplacian Eigenmaps is the actual algorithm implemented here.
182
+
183
+ Read more in the :ref:`User Guide <spectral_embedding>`.
184
+
185
+ Parameters
186
+ ----------
187
+ adjacency : {array-like, sparse graph} of shape (n_samples, n_samples)
188
+ The adjacency matrix of the graph to embed.
189
+
190
+ n_components : int, default=8
191
+ The dimension of the projection subspace.
192
+
193
+ eigen_solver : {'arpack', 'lobpcg', 'amg'}, default=None
194
+ The eigenvalue decomposition strategy to use. AMG requires pyamg
195
+ to be installed. It can be faster on very large, sparse problems,
196
+ but may also lead to instabilities. If None, then ``'arpack'`` is
197
+ used.
198
+
199
+ random_state : int, RandomState instance or None, default=None
200
+ A pseudo random number generator used for the initialization
201
+ of the lobpcg eigen vectors decomposition when `eigen_solver ==
202
+ 'amg'`, and for the K-Means initialization. Use an int to make
203
+ the results deterministic across calls (See
204
+ :term:`Glossary <random_state>`).
205
+
206
+ .. note::
207
+ When using `eigen_solver == 'amg'`,
208
+ it is necessary to also fix the global numpy seed with
209
+ `np.random.seed(int)` to get deterministic results. See
210
+ https://github.com/pyamg/pyamg/issues/139 for further
211
+ information.
212
+
213
+ eigen_tol : float, default="auto"
214
+ Stopping criterion for eigendecomposition of the Laplacian matrix.
215
+ If `eigen_tol="auto"` then the passed tolerance will depend on the
216
+ `eigen_solver`:
217
+
218
+ - If `eigen_solver="arpack"`, then `eigen_tol=0.0`;
219
+ - If `eigen_solver="lobpcg"` or `eigen_solver="amg"`, then
220
+ `eigen_tol=None` which configures the underlying `lobpcg` solver to
221
+ automatically resolve the value according to its heuristics. See
222
+ :func:`scipy.sparse.linalg.lobpcg` for details.
223
+
224
+ Note that when using `eigen_solver="amg"` values of `tol<1e-5` may lead
225
+ to convergence issues and should be avoided.
226
+
227
+ .. versionadded:: 1.2
228
+ Added 'auto' option.
229
+
230
+ norm_laplacian : bool, default=True
231
+ If True, then compute symmetric normalized Laplacian.
232
+
233
+ drop_first : bool, default=True
234
+ Whether to drop the first eigenvector. For spectral embedding, this
235
+ should be True as the first eigenvector should be constant vector for
236
+ connected graph, but for spectral clustering, this should be kept as
237
+ False to retain the first eigenvector.
238
+
239
+ Returns
240
+ -------
241
+ embedding : ndarray of shape (n_samples, n_components)
242
+ The reduced samples.
243
+
244
+ Notes
245
+ -----
246
+ Spectral Embedding (Laplacian Eigenmaps) is most useful when the graph
247
+ has one connected component. If the graph has many components, the first
248
+ few eigenvectors will simply uncover the connected components of the graph.
249
+
250
+ References
251
+ ----------
252
+ * https://en.wikipedia.org/wiki/LOBPCG
253
+
254
+ * :doi:`"Toward the Optimal Preconditioned Eigensolver: Locally Optimal
255
+ Block Preconditioned Conjugate Gradient Method",
256
+ Andrew V. Knyazev
257
+ <10.1137/S1064827500366124>`
258
+
259
+ Examples
260
+ --------
261
+ >>> from sklearn.datasets import load_digits
262
+ >>> from sklearn.neighbors import kneighbors_graph
263
+ >>> from sklearn.manifold import spectral_embedding
264
+ >>> X, _ = load_digits(return_X_y=True)
265
+ >>> X = X[:100]
266
+ >>> affinity_matrix = kneighbors_graph(
267
+ ... X, n_neighbors=int(X.shape[0] / 10), include_self=True
268
+ ... )
269
+ >>> # make the matrix symmetric
270
+ >>> affinity_matrix = 0.5 * (affinity_matrix + affinity_matrix.T)
271
+ >>> embedding = spectral_embedding(affinity_matrix, n_components=2, random_state=42)
272
+ >>> embedding.shape
273
+ (100, 2)
274
+ """
275
+ adjacency = check_symmetric(adjacency)
276
+
277
+ if eigen_solver == "amg":
278
+ try:
279
+ from pyamg import smoothed_aggregation_solver
280
+ except ImportError as e:
281
+ raise ValueError(
282
+ "The eigen_solver was set to 'amg', but pyamg is not available."
283
+ ) from e
284
+
285
+ if eigen_solver is None:
286
+ eigen_solver = "arpack"
287
+ elif eigen_solver not in ("arpack", "lobpcg", "amg"):
288
+ raise ValueError(
289
+ "Unknown value for eigen_solver: '%s'."
290
+ "Should be 'amg', 'arpack', or 'lobpcg'" % eigen_solver
291
+ )
292
+
293
+ random_state = check_random_state(random_state)
294
+
295
+ n_nodes = adjacency.shape[0]
296
+ # Whether to drop the first eigenvector
297
+ if drop_first:
298
+ n_components = n_components + 1
299
+
300
+ if not _graph_is_connected(adjacency):
301
+ warnings.warn(
302
+ "Graph is not fully connected, spectral embedding may not work as expected."
303
+ )
304
+
305
+ laplacian, dd = csgraph_laplacian(
306
+ adjacency, normed=norm_laplacian, return_diag=True
307
+ )
308
+ if (
309
+ eigen_solver == "arpack"
310
+ or eigen_solver != "lobpcg"
311
+ and (not sparse.issparse(laplacian) or n_nodes < 5 * n_components)
312
+ ):
313
+ # lobpcg used with eigen_solver='amg' has bugs for low number of nodes
314
+ # for details see the source code in scipy:
315
+ # https://github.com/scipy/scipy/blob/v0.11.0/scipy/sparse/linalg/eigen
316
+ # /lobpcg/lobpcg.py#L237
317
+ # or matlab:
318
+ # https://www.mathworks.com/matlabcentral/fileexchange/48-lobpcg-m
319
+ laplacian = _set_diag(laplacian, 1, norm_laplacian)
320
+
321
+ # Here we'll use shift-invert mode for fast eigenvalues
322
+ # (see https://docs.scipy.org/doc/scipy/reference/tutorial/arpack.html
323
+ # for a short explanation of what this means)
324
+ # Because the normalized Laplacian has eigenvalues between 0 and 2,
325
+ # I - L has eigenvalues between -1 and 1. ARPACK is most efficient
326
+ # when finding eigenvalues of largest magnitude (keyword which='LM')
327
+ # and when these eigenvalues are very large compared to the rest.
328
+ # For very large, very sparse graphs, I - L can have many, many
329
+ # eigenvalues very near 1.0. This leads to slow convergence. So
330
+ # instead, we'll use ARPACK's shift-invert mode, asking for the
331
+ # eigenvalues near 1.0. This effectively spreads-out the spectrum
332
+ # near 1.0 and leads to much faster convergence: potentially an
333
+ # orders-of-magnitude speedup over simply using keyword which='LA'
334
+ # in standard mode.
335
+ try:
336
+ # We are computing the opposite of the laplacian inplace so as
337
+ # to spare a memory allocation of a possibly very large array
338
+ tol = 0 if eigen_tol == "auto" else eigen_tol
339
+ laplacian *= -1
340
+ v0 = _init_arpack_v0(laplacian.shape[0], random_state)
341
+ laplacian = check_array(
342
+ laplacian, accept_sparse="csr", accept_large_sparse=False
343
+ )
344
+ _, diffusion_map = eigsh(
345
+ laplacian, k=n_components, sigma=1.0, which="LM", tol=tol, v0=v0
346
+ )
347
+ embedding = diffusion_map.T[n_components::-1]
348
+ if norm_laplacian:
349
+ # recover u = D^-1/2 x from the eigenvector output x
350
+ embedding = embedding / dd
351
+ except RuntimeError:
352
+ # When submatrices are exactly singular, an LU decomposition
353
+ # in arpack fails. We fallback to lobpcg
354
+ eigen_solver = "lobpcg"
355
+ # Revert the laplacian to its opposite to have lobpcg work
356
+ laplacian *= -1
357
+
358
+ elif eigen_solver == "amg":
359
+ # Use AMG to get a preconditioner and speed up the eigenvalue
360
+ # problem.
361
+ if not sparse.issparse(laplacian):
362
+ warnings.warn("AMG works better for sparse matrices")
363
+ laplacian = check_array(
364
+ laplacian, dtype=[np.float64, np.float32], accept_sparse=True
365
+ )
366
+ laplacian = _set_diag(laplacian, 1, norm_laplacian)
367
+
368
+ # The Laplacian matrix is always singular, having at least one zero
369
+ # eigenvalue, corresponding to the trivial eigenvector, which is a
370
+ # constant. Using a singular matrix for preconditioning may result in
371
+ # random failures in LOBPCG and is not supported by the existing
372
+ # theory:
373
+ # see https://doi.org/10.1007/s10208-015-9297-1
374
+ # Shift the Laplacian so its diagonal is not all ones. The shift
375
+ # does change the eigenpairs however, so we'll feed the shifted
376
+ # matrix to the solver and afterward set it back to the original.
377
+ diag_shift = 1e-5 * sparse.eye(laplacian.shape[0])
378
+ laplacian += diag_shift
379
+ if hasattr(sparse, "csr_array") and isinstance(laplacian, sparse.csr_array):
380
+ # `pyamg` does not work with `csr_array` and we need to convert it to a
381
+ # `csr_matrix` object.
382
+ laplacian = sparse.csr_matrix(laplacian)
383
+ ml = smoothed_aggregation_solver(check_array(laplacian, accept_sparse="csr"))
384
+ laplacian -= diag_shift
385
+
386
+ M = ml.aspreconditioner()
387
+ # Create initial approximation X to eigenvectors
388
+ X = random_state.standard_normal(size=(laplacian.shape[0], n_components + 1))
389
+ X[:, 0] = dd.ravel()
390
+ X = X.astype(laplacian.dtype)
391
+
392
+ tol = None if eigen_tol == "auto" else eigen_tol
393
+ _, diffusion_map = lobpcg(laplacian, X, M=M, tol=tol, largest=False)
394
+ embedding = diffusion_map.T
395
+ if norm_laplacian:
396
+ # recover u = D^-1/2 x from the eigenvector output x
397
+ embedding = embedding / dd
398
+ if embedding.shape[0] == 1:
399
+ raise ValueError
400
+
401
+ if eigen_solver == "lobpcg":
402
+ laplacian = check_array(
403
+ laplacian, dtype=[np.float64, np.float32], accept_sparse=True
404
+ )
405
+ if n_nodes < 5 * n_components + 1:
406
+ # see note above under arpack why lobpcg has problems with small
407
+ # number of nodes
408
+ # lobpcg will fallback to eigh, so we short circuit it
409
+ if sparse.issparse(laplacian):
410
+ laplacian = laplacian.toarray()
411
+ _, diffusion_map = eigh(laplacian, check_finite=False)
412
+ embedding = diffusion_map.T[:n_components]
413
+ if norm_laplacian:
414
+ # recover u = D^-1/2 x from the eigenvector output x
415
+ embedding = embedding / dd
416
+ else:
417
+ laplacian = _set_diag(laplacian, 1, norm_laplacian)
418
+ # We increase the number of eigenvectors requested, as lobpcg
420
+ # doesn't behave well in low dimensions, and create an initial
421
+ # approximation X to the eigenvectors
421
+ X = random_state.standard_normal(
422
+ size=(laplacian.shape[0], n_components + 1)
423
+ )
424
+ X[:, 0] = dd.ravel()
425
+ X = X.astype(laplacian.dtype)
426
+ tol = None if eigen_tol == "auto" else eigen_tol
427
+ _, diffusion_map = lobpcg(
428
+ laplacian, X, tol=tol, largest=False, maxiter=2000
429
+ )
430
+ embedding = diffusion_map.T[:n_components]
431
+ if norm_laplacian:
432
+ # recover u = D^-1/2 x from the eigenvector output x
433
+ embedding = embedding / dd
434
+ if embedding.shape[0] == 1:
435
+ raise ValueError
436
+
437
+ embedding = _deterministic_vector_sign_flip(embedding)
438
+ if drop_first:
439
+ return embedding[1:n_components].T
440
+ else:
441
+ return embedding[:n_components].T
442
+
443
+
444
+ class SpectralEmbedding(BaseEstimator):
445
+ """Spectral embedding for non-linear dimensionality reduction.
446
+
447
+ Forms an affinity matrix given by the specified function and
448
+ applies spectral decomposition to the corresponding graph laplacian.
449
+ The resulting transformation is given by the value of the
450
+ eigenvectors for each data point.
451
+
452
+ Note: Laplacian Eigenmaps is the actual algorithm implemented here.
453
+
454
+ Read more in the :ref:`User Guide <spectral_embedding>`.
455
+
456
+ Parameters
457
+ ----------
458
+ n_components : int, default=2
459
+ The dimension of the projected subspace.
460
+
461
+ affinity : {'nearest_neighbors', 'rbf', 'precomputed', \
462
+ 'precomputed_nearest_neighbors'} or callable, \
463
+ default='nearest_neighbors'
464
+ How to construct the affinity matrix.
465
+ - 'nearest_neighbors' : construct the affinity matrix by computing a
466
+ graph of nearest neighbors.
467
+ - 'rbf' : construct the affinity matrix by computing a radial basis
468
+ function (RBF) kernel.
469
+ - 'precomputed' : interpret ``X`` as a precomputed affinity matrix.
470
+ - 'precomputed_nearest_neighbors' : interpret ``X`` as a sparse graph
471
+ of precomputed nearest neighbors, and constructs the affinity matrix
472
+ by selecting the ``n_neighbors`` nearest neighbors.
473
+ - callable : use the passed-in function as the affinity metric;
474
+ the function takes in a data matrix (n_samples, n_features)
475
+ and returns an affinity matrix (n_samples, n_samples).
476
+
477
+ gamma : float, default=None
478
+ Kernel coefficient for rbf kernel. If None, gamma will be set to
479
+ 1/n_features.
480
+
481
+ random_state : int, RandomState instance or None, default=None
482
+ A pseudo random number generator used for the initialization
483
+ of the lobpcg eigen vectors decomposition when `eigen_solver ==
484
+ 'amg'`, and for the K-Means initialization. Use an int to make
485
+ the results deterministic across calls (See
486
+ :term:`Glossary <random_state>`).
487
+
488
+ .. note::
489
+ When using `eigen_solver == 'amg'`,
490
+ it is necessary to also fix the global numpy seed with
491
+ `np.random.seed(int)` to get deterministic results. See
492
+ https://github.com/pyamg/pyamg/issues/139 for further
493
+ information.
494
+
495
+ eigen_solver : {'arpack', 'lobpcg', 'amg'}, default=None
496
+ The eigenvalue decomposition strategy to use. AMG requires pyamg
497
+ to be installed. It can be faster on very large, sparse problems.
498
+ If None, then ``'arpack'`` is used.
499
+
500
+ eigen_tol : float, default="auto"
501
+ Stopping criterion for eigendecomposition of the Laplacian matrix.
502
+ If `eigen_tol="auto"` then the passed tolerance will depend on the
503
+ `eigen_solver`:
504
+
505
+ - If `eigen_solver="arpack"`, then `eigen_tol=0.0`;
506
+ - If `eigen_solver="lobpcg"` or `eigen_solver="amg"`, then
507
+ `eigen_tol=None` which configures the underlying `lobpcg` solver to
508
+ automatically resolve the value according to its heuristics. See
509
+ :func:`scipy.sparse.linalg.lobpcg` for details.
510
+
511
+ Note that when using `eigen_solver="lobpcg"` or `eigen_solver="amg"`
512
+ values of `tol<1e-5` may lead to convergence issues and should be
513
+ avoided.
514
+
515
+ .. versionadded:: 1.2
516
+
517
+ n_neighbors : int, default=None
518
+ Number of nearest neighbors for nearest_neighbors graph building.
519
+ If None, n_neighbors will be set to max(n_samples/10, 1).
520
+
521
+ n_jobs : int, default=None
522
+ The number of parallel jobs to run.
523
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
524
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
525
+ for more details.
526
+
527
+ Attributes
528
+ ----------
529
+ embedding_ : ndarray of shape (n_samples, n_components)
530
+ Spectral embedding of the training matrix.
531
+
532
+ affinity_matrix_ : ndarray of shape (n_samples, n_samples)
533
+ Affinity matrix constructed from samples or precomputed.
534
+
535
+ n_features_in_ : int
536
+ Number of features seen during :term:`fit`.
537
+
538
+ .. versionadded:: 0.24
539
+
540
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
541
+ Names of features seen during :term:`fit`. Defined only when `X`
542
+ has feature names that are all strings.
543
+
544
+ .. versionadded:: 1.0
545
+
546
+ n_neighbors_ : int
547
+ Number of nearest neighbors effectively used.
548
+
549
+ See Also
550
+ --------
551
+ Isomap : Non-linear dimensionality reduction through Isometric Mapping.
552
+
553
+ References
554
+ ----------
555
+
556
+ - :doi:`A Tutorial on Spectral Clustering, 2007
557
+ Ulrike von Luxburg
558
+ <10.1007/s11222-007-9033-z>`
559
+
560
+ - `On Spectral Clustering: Analysis and an algorithm, 2001
561
+ Andrew Y. Ng, Michael I. Jordan, Yair Weiss
562
+ <https://citeseerx.ist.psu.edu/doc_view/pid/796c5d6336fc52aa84db575fb821c78918b65f58>`_
563
+
564
+ - :doi:`Normalized cuts and image segmentation, 2000
565
+ Jianbo Shi, Jitendra Malik
566
+ <10.1109/34.868688>`
567
+
568
+ Examples
569
+ --------
570
+ >>> from sklearn.datasets import load_digits
571
+ >>> from sklearn.manifold import SpectralEmbedding
572
+ >>> X, _ = load_digits(return_X_y=True)
573
+ >>> X.shape
574
+ (1797, 64)
575
+ >>> embedding = SpectralEmbedding(n_components=2)
576
+ >>> X_transformed = embedding.fit_transform(X[:100])
577
+ >>> X_transformed.shape
578
+ (100, 2)
579
+ """
580
+
581
+ _parameter_constraints: dict = {
582
+ "n_components": [Interval(Integral, 1, None, closed="left")],
583
+ "affinity": [
584
+ StrOptions(
585
+ {
586
+ "nearest_neighbors",
587
+ "rbf",
588
+ "precomputed",
589
+ "precomputed_nearest_neighbors",
590
+ },
591
+ ),
592
+ callable,
593
+ ],
594
+ "gamma": [Interval(Real, 0, None, closed="left"), None],
595
+ "random_state": ["random_state"],
596
+ "eigen_solver": [StrOptions({"arpack", "lobpcg", "amg"}), None],
597
+ "eigen_tol": [Interval(Real, 0, None, closed="left"), StrOptions({"auto"})],
598
+ "n_neighbors": [Interval(Integral, 1, None, closed="left"), None],
599
+ "n_jobs": [None, Integral],
600
+ }
601
+
602
+ def __init__(
603
+ self,
604
+ n_components=2,
605
+ *,
606
+ affinity="nearest_neighbors",
607
+ gamma=None,
608
+ random_state=None,
609
+ eigen_solver=None,
610
+ eigen_tol="auto",
611
+ n_neighbors=None,
612
+ n_jobs=None,
613
+ ):
614
+ self.n_components = n_components
615
+ self.affinity = affinity
616
+ self.gamma = gamma
617
+ self.random_state = random_state
618
+ self.eigen_solver = eigen_solver
619
+ self.eigen_tol = eigen_tol
620
+ self.n_neighbors = n_neighbors
621
+ self.n_jobs = n_jobs
622
+
623
+ def _more_tags(self):
624
+ return {
625
+ "pairwise": self.affinity in [
626
+ "precomputed",
627
+ "precomputed_nearest_neighbors",
628
+ ]
629
+ }
630
+
631
+ def _get_affinity_matrix(self, X, Y=None):
632
+ """Calculate the affinity matrix from data
633
+ Parameters
634
+ ----------
635
+ X : array-like of shape (n_samples, n_features)
636
+ Training vector, where `n_samples` is the number of samples
637
+ and `n_features` is the number of features.
638
+
639
+ If affinity is "precomputed"
640
+ X : array-like of shape (n_samples, n_samples),
641
+ Interpret X as precomputed adjacency graph computed from
642
+ samples.
643
+
644
+ Y: Ignored
645
+
646
+ Returns
647
+ -------
648
+ affinity_matrix : ndarray of shape (n_samples, n_samples)
649
+ """
650
+ if self.affinity == "precomputed":
651
+ self.affinity_matrix_ = X
652
+ return self.affinity_matrix_
653
+ if self.affinity == "precomputed_nearest_neighbors":
654
+ estimator = NearestNeighbors(
655
+ n_neighbors=self.n_neighbors, n_jobs=self.n_jobs, metric="precomputed"
656
+ ).fit(X)
657
+ connectivity = estimator.kneighbors_graph(X=X, mode="connectivity")
658
+ self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
659
+ return self.affinity_matrix_
660
+ if self.affinity == "nearest_neighbors":
661
+ if sparse.issparse(X):
662
+ warnings.warn(
663
+ "Nearest neighbors affinity currently does "
664
+ "not support sparse input, falling back to "
665
+ "rbf affinity"
666
+ )
667
+ self.affinity = "rbf"
668
+ else:
669
+ self.n_neighbors_ = (
670
+ self.n_neighbors
671
+ if self.n_neighbors is not None
672
+ else max(int(X.shape[0] / 10), 1)
673
+ )
674
+ self.affinity_matrix_ = kneighbors_graph(
675
+ X, self.n_neighbors_, include_self=True, n_jobs=self.n_jobs
676
+ )
677
+ # currently only symmetric affinity_matrix supported
678
+ self.affinity_matrix_ = 0.5 * (
679
+ self.affinity_matrix_ + self.affinity_matrix_.T
680
+ )
681
+ return self.affinity_matrix_
682
+ if self.affinity == "rbf":
683
+ self.gamma_ = self.gamma if self.gamma is not None else 1.0 / X.shape[1]
684
+ self.affinity_matrix_ = rbf_kernel(X, gamma=self.gamma_)
685
+ return self.affinity_matrix_
686
+ self.affinity_matrix_ = self.affinity(X)
687
+ return self.affinity_matrix_
688
+
689
+ @_fit_context(prefer_skip_nested_validation=True)
690
+ def fit(self, X, y=None):
691
+ """Fit the model from data in X.
692
+
693
+ Parameters
694
+ ----------
695
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
696
+ Training vector, where `n_samples` is the number of samples
697
+ and `n_features` is the number of features.
698
+
699
+ If affinity is "precomputed"
700
+ X : {array-like, sparse matrix}, shape (n_samples, n_samples),
701
+ Interpret X as precomputed adjacency graph computed from
702
+ samples.
703
+
704
+ y : Ignored
705
+ Not used, present for API consistency by convention.
706
+
707
+ Returns
708
+ -------
709
+ self : object
710
+ Returns the instance itself.
711
+ """
712
+ X = self._validate_data(X, accept_sparse="csr", ensure_min_samples=2)
713
+
714
+ random_state = check_random_state(self.random_state)
715
+
716
+ affinity_matrix = self._get_affinity_matrix(X)
717
+ self.embedding_ = spectral_embedding(
718
+ affinity_matrix,
719
+ n_components=self.n_components,
720
+ eigen_solver=self.eigen_solver,
721
+ eigen_tol=self.eigen_tol,
722
+ random_state=random_state,
723
+ )
724
+ return self
725
+
726
+ def fit_transform(self, X, y=None):
727
+ """Fit the model from data in X and transform X.
728
+
729
+ Parameters
730
+ ----------
731
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
732
+ Training vector, where `n_samples` is the number of samples
733
+ and `n_features` is the number of features.
734
+
735
+ If affinity is "precomputed"
736
+ X : {array-like, sparse matrix} of shape (n_samples, n_samples),
737
+ Interpret X as precomputed adjacency graph computed from
738
+ samples.
739
+
740
+ y : Ignored
741
+ Not used, present for API consistency by convention.
742
+
743
+ Returns
744
+ -------
745
+ X_new : array-like of shape (n_samples, n_components)
746
+ Spectral embedding of the training matrix.
747
+ """
748
+ self.fit(X)
749
+ return self.embedding_
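
A brief sketch of the `affinity='precomputed'` path through `fit` above, building the same symmetrized k-NN graph that `_get_affinity_matrix` would otherwise construct internally (shapes only; eigenvector signs may vary across runs):

>>> from sklearn.datasets import load_digits
>>> from sklearn.manifold import SpectralEmbedding
>>> from sklearn.neighbors import kneighbors_graph
>>> X, _ = load_digits(return_X_y=True)
>>> A = kneighbors_graph(X[:100], n_neighbors=10, include_self=True)
>>> A = 0.5 * (A + A.T)                       # symmetrize, as the class does
>>> se = SpectralEmbedding(n_components=2, affinity='precomputed', random_state=42)
>>> se.fit_transform(A).shape
(100, 2)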
env-llmeval/lib/python3.10/site-packages/sklearn/manifold/_t_sne.py ADDED
@@ -0,0 +1,1174 @@
1
+ # Author: Alexander Fabisch -- <[email protected]>
2
+ # Author: Christopher Moody <[email protected]>
3
+ # Author: Nick Travers <[email protected]>
4
+ # License: BSD 3 clause (C) 2014
5
+
6
+ # This is the exact and Barnes-Hut t-SNE implementation. There are other
7
+ # modifications of the algorithm:
8
+ # * Fast Optimization for t-SNE:
9
+ # https://cseweb.ucsd.edu/~lvdmaaten/workshops/nips2010/papers/vandermaaten.pdf
10
+
11
+ from numbers import Integral, Real
12
+ from time import time
13
+
14
+ import numpy as np
15
+ from scipy import linalg
16
+ from scipy.sparse import csr_matrix, issparse
17
+ from scipy.spatial.distance import pdist, squareform
18
+
19
+ from ..base import (
20
+ BaseEstimator,
21
+ ClassNamePrefixFeaturesOutMixin,
22
+ TransformerMixin,
23
+ _fit_context,
24
+ )
25
+ from ..decomposition import PCA
26
+ from ..metrics.pairwise import _VALID_METRICS, pairwise_distances
27
+ from ..neighbors import NearestNeighbors
28
+ from ..utils import check_random_state
29
+ from ..utils._openmp_helpers import _openmp_effective_n_threads
30
+ from ..utils._param_validation import Interval, StrOptions, validate_params
31
+ from ..utils.validation import _num_samples, check_non_negative
32
+
33
+ # mypy error: Module 'sklearn.manifold' has no attribute '_utils'
34
+ # mypy error: Module 'sklearn.manifold' has no attribute '_barnes_hut_tsne'
35
+ from . import _barnes_hut_tsne, _utils # type: ignore
36
+
37
+ MACHINE_EPSILON = np.finfo(np.double).eps
38
+
39
+
40
+ def _joint_probabilities(distances, desired_perplexity, verbose):
41
+ """Compute joint probabilities p_ij from distances.
42
+
43
+ Parameters
44
+ ----------
45
+ distances : ndarray of shape (n_samples * (n_samples-1) / 2,)
46
+ Distances of samples are stored as condensed matrices, i.e.
47
+ we omit the diagonal and duplicate entries and store everything
48
+ in a one-dimensional array.
49
+
50
+ desired_perplexity : float
51
+ Desired perplexity of the joint probability distributions.
52
+
53
+ verbose : int
54
+ Verbosity level.
55
+
56
+ Returns
57
+ -------
58
+ P : ndarray of shape (n_samples * (n_samples-1) / 2,)
59
+ Condensed joint probability matrix.
60
+ """
61
+ # Compute conditional probabilities such that they approximately match
62
+ # the desired perplexity
63
+ distances = distances.astype(np.float32, copy=False)
64
+ conditional_P = _utils._binary_search_perplexity(
65
+ distances, desired_perplexity, verbose
66
+ )
67
+ P = conditional_P + conditional_P.T
68
+ sum_P = np.maximum(np.sum(P), MACHINE_EPSILON)
69
+ P = np.maximum(squareform(P) / sum_P, MACHINE_EPSILON)
70
+ return P
71
+
72
+
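A minimal sketch of driving the helper above directly, assuming a built scikit-learn (it calls the compiled `_utils._binary_search_perplexity`); both input and output are condensed vectors of length n*(n-1)/2:

    import numpy as np
    from scipy.spatial.distance import pdist
    from sklearn.manifold._t_sne import _joint_probabilities  # private helper

    X = np.random.RandomState(0).randn(50, 5)
    d2 = pdist(X, "sqeuclidean")  # condensed squared distances, length 50*49/2
    P = _joint_probabilities(d2, desired_perplexity=10.0, verbose=0)
    assert P.shape == d2.shape and np.all(P > 0)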
73
+ def _joint_probabilities_nn(distances, desired_perplexity, verbose):
74
+ """Compute joint probabilities p_ij from distances using just nearest
75
+ neighbors.
76
+
77
+ This method is approximately equal to _joint_probabilities. The latter
78
+ is O(N^2), but limiting the joint probability to nearest neighbors improves
79
+ this substantially to O(uN).
80
+
81
+ Parameters
82
+ ----------
83
+ distances : sparse matrix of shape (n_samples, n_samples)
84
+ Distances of each sample to its n_neighbors nearest neighbors. All other
85
+ distances are left at zero (and are not materialized in memory).
86
+ Matrix should be of CSR format.
87
+
88
+ desired_perplexity : float
89
+ Desired perplexity of the joint probability distributions.
90
+
91
+ verbose : int
92
+ Verbosity level.
93
+
94
+ Returns
95
+ -------
96
+ P : sparse matrix of shape (n_samples, n_samples)
97
+ Sparse joint probability matrix restricted to the nearest neighbors. Matrix
98
+ will be of CSR format.
99
+ """
100
+ t0 = time()
101
+ # Compute conditional probabilities such that they approximately match
102
+ # the desired perplexity
103
+ distances.sort_indices()
104
+ n_samples = distances.shape[0]
105
+ distances_data = distances.data.reshape(n_samples, -1)
106
+ distances_data = distances_data.astype(np.float32, copy=False)
107
+ conditional_P = _utils._binary_search_perplexity(
108
+ distances_data, desired_perplexity, verbose
109
+ )
110
+ assert np.all(np.isfinite(conditional_P)), "All probabilities should be finite"
111
+
112
+ # Symmetrize the joint probability distribution using sparse operations
113
+ P = csr_matrix(
114
+ (conditional_P.ravel(), distances.indices, distances.indptr),
115
+ shape=(n_samples, n_samples),
116
+ )
117
+ P = P + P.T
118
+
119
+ # Normalize the joint probability distribution
120
+ sum_P = np.maximum(P.sum(), MACHINE_EPSILON)
121
+ P /= sum_P
122
+
123
+ assert np.all(np.abs(P.data) <= 1.0)
124
+ if verbose >= 2:
125
+ duration = time() - t0
126
+ print("[t-SNE] Computed conditional probabilities in {:.3f}s".format(duration))
127
+ return P
128
+
129
+
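A hedged sketch of producing the CSR input this function expects, mirroring what `TSNE._fit` does further down (a k-nearest-neighbors distance graph with squared distances):

    import numpy as np
    from sklearn.neighbors import NearestNeighbors
    from sklearn.manifold._t_sne import _joint_probabilities_nn  # private helper

    X = np.random.RandomState(0).randn(200, 10)
    D = NearestNeighbors(n_neighbors=30).fit(X).kneighbors_graph(mode="distance")
    D.data **= 2  # squared distances, as in TSNE._fit
    P = _joint_probabilities_nn(D, desired_perplexity=10.0, verbose=0)
    assert P.shape == (200, 200)  # sparse, symmetric, sums to 1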
130
+ def _kl_divergence(
131
+ params,
132
+ P,
133
+ degrees_of_freedom,
134
+ n_samples,
135
+ n_components,
136
+ skip_num_points=0,
137
+ compute_error=True,
138
+ ):
139
+ """t-SNE objective function: gradient of the KL divergence
140
+ and its gradient.
141
+
142
+ Parameters
143
+ ----------
144
+ params : ndarray of shape (n_params,)
145
+ Unraveled embedding.
146
+
147
+ P : ndarray of shape (n_samples * (n_samples-1) / 2,)
148
+ Condensed joint probability matrix.
149
+
150
+ degrees_of_freedom : int
151
+ Degrees of freedom of the Student's-t distribution.
152
+
153
+ n_samples : int
154
+ Number of samples.
155
+
156
+ n_components : int
157
+ Dimension of the embedded space.
158
+
159
+ skip_num_points : int, default=0
160
+ This does not compute the gradient for points with indices below
161
+ `skip_num_points`. This is useful when computing transforms of new
162
+ data where you'd like to keep the old data fixed.
163
+
164
+ compute_error: bool, default=True
165
+ If False, the kl_divergence is not computed and returns NaN.
166
+
167
+ Returns
168
+ -------
169
+ kl_divergence : float
170
+ Kullback-Leibler divergence of p_ij and q_ij.
171
+
172
+ grad : ndarray of shape (n_params,)
173
+ Unraveled gradient of the Kullback-Leibler divergence with respect to
174
+ the embedding.
175
+ """
176
+ X_embedded = params.reshape(n_samples, n_components)
177
+
178
+ # Q is a heavy-tailed distribution: Student's t-distribution
179
+ dist = pdist(X_embedded, "sqeuclidean")
180
+ dist /= degrees_of_freedom
181
+ dist += 1.0
182
+ dist **= (degrees_of_freedom + 1.0) / -2.0
183
+ Q = np.maximum(dist / (2.0 * np.sum(dist)), MACHINE_EPSILON)
184
+
185
+ # Optimization trick below: np.dot(x, y) is faster than
186
+ # np.sum(x * y) because it calls BLAS
187
+
188
+ # Objective: C (Kullback-Leibler divergence of P and Q)
189
+ if compute_error:
190
+ kl_divergence = 2.0 * np.dot(P, np.log(np.maximum(P, MACHINE_EPSILON) / Q))
191
+ else:
192
+ kl_divergence = np.nan
193
+
194
+ # Gradient: dC/dY
195
+ # pdist always returns double precision distances. Thus we need to allocate the gradient explicitly with the dtype of params.
196
+ grad = np.ndarray((n_samples, n_components), dtype=params.dtype)
197
+ PQd = squareform((P - Q) * dist)
198
+ for i in range(skip_num_points, n_samples):
199
+ grad[i] = np.dot(np.ravel(PQd[i], order="K"), X_embedded[i] - X_embedded)
200
+ grad = grad.ravel()
201
+ c = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom
202
+ grad *= c
203
+
204
+ return kl_divergence, grad
205
+
206
+
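A small sanity sketch for the analytic gradient returned above, comparing one coordinate against a central finite difference (private helpers, illustration only):

    import numpy as np
    from scipy.spatial.distance import pdist
    from sklearn.manifold._t_sne import _joint_probabilities, _kl_divergence

    rng = np.random.RandomState(0)
    X = rng.randn(20, 4)
    P = _joint_probabilities(pdist(X, "sqeuclidean"), desired_perplexity=5.0, verbose=0)
    params = 1e-4 * rng.randn(20 * 2)

    _, grad = _kl_divergence(params, P, degrees_of_freedom=1, n_samples=20, n_components=2)
    eps = 1e-7
    e0 = np.zeros_like(params)
    e0[0] = eps
    num = (_kl_divergence(params + e0, P, 1, 20, 2)[0]
           - _kl_divergence(params - e0, P, 1, 20, 2)[0]) / (2 * eps)
    print(grad[0], num)  # the two values should roughly agree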
207
+ def _kl_divergence_bh(
208
+ params,
209
+ P,
210
+ degrees_of_freedom,
211
+ n_samples,
212
+ n_components,
213
+ angle=0.5,
214
+ skip_num_points=0,
215
+ verbose=False,
216
+ compute_error=True,
217
+ num_threads=1,
218
+ ):
219
+ """t-SNE objective function: KL divergence of p_ijs and q_ijs.
220
+
221
+ Uses Barnes-Hut tree methods to calculate the gradient that
222
+ runs in O(NlogN) instead of O(N^2).
223
+
224
+ Parameters
225
+ ----------
226
+ params : ndarray of shape (n_params,)
227
+ Unraveled embedding.
228
+
229
+ P : sparse matrix of shape (n_samples, n_samples)
230
+ Sparse approximate joint probability matrix, computed only for the
231
+ k nearest-neighbors and symmetrized. Matrix should be of CSR format.
232
+
233
+ degrees_of_freedom : int
234
+ Degrees of freedom of the Student's-t distribution.
235
+
236
+ n_samples : int
237
+ Number of samples.
238
+
239
+ n_components : int
240
+ Dimension of the embedded space.
241
+
242
+ angle : float, default=0.5
243
+ This is the trade-off between speed and accuracy for Barnes-Hut T-SNE.
244
+ 'angle' is the angular size (referred to as theta in [3]) of a distant
245
+ node as measured from a point. If this size is below 'angle' then it is
246
+ used as a summary node of all points contained within it.
247
+ This method is not very sensitive to changes in this parameter
248
+ in the range of 0.2 - 0.8. Angle less than 0.2 has quickly increasing
249
+ computation time and angle greater than 0.8 has quickly increasing error.
250
+
251
+ skip_num_points : int, default=0
252
+ This does not compute the gradient for points with indices below
253
+ `skip_num_points`. This is useful when computing transforms of new
254
+ data where you'd like to keep the old data fixed.
255
+
256
+ verbose : int, default=False
257
+ Verbosity level.
258
+
259
+ compute_error: bool, default=True
260
+ If False, the kl_divergence is not computed and returns NaN.
261
+
262
+ num_threads : int, default=1
263
+ Number of threads used to compute the gradient. This is set here to
264
+ avoid calling _openmp_effective_n_threads for each gradient step.
265
+
266
+ Returns
267
+ -------
268
+ kl_divergence : float
269
+ Kullback-Leibler divergence of p_ij and q_ij.
270
+
271
+ grad : ndarray of shape (n_params,)
272
+ Unraveled gradient of the Kullback-Leibler divergence with respect to
273
+ the embedding.
274
+ """
275
+ params = params.astype(np.float32, copy=False)
276
+ X_embedded = params.reshape(n_samples, n_components)
277
+
278
+ val_P = P.data.astype(np.float32, copy=False)
279
+ neighbors = P.indices.astype(np.int64, copy=False)
280
+ indptr = P.indptr.astype(np.int64, copy=False)
281
+
282
+ grad = np.zeros(X_embedded.shape, dtype=np.float32)
283
+ error = _barnes_hut_tsne.gradient(
284
+ val_P,
285
+ X_embedded,
286
+ neighbors,
287
+ indptr,
288
+ grad,
289
+ angle,
290
+ n_components,
291
+ verbose,
292
+ dof=degrees_of_freedom,
293
+ compute_error=compute_error,
294
+ num_threads=num_threads,
295
+ )
296
+ c = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom
297
+ grad = grad.ravel()
298
+ grad *= c
299
+
300
+ return error, grad
301
+
302
+
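For completeness, a hedged sketch wiring a sparse P (built as in the `_joint_probabilities_nn` example above) into this Barnes-Hut gradient; it requires the compiled `_barnes_hut_tsne` extension and float32 parameters:

    import numpy as np
    from sklearn.neighbors import NearestNeighbors
    from sklearn.manifold._t_sne import _joint_probabilities_nn, _kl_divergence_bh

    rng = np.random.RandomState(0)
    X = rng.randn(100, 5)
    D = NearestNeighbors(n_neighbors=30).fit(X).kneighbors_graph(mode="distance")
    D.data **= 2  # squared distances, as in TSNE._fit
    P = _joint_probabilities_nn(D, desired_perplexity=10.0, verbose=0)

    params = (1e-4 * rng.randn(100 * 2)).astype(np.float32)
    kl, grad = _kl_divergence_bh(params, P, degrees_of_freedom=1,
                                 n_samples=100, n_components=2, angle=0.5)
    print(kl, grad.shape)  # scalar error and a flat (200,) gradient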
303
+ def _gradient_descent(
304
+ objective,
305
+ p0,
306
+ it,
307
+ n_iter,
308
+ n_iter_check=1,
309
+ n_iter_without_progress=300,
310
+ momentum=0.8,
311
+ learning_rate=200.0,
312
+ min_gain=0.01,
313
+ min_grad_norm=1e-7,
314
+ verbose=0,
315
+ args=None,
316
+ kwargs=None,
317
+ ):
318
+ """Batch gradient descent with momentum and individual gains.
319
+
320
+ Parameters
321
+ ----------
322
+ objective : callable
323
+ Should return a tuple of cost and gradient for a given parameter
324
+ vector. When expensive to compute, the cost can optionally
325
+ be skipped; it is then only evaluated every n_iter_check steps
326
+ via the compute_error keyword argument.
327
+
328
+ p0 : array-like of shape (n_params,)
329
+ Initial parameter vector.
330
+
331
+ it : int
332
+ Current number of iterations (this function will be called more than
333
+ once during the optimization).
334
+
335
+ n_iter : int
336
+ Maximum number of gradient descent iterations.
337
+
338
+ n_iter_check : int, default=1
339
+ Number of iterations before evaluating the global error. If the error
340
+ is sufficiently low, we abort the optimization.
341
+
342
+ n_iter_without_progress : int, default=300
343
+ Maximum number of iterations without progress before we abort the
344
+ optimization.
345
+
346
+ momentum : float within (0.0, 1.0), default=0.8
347
+ The momentum generates a weight for previous gradients that decays
348
+ exponentially.
349
+
350
+ learning_rate : float, default=200.0
351
+ The learning rate for t-SNE is usually in the range [10.0, 1000.0]. If
352
+ the learning rate is too high, the data may look like a 'ball' with any
353
+ point approximately equidistant from its nearest neighbours. If the
354
+ learning rate is too low, most points may look compressed in a dense
355
+ cloud with few outliers.
356
+
357
+ min_gain : float, default=0.01
358
+ Minimum individual gain for each parameter.
359
+
360
+ min_grad_norm : float, default=1e-7
361
+ If the gradient norm is below this threshold, the optimization will
362
+ be aborted.
363
+
364
+ verbose : int, default=0
365
+ Verbosity level.
366
+
367
+ args : sequence, default=None
368
+ Arguments to pass to objective function.
369
+
370
+ kwargs : dict, default=None
371
+ Keyword arguments to pass to objective function.
372
+
373
+ Returns
374
+ -------
375
+ p : ndarray of shape (n_params,)
376
+ Optimum parameters.
377
+
378
+ error : float
379
+ Optimum.
380
+
381
+ i : int
382
+ Last iteration.
383
+ """
384
+ if args is None:
385
+ args = []
386
+ if kwargs is None:
387
+ kwargs = {}
388
+
389
+ p = p0.copy().ravel()
390
+ update = np.zeros_like(p)
391
+ gains = np.ones_like(p)
392
+ error = np.finfo(float).max
393
+ best_error = np.finfo(float).max
394
+ best_iter = i = it
395
+
396
+ tic = time()
397
+ for i in range(it, n_iter):
398
+ check_convergence = (i + 1) % n_iter_check == 0
399
+ # only compute the error when needed
400
+ kwargs["compute_error"] = check_convergence or i == n_iter - 1
401
+
402
+ error, grad = objective(p, *args, **kwargs)
403
+
404
+ inc = update * grad < 0.0
405
+ dec = np.invert(inc)
406
+ gains[inc] += 0.2
407
+ gains[dec] *= 0.8
408
+ np.clip(gains, min_gain, np.inf, out=gains)
409
+ grad *= gains
410
+ update = momentum * update - learning_rate * grad
411
+ p += update
412
+
413
+ if check_convergence:
414
+ toc = time()
415
+ duration = toc - tic
416
+ tic = toc
417
+ grad_norm = linalg.norm(grad)
418
+
419
+ if verbose >= 2:
420
+ print(
421
+ "[t-SNE] Iteration %d: error = %.7f,"
422
+ " gradient norm = %.7f"
423
+ " (%s iterations in %0.3fs)"
424
+ % (i + 1, error, grad_norm, n_iter_check, duration)
425
+ )
426
+
427
+ if error < best_error:
428
+ best_error = error
429
+ best_iter = i
430
+ elif i - best_iter > n_iter_without_progress:
431
+ if verbose >= 2:
432
+ print(
433
+ "[t-SNE] Iteration %d: did not make any progress "
434
+ "during the last %d episodes. Finished."
435
+ % (i + 1, n_iter_without_progress)
436
+ )
437
+ break
438
+ if grad_norm <= min_grad_norm:
439
+ if verbose >= 2:
440
+ print(
441
+ "[t-SNE] Iteration %d: gradient norm %f. Finished."
442
+ % (i + 1, grad_norm)
443
+ )
444
+ break
445
+
446
+ return p, error, i
447
+
448
+
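A toy illustration of the optimizer's calling convention: any objective returning `(cost, gradient)` and accepting a `compute_error` keyword can be plugged in. A simple quadratic stands in for the t-SNE objectives here (illustrative only):

    import numpy as np
    from sklearn.manifold._t_sne import _gradient_descent

    def quadratic(p, compute_error=True):
        # cost 0.5 * ||p||^2 has gradient p; the minimum is at the origin.
        # Return a copy: the caller rescales the gradient in place.
        return 0.5 * np.dot(p, p), p.copy()

    p0 = np.array([5.0, -3.0])
    p, cost, last_iter = _gradient_descent(
        quadratic, p0, it=0, n_iter=200, learning_rate=0.5, momentum=0.5
    )
    print(p, cost, last_iter)  # p should end up close to [0, 0]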
449
+ @validate_params(
450
+ {
451
+ "X": ["array-like", "sparse matrix"],
452
+ "X_embedded": ["array-like", "sparse matrix"],
453
+ "n_neighbors": [Interval(Integral, 1, None, closed="left")],
454
+ "metric": [StrOptions(set(_VALID_METRICS) | {"precomputed"}), callable],
455
+ },
456
+ prefer_skip_nested_validation=True,
457
+ )
458
+ def trustworthiness(X, X_embedded, *, n_neighbors=5, metric="euclidean"):
459
+ r"""Indicate to what extent the local structure is retained.
460
+
461
+ The trustworthiness is within [0, 1]. It is defined as
462
+
463
+ .. math::
464
+
465
+ T(k) = 1 - \frac{2}{nk (2n - 3k - 1)} \sum^n_{i=1}
466
+ \sum_{j \in \mathcal{N}_{i}^{k}} \max(0, (r(i, j) - k))
467
+
468
+ where for each sample i, :math:`\mathcal{N}_{i}^{k}` are its k nearest
469
+ neighbors in the output space, and every sample j is its :math:`r(i, j)`-th
470
+ nearest neighbor in the input space. In other words, any unexpected nearest
471
+ neighbors in the output space are penalised in proportion to their rank in
472
+ the input space.
473
+
474
+ Parameters
475
+ ----------
476
+ X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
477
+ (n_samples, n_samples)
478
+ If the metric is 'precomputed', X must be a square distance
479
+ matrix. Otherwise it contains a sample per row.
480
+
481
+ X_embedded : {array-like, sparse matrix} of shape (n_samples, n_components)
482
+ Embedding of the training data in low-dimensional space.
483
+
484
+ n_neighbors : int, default=5
485
+ The number of neighbors that will be considered. Should be fewer than
486
+ `n_samples / 2` to ensure that the trustworthiness lies within [0, 1], as
487
+ mentioned in [1]_. An error will be raised otherwise.
488
+
489
+ metric : str or callable, default='euclidean'
490
+ Which metric to use for computing pairwise distances between samples
491
+ from the original input space. If metric is 'precomputed', X must be a
492
+ matrix of pairwise distances or squared distances. Otherwise, for a list
493
+ of available metrics, see the documentation of argument metric in
494
+ `sklearn.metrics.pairwise.pairwise_distances` and metrics listed in
495
+ `sklearn.metrics.pairwise.PAIRWISE_DISTANCE_FUNCTIONS`. Note that the
496
+ "cosine" metric uses :func:`~sklearn.metrics.pairwise.cosine_distances`.
497
+
498
+ .. versionadded:: 0.20
499
+
500
+ Returns
501
+ -------
502
+ trustworthiness : float
503
+ Trustworthiness of the low-dimensional embedding.
504
+
505
+ References
506
+ ----------
507
+ .. [1] Jarkko Venna and Samuel Kaski. 2001. Neighborhood
508
+ Preservation in Nonlinear Projection Methods: An Experimental Study.
509
+ In Proceedings of the International Conference on Artificial Neural Networks
510
+ (ICANN '01). Springer-Verlag, Berlin, Heidelberg, 485-491.
511
+
512
+ .. [2] Laurens van der Maaten. Learning a Parametric Embedding by Preserving
513
+ Local Structure. Proceedings of the Twelfth International Conference on
514
+ Artificial Intelligence and Statistics, PMLR 5:384-391, 2009.
515
+
516
+ Examples
517
+ --------
518
+ >>> from sklearn.datasets import make_blobs
519
+ >>> from sklearn.decomposition import PCA
520
+ >>> from sklearn.manifold import trustworthiness
521
+ >>> X, _ = make_blobs(n_samples=100, n_features=10, centers=3, random_state=42)
522
+ >>> X_embedded = PCA(n_components=2).fit_transform(X)
523
+ >>> print(f"{trustworthiness(X, X_embedded, n_neighbors=5):.2f}")
524
+ 0.92
525
+ """
526
+ n_samples = _num_samples(X)
527
+ if n_neighbors >= n_samples / 2:
528
+ raise ValueError(
529
+ f"n_neighbors ({n_neighbors}) should be less than n_samples / 2"
530
+ f" ({n_samples / 2})"
531
+ )
532
+ dist_X = pairwise_distances(X, metric=metric)
533
+ if metric == "precomputed":
534
+ dist_X = dist_X.copy()
535
+ # we set the diagonal to np.inf to exclude the points themselves from
536
+ # their own neighborhood
537
+ np.fill_diagonal(dist_X, np.inf)
538
+ ind_X = np.argsort(dist_X, axis=1)
539
+ # `ind_X[i]` is the index of sorted distances between i and other samples
540
+ ind_X_embedded = (
541
+ NearestNeighbors(n_neighbors=n_neighbors)
542
+ .fit(X_embedded)
543
+ .kneighbors(return_distance=False)
544
+ )
545
+
546
+ # We build an inverted index of neighbors in the input space: For sample i,
547
+ # we define `inverted_index[i]` as the inverted index of sorted distances:
548
+ # inverted_index[i][ind_X[i]] = np.arange(1, n_sample + 1)
549
+ inverted_index = np.zeros((n_samples, n_samples), dtype=int)
550
+ ordered_indices = np.arange(n_samples + 1)
551
+ inverted_index[ordered_indices[:-1, np.newaxis], ind_X] = ordered_indices[1:]
552
+ ranks = (
553
+ inverted_index[ordered_indices[:-1, np.newaxis], ind_X_embedded] - n_neighbors
554
+ )
555
+ t = np.sum(ranks[ranks > 0])
556
+ t = 1.0 - t * (
557
+ 2.0 / (n_samples * n_neighbors * (2.0 * n_samples - 3.0 * n_neighbors - 1.0))
558
+ )
559
+ return t
560
+
561
+
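To complement the doctest above, a brief sketch showing that the score discriminates: permuting the rows of an embedding destroys the neighborhoods the metric rewards (exact values depend on the permutation):

    import numpy as np
    from sklearn.datasets import make_blobs
    from sklearn.decomposition import PCA
    from sklearn.manifold import trustworthiness

    X, _ = make_blobs(n_samples=100, n_features=10, centers=3, random_state=42)
    good = PCA(n_components=2).fit_transform(X)
    bad = good[np.random.RandomState(0).permutation(len(good))]
    print(trustworthiness(X, good, n_neighbors=5))  # high; the doctest above prints 0.92
    print(trustworthiness(X, bad, n_neighbors=5))   # noticeably lower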
562
+ class TSNE(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator):
563
+ """T-distributed Stochastic Neighbor Embedding.
564
+
565
+ t-SNE [1] is a tool to visualize high-dimensional data. It converts
566
+ similarities between data points to joint probabilities and tries
567
+ to minimize the Kullback-Leibler divergence between the joint
568
+ probabilities of the low-dimensional embedding and the
569
+ high-dimensional data. t-SNE has a cost function that is not convex,
570
+ i.e. with different initializations we can get different results.
571
+
572
+ It is highly recommended to use another dimensionality reduction
573
+ method (e.g. PCA for dense data or TruncatedSVD for sparse data)
574
+ to reduce the number of dimensions to a reasonable amount (e.g. 50)
575
+ if the number of features is very high. This will suppress some
576
+ noise and speed up the computation of pairwise distances between
577
+ samples. For more tips see Laurens van der Maaten's FAQ [2].
578
+
579
+ Read more in the :ref:`User Guide <t_sne>`.
580
+
581
+ Parameters
582
+ ----------
583
+ n_components : int, default=2
584
+ Dimension of the embedded space.
585
+
586
+ perplexity : float, default=30.0
587
+ The perplexity is related to the number of nearest neighbors that
588
+ is used in other manifold learning algorithms. Larger datasets
589
+ usually require a larger perplexity. Consider selecting a value
590
+ between 5 and 50. Different values can yield significantly
591
+ different results. The perplexity must be less than the number
592
+ of samples.
593
+
594
+ early_exaggeration : float, default=12.0
595
+ Controls how tight natural clusters in the original space are in
596
+ the embedded space and how much space will be between them. For
597
+ larger values, the space between natural clusters will be larger
598
+ in the embedded space. Again, the choice of this parameter is not
599
+ very critical. If the cost function increases during initial
600
+ optimization, the early exaggeration factor or the learning rate
601
+ might be too high.
602
+
603
+ learning_rate : float or "auto", default="auto"
604
+ The learning rate for t-SNE is usually in the range [10.0, 1000.0]. If
605
+ the learning rate is too high, the data may look like a 'ball' with any
606
+ point approximately equidistant from its nearest neighbours. If the
607
+ learning rate is too low, most points may look compressed in a dense
608
+ cloud with few outliers. If the cost function gets stuck in a bad local
609
+ minimum, increasing the learning rate may help.
610
+ Note that many other t-SNE implementations (bhtsne, FIt-SNE, openTSNE,
611
+ etc.) use a definition of learning_rate that is 4 times smaller than
612
+ ours. So our learning_rate=200 corresponds to learning_rate=800 in
613
+ those other implementations. The 'auto' option sets the learning_rate
614
+ to `max(N / early_exaggeration / 4, 50)` where N is the sample size,
615
+ following [4] and [5].
616
+
617
+ .. versionchanged:: 1.2
618
+ The default value changed to `"auto"`.
619
+
620
+ n_iter : int, default=1000
621
+ Maximum number of iterations for the optimization. Should be at
622
+ least 250.
623
+
624
+ n_iter_without_progress : int, default=300
625
+ Maximum number of iterations without progress before we abort the
626
+ optimization, used after 250 initial iterations with early
627
+ exaggeration. Note that progress is only checked every 50 iterations so
628
+ this value is rounded to the next multiple of 50.
629
+
630
+ .. versionadded:: 0.17
631
+ parameter *n_iter_without_progress* to control stopping criteria.
632
+
633
+ min_grad_norm : float, default=1e-7
634
+ If the gradient norm is below this threshold, the optimization will
635
+ be stopped.
636
+
637
+ metric : str or callable, default='euclidean'
638
+ The metric to use when calculating distance between instances in a
639
+ feature array. If metric is a string, it must be one of the options
640
+ allowed by scipy.spatial.distance.pdist for its metric parameter, or
641
+ a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
642
+ If metric is "precomputed", X is assumed to be a distance matrix.
643
+ Alternatively, if metric is a callable function, it is called on each
644
+ pair of instances (rows) and the resulting value recorded. The callable
645
+ should take two arrays from X as input and return a value indicating
646
+ the distance between them. The default is "euclidean" which is
647
+ interpreted as squared euclidean distance.
648
+
649
+ metric_params : dict, default=None
650
+ Additional keyword arguments for the metric function.
651
+
652
+ .. versionadded:: 1.1
653
+
654
+ init : {"random", "pca"} or ndarray of shape (n_samples, n_components), \
655
+ default="pca"
656
+ Initialization of embedding.
657
+ PCA initialization cannot be used with precomputed distances and is
658
+ usually more globally stable than random initialization.
659
+
660
+ .. versionchanged:: 1.2
661
+ The default value changed to `"pca"`.
662
+
663
+ verbose : int, default=0
664
+ Verbosity level.
665
+
666
+ random_state : int, RandomState instance or None, default=None
667
+ Determines the random number generator. Pass an int for reproducible
668
+ results across multiple function calls. Note that different
669
+ initializations might result in different local minima of the cost
670
+ function. See :term:`Glossary <random_state>`.
671
+
672
+ method : {'barnes_hut', 'exact'}, default='barnes_hut'
673
+ By default the gradient calculation algorithm uses Barnes-Hut
674
+ approximation running in O(NlogN) time. method='exact'
675
+ will run on the slower, but exact, algorithm in O(N^2) time. The
676
+ exact algorithm should be used when nearest-neighbor errors need
677
+ to be better than 3%. However, the exact method cannot scale to
678
+ millions of examples.
679
+
680
+ .. versionadded:: 0.17
681
+ Approximate optimization *method* via the Barnes-Hut approximation.
682
+
683
+ angle : float, default=0.5
684
+ Only used if method='barnes_hut'
685
+ This is the trade-off between speed and accuracy for Barnes-Hut T-SNE.
686
+ 'angle' is the angular size (referred to as theta in [3]) of a distant
687
+ node as measured from a point. If this size is below 'angle' then it is
688
+ used as a summary node of all points contained within it.
689
+ This method is not very sensitive to changes in this parameter
690
+ in the range of 0.2 - 0.8. Angle less than 0.2 has quickly increasing
691
+ computation time and angle greater than 0.8 has quickly increasing error.
692
+
693
+ n_jobs : int, default=None
694
+ The number of parallel jobs to run for neighbors search. This parameter
695
+ has no impact when ``metric="precomputed"`` or
696
+ (``metric="euclidean"`` and ``method="exact"``).
697
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
698
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
699
+ for more details.
700
+
701
+ .. versionadded:: 0.22
702
+
703
+ Attributes
704
+ ----------
705
+ embedding_ : array-like of shape (n_samples, n_components)
706
+ Stores the embedding vectors.
707
+
708
+ kl_divergence_ : float
709
+ Kullback-Leibler divergence after optimization.
710
+
711
+ n_features_in_ : int
712
+ Number of features seen during :term:`fit`.
713
+
714
+ .. versionadded:: 0.24
715
+
716
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
717
+ Names of features seen during :term:`fit`. Defined only when `X`
718
+ has feature names that are all strings.
719
+
720
+ .. versionadded:: 1.0
721
+
722
+ learning_rate_ : float
723
+ Effective learning rate.
724
+
725
+ .. versionadded:: 1.2
726
+
727
+ n_iter_ : int
728
+ Number of iterations run.
729
+
730
+ See Also
731
+ --------
732
+ sklearn.decomposition.PCA : Principal component analysis that is a linear
733
+ dimensionality reduction method.
734
+ sklearn.decomposition.KernelPCA : Non-linear dimensionality reduction using
735
+ kernels and PCA.
736
+ MDS : Manifold learning using multidimensional scaling.
737
+ Isomap : Manifold learning based on Isometric Mapping.
738
+ LocallyLinearEmbedding : Manifold learning using Locally Linear Embedding.
739
+ SpectralEmbedding : Spectral embedding for non-linear dimensionality.
740
+
741
+ Notes
742
+ -----
743
+ For an example of using :class:`~sklearn.manifold.TSNE` in combination with
744
+ :class:`~sklearn.neighbors.KNeighborsTransformer` see
745
+ :ref:`sphx_glr_auto_examples_neighbors_approximate_nearest_neighbors.py`.
746
+
747
+ References
748
+ ----------
749
+
750
+ [1] van der Maaten, L.J.P.; Hinton, G.E. Visualizing High-Dimensional Data
751
+ Using t-SNE. Journal of Machine Learning Research 9:2579-2605, 2008.
752
+
753
+ [2] van der Maaten, L.J.P. t-Distributed Stochastic Neighbor Embedding
754
+ https://lvdmaaten.github.io/tsne/
755
+
756
+ [3] L.J.P. van der Maaten. Accelerating t-SNE using Tree-Based Algorithms.
757
+ Journal of Machine Learning Research 15(Oct):3221-3245, 2014.
758
+ https://lvdmaaten.github.io/publications/papers/JMLR_2014.pdf
759
+
760
+ [4] Belkina, A. C., Ciccolella, C. O., Anno, R., Halpert, R., Spidlen, J.,
761
+ & Snyder-Cappione, J. E. (2019). Automated optimized parameters for
762
+ T-distributed stochastic neighbor embedding improve visualization
763
+ and analysis of large datasets. Nature Communications, 10(1), 1-12.
764
+
765
+ [5] Kobak, D., & Berens, P. (2019). The art of using t-SNE for single-cell
766
+ transcriptomics. Nature Communications, 10(1), 1-14.
767
+
768
+ Examples
769
+ --------
770
+ >>> import numpy as np
771
+ >>> from sklearn.manifold import TSNE
772
+ >>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
773
+ >>> X_embedded = TSNE(n_components=2, learning_rate='auto',
774
+ ... init='random', perplexity=3).fit_transform(X)
775
+ >>> X_embedded.shape
776
+ (4, 2)
777
+ """
778
+
779
+ _parameter_constraints: dict = {
780
+ "n_components": [Interval(Integral, 1, None, closed="left")],
781
+ "perplexity": [Interval(Real, 0, None, closed="neither")],
782
+ "early_exaggeration": [Interval(Real, 1, None, closed="left")],
783
+ "learning_rate": [
784
+ StrOptions({"auto"}),
785
+ Interval(Real, 0, None, closed="neither"),
786
+ ],
787
+ "n_iter": [Interval(Integral, 250, None, closed="left")],
788
+ "n_iter_without_progress": [Interval(Integral, -1, None, closed="left")],
789
+ "min_grad_norm": [Interval(Real, 0, None, closed="left")],
790
+ "metric": [StrOptions(set(_VALID_METRICS) | {"precomputed"}), callable],
791
+ "metric_params": [dict, None],
792
+ "init": [
793
+ StrOptions({"pca", "random"}),
794
+ np.ndarray,
795
+ ],
796
+ "verbose": ["verbose"],
797
+ "random_state": ["random_state"],
798
+ "method": [StrOptions({"barnes_hut", "exact"})],
799
+ "angle": [Interval(Real, 0, 1, closed="both")],
800
+ "n_jobs": [None, Integral],
801
+ }
802
+
803
+ # Control the number of exploration iterations with early_exaggeration on
804
+ _EXPLORATION_N_ITER = 250
805
+
806
+ # Control the number of iterations between progress checks
807
+ _N_ITER_CHECK = 50
808
+
809
+ def __init__(
810
+ self,
811
+ n_components=2,
812
+ *,
813
+ perplexity=30.0,
814
+ early_exaggeration=12.0,
815
+ learning_rate="auto",
816
+ n_iter=1000,
817
+ n_iter_without_progress=300,
818
+ min_grad_norm=1e-7,
819
+ metric="euclidean",
820
+ metric_params=None,
821
+ init="pca",
822
+ verbose=0,
823
+ random_state=None,
824
+ method="barnes_hut",
825
+ angle=0.5,
826
+ n_jobs=None,
827
+ ):
828
+ self.n_components = n_components
829
+ self.perplexity = perplexity
830
+ self.early_exaggeration = early_exaggeration
831
+ self.learning_rate = learning_rate
832
+ self.n_iter = n_iter
833
+ self.n_iter_without_progress = n_iter_without_progress
834
+ self.min_grad_norm = min_grad_norm
835
+ self.metric = metric
836
+ self.metric_params = metric_params
837
+ self.init = init
838
+ self.verbose = verbose
839
+ self.random_state = random_state
840
+ self.method = method
841
+ self.angle = angle
842
+ self.n_jobs = n_jobs
843
+
844
+ def _check_params_vs_input(self, X):
845
+ if self.perplexity >= X.shape[0]:
846
+ raise ValueError("perplexity must be less than n_samples")
847
+
848
+ def _fit(self, X, skip_num_points=0):
849
+ """Private function to fit the model using X as training data."""
850
+
851
+ if isinstance(self.init, str) and self.init == "pca" and issparse(X):
852
+ raise TypeError(
853
+ "PCA initialization is currently not supported "
854
+ "with the sparse input matrix. Use "
855
+ 'init="random" instead.'
856
+ )
857
+
858
+ if self.learning_rate == "auto":
859
+ # See issue #18018
860
+ self.learning_rate_ = X.shape[0] / self.early_exaggeration / 4
861
+ self.learning_rate_ = np.maximum(self.learning_rate_, 50)
862
+ else:
863
+ self.learning_rate_ = self.learning_rate
864
+
865
+ if self.method == "barnes_hut":
866
+ X = self._validate_data(
867
+ X,
868
+ accept_sparse=["csr"],
869
+ ensure_min_samples=2,
870
+ dtype=[np.float32, np.float64],
871
+ )
872
+ else:
873
+ X = self._validate_data(
874
+ X, accept_sparse=["csr", "csc", "coo"], dtype=[np.float32, np.float64]
875
+ )
876
+ if self.metric == "precomputed":
877
+ if isinstance(self.init, str) and self.init == "pca":
878
+ raise ValueError(
879
+ 'The parameter init="pca" cannot be used with metric="precomputed".'
880
+ )
881
+ if X.shape[0] != X.shape[1]:
882
+ raise ValueError("X should be a square distance matrix")
883
+
884
+ check_non_negative(
885
+ X,
886
+ (
887
+ "TSNE.fit(). With metric='precomputed', X "
888
+ "should contain positive distances."
889
+ ),
890
+ )
891
+
892
+ if self.method == "exact" and issparse(X):
893
+ raise TypeError(
894
+ 'TSNE with method="exact" does not accept sparse '
895
+ 'precomputed distance matrix. Use method="barnes_hut" '
896
+ "or provide the dense distance matrix."
897
+ )
898
+
899
+ if self.method == "barnes_hut" and self.n_components > 3:
900
+ raise ValueError(
901
+ "'n_components' should be inferior to 4 for the "
902
+ "barnes_hut algorithm as it relies on "
903
+ "quad-tree or oct-tree."
904
+ )
905
+ random_state = check_random_state(self.random_state)
906
+
907
+ n_samples = X.shape[0]
908
+
909
+ neighbors_nn = None
910
+ if self.method == "exact":
911
+ # Retrieve the distance matrix, either using the precomputed one or
912
+ # computing it.
913
+ if self.metric == "precomputed":
914
+ distances = X
915
+ else:
916
+ if self.verbose:
917
+ print("[t-SNE] Computing pairwise distances...")
918
+
919
+ if self.metric == "euclidean":
920
+ # Euclidean is squared here, rather than using **= 2,
921
+ # because euclidean_distances already calculates
922
+ # squared distances, and returns np.sqrt(dist) for
923
+ # squared=False.
924
+ # Also, Euclidean is slower for n_jobs>1, so don't set here
925
+ distances = pairwise_distances(X, metric=self.metric, squared=True)
926
+ else:
927
+ metric_params_ = self.metric_params or {}
928
+ distances = pairwise_distances(
929
+ X, metric=self.metric, n_jobs=self.n_jobs, **metric_params_
930
+ )
931
+
932
+ if np.any(distances < 0):
933
+ raise ValueError(
934
+ "All distances should be positive, the metric given is not correct"
935
+ )
936
+
937
+ if self.metric != "euclidean":
938
+ distances **= 2
939
+
940
+ # compute the joint probability distribution for the input space
941
+ P = _joint_probabilities(distances, self.perplexity, self.verbose)
942
+ assert np.all(np.isfinite(P)), "All probabilities should be finite"
943
+ assert np.all(P >= 0), "All probabilities should be non-negative"
944
+ assert np.all(
945
+ P <= 1
946
+ ), "All probabilities should be less or then equal to one"
947
+
948
+ else:
949
+ # Compute the number of nearest neighbors to find.
950
+ # LvdM uses 3 * perplexity as the number of neighbors.
951
+ # In the event that we have very small # of points
952
+ # set the neighbors to n - 1.
953
+ n_neighbors = min(n_samples - 1, int(3.0 * self.perplexity + 1))
954
+
955
+ if self.verbose:
956
+ print("[t-SNE] Computing {} nearest neighbors...".format(n_neighbors))
957
+
958
+ # Find the nearest neighbors for every point
959
+ knn = NearestNeighbors(
960
+ algorithm="auto",
961
+ n_jobs=self.n_jobs,
962
+ n_neighbors=n_neighbors,
963
+ metric=self.metric,
964
+ metric_params=self.metric_params,
965
+ )
966
+ t0 = time()
967
+ knn.fit(X)
968
+ duration = time() - t0
969
+ if self.verbose:
970
+ print(
971
+ "[t-SNE] Indexed {} samples in {:.3f}s...".format(
972
+ n_samples, duration
973
+ )
974
+ )
975
+
976
+ t0 = time()
977
+ distances_nn = knn.kneighbors_graph(mode="distance")
978
+ duration = time() - t0
979
+ if self.verbose:
980
+ print(
981
+ "[t-SNE] Computed neighbors for {} samples in {:.3f}s...".format(
982
+ n_samples, duration
983
+ )
984
+ )
985
+
986
+ # Free the memory used by the ball_tree
987
+ del knn
988
+
989
+ # knn returns the euclidean distance but we need it squared
990
+ # to be consistent with the 'exact' method. Note that the
991
+ # method was derived using the euclidean metric in the
992
+ # input space. We are unsure of the implications of using a different
993
+ # metric.
994
+ distances_nn.data **= 2
995
+
996
+ # compute the joint probability distribution for the input space
997
+ P = _joint_probabilities_nn(distances_nn, self.perplexity, self.verbose)
998
+
999
+ if isinstance(self.init, np.ndarray):
1000
+ X_embedded = self.init
1001
+ elif self.init == "pca":
1002
+ pca = PCA(
1003
+ n_components=self.n_components,
1004
+ svd_solver="randomized",
1005
+ random_state=random_state,
1006
+ )
1007
+ # Always output a numpy array, no matter what is configured globally
1008
+ pca.set_output(transform="default")
1009
+ X_embedded = pca.fit_transform(X).astype(np.float32, copy=False)
1010
+ # The PCA output is rescaled so that PC1 has standard deviation 1e-4, which is
1011
+ # the default value for random initialization. See issue #18018.
1012
+ X_embedded = X_embedded / np.std(X_embedded[:, 0]) * 1e-4
1013
+ elif self.init == "random":
1014
+ # The embedding is initialized with iid samples from Gaussians with
1015
+ # standard deviation 1e-4.
1016
+ X_embedded = 1e-4 * random_state.standard_normal(
1017
+ size=(n_samples, self.n_components)
1018
+ ).astype(np.float32)
1019
+
1020
+ # Degrees of freedom of the Student's t-distribution. The suggestion
1021
+ # degrees_of_freedom = n_components - 1 comes from
1022
+ # "Learning a Parametric Embedding by Preserving Local Structure"
1023
+ # Laurens van der Maaten, 2009.
1024
+ degrees_of_freedom = max(self.n_components - 1, 1)
1025
+
1026
+ return self._tsne(
1027
+ P,
1028
+ degrees_of_freedom,
1029
+ n_samples,
1030
+ X_embedded=X_embedded,
1031
+ neighbors=neighbors_nn,
1032
+ skip_num_points=skip_num_points,
1033
+ )
1034
+
1035
+ def _tsne(
1036
+ self,
1037
+ P,
1038
+ degrees_of_freedom,
1039
+ n_samples,
1040
+ X_embedded,
1041
+ neighbors=None,
1042
+ skip_num_points=0,
1043
+ ):
1044
+ """Runs t-SNE."""
1045
+ # t-SNE minimizes the Kullback-Leibler divergence of the Gaussians P
1046
+ # and the Student's t-distributions Q. The optimization algorithm that
1047
+ # we use is batch gradient descent with two stages:
1048
+ # * initial optimization with early exaggeration and momentum at 0.5
1049
+ # * final optimization with momentum at 0.8
1050
+ params = X_embedded.ravel()
1051
+
1052
+ opt_args = {
1053
+ "it": 0,
1054
+ "n_iter_check": self._N_ITER_CHECK,
1055
+ "min_grad_norm": self.min_grad_norm,
1056
+ "learning_rate": self.learning_rate_,
1057
+ "verbose": self.verbose,
1058
+ "kwargs": dict(skip_num_points=skip_num_points),
1059
+ "args": [P, degrees_of_freedom, n_samples, self.n_components],
1060
+ "n_iter_without_progress": self._EXPLORATION_N_ITER,
1061
+ "n_iter": self._EXPLORATION_N_ITER,
1062
+ "momentum": 0.5,
1063
+ }
1064
+ if self.method == "barnes_hut":
1065
+ obj_func = _kl_divergence_bh
1066
+ opt_args["kwargs"]["angle"] = self.angle
1067
+ # Repeat verbose argument for _kl_divergence_bh
1068
+ opt_args["kwargs"]["verbose"] = self.verbose
1069
+ # Get the number of threads for gradient computation here to
1070
+ # avoid recomputing it at each iteration.
1071
+ opt_args["kwargs"]["num_threads"] = _openmp_effective_n_threads()
1072
+ else:
1073
+ obj_func = _kl_divergence
1074
+
1075
+ # Learning schedule (part 1): do 250 iterations with lower momentum but
1076
+ # higher learning rate controlled via the early exaggeration parameter
1077
+ P *= self.early_exaggeration
1078
+ params, kl_divergence, it = _gradient_descent(obj_func, params, **opt_args)
1079
+ if self.verbose:
1080
+ print(
1081
+ "[t-SNE] KL divergence after %d iterations with early exaggeration: %f"
1082
+ % (it + 1, kl_divergence)
1083
+ )
1084
+
1085
+ # Learning schedule (part 2): disable early exaggeration and finish
1086
+ # optimization with a higher momentum at 0.8
1087
+ P /= self.early_exaggeration
1088
+ remaining = self.n_iter - self._EXPLORATION_N_ITER
1089
+ if it < self._EXPLORATION_N_ITER or remaining > 0:
1090
+ opt_args["n_iter"] = self.n_iter
1091
+ opt_args["it"] = it + 1
1092
+ opt_args["momentum"] = 0.8
1093
+ opt_args["n_iter_without_progress"] = self.n_iter_without_progress
1094
+ params, kl_divergence, it = _gradient_descent(obj_func, params, **opt_args)
1095
+
1096
+ # Save the final number of iterations
1097
+ self.n_iter_ = it
1098
+
1099
+ if self.verbose:
1100
+ print(
1101
+ "[t-SNE] KL divergence after %d iterations: %f"
1102
+ % (it + 1, kl_divergence)
1103
+ )
1104
+
1105
+ X_embedded = params.reshape(n_samples, self.n_components)
1106
+ self.kl_divergence_ = kl_divergence
1107
+
1108
+ return X_embedded
1109
+
1110
+ @_fit_context(
1111
+ # TSNE.metric is not validated yet
1112
+ prefer_skip_nested_validation=False
1113
+ )
1114
+ def fit_transform(self, X, y=None):
1115
+ """Fit X into an embedded space and return that transformed output.
1116
+
1117
+ Parameters
1118
+ ----------
1119
+ X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
1120
+ (n_samples, n_samples)
1121
+ If the metric is 'precomputed', X must be a square distance
1122
+ matrix. Otherwise it contains a sample per row. If the method
1123
+ is 'exact', X may be a sparse matrix of type 'csr', 'csc'
1124
+ or 'coo'. If the method is 'barnes_hut' and the metric is
1125
+ 'precomputed', X may be a precomputed sparse graph.
1126
+
1127
+ y : None
1128
+ Ignored.
1129
+
1130
+ Returns
1131
+ -------
1132
+ X_new : ndarray of shape (n_samples, n_components)
1133
+ Embedding of the training data in low-dimensional space.
1134
+ """
1135
+ self._check_params_vs_input(X)
1136
+ embedding = self._fit(X)
1137
+ self.embedding_ = embedding
1138
+ return self.embedding_
1139
+
1140
+ @_fit_context(
1141
+ # TSNE.metric is not validated yet
1142
+ prefer_skip_nested_validation=False
1143
+ )
1144
+ def fit(self, X, y=None):
1145
+ """Fit X into an embedded space.
1146
+
1147
+ Parameters
1148
+ ----------
1149
+ X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
1150
+ (n_samples, n_samples)
1151
+ If the metric is 'precomputed', X must be a square distance
1152
+ matrix. Otherwise it contains a sample per row. If the method
1153
+ is 'exact', X may be a sparse matrix of type 'csr', 'csc'
1154
+ or 'coo'. If the method is 'barnes_hut' and the metric is
1155
+ 'precomputed', X may be a precomputed sparse graph.
1156
+
1157
+ y : None
1158
+ Ignored.
1159
+
1160
+ Returns
1161
+ -------
1162
+ self : object
1163
+ Fitted estimator.
1164
+ """
1165
+ self.fit_transform(X)
1166
+ return self
1167
+
1168
+ @property
1169
+ def _n_features_out(self):
1170
+ """Number of transformed output features."""
1171
+ return self.embedding_.shape[1]
1172
+
1173
+ def _more_tags(self):
1174
+ return {"pairwise": self.metric == "precomputed"}
env-llmeval/lib/python3.10/site-packages/sklearn/manifold/_utils.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (225 kB).
 
env-llmeval/lib/python3.10/site-packages/sklearn/manifold/tests/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/sklearn/manifold/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (187 Bytes).
 
env-llmeval/lib/python3.10/site-packages/sklearn/manifold/tests/__pycache__/test_isomap.cpython-310.pyc ADDED
Binary file (9.23 kB).
 
env-llmeval/lib/python3.10/site-packages/sklearn/manifold/tests/__pycache__/test_locally_linear.cpython-310.pyc ADDED
Binary file (4.64 kB).
 
env-llmeval/lib/python3.10/site-packages/sklearn/manifold/tests/__pycache__/test_mds.cpython-310.pyc ADDED
Binary file (3.38 kB).
 
env-llmeval/lib/python3.10/site-packages/sklearn/manifold/tests/__pycache__/test_spectral_embedding.cpython-310.pyc ADDED
Binary file (12.7 kB).
 
env-llmeval/lib/python3.10/site-packages/sklearn/manifold/tests/__pycache__/test_t_sne.cpython-310.pyc ADDED
Binary file (27.4 kB).
 
env-llmeval/lib/python3.10/site-packages/sklearn/manifold/tests/test_isomap.py ADDED
@@ -0,0 +1,348 @@
1
+ import math
2
+ from itertools import product
3
+
4
+ import numpy as np
5
+ import pytest
6
+ from scipy.sparse import rand as sparse_rand
7
+
8
+ from sklearn import clone, datasets, manifold, neighbors, pipeline, preprocessing
9
+ from sklearn.datasets import make_blobs
10
+ from sklearn.metrics.pairwise import pairwise_distances
11
+ from sklearn.utils._testing import (
12
+ assert_allclose,
13
+ assert_allclose_dense_sparse,
14
+ assert_array_equal,
15
+ )
16
+ from sklearn.utils.fixes import CSR_CONTAINERS
17
+
18
+ eigen_solvers = ["auto", "dense", "arpack"]
19
+ path_methods = ["auto", "FW", "D"]
20
+
21
+
22
+ def create_sample_data(dtype, n_pts=25, add_noise=False):
23
+ # grid of equidistant points in 2D, n_components = n_dim
24
+ n_per_side = int(math.sqrt(n_pts))
25
+ X = np.array(list(product(range(n_per_side), repeat=2))).astype(dtype, copy=False)
26
+ if add_noise:
27
+ # add noise in a third dimension
28
+ rng = np.random.RandomState(0)
29
+ noise = 0.1 * rng.randn(n_pts, 1).astype(dtype, copy=False)
30
+ X = np.concatenate((X, noise), 1)
31
+ return X
32
+
33
+
34
+ @pytest.mark.parametrize("n_neighbors, radius", [(24, None), (None, np.inf)])
35
+ @pytest.mark.parametrize("eigen_solver", eigen_solvers)
36
+ @pytest.mark.parametrize("path_method", path_methods)
37
+ def test_isomap_simple_grid(
38
+ global_dtype, n_neighbors, radius, eigen_solver, path_method
39
+ ):
40
+ # Isomap should preserve distances when all neighbors are used
41
+ n_pts = 25
42
+ X = create_sample_data(global_dtype, n_pts=n_pts, add_noise=False)
43
+
44
+ # distances from each point to all others
45
+ if n_neighbors is not None:
46
+ G = neighbors.kneighbors_graph(X, n_neighbors, mode="distance")
47
+ else:
48
+ G = neighbors.radius_neighbors_graph(X, radius, mode="distance")
49
+
50
+ clf = manifold.Isomap(
51
+ n_neighbors=n_neighbors,
52
+ radius=radius,
53
+ n_components=2,
54
+ eigen_solver=eigen_solver,
55
+ path_method=path_method,
56
+ )
57
+ clf.fit(X)
58
+
59
+ if n_neighbors is not None:
60
+ G_iso = neighbors.kneighbors_graph(clf.embedding_, n_neighbors, mode="distance")
61
+ else:
62
+ G_iso = neighbors.radius_neighbors_graph(
63
+ clf.embedding_, radius, mode="distance"
64
+ )
65
+ atol = 1e-5 if global_dtype == np.float32 else 0
66
+ assert_allclose_dense_sparse(G, G_iso, atol=atol)
67
+
68
+
69
+ @pytest.mark.parametrize("n_neighbors, radius", [(24, None), (None, np.inf)])
70
+ @pytest.mark.parametrize("eigen_solver", eigen_solvers)
71
+ @pytest.mark.parametrize("path_method", path_methods)
72
+ def test_isomap_reconstruction_error(
73
+ global_dtype, n_neighbors, radius, eigen_solver, path_method
74
+ ):
75
+ if global_dtype is np.float32:
76
+ pytest.skip(
77
+ "Skipping test due to numerical instabilities on float32 data"
78
+ "from KernelCenterer used in the reconstruction_error method"
79
+ )
80
+
81
+ # Same setup as in test_isomap_simple_grid, with an added dimension
82
+ n_pts = 25
83
+ X = create_sample_data(global_dtype, n_pts=n_pts, add_noise=True)
84
+
85
+ # compute input kernel
86
+ if n_neighbors is not None:
87
+ G = neighbors.kneighbors_graph(X, n_neighbors, mode="distance").toarray()
88
+ else:
89
+ G = neighbors.radius_neighbors_graph(X, radius, mode="distance").toarray()
90
+ centerer = preprocessing.KernelCenterer()
91
+ K = centerer.fit_transform(-0.5 * G**2)
92
+
93
+ clf = manifold.Isomap(
94
+ n_neighbors=n_neighbors,
95
+ radius=radius,
96
+ n_components=2,
97
+ eigen_solver=eigen_solver,
98
+ path_method=path_method,
99
+ )
100
+ clf.fit(X)
101
+
102
+ # compute output kernel
103
+ if n_neighbors is not None:
104
+ G_iso = neighbors.kneighbors_graph(clf.embedding_, n_neighbors, mode="distance")
105
+ else:
106
+ G_iso = neighbors.radius_neighbors_graph(
107
+ clf.embedding_, radius, mode="distance"
108
+ )
109
+ G_iso = G_iso.toarray()
110
+ K_iso = centerer.fit_transform(-0.5 * G_iso**2)
111
+
112
+ # make sure error agrees
113
+ reconstruction_error = np.linalg.norm(K - K_iso) / n_pts
114
+ atol = 1e-5 if global_dtype == np.float32 else 0
115
+ assert_allclose(reconstruction_error, clf.reconstruction_error(), atol=atol)
116
+
117
+
118
+ @pytest.mark.parametrize("n_neighbors, radius", [(2, None), (None, 0.5)])
119
+ def test_transform(global_dtype, n_neighbors, radius):
120
+ n_samples = 200
121
+ n_components = 10
122
+ noise_scale = 0.01
123
+
124
+ # Create S-curve dataset
125
+ X, y = datasets.make_s_curve(n_samples, random_state=0)
126
+
127
+ X = X.astype(global_dtype, copy=False)
128
+
129
+ # Compute isomap embedding
130
+ iso = manifold.Isomap(
131
+ n_components=n_components, n_neighbors=n_neighbors, radius=radius
132
+ )
133
+ X_iso = iso.fit_transform(X)
134
+
135
+ # Re-embed a noisy version of the points
136
+ rng = np.random.RandomState(0)
137
+ noise = noise_scale * rng.randn(*X.shape)
138
+ X_iso2 = iso.transform(X + noise)
139
+
140
+ # Make sure the rms error on re-embedding is comparable to noise_scale
141
+ assert np.sqrt(np.mean((X_iso - X_iso2) ** 2)) < 2 * noise_scale
142
+
143
+
144
+ @pytest.mark.parametrize("n_neighbors, radius", [(2, None), (None, 10.0)])
145
+ def test_pipeline(n_neighbors, radius, global_dtype):
146
+ # check that Isomap works fine as a transformer in a Pipeline
147
+ # only checks that no error is raised.
148
+ # TODO check that it actually does something useful
149
+ X, y = datasets.make_blobs(random_state=0)
150
+ X = X.astype(global_dtype, copy=False)
151
+ clf = pipeline.Pipeline(
152
+ [
153
+ ("isomap", manifold.Isomap(n_neighbors=n_neighbors, radius=radius)),
154
+ ("clf", neighbors.KNeighborsClassifier()),
155
+ ]
156
+ )
157
+ clf.fit(X, y)
158
+ assert 0.9 < clf.score(X, y)
159
+
160
+
161
+ def test_pipeline_with_nearest_neighbors_transformer(global_dtype):
162
+ # Test chaining NearestNeighborsTransformer and Isomap with
163
+ # neighbors_algorithm='precomputed'
164
+ algorithm = "auto"
165
+ n_neighbors = 10
166
+
167
+ X, _ = datasets.make_blobs(random_state=0)
168
+ X2, _ = datasets.make_blobs(random_state=1)
169
+
170
+ X = X.astype(global_dtype, copy=False)
171
+ X2 = X2.astype(global_dtype, copy=False)
172
+
173
+ # compare the chained version and the compact version
174
+ est_chain = pipeline.make_pipeline(
175
+ neighbors.KNeighborsTransformer(
176
+ n_neighbors=n_neighbors, algorithm=algorithm, mode="distance"
177
+ ),
178
+ manifold.Isomap(n_neighbors=n_neighbors, metric="precomputed"),
179
+ )
180
+ est_compact = manifold.Isomap(
181
+ n_neighbors=n_neighbors, neighbors_algorithm=algorithm
182
+ )
183
+
184
+ Xt_chain = est_chain.fit_transform(X)
185
+ Xt_compact = est_compact.fit_transform(X)
186
+ assert_allclose(Xt_chain, Xt_compact)
187
+
188
+ Xt_chain = est_chain.transform(X2)
189
+ Xt_compact = est_compact.transform(X2)
190
+ assert_allclose(Xt_chain, Xt_compact)
191
+
192
+
193
+ @pytest.mark.parametrize(
194
+ "metric, p, is_euclidean",
195
+ [
196
+ ("euclidean", 2, True),
197
+ ("manhattan", 1, False),
198
+ ("minkowski", 1, False),
199
+ ("minkowski", 2, True),
200
+ (lambda x1, x2: np.sqrt(np.sum(x1**2 + x2**2)), 2, False),
201
+ ],
202
+ )
203
+ def test_different_metric(global_dtype, metric, p, is_euclidean):
204
+ # Isomap must handle various metric parameters correctly
205
+ # and must default to euclidean.
206
+ X, _ = datasets.make_blobs(random_state=0)
207
+ X = X.astype(global_dtype, copy=False)
208
+
209
+ reference = manifold.Isomap().fit_transform(X)
210
+ embedding = manifold.Isomap(metric=metric, p=p).fit_transform(X)
211
+
212
+ if is_euclidean:
213
+ assert_allclose(embedding, reference)
214
+ else:
215
+ with pytest.raises(AssertionError, match="Not equal to tolerance"):
216
+ assert_allclose(embedding, reference)
217
+
218
+
219
+ def test_isomap_clone_bug():
220
+ # regression test for bug reported in #6062
221
+ model = manifold.Isomap()
222
+ for n_neighbors in [10, 15, 20]:
223
+ model.set_params(n_neighbors=n_neighbors)
224
+ model.fit(np.random.rand(50, 2))
225
+ assert model.nbrs_.n_neighbors == n_neighbors
226
+
227
+
228
+ @pytest.mark.parametrize("eigen_solver", eigen_solvers)
229
+ @pytest.mark.parametrize("path_method", path_methods)
230
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
231
+ def test_sparse_input(
232
+ global_dtype, eigen_solver, path_method, global_random_seed, csr_container
233
+ ):
234
+ # TODO: compare results on dense and sparse data as proposed in:
235
+ # https://github.com/scikit-learn/scikit-learn/pull/23585#discussion_r968388186
236
+ X = csr_container(
237
+ sparse_rand(
238
+ 100,
239
+ 3,
240
+ density=0.1,
241
+ format="csr",
242
+ dtype=global_dtype,
243
+ random_state=global_random_seed,
244
+ )
245
+ )
246
+
247
+ iso_dense = manifold.Isomap(
248
+ n_components=2,
249
+ eigen_solver=eigen_solver,
250
+ path_method=path_method,
251
+ n_neighbors=8,
252
+ )
253
+ iso_sparse = clone(iso_dense)
254
+
255
+ X_trans_dense = iso_dense.fit_transform(X.toarray())
256
+ X_trans_sparse = iso_sparse.fit_transform(X)
257
+
258
+ assert_allclose(X_trans_sparse, X_trans_dense, rtol=1e-4, atol=1e-4)
259
+
260
+
261
+ def test_isomap_fit_precomputed_radius_graph(global_dtype):
262
+ # Isomap.fit_transform must yield a similar result when using
263
+ # a precomputed distance matrix.
264
+
265
+ X, y = datasets.make_s_curve(200, random_state=0)
266
+ X = X.astype(global_dtype, copy=False)
267
+ radius = 10
268
+
269
+ g = neighbors.radius_neighbors_graph(X, radius=radius, mode="distance")
270
+ isomap = manifold.Isomap(n_neighbors=None, radius=radius, metric="precomputed")
271
+ isomap.fit(g)
272
+ precomputed_result = isomap.embedding_
273
+
274
+ isomap = manifold.Isomap(n_neighbors=None, radius=radius, metric="minkowski")
275
+ result = isomap.fit_transform(X)
276
+ atol = 1e-5 if global_dtype == np.float32 else 0
277
+ assert_allclose(precomputed_result, result, atol=atol)
278
+
279
+
280
+ def test_isomap_fitted_attributes_dtype(global_dtype):
281
+ """Check that the fitted attributes are stored accordingly to the
282
+ data type of X."""
283
+ iso = manifold.Isomap(n_neighbors=2)
284
+
285
+ X = np.array([[1, 2], [3, 4], [5, 6]], dtype=global_dtype)
286
+
287
+ iso.fit(X)
288
+
289
+ assert iso.dist_matrix_.dtype == global_dtype
290
+ assert iso.embedding_.dtype == global_dtype
291
+
292
+
293
+ def test_isomap_dtype_equivalence():
294
+ """Check the equivalence of the results with 32 and 64 bits input."""
295
+ iso_32 = manifold.Isomap(n_neighbors=2)
296
+ X_32 = np.array([[1, 2], [3, 4], [5, 6]], dtype=np.float32)
297
+ iso_32.fit(X_32)
298
+
299
+ iso_64 = manifold.Isomap(n_neighbors=2)
300
+ X_64 = np.array([[1, 2], [3, 4], [5, 6]], dtype=np.float64)
301
+ iso_64.fit(X_64)
302
+
303
+ assert_allclose(iso_32.dist_matrix_, iso_64.dist_matrix_)
304
+
305
+
306
+ def test_isomap_raise_error_when_neighbor_and_radius_both_set():
307
+ # Isomap.fit_transform must raise a ValueError if
308
+ # both radius and n_neighbors are provided.
309
+
310
+ X, _ = datasets.load_digits(return_X_y=True)
311
+ isomap = manifold.Isomap(n_neighbors=3, radius=5.5)
312
+ msg = "Both n_neighbors and radius are provided"
313
+ with pytest.raises(ValueError, match=msg):
314
+ isomap.fit_transform(X)
315
+
316
+
317
+ def test_multiple_connected_components():
318
+ # Test that a warning is raised when the graph has multiple components
319
+ X = np.array([0, 1, 2, 5, 6, 7])[:, None]
320
+ with pytest.warns(UserWarning, match="number of connected components"):
321
+ manifold.Isomap(n_neighbors=2).fit(X)
322
+
323
+
324
+ def test_multiple_connected_components_metric_precomputed(global_dtype):
325
+ # Test that an error is raised when the graph has multiple components
326
+ # and when X is a precomputed neighbors graph.
327
+ X = np.array([0, 1, 2, 5, 6, 7])[:, None].astype(global_dtype, copy=False)
328
+
329
+ # works with a precomputed distance matrix (dense)
330
+ X_distances = pairwise_distances(X)
331
+ with pytest.warns(UserWarning, match="number of connected components"):
332
+ manifold.Isomap(n_neighbors=1, metric="precomputed").fit(X_distances)
333
+
334
+ # does not work with a precomputed neighbors graph (sparse)
335
+ X_graph = neighbors.kneighbors_graph(X, n_neighbors=2, mode="distance")
336
+ with pytest.raises(RuntimeError, match="number of connected components"):
337
+ manifold.Isomap(n_neighbors=1, metric="precomputed").fit(X_graph)
338
+
339
+
340
+ def test_get_feature_names_out():
341
+ """Check get_feature_names_out for Isomap."""
342
+ X, y = make_blobs(random_state=0, n_features=4)
343
+ n_components = 2
344
+
345
+ iso = manifold.Isomap(n_components=n_components)
346
+ iso.fit_transform(X)
347
+ names = iso.get_feature_names_out()
348
+ assert_array_equal([f"isomap{i}" for i in range(n_components)], names)