diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..910d196def457bc3be6ac9b3b8387aeed894c2dc
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_arpack.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_arpack.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6b7199ca53aa88edfb814188d365e09987a46b7f
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_arpack.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_array_api.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_array_api.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..218f09c1695bed9f03b832bd65b41012627fbb41
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_array_api.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_available_if.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_available_if.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6db1a3bfae54dee8c40b930dddf859bdf3a4b271
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_available_if.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_bunch.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_bunch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ccb8f1a289302cf294dc993bfecac91ef434cbbd
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_bunch.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_encode.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_encode.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d02858fd8558378a84e61f36e770b1cf116a8179
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_encode.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_estimator_html_repr.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_estimator_html_repr.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..58153277cd8ed49e2b0e4affd9326f5546d7411a
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_estimator_html_repr.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_joblib.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_joblib.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..79557570818acdc0c29c245cd84867ca6f7c3d54
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_joblib.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_mask.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_mask.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0a290bfc1c3afee08c1048d2e3e3287107e9409a
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_mask.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_metadata_requests.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_metadata_requests.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..659132359b3cad13b6f9b45930c08f3843d48f3f
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_metadata_requests.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_mocking.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_mocking.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5848d3c6077c2caaace9adf44abc1c7397159e97
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_mocking.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_param_validation.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_param_validation.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dbe210dacd08897a08de88196298a9703705853d
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_param_validation.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_plotting.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_plotting.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f78a2408ccddd64de5ef36785771865d9138acbb
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_plotting.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_pprint.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_pprint.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5e87461e5ae6cec1669303058578bcc161e25cd3
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_pprint.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_response.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_response.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d0fd89cbc7ada1a93abd82af85b7d1fb215c3168
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_response.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_set_output.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_set_output.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e9da5a2580a64937c4cf6edff3f853beac10595e
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_set_output.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_show_versions.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_show_versions.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3b9fd28ba701d511535bd3c4e69b14e91bdb6c71
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_show_versions.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_tags.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_tags.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..def6ef430bca40b40e9b50784f0219c60bfffb8c
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_tags.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_testing.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_testing.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0618ce95e304c16b6f0b0b13236aacba79e6987d
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_testing.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/class_weight.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/class_weight.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0f71563e548ad217f1cf7a2c610041abd7607bcb
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/class_weight.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/deprecation.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/deprecation.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4540b58fc2a3273074a45f3ea9b847d8b4351a6b
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/deprecation.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/discovery.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/discovery.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f6c138fb9072ee02ccf76af482313b760ed29a03
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/discovery.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/estimator_checks.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/estimator_checks.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d3615451e6b5784e049213176cc04ec0b0ffef55
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/estimator_checks.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/extmath.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/extmath.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..805e9c1019c9d9e8947b42e741786faabad25d99
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/extmath.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/fixes.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/fixes.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..38f6b2f62b5075197fcca12659e2d39b592d65f7
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/fixes.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/graph.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/graph.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f6910c3b40b13497af64b314791223992fb25215
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/graph.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/metadata_routing.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/metadata_routing.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..50aebd206cd25d1bd6ebaa79040d9dd6f021ffc7
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/metadata_routing.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/metaestimators.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/metaestimators.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5415048e62b13e5f37a4692c22434b48ac15d663
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/metaestimators.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/multiclass.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/multiclass.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e881454a65b2eb6519807d4bd65e857d900037d5
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/multiclass.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/optimize.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/optimize.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..13f55113d544c6c36d154ad9a698a05e6c6b86ec
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/optimize.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/parallel.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/parallel.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0bc21677b4347baa039c5ff093d3d71e32cc5222
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/parallel.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/random.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/random.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..97d7d7c37576d6629a92971a6ae23391d18abb40
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/random.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/sparsefuncs.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/sparsefuncs.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c4c072b01943c174a8769ec1b891ee3736b17dba
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/sparsefuncs.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/stats.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/stats.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6918fb045730c1a7e8467e63ae1436c155f011be
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/stats.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/validation.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/validation.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..801e3007108c0541fe87efdcdb942f0fbe274aa0
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/validation.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_cython_blas.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_cython_blas.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..aaf9a874f9b2557df52260c47ed062cd48472978
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_cython_blas.cpython-310-x86_64-linux-gnu.so differ
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_encode.py b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_encode.py
new file mode 100644
index 0000000000000000000000000000000000000000..b3bf1c2a317ece98f786cda08aae0ef3df2e3390
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_encode.py
@@ -0,0 +1,367 @@
+from collections import Counter
+from contextlib import suppress
+from typing import NamedTuple
+
+import numpy as np
+
+from . import is_scalar_nan
+
+
+def _unique(values, *, return_inverse=False, return_counts=False):
+ """Helper function to find unique values with support for python objects.
+
+ Uses pure python method for object dtype, and numpy method for
+ all other dtypes.
+
+ Parameters
+ ----------
+ values : ndarray
+ Values to check for unknowns.
+
+ return_inverse : bool, default=False
+ If True, also return the indices of the unique values.
+
+ return_counts : bool, default=False
+ If True, also return the number of times each unique item appears in
+ values.
+
+ Returns
+ -------
+ unique : ndarray
+ The sorted unique values.
+
+ unique_inverse : ndarray
+ The indices to reconstruct the original array from the unique array.
+ Only provided if `return_inverse` is True.
+
+ unique_counts : ndarray
+ The number of times each of the unique values comes up in the original
+ array. Only provided if `return_counts` is True.
+ """
+ if values.dtype == object:
+ return _unique_python(
+ values, return_inverse=return_inverse, return_counts=return_counts
+ )
+ # numerical
+ return _unique_np(
+ values, return_inverse=return_inverse, return_counts=return_counts
+ )
+
+
+def _unique_np(values, return_inverse=False, return_counts=False):
+ """Helper function to find unique values for numpy arrays that correctly
+ accounts for nans. See `_unique` documentation for details."""
+ uniques = np.unique(
+ values, return_inverse=return_inverse, return_counts=return_counts
+ )
+
+ inverse, counts = None, None
+
+ if return_counts:
+ *uniques, counts = uniques
+
+ if return_inverse:
+ *uniques, inverse = uniques
+
+ if return_counts or return_inverse:
+ uniques = uniques[0]
+
+ # np.unique will have duplicate missing values at the end of `uniques`
+    # here we clip the duplicate NaNs so that only one remains in `uniques`
+ if uniques.size and is_scalar_nan(uniques[-1]):
+ nan_idx = np.searchsorted(uniques, np.nan)
+ uniques = uniques[: nan_idx + 1]
+ if return_inverse:
+ inverse[inverse > nan_idx] = nan_idx
+
+ if return_counts:
+ counts[nan_idx] = np.sum(counts[nan_idx:])
+ counts = counts[: nan_idx + 1]
+
+ ret = (uniques,)
+
+ if return_inverse:
+ ret += (inverse,)
+
+ if return_counts:
+ ret += (counts,)
+
+ return ret[0] if len(ret) == 1 else ret
+
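+# Illustrative sketch (not upstream code): `_unique` dispatches on dtype and,
+# unlike a plain `np.unique`, collapses duplicate NaNs into a single trailing
+# NaN:
+#
+#     _unique(np.array([3.0, np.nan, 1.0, np.nan]))
+#     # -> array([ 1.,  3., nan])
+#     _unique(np.array(["b", "a", "b"], dtype=object))
+#     # -> array(['a', 'b'], dtype=object)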
+
+class MissingValues(NamedTuple):
+ """Data class for missing data information"""
+
+ nan: bool
+ none: bool
+
+ def to_list(self):
+ """Convert tuple to a list where None is always first."""
+ output = []
+ if self.none:
+ output.append(None)
+ if self.nan:
+ output.append(np.nan)
+ return output
+
+
+def _extract_missing(values):
+ """Extract missing values from `values`.
+
+ Parameters
+ ----------
+ values: set
+ Set of values to extract missing from.
+
+ Returns
+ -------
+ output: set
+ Set with missing values extracted.
+
+ missing_values: MissingValues
+ Object with missing value information.
+ """
+ missing_values_set = {
+ value for value in values if value is None or is_scalar_nan(value)
+ }
+
+ if not missing_values_set:
+ return values, MissingValues(nan=False, none=False)
+
+ if None in missing_values_set:
+ if len(missing_values_set) == 1:
+ output_missing_values = MissingValues(nan=False, none=True)
+ else:
+ # If there is more than one missing value, then it has to be
+ # float('nan') or np.nan
+ output_missing_values = MissingValues(nan=True, none=True)
+ else:
+ output_missing_values = MissingValues(nan=True, none=False)
+
+ # create set without the missing values
+ output = values - missing_values_set
+ return output, output_missing_values
+
+
+class _nandict(dict):
+ """Dictionary with support for nans."""
+
+ def __init__(self, mapping):
+ super().__init__(mapping)
+ for key, value in mapping.items():
+ if is_scalar_nan(key):
+ self.nan_value = value
+ break
+
+ def __missing__(self, key):
+ if hasattr(self, "nan_value") and is_scalar_nan(key):
+ return self.nan_value
+ raise KeyError(key)
+
+
+def _map_to_integer(values, uniques):
+ """Map values based on its position in uniques."""
+ table = _nandict({val: i for i, val in enumerate(uniques)})
+ return np.array([table[v] for v in values])
+
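+# Illustrative sketch (not upstream code): `_nandict` resolves NaN lookups via
+# `__missing__`, so any NaN key maps to the value recorded for NaN even though
+# `nan != nan`:
+#
+#     table = _nandict({"a": 0, np.nan: 1})
+#     table["a"]            # -> 0
+#     table[float("nan")]   # -> 1, resolved through __missing__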
+
+def _unique_python(values, *, return_inverse, return_counts):
+    # Only used in `_unique`; see its docstring for details
+ try:
+ uniques_set = set(values)
+ uniques_set, missing_values = _extract_missing(uniques_set)
+
+ uniques = sorted(uniques_set)
+ uniques.extend(missing_values.to_list())
+ uniques = np.array(uniques, dtype=values.dtype)
+ except TypeError:
+ types = sorted(t.__qualname__ for t in set(type(v) for v in values))
+ raise TypeError(
+ "Encoders require their input argument must be uniformly "
+ f"strings or numbers. Got {types}"
+ )
+ ret = (uniques,)
+
+ if return_inverse:
+ ret += (_map_to_integer(values, uniques),)
+
+ if return_counts:
+ ret += (_get_counts(values, uniques),)
+
+ return ret[0] if len(ret) == 1 else ret
+
+
+def _encode(values, *, uniques, check_unknown=True):
+ """Helper function to encode values into [0, n_uniques - 1].
+
+ Uses pure python method for object dtype, and numpy method for
+ all other dtypes.
+ The numpy method has the limitation that the `uniques` need to
+ be sorted. Importantly, this is not checked but assumed to already be
+ the case. The calling method needs to ensure this for all non-object
+ values.
+
+ Parameters
+ ----------
+ values : ndarray
+ Values to encode.
+ uniques : ndarray
+ The unique values in `values`. If the dtype is not object, then
+ `uniques` needs to be sorted.
+ check_unknown : bool, default=True
+        If True, check for values in `values` that are not in `uniques`
+ and raise an error. This is ignored for object dtype, and treated as
+ True in this case. This parameter is useful for
+ _BaseEncoder._transform() to avoid calling _check_unknown()
+ twice.
+
+ Returns
+ -------
+ encoded : ndarray
+ Encoded values
+ """
+ if values.dtype.kind in "OUS":
+ try:
+ return _map_to_integer(values, uniques)
+ except KeyError as e:
+ raise ValueError(f"y contains previously unseen labels: {str(e)}")
+ else:
+ if check_unknown:
+ diff = _check_unknown(values, uniques)
+ if diff:
+ raise ValueError(f"y contains previously unseen labels: {str(diff)}")
+ return np.searchsorted(uniques, values)
+
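+# Illustrative sketch (not upstream code): numeric values are encoded with
+# `np.searchsorted` against the sorted `uniques`, while object/string values
+# go through `_map_to_integer`:
+#
+#     _encode(np.array([10, 30, 10]), uniques=np.array([10, 20, 30]))
+#     # -> array([0, 2, 0])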
+
+def _check_unknown(values, known_values, return_mask=False):
+ """
+ Helper function to check for unknowns in values to be encoded.
+
+ Uses pure python method for object dtype, and numpy method for
+ all other dtypes.
+
+ Parameters
+ ----------
+ values : array
+ Values to check for unknowns.
+ known_values : array
+ Known values. Must be unique.
+ return_mask : bool, default=False
+ If True, return a mask of the same shape as `values` indicating
+ the valid values.
+
+ Returns
+ -------
+ diff : list
+        The unique values present in `values` and not in `known_values`.
+ valid_mask : boolean array
+ Additionally returned if ``return_mask=True``.
+
+ """
+ valid_mask = None
+
+ if values.dtype.kind in "OUS":
+ values_set = set(values)
+ values_set, missing_in_values = _extract_missing(values_set)
+
+ uniques_set = set(known_values)
+ uniques_set, missing_in_uniques = _extract_missing(uniques_set)
+ diff = values_set - uniques_set
+
+ nan_in_diff = missing_in_values.nan and not missing_in_uniques.nan
+ none_in_diff = missing_in_values.none and not missing_in_uniques.none
+
+ def is_valid(value):
+ return (
+ value in uniques_set
+ or missing_in_uniques.none
+ and value is None
+ or missing_in_uniques.nan
+ and is_scalar_nan(value)
+ )
+
+ if return_mask:
+ if diff or nan_in_diff or none_in_diff:
+ valid_mask = np.array([is_valid(value) for value in values])
+ else:
+ valid_mask = np.ones(len(values), dtype=bool)
+
+ diff = list(diff)
+ if none_in_diff:
+ diff.append(None)
+ if nan_in_diff:
+ diff.append(np.nan)
+ else:
+ unique_values = np.unique(values)
+ diff = np.setdiff1d(unique_values, known_values, assume_unique=True)
+ if return_mask:
+ if diff.size:
+ valid_mask = np.isin(values, known_values)
+ else:
+ valid_mask = np.ones(len(values), dtype=bool)
+
+ # check for nans in the known_values
+ if np.isnan(known_values).any():
+ diff_is_nan = np.isnan(diff)
+ if diff_is_nan.any():
+ # removes nan from valid_mask
+ if diff.size and return_mask:
+ is_nan = np.isnan(values)
+ valid_mask[is_nan] = 1
+
+ # remove nan from diff
+ diff = diff[~diff_is_nan]
+ diff = list(diff)
+
+ if return_mask:
+ return diff, valid_mask
+ return diff
+
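+# Illustrative sketch (not upstream code): `_check_unknown` reports values that
+# were never seen in `known_values`, optionally with a validity mask:
+#
+#     _check_unknown(np.array([1, 5]), known_values=np.array([1, 2, 3]))
+#     # -> [5]
+#     _check_unknown(np.array([1, 5]), np.array([1, 2, 3]), return_mask=True)
+#     # -> ([5], array([ True, False]))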
+
+class _NaNCounter(Counter):
+ """Counter with support for nan values."""
+
+ def __init__(self, items):
+ super().__init__(self._generate_items(items))
+
+ def _generate_items(self, items):
+ """Generate items without nans. Stores the nan counts separately."""
+ for item in items:
+ if not is_scalar_nan(item):
+ yield item
+ continue
+ if not hasattr(self, "nan_count"):
+ self.nan_count = 0
+ self.nan_count += 1
+
+ def __missing__(self, key):
+ if hasattr(self, "nan_count") and is_scalar_nan(key):
+ return self.nan_count
+ raise KeyError(key)
+
+
+def _get_counts(values, uniques):
+ """Get the count of each of the `uniques` in `values`.
+
+ The counts will use the order passed in by `uniques`. For non-object dtypes,
+ `uniques` is assumed to be sorted and `np.nan` is at the end.
+ """
+ if values.dtype.kind in "OU":
+ counter = _NaNCounter(values)
+ output = np.zeros(len(uniques), dtype=np.int64)
+ for i, item in enumerate(uniques):
+ with suppress(KeyError):
+ output[i] = counter[item]
+ return output
+
+ unique_values, counts = _unique_np(values, return_counts=True)
+
+    # Reorder unique_values based on input: `uniques`
+ uniques_in_values = np.isin(uniques, unique_values, assume_unique=True)
+ if np.isnan(unique_values[-1]) and np.isnan(uniques[-1]):
+ uniques_in_values[-1] = True
+
+ unique_valid_indices = np.searchsorted(unique_values, uniques[uniques_in_values])
+ output = np.zeros_like(uniques, dtype=np.int64)
+ output[uniques_in_values] = counts[unique_valid_indices]
+ return output
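+# Illustrative sketch (not upstream code): counts are reported in the order of
+# the `uniques` argument, not in sorted order:
+#
+#     _get_counts(np.array(["a", "b", "a"], dtype=object),
+#                 np.array(["b", "a"], dtype=object))
+#     # -> array([1, 2])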
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_estimator_html_repr.py b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_estimator_html_repr.py
new file mode 100644
index 0000000000000000000000000000000000000000..5e465234f516bd032b4214c85cf6e50d1573cd5e
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_estimator_html_repr.py
@@ -0,0 +1,496 @@
+import html
+import itertools
+from contextlib import closing
+from inspect import isclass
+from io import StringIO
+from pathlib import Path
+from string import Template
+
+from .. import __version__, config_context
+from .fixes import parse_version
+
+
+class _IDCounter:
+ """Generate sequential ids with a prefix."""
+
+ def __init__(self, prefix):
+ self.prefix = prefix
+ self.count = 0
+
+ def get_id(self):
+ self.count += 1
+ return f"{self.prefix}-{self.count}"
+
+
+def _get_css_style():
+ return Path(__file__).with_suffix(".css").read_text(encoding="utf-8")
+
+
+_CONTAINER_ID_COUNTER = _IDCounter("sk-container-id")
+_ESTIMATOR_ID_COUNTER = _IDCounter("sk-estimator-id")
+_CSS_STYLE = _get_css_style()
+
+
+class _VisualBlock:
+ """HTML Representation of Estimator
+
+ Parameters
+ ----------
+ kind : {'serial', 'parallel', 'single'}
+ kind of HTML block
+
+ estimators : list of estimators or `_VisualBlock`s or a single estimator
+ If kind != 'single', then `estimators` is a list of
+ estimators.
+ If kind == 'single', then `estimators` is a single estimator.
+
+ names : list of str, default=None
+ If kind != 'single', then `names` corresponds to estimators.
+ If kind == 'single', then `names` is a single string corresponding to
+ the single estimator.
+
+ name_details : list of str, str, or None, default=None
+ If kind != 'single', then `name_details` corresponds to `names`.
+ If kind == 'single', then `name_details` is a single string
+ corresponding to the single estimator.
+
+ dash_wrapped : bool, default=True
+ If true, wrapped HTML element will be wrapped with a dashed border.
+ Only active when kind != 'single'.
+ """
+
+ def __init__(
+ self, kind, estimators, *, names=None, name_details=None, dash_wrapped=True
+ ):
+ self.kind = kind
+ self.estimators = estimators
+ self.dash_wrapped = dash_wrapped
+
+ if self.kind in ("parallel", "serial"):
+ if names is None:
+ names = (None,) * len(estimators)
+ if name_details is None:
+ name_details = (None,) * len(estimators)
+
+ self.names = names
+ self.name_details = name_details
+
+ def _sk_visual_block_(self):
+ return self
+
+
+def _write_label_html(
+ out,
+ name,
+ name_details,
+ outer_class="sk-label-container",
+ inner_class="sk-label",
+ checked=False,
+ doc_link="",
+ is_fitted_css_class="",
+ is_fitted_icon="",
+):
+ """Write labeled html with or without a dropdown with named details.
+
+ Parameters
+ ----------
+ out : file-like object
+ The file to write the HTML representation to.
+ name : str
+ The label for the estimator. It corresponds either to the estimator class name
+ for a simple estimator or in the case of a `Pipeline` and `ColumnTransformer`,
+ it corresponds to the name of the step.
+ name_details : str
+ The details to show as content in the dropdown part of the toggleable label. It
+ can contain information such as non-default parameters or column information for
+ `ColumnTransformer`.
+ outer_class : {"sk-label-container", "sk-item"}, default="sk-label-container"
+ The CSS class for the outer container.
+ inner_class : {"sk-label", "sk-estimator"}, default="sk-label"
+ The CSS class for the inner container.
+ checked : bool, default=False
+ Whether the dropdown is folded or not. With a single estimator, we intend to
+ unfold the content.
+ doc_link : str, default=""
+ The link to the documentation for the estimator. If an empty string, no link is
+ added to the diagram. This can be generated for an estimator if it uses the
+ `_HTMLDocumentationLinkMixin`.
+ is_fitted_css_class : {"", "fitted"}
+ The CSS class to indicate whether or not the estimator is fitted. The
+ empty string means that the estimator is not fitted and "fitted" means that the
+ estimator is fitted.
+ is_fitted_icon : str, default=""
+ The HTML representation to show the fitted information in the diagram. An empty
+ string means that no information is shown.
+ """
+ # we need to add some padding to the left of the label to be sure it is centered
+ padding_label = " " if is_fitted_icon else "" # add padding for the "i" char
+
+ out.write(
+ f'
'
+ )
+ name = html.escape(name)
+
+ if name_details is not None:
+ name_details = html.escape(str(name_details))
+ label_class = (
+ f"sk-toggleable__label {is_fitted_css_class} sk-toggleable__label-arrow"
+ )
+
+ checked_str = "checked" if checked else ""
+ est_id = _ESTIMATOR_ID_COUNTER.get_id()
+
+ if doc_link:
+ doc_label = "Online documentation"
+ if name is not None:
+ doc_label = f"Documentation for {name}"
+ doc_link = (
+ f'?{doc_label}'
+ )
+ padding_label += " " # add additional padding for the "?" char
+
+ fmt_str = (
+ '
") # outer_class inner_class
+
+
+def _get_visual_block(estimator):
+ """Generate information about how to display an estimator."""
+ if hasattr(estimator, "_sk_visual_block_"):
+ try:
+ return estimator._sk_visual_block_()
+ except Exception:
+ return _VisualBlock(
+ "single",
+ estimator,
+ names=estimator.__class__.__name__,
+ name_details=str(estimator),
+ )
+
+ if isinstance(estimator, str):
+ return _VisualBlock(
+ "single", estimator, names=estimator, name_details=estimator
+ )
+ elif estimator is None:
+ return _VisualBlock("single", estimator, names="None", name_details="None")
+
+ # check if estimator looks like a meta estimator (wraps estimators)
+ if hasattr(estimator, "get_params") and not isclass(estimator):
+ estimators = [
+ (key, est)
+ for key, est in estimator.get_params(deep=False).items()
+ if hasattr(est, "get_params") and hasattr(est, "fit") and not isclass(est)
+ ]
+ if estimators:
+ return _VisualBlock(
+ "parallel",
+ [est for _, est in estimators],
+ names=[f"{key}: {est.__class__.__name__}" for key, est in estimators],
+ name_details=[str(est) for _, est in estimators],
+ )
+
+ return _VisualBlock(
+ "single",
+ estimator,
+ names=estimator.__class__.__name__,
+ name_details=str(estimator),
+ )
+
+
+def _write_estimator_html(
+ out,
+ estimator,
+ estimator_label,
+ estimator_label_details,
+ is_fitted_css_class,
+ is_fitted_icon="",
+ first_call=False,
+):
+ """Write estimator to html in serial, parallel, or by itself (single).
+
+ For multiple estimators, this function is called recursively.
+
+ Parameters
+ ----------
+ out : file-like object
+ The file to write the HTML representation to.
+ estimator : estimator object
+ The estimator to visualize.
+ estimator_label : str
+ The label for the estimator. It corresponds either to the estimator class name
+ for simple estimator or in the case of `Pipeline` and `ColumnTransformer`, it
+ corresponds to the name of the step.
+ estimator_label_details : str
+ The details to show as content in the dropdown part of the toggleable label.
+        It can contain information such as non-default parameters or column information for
+ `ColumnTransformer`.
+ is_fitted_css_class : {"", "fitted"}
+        The CSS class to indicate whether or not the estimator is fitted. The
+ empty string means that the estimator is not fitted and "fitted" means that the
+ estimator is fitted.
+ is_fitted_icon : str, default=""
+ The HTML representation to show the fitted information in the diagram. An empty
+ string means that no information is shown. If the estimator to be shown is not
+ the first estimator (i.e. `first_call=False`), `is_fitted_icon` is always an
+ empty string.
+ first_call : bool, default=False
+ Whether this is the first time this function is called.
+ """
+ if first_call:
+ est_block = _get_visual_block(estimator)
+ else:
+ is_fitted_icon = ""
+ with config_context(print_changed_only=True):
+ est_block = _get_visual_block(estimator)
+ # `estimator` can also be an instance of `_VisualBlock`
+ if hasattr(estimator, "_get_doc_link"):
+ doc_link = estimator._get_doc_link()
+ else:
+ doc_link = ""
+ if est_block.kind in ("serial", "parallel"):
+ dashed_wrapped = first_call or est_block.dash_wrapped
+ dash_cls = " sk-dashed-wrapped" if dashed_wrapped else ""
+ out.write(f'
")
+
+ html_output = out.getvalue()
+ return html_output
+
+
+class _HTMLDocumentationLinkMixin:
+ """Mixin class allowing to generate a link to the API documentation.
+
+ This mixin relies on three attributes:
+ - `_doc_link_module`: it corresponds to the root module (e.g. `sklearn`). Using this
+ mixin, the default value is `sklearn`.
+ - `_doc_link_template`: it corresponds to the template used to generate the
+ link to the API documentation. Using this mixin, the default value is
+ `"https://scikit-learn.org/{version_url}/modules/generated/
+ {estimator_module}.{estimator_name}.html"`.
+ - `_doc_link_url_param_generator`: it corresponds to a function that generates the
+ parameters to be used in the template when the estimator module and name are not
+ sufficient.
+
+ The method :meth:`_get_doc_link` generates the link to the API documentation for a
+ given estimator.
+
+    This provides all the state needed by
+    :func:`sklearn.utils.estimator_html_repr` to generate a link to the API
+    documentation for the estimator HTML diagram.
+
+ Examples
+ --------
+    If the default values for `_doc_link_module` and `_doc_link_template` are not suitable,
+ then you can override them:
+ >>> from sklearn.base import BaseEstimator
+ >>> estimator = BaseEstimator()
+ >>> estimator._doc_link_template = "https://website.com/{single_param}.html"
+ >>> def url_param_generator(estimator):
+ ... return {"single_param": estimator.__class__.__name__}
+ >>> estimator._doc_link_url_param_generator = url_param_generator
+ >>> estimator._get_doc_link()
+ 'https://website.com/BaseEstimator.html'
+ """
+
+ _doc_link_module = "sklearn"
+ _doc_link_url_param_generator = None
+
+ @property
+ def _doc_link_template(self):
+ sklearn_version = parse_version(__version__)
+ if sklearn_version.dev is None:
+ version_url = f"{sklearn_version.major}.{sklearn_version.minor}"
+ else:
+ version_url = "dev"
+ return getattr(
+ self,
+ "__doc_link_template",
+ (
+ f"https://scikit-learn.org/{version_url}/modules/generated/"
+ "{estimator_module}.{estimator_name}.html"
+ ),
+ )
+
+ @_doc_link_template.setter
+ def _doc_link_template(self, value):
+ setattr(self, "__doc_link_template", value)
+
+ def _get_doc_link(self):
+ """Generates a link to the API documentation for a given estimator.
+
+ This method generates the link to the estimator's documentation page
+ by using the template defined by the attribute `_doc_link_template`.
+
+ Returns
+ -------
+ url : str
+ The URL to the API documentation for this estimator. If the estimator does
+ not belong to module `_doc_link_module`, the empty string (i.e. `""`) is
+ returned.
+ """
+ if self.__class__.__module__.split(".")[0] != self._doc_link_module:
+ return ""
+
+ if self._doc_link_url_param_generator is None:
+ estimator_name = self.__class__.__name__
+ # Construct the estimator's module name, up to the first private submodule.
+ # This works because in scikit-learn all public estimators are exposed at
+ # that level, even if they actually live in a private sub-module.
+ estimator_module = ".".join(
+ itertools.takewhile(
+ lambda part: not part.startswith("_"),
+ self.__class__.__module__.split("."),
+ )
+ )
+ return self._doc_link_template.format(
+ estimator_module=estimator_module, estimator_name=estimator_name
+ )
+ return self._doc_link_template.format(
+ **self._doc_link_url_param_generator(self)
+ )
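+# Illustrative sketch (not upstream code, assuming the estimator class mixes in
+# `_HTMLDocumentationLinkMixin`): with the default template, a public estimator
+# defined in `sklearn.linear_model._logistic` such as `LogisticRegression`
+# resolves to
+# "https://scikit-learn.org/<major>.<minor>/modules/generated/sklearn.linear_model.LogisticRegression.html"
+# because the module path is truncated at the first private component.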
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_fast_dict.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_fast_dict.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..bde968659e9e7f1346513e73fe97764271e72ea6
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_fast_dict.cpython-310-x86_64-linux-gnu.so differ
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_isfinite.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_isfinite.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..f12e3990fa1cfd36fbbcafabe5aadefc3216168b
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_isfinite.cpython-310-x86_64-linux-gnu.so differ
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_joblib.py b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_joblib.py
new file mode 100644
index 0000000000000000000000000000000000000000..590fdc6170c64210917f0bb811fe65fc92b3ff36
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_joblib.py
@@ -0,0 +1,38 @@
+import warnings as _warnings
+
+with _warnings.catch_warnings():
+ _warnings.simplefilter("ignore")
+ # joblib imports may raise DeprecationWarning on certain Python
+ # versions
+ import joblib
+ from joblib import (
+ Memory,
+ Parallel,
+ __version__,
+ cpu_count,
+ delayed,
+ dump,
+ effective_n_jobs,
+ hash,
+ load,
+ logger,
+ parallel_backend,
+ register_parallel_backend,
+ )
+
+
+__all__ = [
+ "parallel_backend",
+ "register_parallel_backend",
+ "cpu_count",
+ "Parallel",
+ "Memory",
+ "delayed",
+ "effective_n_jobs",
+ "hash",
+ "logger",
+ "dump",
+ "load",
+ "joblib",
+ "__version__",
+]
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_mocking.py b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_mocking.py
new file mode 100644
index 0000000000000000000000000000000000000000..16acabf03755bb31509cfb2d9fc46d94f3261fcb
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_mocking.py
@@ -0,0 +1,400 @@
+import numpy as np
+
+from ..base import BaseEstimator, ClassifierMixin
+from ..utils._metadata_requests import RequestMethod
+from .metaestimators import available_if
+from .validation import _check_sample_weight, _num_samples, check_array, check_is_fitted
+
+
+class ArraySlicingWrapper:
+ """
+ Parameters
+ ----------
+ array
+ """
+
+ def __init__(self, array):
+ self.array = array
+
+ def __getitem__(self, aslice):
+ return MockDataFrame(self.array[aslice])
+
+
+class MockDataFrame:
+ """
+ Parameters
+ ----------
+ array
+ """
+
+ # have shape and length but don't support indexing.
+
+ def __init__(self, array):
+ self.array = array
+ self.values = array
+ self.shape = array.shape
+ self.ndim = array.ndim
+ # ugly hack to make iloc work.
+ self.iloc = ArraySlicingWrapper(array)
+
+ def __len__(self):
+ return len(self.array)
+
+ def __array__(self, dtype=None):
+ # Pandas data frames also are array-like: we want to make sure that
+ # input validation in cross-validation does not try to call that
+ # method.
+ return self.array
+
+ def __eq__(self, other):
+ return MockDataFrame(self.array == other.array)
+
+ def __ne__(self, other):
+ return not self == other
+
+ def take(self, indices, axis=0):
+ return MockDataFrame(self.array.take(indices, axis=axis))
+
+
+class CheckingClassifier(ClassifierMixin, BaseEstimator):
+ """Dummy classifier to test pipelining and meta-estimators.
+
+    Checks some property of `X` and `y` in fit / predict.
+ This allows testing whether pipelines / cross-validation or metaestimators
+ changed the input.
+
+ Can also be used to check if `fit_params` are passed correctly, and
+ to force a certain score to be returned.
+
+ Parameters
+ ----------
+ check_y, check_X : callable, default=None
+        The callable used to validate `X` and `y`. These callables should return
+ a bool where `False` will trigger an `AssertionError`. If `None`, the
+ data is not validated. Default is `None`.
+
+ check_y_params, check_X_params : dict, default=None
+ The optional parameters to pass to `check_X` and `check_y`. If `None`,
+ then no parameters are passed in.
+
+ methods_to_check : "all" or list of str, default="all"
+ The methods in which the checks should be applied. By default,
+ all checks will be done on all methods (`fit`, `predict`,
+ `predict_proba`, `decision_function` and `score`).
+
+ foo_param : int, default=0
+ A `foo` param. When `foo > 1`, the output of :meth:`score` will be 1
+ otherwise it is 0.
+
+ expected_sample_weight : bool, default=False
+ Whether to check if a valid `sample_weight` was passed to `fit`.
+
+ expected_fit_params : list of str, default=None
+ A list of the expected parameters given when calling `fit`.
+
+ Attributes
+ ----------
+    classes_ : ndarray of shape (n_classes,)
+ The classes seen during `fit`.
+
+ n_features_in_ : int
+ The number of features seen during `fit`.
+
+ Examples
+ --------
+ >>> from sklearn.utils._mocking import CheckingClassifier
+
+    This helper allows asserting specific properties of `X` or `y`. In this
+ case we expect `check_X` or `check_y` to return a boolean.
+
+ >>> from sklearn.datasets import load_iris
+ >>> X, y = load_iris(return_X_y=True)
+ >>> clf = CheckingClassifier(check_X=lambda x: x.shape == (150, 4))
+ >>> clf.fit(X, y)
+ CheckingClassifier(...)
+
+ We can also provide a check which might raise an error. In this case, we
+ expect `check_X` to return `X` and `check_y` to return `y`.
+
+ >>> from sklearn.utils import check_array
+ >>> clf = CheckingClassifier(check_X=check_array)
+ >>> clf.fit(X, y)
+ CheckingClassifier(...)
+ """
+
+ def __init__(
+ self,
+ *,
+ check_y=None,
+ check_y_params=None,
+ check_X=None,
+ check_X_params=None,
+ methods_to_check="all",
+ foo_param=0,
+ expected_sample_weight=None,
+ expected_fit_params=None,
+ ):
+ self.check_y = check_y
+ self.check_y_params = check_y_params
+ self.check_X = check_X
+ self.check_X_params = check_X_params
+ self.methods_to_check = methods_to_check
+ self.foo_param = foo_param
+ self.expected_sample_weight = expected_sample_weight
+ self.expected_fit_params = expected_fit_params
+
+ def _check_X_y(self, X, y=None, should_be_fitted=True):
+ """Validate X and y and make extra check.
+
+ Parameters
+ ----------
+ X : array-like of shape (n_samples, n_features)
+ The data set.
+ `X` is checked only if `check_X` is not `None` (default is None).
+ y : array-like of shape (n_samples), default=None
+ The corresponding target, by default `None`.
+ `y` is checked only if `check_y` is not `None` (default is None).
+ should_be_fitted : bool, default=True
+ Whether or not the classifier should be already fitted.
+ By default True.
+
+ Returns
+ -------
+ X, y
+ """
+ if should_be_fitted:
+ check_is_fitted(self)
+ if self.check_X is not None:
+ params = {} if self.check_X_params is None else self.check_X_params
+ checked_X = self.check_X(X, **params)
+ if isinstance(checked_X, (bool, np.bool_)):
+ assert checked_X
+ else:
+ X = checked_X
+ if y is not None and self.check_y is not None:
+ params = {} if self.check_y_params is None else self.check_y_params
+ checked_y = self.check_y(y, **params)
+ if isinstance(checked_y, (bool, np.bool_)):
+ assert checked_y
+ else:
+ y = checked_y
+ return X, y
+
+ def fit(self, X, y, sample_weight=None, **fit_params):
+ """Fit classifier.
+
+ Parameters
+ ----------
+ X : array-like of shape (n_samples, n_features)
+ Training vector, where `n_samples` is the number of samples and
+ `n_features` is the number of features.
+
+ y : array-like of shape (n_samples, n_outputs) or (n_samples,), \
+ default=None
+ Target relative to X for classification or regression;
+ None for unsupervised learning.
+
+ sample_weight : array-like of shape (n_samples,), default=None
+ Sample weights. If None, then samples are equally weighted.
+
+ **fit_params : dict of string -> object
+ Parameters passed to the ``fit`` method of the estimator
+
+ Returns
+ -------
+ self
+ """
+ assert _num_samples(X) == _num_samples(y)
+ if self.methods_to_check == "all" or "fit" in self.methods_to_check:
+ X, y = self._check_X_y(X, y, should_be_fitted=False)
+ self.n_features_in_ = np.shape(X)[1]
+ self.classes_ = np.unique(check_array(y, ensure_2d=False, allow_nd=True))
+ if self.expected_fit_params:
+ missing = set(self.expected_fit_params) - set(fit_params)
+ if missing:
+ raise AssertionError(
+ f"Expected fit parameter(s) {list(missing)} not seen."
+ )
+ for key, value in fit_params.items():
+ if _num_samples(value) != _num_samples(X):
+ raise AssertionError(
+ f"Fit parameter {key} has length {_num_samples(value)}"
+ f"; expected {_num_samples(X)}."
+ )
+ if self.expected_sample_weight:
+ if sample_weight is None:
+ raise AssertionError("Expected sample_weight to be passed")
+ _check_sample_weight(sample_weight, X)
+
+ return self
+
+ def predict(self, X):
+ """Predict the first class seen in `classes_`.
+
+ Parameters
+ ----------
+ X : array-like of shape (n_samples, n_features)
+ The input data.
+
+ Returns
+ -------
+ preds : ndarray of shape (n_samples,)
+            Predictions of the first class seen in `classes_`.
+ """
+ if self.methods_to_check == "all" or "predict" in self.methods_to_check:
+ X, y = self._check_X_y(X)
+ return self.classes_[np.zeros(_num_samples(X), dtype=int)]
+
+ def predict_proba(self, X):
+ """Predict probabilities for each class.
+
+ Here, the dummy classifier will provide a probability of 1 for the
+ first class of `classes_` and 0 otherwise.
+
+ Parameters
+ ----------
+ X : array-like of shape (n_samples, n_features)
+ The input data.
+
+ Returns
+ -------
+ proba : ndarray of shape (n_samples, n_classes)
+ The probabilities for each sample and class.
+ """
+ if self.methods_to_check == "all" or "predict_proba" in self.methods_to_check:
+ X, y = self._check_X_y(X)
+ proba = np.zeros((_num_samples(X), len(self.classes_)))
+ proba[:, 0] = 1
+ return proba
+
+ def decision_function(self, X):
+ """Confidence score.
+
+ Parameters
+ ----------
+ X : array-like of shape (n_samples, n_features)
+ The input data.
+
+ Returns
+ -------
+ decision : ndarray of shape (n_samples,) if n_classes == 2\
+ else (n_samples, n_classes)
+ Confidence score.
+ """
+ if (
+ self.methods_to_check == "all"
+ or "decision_function" in self.methods_to_check
+ ):
+ X, y = self._check_X_y(X)
+ if len(self.classes_) == 2:
+ # for binary classifier, the confidence score is related to
+ # classes_[1] and therefore should be null.
+ return np.zeros(_num_samples(X))
+ else:
+ decision = np.zeros((_num_samples(X), len(self.classes_)))
+ decision[:, 0] = 1
+ return decision
+
+ def score(self, X=None, Y=None):
+ """Fake score.
+
+ Parameters
+ ----------
+ X : array-like of shape (n_samples, n_features)
+ Input data, where `n_samples` is the number of samples and
+ `n_features` is the number of features.
+
+ Y : array-like of shape (n_samples, n_output) or (n_samples,)
+ Target relative to X for classification or regression;
+ None for unsupervised learning.
+
+ Returns
+ -------
+ score : float
+            Either 0 or 1 depending on `foo_param` (i.e. `foo_param > 1 =>
+ score=1` otherwise `score=0`).
+ """
+ if self.methods_to_check == "all" or "score" in self.methods_to_check:
+ self._check_X_y(X, Y)
+ if self.foo_param > 1:
+ score = 1.0
+ else:
+ score = 0.0
+ return score
+
+ def _more_tags(self):
+ return {"_skip_test": True, "X_types": ["1dlabel"]}
+
+
+# Deactivate key validation for CheckingClassifier because we want to be able to
+# call fit with arbitrary fit_params and record them. Without this change, we
+# would get an error because those arbitrary params are not expected.
+CheckingClassifier.set_fit_request = RequestMethod( # type: ignore
+ name="fit", keys=[], validate_keys=False
+)
+
+
+class NoSampleWeightWrapper(BaseEstimator):
+ """Wrap estimator which will not expose `sample_weight`.
+
+ Parameters
+ ----------
+ est : estimator, default=None
+ The estimator to wrap.
+ """
+
+ def __init__(self, est=None):
+ self.est = est
+
+ def fit(self, X, y):
+ return self.est.fit(X, y)
+
+ def predict(self, X):
+ return self.est.predict(X)
+
+ def predict_proba(self, X):
+ return self.est.predict_proba(X)
+
+ def _more_tags(self):
+ return {"_skip_test": True}
+
+
+def _check_response(method):
+ def check(self):
+ return self.response_methods is not None and method in self.response_methods
+
+ return check
+
+
+class _MockEstimatorOnOffPrediction(BaseEstimator):
+ """Estimator for which we can turn on/off the prediction methods.
+
+ Parameters
+ ----------
+    response_methods : list of \
+            {"predict", "predict_proba", "decision_function"}, default=None
+        List containing the response methods implemented by the estimator.
+        When a response method is in the list, calling it returns the name of
+        that method. Otherwise, an `AttributeError` is raised. This allows
+        using `getattr` on the mock as on any conventional estimator. By
+        default, no response methods are mocked.
+ """
+
+ def __init__(self, response_methods=None):
+ self.response_methods = response_methods
+
+ def fit(self, X, y):
+ self.classes_ = np.unique(y)
+ return self
+
+ @available_if(_check_response("predict"))
+ def predict(self, X):
+ return "predict"
+
+ @available_if(_check_response("predict_proba"))
+ def predict_proba(self, X):
+ return "predict_proba"
+
+ @available_if(_check_response("decision_function"))
+ def decision_function(self, X):
+ return "decision_function"
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_pprint.py b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_pprint.py
new file mode 100644
index 0000000000000000000000000000000000000000..cea1510746cbed06e708ad939e507aa84ba733f4
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_pprint.py
@@ -0,0 +1,463 @@
+"""This module contains the _EstimatorPrettyPrinter class used in
+BaseEstimator.__repr__ for pretty-printing estimators"""
+
+# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+# 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018 Python Software Foundation;
+# All Rights Reserved
+
+# Authors: Fred L. Drake, Jr. (built-in CPython pprint module)
+# Nicolas Hug (scikit-learn specific changes)
+
+# License: PSF License version 2 (see below)
+
+# PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
+# --------------------------------------------
+
+# 1. This LICENSE AGREEMENT is between the Python Software Foundation ("PSF"),
+# and the Individual or Organization ("Licensee") accessing and otherwise
+# using this software ("Python") in source or binary form and its associated
+# documentation.
+
+# 2. Subject to the terms and conditions of this License Agreement, PSF hereby
+# grants Licensee a nonexclusive, royalty-free, world-wide license to
+# reproduce, analyze, test, perform and/or display publicly, prepare
+# derivative works, distribute, and otherwise use Python alone or in any
+# derivative version, provided, however, that PSF's License Agreement and
+# PSF's notice of copyright, i.e., "Copyright (c) 2001, 2002, 2003, 2004,
+# 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016,
+# 2017, 2018 Python Software Foundation; All Rights Reserved" are retained in
+# Python alone or in any derivative version prepared by Licensee.
+
+# 3. In the event Licensee prepares a derivative work that is based on or
+# incorporates Python or any part thereof, and wants to make the derivative
+# work available to others as provided herein, then Licensee hereby agrees to
+# include in any such work a brief summary of the changes made to Python.
+
+# 4. PSF is making Python available to Licensee on an "AS IS" basis. PSF MAKES
+# NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. BY WAY OF EXAMPLE, BUT
+# NOT LIMITATION, PSF MAKES NO AND DISCLAIMS ANY REPRESENTATION OR WARRANTY OF
+# MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF
+# PYTHON WILL NOT INFRINGE ANY THIRD PARTY RIGHTS.
+
+# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON FOR ANY
+# INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS A RESULT OF
+# MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, OR ANY DERIVATIVE
+# THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+
+# 6. This License Agreement will automatically terminate upon a material
+# breach of its terms and conditions.
+
+# 7. Nothing in this License Agreement shall be deemed to create any
+# relationship of agency, partnership, or joint venture between PSF and
+# Licensee. This License Agreement does not grant permission to use PSF
+# trademarks or trade name in a trademark sense to endorse or promote products
+# or services of Licensee, or any third party.
+
+# 8. By copying, installing or otherwise using Python, Licensee agrees to be
+# bound by the terms and conditions of this License Agreement.
+
+
+# Brief summary of changes to original code:
+# - "compact" parameter is supported for dicts, not just lists or tuples
+# - estimators have a custom handler, they're not just treated as objects
+# - long sequences (lists, tuples, dict items) with more than N elements are
+# shortened using ellipsis (', ...') at the end.
+
+import inspect
+import pprint
+from collections import OrderedDict
+
+from .._config import get_config
+from ..base import BaseEstimator
+from . import is_scalar_nan
+
+
+class KeyValTuple(tuple):
+ """Dummy class for correctly rendering key-value tuples from dicts."""
+
+ def __repr__(self):
+ # needed for _dispatch[tuple.__repr__] not to be overridden
+ return super().__repr__()
+
+
+class KeyValTupleParam(KeyValTuple):
+ """Dummy class for correctly rendering key-value tuples from parameters."""
+
+ pass
+
+
+def _changed_params(estimator):
+ """Return dict (param_name: value) of parameters that were given to
+ estimator with non-default values."""
+
+ params = estimator.get_params(deep=False)
+ init_func = getattr(estimator.__init__, "deprecated_original", estimator.__init__)
+ init_params = inspect.signature(init_func).parameters
+ init_params = {name: param.default for name, param in init_params.items()}
+
+ def has_changed(k, v):
+ if k not in init_params: # happens if k is part of a **kwargs
+ return True
+ if init_params[k] == inspect._empty: # k has no default value
+ return True
+ # try to avoid calling repr on nested estimators
+ if isinstance(v, BaseEstimator) and v.__class__ != init_params[k].__class__:
+ return True
+ # Use repr as a last resort. It may be expensive.
+ if repr(v) != repr(init_params[k]) and not (
+ is_scalar_nan(init_params[k]) and is_scalar_nan(v)
+ ):
+ return True
+ return False
+
+ return {k: v for k, v in params.items() if has_changed(k, v)}
+
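+# Illustrative sketch (not upstream code): only parameters that differ from the
+# signature defaults are reported, which is what `print_changed_only=True`
+# relies on:
+#
+#     from sklearn.linear_model import LogisticRegression
+#     _changed_params(LogisticRegression(C=10.0))
+#     # -> {'C': 10.0}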
+
+class _EstimatorPrettyPrinter(pprint.PrettyPrinter):
+ """Pretty Printer class for estimator objects.
+
+ This extends the pprint.PrettyPrinter class, because:
+ - we need estimators to be printed with their parameters, e.g.
+ Estimator(param1=value1, ...) which is not supported by default.
+ - the 'compact' parameter of PrettyPrinter is ignored for dicts, which
+ may lead to very long representations that we want to avoid.
+
+ Quick overview of pprint.PrettyPrinter (see also
+ https://stackoverflow.com/questions/49565047/pprint-with-hex-numbers):
+
+ - the entry point is the _format() method which calls format() (overridden
+ here)
+ - format() directly calls _safe_repr() for a first try at rendering the
+ object
+ - _safe_repr formats the whole object recursively, only calling itself,
+ not caring about line length or anything
+ - back to _format(), if the output string is too long, _format() then calls
+ the appropriate _pprint_TYPE() method (e.g. _pprint_list()) depending on
+      the type of the object. This is where the line length and the compact
+      parameters are taken into account.
+ - those _pprint_TYPE() methods will internally use the format() method for
+ rendering the nested objects of an object (e.g. the elements of a list)
+
+ In the end, everything has to be implemented twice: in _safe_repr and in
+ the custom _pprint_TYPE methods. Unfortunately PrettyPrinter is really not
+ straightforward to extend (especially when we want a compact output), so
+ the code is a bit convoluted.
+
+ This class overrides:
+ - format() to support the changed_only parameter
+ - _safe_repr to support printing of estimators (for when they fit on a
+ single line)
+ - _format_dict_items so that dict are correctly 'compacted'
+ - _format_items so that ellipsis is used on long lists and tuples
+
+ When estimators cannot be printed on a single line, the builtin _format()
+ will call _pprint_estimator() because it was registered to do so (see
+ _dispatch[BaseEstimator.__repr__] = _pprint_estimator).
+
+    Both _format_dict_items() and _pprint_estimator() use the
+ _format_params_or_dict_items() method that will format parameters and
+ key-value pairs respecting the compact parameter. This method needs another
+ subroutine _pprint_key_val_tuple() used when a parameter or a key-value
+ pair is too long to fit on a single line. This subroutine is called in
+ _format() and is registered as well in the _dispatch dict (just like
+ _pprint_estimator). We had to create the two classes KeyValTuple and
+ KeyValTupleParam for this.
+ """
+
+ def __init__(
+ self,
+ indent=1,
+ width=80,
+ depth=None,
+ stream=None,
+ *,
+ compact=False,
+ indent_at_name=True,
+ n_max_elements_to_show=None,
+ ):
+ super().__init__(indent, width, depth, stream, compact=compact)
+ self._indent_at_name = indent_at_name
+ if self._indent_at_name:
+ self._indent_per_level = 1 # ignore indent param
+ self._changed_only = get_config()["print_changed_only"]
+ # Max number of elements in a list, dict, tuple until we start using
+        # ellipsis. This also affects the number of arguments of an estimator
+ # (they are treated as dicts)
+ self.n_max_elements_to_show = n_max_elements_to_show
+
+ def format(self, object, context, maxlevels, level):
+ return _safe_repr(
+ object, context, maxlevels, level, changed_only=self._changed_only
+ )
+
+ def _pprint_estimator(self, object, stream, indent, allowance, context, level):
+ stream.write(object.__class__.__name__ + "(")
+ if self._indent_at_name:
+ indent += len(object.__class__.__name__)
+
+ if self._changed_only:
+ params = _changed_params(object)
+ else:
+ params = object.get_params(deep=False)
+
+ params = OrderedDict((name, val) for (name, val) in sorted(params.items()))
+
+ self._format_params(
+ params.items(), stream, indent, allowance + 1, context, level
+ )
+ stream.write(")")
+
+ def _format_dict_items(self, items, stream, indent, allowance, context, level):
+ return self._format_params_or_dict_items(
+ items, stream, indent, allowance, context, level, is_dict=True
+ )
+
+ def _format_params(self, items, stream, indent, allowance, context, level):
+ return self._format_params_or_dict_items(
+ items, stream, indent, allowance, context, level, is_dict=False
+ )
+
+ def _format_params_or_dict_items(
+ self, object, stream, indent, allowance, context, level, is_dict
+ ):
+ """Format dict items or parameters respecting the compact=True
+ parameter. For some reason, the builtin rendering of dict items doesn't
+ respect compact=True and will use one line per key-value if all cannot
+ fit in a single line.
+ Dict items will be rendered as <'key': value> while params will be
+        rendered as <key=value>. The implementation is mostly copy/pasted from
+        the builtin _format_items().
+ This also adds ellipsis if the number of items is greater than
+ self.n_max_elements_to_show.
+ """
+ write = stream.write
+ indent += self._indent_per_level
+ delimnl = ",\n" + " " * indent
+ delim = ""
+ width = max_width = self._width - indent + 1
+ it = iter(object)
+ try:
+ next_ent = next(it)
+ except StopIteration:
+ return
+ last = False
+ n_items = 0
+ while not last:
+ if n_items == self.n_max_elements_to_show:
+ write(", ...")
+ break
+ n_items += 1
+ ent = next_ent
+ try:
+ next_ent = next(it)
+ except StopIteration:
+ last = True
+ max_width -= allowance
+ width -= allowance
+ if self._compact:
+ k, v = ent
+ krepr = self._repr(k, context, level)
+ vrepr = self._repr(v, context, level)
+ if not is_dict:
+ krepr = krepr.strip("'")
+ middle = ": " if is_dict else "="
+ rep = krepr + middle + vrepr
+ w = len(rep) + 2
+ if width < w:
+ width = max_width
+ if delim:
+ delim = delimnl
+ if width >= w:
+ width -= w
+ write(delim)
+ delim = ", "
+ write(rep)
+ continue
+ write(delim)
+ delim = delimnl
+ class_ = KeyValTuple if is_dict else KeyValTupleParam
+ self._format(
+ class_(ent), stream, indent, allowance if last else 1, context, level
+ )
+
+ def _format_items(self, items, stream, indent, allowance, context, level):
+ """Format the items of an iterable (list, tuple...). Same as the
+ built-in _format_items, with support for ellipsis if the number of
+ elements is greater than self.n_max_elements_to_show.
+ """
+ write = stream.write
+ indent += self._indent_per_level
+ if self._indent_per_level > 1:
+ write((self._indent_per_level - 1) * " ")
+ delimnl = ",\n" + " " * indent
+ delim = ""
+ width = max_width = self._width - indent + 1
+ it = iter(items)
+ try:
+ next_ent = next(it)
+ except StopIteration:
+ return
+ last = False
+ n_items = 0
+ while not last:
+ if n_items == self.n_max_elements_to_show:
+ write(", ...")
+ break
+ n_items += 1
+ ent = next_ent
+ try:
+ next_ent = next(it)
+ except StopIteration:
+ last = True
+ max_width -= allowance
+ width -= allowance
+ if self._compact:
+ rep = self._repr(ent, context, level)
+ w = len(rep) + 2
+ if width < w:
+ width = max_width
+ if delim:
+ delim = delimnl
+ if width >= w:
+ width -= w
+ write(delim)
+ delim = ", "
+ write(rep)
+ continue
+ write(delim)
+ delim = delimnl
+ self._format(ent, stream, indent, allowance if last else 1, context, level)
+
+ def _pprint_key_val_tuple(self, object, stream, indent, allowance, context, level):
+ """Pretty printing for key-value tuples from dict or parameters."""
+ k, v = object
+ rep = self._repr(k, context, level)
+ if isinstance(object, KeyValTupleParam):
+ rep = rep.strip("'")
+ middle = "="
+ else:
+ middle = ": "
+ stream.write(rep)
+ stream.write(middle)
+ self._format(
+ v, stream, indent + len(rep) + len(middle), allowance, context, level
+ )
+
+ # Note: need to copy _dispatch to prevent instances of the builtin
+ # PrettyPrinter class to call methods of _EstimatorPrettyPrinter (see issue
+ # 12906)
+ # mypy error: "Type[PrettyPrinter]" has no attribute "_dispatch"
+ _dispatch = pprint.PrettyPrinter._dispatch.copy() # type: ignore
+ _dispatch[BaseEstimator.__repr__] = _pprint_estimator
+ _dispatch[KeyValTuple.__repr__] = _pprint_key_val_tuple
+
+
+def _safe_repr(object, context, maxlevels, level, changed_only=False):
+ """Same as the builtin _safe_repr, with added support for Estimator
+ objects."""
+ typ = type(object)
+
+ if typ in pprint._builtin_scalars:
+ return repr(object), True, False
+
+ r = getattr(typ, "__repr__", None)
+ if issubclass(typ, dict) and r is dict.__repr__:
+ if not object:
+ return "{}", True, False
+ objid = id(object)
+ if maxlevels and level >= maxlevels:
+ return "{...}", False, objid in context
+ if objid in context:
+ return pprint._recursion(object), False, True
+ context[objid] = 1
+ readable = True
+ recursive = False
+ components = []
+ append = components.append
+ level += 1
+ saferepr = _safe_repr
+ items = sorted(object.items(), key=pprint._safe_tuple)
+ for k, v in items:
+ krepr, kreadable, krecur = saferepr(
+ k, context, maxlevels, level, changed_only=changed_only
+ )
+ vrepr, vreadable, vrecur = saferepr(
+ v, context, maxlevels, level, changed_only=changed_only
+ )
+ append("%s: %s" % (krepr, vrepr))
+ readable = readable and kreadable and vreadable
+ if krecur or vrecur:
+ recursive = True
+ del context[objid]
+ return "{%s}" % ", ".join(components), readable, recursive
+
+ if (issubclass(typ, list) and r is list.__repr__) or (
+ issubclass(typ, tuple) and r is tuple.__repr__
+ ):
+ if issubclass(typ, list):
+ if not object:
+ return "[]", True, False
+ format = "[%s]"
+ elif len(object) == 1:
+ format = "(%s,)"
+ else:
+ if not object:
+ return "()", True, False
+ format = "(%s)"
+ objid = id(object)
+ if maxlevels and level >= maxlevels:
+ return format % "...", False, objid in context
+ if objid in context:
+ return pprint._recursion(object), False, True
+ context[objid] = 1
+ readable = True
+ recursive = False
+ components = []
+ append = components.append
+ level += 1
+ for o in object:
+ orepr, oreadable, orecur = _safe_repr(
+ o, context, maxlevels, level, changed_only=changed_only
+ )
+ append(orepr)
+ if not oreadable:
+ readable = False
+ if orecur:
+ recursive = True
+ del context[objid]
+ return format % ", ".join(components), readable, recursive
+
+ if issubclass(typ, BaseEstimator):
+ objid = id(object)
+ if maxlevels and level >= maxlevels:
+ return "{...}", False, objid in context
+ if objid in context:
+ return pprint._recursion(object), False, True
+ context[objid] = 1
+ readable = True
+ recursive = False
+ if changed_only:
+ params = _changed_params(object)
+ else:
+ params = object.get_params(deep=False)
+ components = []
+ append = components.append
+ level += 1
+ saferepr = _safe_repr
+ items = sorted(params.items(), key=pprint._safe_tuple)
+ for k, v in items:
+ krepr, kreadable, krecur = saferepr(
+ k, context, maxlevels, level, changed_only=changed_only
+ )
+ vrepr, vreadable, vrecur = saferepr(
+ v, context, maxlevels, level, changed_only=changed_only
+ )
+ append("%s=%s" % (krepr.strip("'"), vrepr))
+ readable = readable and kreadable and vreadable
+ if krecur or vrecur:
+ recursive = True
+ del context[objid]
+ return ("%s(%s)" % (typ.__name__, ", ".join(components)), readable, recursive)
+
+ rep = repr(object)
+ return rep, (rep and not rep.startswith("<")), False
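+
+
+# A minimal sketch of the resulting behaviour (assuming LogisticRegression from
+# sklearn.linear_model and the default config print_changed_only=True): the
+# estimator repr only shows parameters that were changed from their defaults.
+#
+#   >>> from sklearn.linear_model import LogisticRegression
+#   >>> print(LogisticRegression(C=10.0))
+#   LogisticRegression(C=10.0)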
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_random.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_random.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..820e77e469fbbaa3d752abf33f7051c688fc021c
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_random.cpython-310-x86_64-linux-gnu.so differ
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_response.py b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_response.py
new file mode 100644
index 0000000000000000000000000000000000000000..e647ba3a4f0094402709eeca4b3f709528cb0746
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_response.py
@@ -0,0 +1,298 @@
+"""Utilities to get the response values of a classifier or a regressor.
+
+It allows making uniform checks and validation.
+"""
+import numpy as np
+
+from ..base import is_classifier
+from .multiclass import type_of_target
+from .validation import _check_response_method, check_is_fitted
+
+
+def _process_predict_proba(*, y_pred, target_type, classes, pos_label):
+ """Get the response values when the response method is `predict_proba`.
+
+    This function processes the `y_pred` array in the binary and multi-label cases.
+ In the binary case, it selects the column corresponding to the positive
+ class. In the multi-label case, it stacks the predictions if they are not
+ in the "compressed" format `(n_samples, n_outputs)`.
+
+ Parameters
+ ----------
+ y_pred : ndarray
+ Output of `estimator.predict_proba`. The shape depends on the target type:
+
+ - for binary classification, it is a 2d array of shape `(n_samples, 2)`;
+ - for multiclass classification, it is a 2d array of shape
+ `(n_samples, n_classes)`;
+ - for multilabel classification, it is either a list of 2d arrays of shape
+ `(n_samples, 2)` (e.g. `RandomForestClassifier` or `KNeighborsClassifier`) or
+ an array of shape `(n_samples, n_outputs)` (e.g. `MLPClassifier` or
+ `RidgeClassifier`).
+
+ target_type : {"binary", "multiclass", "multilabel-indicator"}
+ Type of the target.
+
+ classes : ndarray of shape (n_classes,) or list of such arrays
+ Class labels as reported by `estimator.classes_`.
+
+ pos_label : int, float, bool or str
+ Only used with binary and multiclass targets.
+
+ Returns
+ -------
+ y_pred : ndarray of shape (n_samples,), (n_samples, n_classes) or \
+ (n_samples, n_output)
+ Compressed predictions format as requested by the metrics.
+ """
+ if target_type == "binary" and y_pred.shape[1] < 2:
+ # We don't handle classifiers trained on a single class.
+ raise ValueError(
+ f"Got predict_proba of shape {y_pred.shape}, but need "
+ "classifier with two classes."
+ )
+
+ if target_type == "binary":
+ col_idx = np.flatnonzero(classes == pos_label)[0]
+ return y_pred[:, col_idx]
+ elif target_type == "multilabel-indicator":
+        # Use a compressed format of shape `(n_samples, n_output)`.
+ # Only `MLPClassifier` and `RidgeClassifier` return an array of shape
+ # `(n_samples, n_outputs)`.
+ if isinstance(y_pred, list):
+ # list of arrays of shape `(n_samples, 2)`
+ return np.vstack([p[:, -1] for p in y_pred]).T
+ else:
+ # array of shape `(n_samples, n_outputs)`
+ return y_pred
+
+ return y_pred
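+
+
+# A minimal sketch of the binary case handled above: the column matching
+# `pos_label` is selected from the probability matrix.
+#
+#   >>> import numpy as np
+#   >>> proba = np.array([[0.8, 0.2], [0.3, 0.7]])
+#   >>> _process_predict_proba(y_pred=proba, target_type="binary",
+#   ...                        classes=np.array([0, 1]), pos_label=1)
+#   array([0.2, 0.7])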
+
+
+def _process_decision_function(*, y_pred, target_type, classes, pos_label):
+ """Get the response values when the response method is `decision_function`.
+
+    This function processes the `y_pred` array in the binary and multi-label cases.
+ In the binary case, it inverts the sign of the score if the positive label
+ is not `classes[1]`. In the multi-label case, it stacks the predictions if
+ they are not in the "compressed" format `(n_samples, n_outputs)`.
+
+ Parameters
+ ----------
+ y_pred : ndarray
+        Output of `estimator.decision_function`. The shape depends on the target type:
+
+ - for binary classification, it is a 1d array of shape `(n_samples,)` where the
+ sign is assuming that `classes[1]` is the positive class;
+ - for multiclass classification, it is a 2d array of shape
+ `(n_samples, n_classes)`;
+ - for multilabel classification, it is a 2d array of shape `(n_samples,
+ n_outputs)`.
+
+ target_type : {"binary", "multiclass", "multilabel-indicator"}
+ Type of the target.
+
+ classes : ndarray of shape (n_classes,) or list of such arrays
+ Class labels as reported by `estimator.classes_`.
+
+ pos_label : int, float, bool or str
+ Only used with binary and multiclass targets.
+
+ Returns
+ -------
+ y_pred : ndarray of shape (n_samples,), (n_samples, n_classes) or \
+ (n_samples, n_output)
+ Compressed predictions format as requested by the metrics.
+ """
+ if target_type == "binary" and pos_label == classes[0]:
+ return -1 * y_pred
+ return y_pred
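+
+
+# A minimal sketch of the binary case handled above: the sign of the scores is
+# flipped when `pos_label` is `classes[0]`, since `decision_function` scores
+# assume `classes[1]` is the positive class.
+#
+#   >>> import numpy as np
+#   >>> scores = np.array([-1.5, 2.0])
+#   >>> _process_decision_function(y_pred=scores, target_type="binary",
+#   ...                            classes=np.array([0, 1]), pos_label=0)
+#   array([ 1.5, -2. ])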
+
+
+def _get_response_values(
+ estimator,
+ X,
+ response_method,
+ pos_label=None,
+ return_response_method_used=False,
+):
+ """Compute the response values of a classifier, an outlier detector, or a regressor.
+
+    The response values are predictions with the following shape:
+
+ - for binary classification, it is a 1d array of shape `(n_samples,)`;
+ - for multiclass classification, it is a 2d array of shape `(n_samples, n_classes)`;
+ - for multilabel classification, it is a 2d array of shape `(n_samples, n_outputs)`;
+ - for outlier detection, it is a 1d array of shape `(n_samples,)`;
+ - for regression, it is a 1d array of shape `(n_samples,)`.
+
+ If `estimator` is a binary classifier, also return the label for the
+ effective positive class.
+
+ This utility is used primarily in the displays and the scikit-learn scorers.
+
+ .. versionadded:: 1.3
+
+ Parameters
+ ----------
+ estimator : estimator instance
+ Fitted classifier, outlier detector, or regressor or a
+ fitted :class:`~sklearn.pipeline.Pipeline` in which the last estimator is a
+ classifier, an outlier detector, or a regressor.
+
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
+ Input values.
+
+ response_method : {"predict_proba", "predict_log_proba", "decision_function", \
+ "predict"} or list of such str
+        Specifies the response method used to get predictions from an estimator
+ (i.e. :term:`predict_proba`, :term:`predict_log_proba`,
+ :term:`decision_function` or :term:`predict`). Possible choices are:
+
+        - if `str`, it corresponds to the name of the method to return;
+        - if a list of `str`, it provides the method names in order of
+          preference. The method returned corresponds to the first method in
+          the list that is implemented by `estimator`.
+
+ pos_label : int, float, bool or str, default=None
+ The class considered as the positive class when computing
+        the metrics. If `None` and target is 'binary', `estimator.classes_[1]` is
+ considered as the positive class.
+
+ return_response_method_used : bool, default=False
+ Whether to return the response method used to compute the response
+ values.
+
+ .. versionadded:: 1.4
+
+ Returns
+ -------
+ y_pred : ndarray of shape (n_samples,), (n_samples, n_classes) or \
+ (n_samples, n_outputs)
+ Target scores calculated from the provided `response_method`
+ and `pos_label`.
+
+ pos_label : int, float, bool, str or None
+ The class considered as the positive class when computing
+ the metrics. Returns `None` if `estimator` is a regressor or an outlier
+ detector.
+
+ response_method_used : str
+ The response method used to compute the response values. Only returned
+ if `return_response_method_used` is `True`.
+
+ .. versionadded:: 1.4
+
+ Raises
+ ------
+ ValueError
+ If `pos_label` is not a valid label.
+ If the shape of `y_pred` is not consistent for binary classifier.
+ If the response method can be applied to a classifier only and
+ `estimator` is a regressor.
+ """
+ from sklearn.base import is_classifier, is_outlier_detector # noqa
+
+ if is_classifier(estimator):
+ prediction_method = _check_response_method(estimator, response_method)
+ classes = estimator.classes_
+ target_type = type_of_target(classes)
+
+ if target_type in ("binary", "multiclass"):
+ if pos_label is not None and pos_label not in classes.tolist():
+ raise ValueError(
+ f"pos_label={pos_label} is not a valid label: It should be "
+ f"one of {classes}"
+ )
+ elif pos_label is None and target_type == "binary":
+ pos_label = classes[-1]
+
+ y_pred = prediction_method(X)
+
+ if prediction_method.__name__ in ("predict_proba", "predict_log_proba"):
+ y_pred = _process_predict_proba(
+ y_pred=y_pred,
+ target_type=target_type,
+ classes=classes,
+ pos_label=pos_label,
+ )
+ elif prediction_method.__name__ == "decision_function":
+ y_pred = _process_decision_function(
+ y_pred=y_pred,
+ target_type=target_type,
+ classes=classes,
+ pos_label=pos_label,
+ )
+ elif is_outlier_detector(estimator):
+ prediction_method = _check_response_method(estimator, response_method)
+ y_pred, pos_label = prediction_method(X), None
+ else: # estimator is a regressor
+ if response_method != "predict":
+ raise ValueError(
+ f"{estimator.__class__.__name__} should either be a classifier to be "
+ f"used with response_method={response_method} or the response_method "
+ "should be 'predict'. Got a regressor with response_method="
+ f"{response_method} instead."
+ )
+ prediction_method = estimator.predict
+ y_pred, pos_label = prediction_method(X), None
+
+ if return_response_method_used:
+ return y_pred, pos_label, prediction_method.__name__
+ return y_pred, pos_label
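+
+
+# A minimal usage sketch (assuming LogisticRegression and make_classification
+# from scikit-learn): for a fitted binary classifier, requesting
+# "predict_proba" yields the positive-class scores and the effective positive
+# label.
+#
+#   >>> from sklearn.datasets import make_classification
+#   >>> from sklearn.linear_model import LogisticRegression
+#   >>> X, y = make_classification(random_state=0)
+#   >>> clf = LogisticRegression().fit(X, y)
+#   >>> y_score, pos_label = _get_response_values(clf, X, "predict_proba")
+#   >>> y_score.shape, pos_label
+#   ((100,), 1)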
+
+
+def _get_response_values_binary(estimator, X, response_method, pos_label=None):
+ """Compute the response values of a binary classifier.
+
+ Parameters
+ ----------
+ estimator : estimator instance
+ Fitted classifier or a fitted :class:`~sklearn.pipeline.Pipeline`
+ in which the last estimator is a binary classifier.
+
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
+ Input values.
+
+ response_method : {'auto', 'predict_proba', 'decision_function'}
+ Specifies whether to use :term:`predict_proba` or
+ :term:`decision_function` as the target response. If set to 'auto',
+ :term:`predict_proba` is tried first and if it does not exist
+ :term:`decision_function` is tried next.
+
+ pos_label : int, float, bool or str, default=None
+ The class considered as the positive class when computing
+        the metrics. By default, `estimator.classes_[1]` is
+ considered as the positive class.
+
+ Returns
+ -------
+ y_pred : ndarray of shape (n_samples,)
+ Target scores calculated from the provided response_method
+ and pos_label.
+
+ pos_label : int, float, bool or str
+ The class considered as the positive class when computing
+ the metrics.
+ """
+ classification_error = "Expected 'estimator' to be a binary classifier."
+
+ check_is_fitted(estimator)
+ if not is_classifier(estimator):
+ raise ValueError(
+ classification_error + f" Got {estimator.__class__.__name__} instead."
+ )
+ elif len(estimator.classes_) != 2:
+ raise ValueError(
+ classification_error + f" Got {len(estimator.classes_)} classes instead."
+ )
+
+ if response_method == "auto":
+ response_method = ["predict_proba", "decision_function"]
+
+ return _get_response_values(
+ estimator,
+ X,
+ response_method,
+ pos_label=pos_label,
+ )
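+
+
+# A minimal usage sketch (assuming the same fitted binary classifier as above):
+# an explicit response method can also be requested instead of "auto".
+#
+#   >>> from sklearn.datasets import make_classification
+#   >>> from sklearn.linear_model import LogisticRegression
+#   >>> X, y = make_classification(random_state=0)
+#   >>> clf = LogisticRegression().fit(X, y)
+#   >>> y_score, pos_label = _get_response_values_binary(
+#   ...     clf, X, response_method="decision_function")
+#   >>> y_score.shape, pos_label
+#   ((100,), 1)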
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_show_versions.py b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_show_versions.py
new file mode 100644
index 0000000000000000000000000000000000000000..89052e88b65fe8dd27a5c63181693581827a8110
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_show_versions.py
@@ -0,0 +1,112 @@
+"""
+Utility methods to print system info for debugging
+
+adapted from :func:`pandas.show_versions`
+"""
+# License: BSD 3 clause
+
+import platform
+import sys
+
+from .. import __version__
+from ..utils.fixes import threadpool_info
+from ._openmp_helpers import _openmp_parallelism_enabled
+
+
+def _get_sys_info():
+ """System information
+
+ Returns
+ -------
+ sys_info : dict
+ system and Python version information
+
+ """
+ python = sys.version.replace("\n", " ")
+
+ blob = [
+ ("python", python),
+ ("executable", sys.executable),
+ ("machine", platform.platform()),
+ ]
+
+ return dict(blob)
+
+
+def _get_deps_info():
+ """Overview of the installed version of main dependencies
+
+ This function does not import the modules to collect the version numbers
+ but instead relies on standard Python package metadata.
+
+ Returns
+ -------
+ deps_info: dict
+ version information on relevant Python libraries
+
+ """
+ deps = [
+ "pip",
+ "setuptools",
+ "numpy",
+ "scipy",
+ "Cython",
+ "pandas",
+ "matplotlib",
+ "joblib",
+ "threadpoolctl",
+ ]
+
+ deps_info = {
+ "sklearn": __version__,
+ }
+
+ from importlib.metadata import PackageNotFoundError, version
+
+ for modname in deps:
+ try:
+ deps_info[modname] = version(modname)
+ except PackageNotFoundError:
+ deps_info[modname] = None
+ return deps_info
+
+
+def show_versions():
+ """Print useful debugging information"
+
+ .. versionadded:: 0.20
+
+ Examples
+ --------
+ >>> from sklearn import show_versions
+ >>> show_versions() # doctest: +SKIP
+ """
+
+ sys_info = _get_sys_info()
+ deps_info = _get_deps_info()
+
+ print("\nSystem:")
+ for k, stat in sys_info.items():
+ print("{k:>10}: {stat}".format(k=k, stat=stat))
+
+ print("\nPython dependencies:")
+ for k, stat in deps_info.items():
+ print("{k:>13}: {stat}".format(k=k, stat=stat))
+
+ print(
+ "\n{k}: {stat}".format(
+ k="Built with OpenMP", stat=_openmp_parallelism_enabled()
+ )
+ )
+
+ # show threadpoolctl results
+ threadpool_results = threadpool_info()
+ if threadpool_results:
+ print()
+ print("threadpoolctl info:")
+
+ for i, result in enumerate(threadpool_results):
+ for key, val in result.items():
+ print(f"{key:>15}: {val}")
+ if i != len(threadpool_results) - 1:
+ print()
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_testing.py b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_testing.py
new file mode 100644
index 0000000000000000000000000000000000000000..42011e3f719168de01b15275dbd6b91c15c1d14e
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_testing.py
@@ -0,0 +1,1169 @@
+"""Testing utilities."""
+
+# Copyright (c) 2011, 2012
+# Authors: Pietro Berkes,
+# Andreas Muller
+# Mathieu Blondel
+# Olivier Grisel
+# Arnaud Joly
+# Denis Engemann
+# Giorgio Patrini
+# Thierry Guillemot
+# License: BSD 3 clause
+import atexit
+import contextlib
+import functools
+import importlib
+import inspect
+import os
+import os.path as op
+import re
+import shutil
+import sys
+import tempfile
+import unittest
+import warnings
+from collections.abc import Iterable
+from dataclasses import dataclass
+from functools import wraps
+from inspect import signature
+from subprocess import STDOUT, CalledProcessError, TimeoutExpired, check_output
+from unittest import TestCase
+
+import joblib
+import numpy as np
+import scipy as sp
+from numpy.testing import assert_allclose as np_assert_allclose
+from numpy.testing import (
+ assert_almost_equal,
+ assert_approx_equal,
+ assert_array_almost_equal,
+ assert_array_equal,
+ assert_array_less,
+ assert_no_warnings,
+)
+
+import sklearn
+from sklearn.utils import (
+ _IS_32BIT,
+ IS_PYPY,
+ _in_unstable_openblas_configuration,
+)
+from sklearn.utils._array_api import _check_array_api_dispatch
+from sklearn.utils.fixes import VisibleDeprecationWarning, parse_version, sp_version
+from sklearn.utils.multiclass import check_classification_targets
+from sklearn.utils.validation import (
+ check_array,
+ check_is_fitted,
+ check_X_y,
+)
+
+__all__ = [
+ "assert_raises",
+ "assert_raises_regexp",
+ "assert_array_equal",
+ "assert_almost_equal",
+ "assert_array_almost_equal",
+ "assert_array_less",
+ "assert_approx_equal",
+ "assert_allclose",
+ "assert_run_python_script_without_output",
+ "assert_no_warnings",
+ "SkipTest",
+]
+
+_dummy = TestCase("__init__")
+assert_raises = _dummy.assertRaises
+SkipTest = unittest.case.SkipTest
+assert_dict_equal = _dummy.assertDictEqual
+
+assert_raises_regex = _dummy.assertRaisesRegex
+# assert_raises_regexp is deprecated in Python 3.4 in favor of
+# assert_raises_regex, but let's keep the backward compat in scikit-learn with
+# the old name for now
+assert_raises_regexp = assert_raises_regex
+
+
+def ignore_warnings(obj=None, category=Warning):
+ """Context manager and decorator to ignore warnings.
+
+ Note: Using this (in both variants) will clear all warnings
+ from all python modules loaded. In case you need to test
+ cross-module-warning-logging, this is not your tool of choice.
+
+ Parameters
+ ----------
+ obj : callable, default=None
+ callable where you want to ignore the warnings.
+ category : warning class, default=Warning
+ The category to filter. If Warning, all categories will be muted.
+
+ Examples
+ --------
+ >>> import warnings
+ >>> from sklearn.utils._testing import ignore_warnings
+ >>> with ignore_warnings():
+ ... warnings.warn('buhuhuhu')
+
+ >>> def nasty_warn():
+ ... warnings.warn('buhuhuhu')
+ ... print(42)
+
+ >>> ignore_warnings(nasty_warn)()
+ 42
+ """
+ if isinstance(obj, type) and issubclass(obj, Warning):
+ # Avoid common pitfall of passing category as the first positional
+        # argument, which results in the test not being run
+ warning_name = obj.__name__
+ raise ValueError(
+ "'obj' should be a callable where you want to ignore warnings. "
+ "You passed a warning class instead: 'obj={warning_name}'. "
+ "If you want to pass a warning class to ignore_warnings, "
+ "you should use 'category={warning_name}'".format(warning_name=warning_name)
+ )
+ elif callable(obj):
+ return _IgnoreWarnings(category=category)(obj)
+ else:
+ return _IgnoreWarnings(category=category)
+
+
+class _IgnoreWarnings:
+ """Improved and simplified Python warnings context manager and decorator.
+
+ This class allows the user to ignore the warnings raised by a function.
+ Copied from Python 2.7.5 and modified as required.
+
+ Parameters
+ ----------
+ category : tuple of warning class, default=Warning
+ The category to filter. By default, all the categories will be muted.
+
+ """
+
+ def __init__(self, category):
+ self._record = True
+ self._module = sys.modules["warnings"]
+ self._entered = False
+ self.log = []
+ self.category = category
+
+ def __call__(self, fn):
+ """Decorator to catch and hide warnings without visual nesting."""
+
+ @wraps(fn)
+ def wrapper(*args, **kwargs):
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore", self.category)
+ return fn(*args, **kwargs)
+
+ return wrapper
+
+ def __repr__(self):
+ args = []
+ if self._record:
+ args.append("record=True")
+ if self._module is not sys.modules["warnings"]:
+ args.append("module=%r" % self._module)
+ name = type(self).__name__
+ return "%s(%s)" % (name, ", ".join(args))
+
+ def __enter__(self):
+ if self._entered:
+ raise RuntimeError("Cannot enter %r twice" % self)
+ self._entered = True
+ self._filters = self._module.filters
+ self._module.filters = self._filters[:]
+ self._showwarning = self._module.showwarning
+ warnings.simplefilter("ignore", self.category)
+
+ def __exit__(self, *exc_info):
+ if not self._entered:
+ raise RuntimeError("Cannot exit %r without entering first" % self)
+ self._module.filters = self._filters
+ self._module.showwarning = self._showwarning
+ self.log[:] = []
+
+
+def assert_raise_message(exceptions, message, function, *args, **kwargs):
+ """Helper function to test the message raised in an exception.
+
+ Given an exception, a callable to raise the exception, and
+ a message string, tests that the correct exception is raised and
+ that the message is a substring of the error thrown. Used to test
+ that the specific message thrown during an exception is correct.
+
+ Parameters
+ ----------
+ exceptions : exception or tuple of exception
+ An Exception object.
+
+ message : str
+ The error message or a substring of the error message.
+
+ function : callable
+ Callable object to raise error.
+
+ *args : the positional arguments to `function`.
+
+ **kwargs : the keyword arguments to `function`.
+ """
+ try:
+ function(*args, **kwargs)
+ except exceptions as e:
+ error_message = str(e)
+ if message not in error_message:
+ raise AssertionError(
+ "Error message does not include the expected"
+ " string: %r. Observed error message: %r" % (message, error_message)
+ )
+ else:
+ # concatenate exception names
+ if isinstance(exceptions, tuple):
+ names = " or ".join(e.__name__ for e in exceptions)
+ else:
+ names = exceptions.__name__
+
+ raise AssertionError("%s not raised by %s" % (names, function.__name__))
+
+
+def assert_allclose(
+ actual, desired, rtol=None, atol=0.0, equal_nan=True, err_msg="", verbose=True
+):
+ """dtype-aware variant of numpy.testing.assert_allclose
+
+ This variant introspects the least precise floating point dtype
+ in the input argument and automatically sets the relative tolerance
+    parameter to 1e-4 for float32 and 1e-7 otherwise (typically float64
+ in scikit-learn).
+
+    `atol` is always left to 0.0 by default. It should be adjusted manually
+ to an assertion-specific value in case there are null values expected
+ in `desired`.
+
+ The aggregate tolerance is `atol + rtol * abs(desired)`.
+
+ Parameters
+ ----------
+ actual : array_like
+ Array obtained.
+ desired : array_like
+ Array desired.
+ rtol : float, optional, default=None
+ Relative tolerance.
+ If None, it is set based on the provided arrays' dtypes.
+ atol : float, optional, default=0.
+ Absolute tolerance.
+ equal_nan : bool, optional, default=True
+ If True, NaNs will compare equal.
+ err_msg : str, optional, default=''
+ The error message to be printed in case of failure.
+ verbose : bool, optional, default=True
+ If True, the conflicting values are appended to the error message.
+
+ Raises
+ ------
+ AssertionError
+ If actual and desired are not equal up to specified precision.
+
+ See Also
+ --------
+ numpy.testing.assert_allclose
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> from sklearn.utils._testing import assert_allclose
+ >>> x = [1e-5, 1e-3, 1e-1]
+ >>> y = np.arccos(np.cos(x))
+ >>> assert_allclose(x, y, rtol=1e-5, atol=0)
+ >>> a = np.full(shape=10, fill_value=1e-5, dtype=np.float32)
+ >>> assert_allclose(a, 1e-5)
+ """
+ dtypes = []
+
+ actual, desired = np.asanyarray(actual), np.asanyarray(desired)
+ dtypes = [actual.dtype, desired.dtype]
+
+ if rtol is None:
+ rtols = [1e-4 if dtype == np.float32 else 1e-7 for dtype in dtypes]
+ rtol = max(rtols)
+
+ np_assert_allclose(
+ actual,
+ desired,
+ rtol=rtol,
+ atol=atol,
+ equal_nan=equal_nan,
+ err_msg=err_msg,
+ verbose=verbose,
+ )
+
+
+def assert_allclose_dense_sparse(x, y, rtol=1e-07, atol=1e-9, err_msg=""):
+ """Assert allclose for sparse and dense data.
+
+ Both x and y need to be either sparse or dense, they
+ can't be mixed.
+
+ Parameters
+ ----------
+ x : {array-like, sparse matrix}
+ First array to compare.
+
+ y : {array-like, sparse matrix}
+ Second array to compare.
+
+ rtol : float, default=1e-07
+ relative tolerance; see numpy.allclose.
+
+ atol : float, default=1e-9
+ absolute tolerance; see numpy.allclose. Note that the default here is
+ more tolerant than the default for numpy.testing.assert_allclose, where
+ atol=0.
+
+ err_msg : str, default=''
+ Error message to raise.
+ """
+ if sp.sparse.issparse(x) and sp.sparse.issparse(y):
+ x = x.tocsr()
+ y = y.tocsr()
+ x.sum_duplicates()
+ y.sum_duplicates()
+ assert_array_equal(x.indices, y.indices, err_msg=err_msg)
+ assert_array_equal(x.indptr, y.indptr, err_msg=err_msg)
+ assert_allclose(x.data, y.data, rtol=rtol, atol=atol, err_msg=err_msg)
+ elif not sp.sparse.issparse(x) and not sp.sparse.issparse(y):
+ # both dense
+ assert_allclose(x, y, rtol=rtol, atol=atol, err_msg=err_msg)
+ else:
+ raise ValueError(
+ "Can only compare two sparse matrices, not a sparse matrix and an array."
+ )
+
+
+def set_random_state(estimator, random_state=0):
+ """Set random state of an estimator if it has the `random_state` param.
+
+ Parameters
+ ----------
+ estimator : object
+ The estimator.
+ random_state : int, RandomState instance or None, default=0
+ Pseudo random number generator state.
+ Pass an int for reproducible results across multiple function calls.
+        See :term:`Glossary <random_state>`.
+ """
+ if "random_state" in estimator.get_params():
+ estimator.set_params(random_state=random_state)
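+
+
+# A minimal usage sketch (assuming SGDRegressor and LinearRegression from
+# sklearn.linear_model): estimators without a `random_state` parameter are
+# left untouched.
+#
+#   >>> from sklearn.linear_model import LinearRegression, SGDRegressor
+#   >>> set_random_state(LinearRegression(), random_state=42)  # no-op
+#   >>> sgd = SGDRegressor()
+#   >>> set_random_state(sgd, random_state=42)
+#   >>> sgd.random_state
+#   42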
+
+
+try:
+ _check_array_api_dispatch(True)
+ ARRAY_API_COMPAT_FUNCTIONAL = True
+except ImportError:
+ ARRAY_API_COMPAT_FUNCTIONAL = False
+
+try:
+ import pytest
+
+ skip_if_32bit = pytest.mark.skipif(_IS_32BIT, reason="skipped on 32bit platforms")
+ fails_if_pypy = pytest.mark.xfail(IS_PYPY, reason="not compatible with PyPy")
+ fails_if_unstable_openblas = pytest.mark.xfail(
+ _in_unstable_openblas_configuration(),
+ reason="OpenBLAS is unstable for this configuration",
+ )
+ skip_if_no_parallel = pytest.mark.skipif(
+ not joblib.parallel.mp, reason="joblib is in serial mode"
+ )
+ skip_if_array_api_compat_not_configured = pytest.mark.skipif(
+ not ARRAY_API_COMPAT_FUNCTIONAL,
+ reason="requires array_api_compat installed and a new enough version of NumPy",
+ )
+
+ # Decorator for tests involving both BLAS calls and multiprocessing.
+ #
+ # Under POSIX (e.g. Linux or OSX), using multiprocessing in conjunction
+ # with some implementation of BLAS (or other libraries that manage an
+ # internal posix thread pool) can cause a crash or a freeze of the Python
+ # process.
+ #
+ # In practice all known packaged distributions (from Linux distros or
+    # Anaconda) of BLAS under Linux seem to be safe, so this problem seems
+    # to only impact OSX users.
+ #
+ # This wrapper makes it possible to skip tests that can possibly cause
+    # this crash under OS X.
+ #
+ # Under Python 3.4+ it is possible to use the `forkserver` start method
+ # for multiprocessing to avoid this issue. However it can cause pickling
+    # errors on interactively defined functions. It is therefore not enabled by
+ # default.
+
+ if_safe_multiprocessing_with_blas = pytest.mark.skipif(
+ sys.platform == "darwin", reason="Possible multi-process bug with some BLAS"
+ )
+except ImportError:
+ pass
+
+
+def check_skip_network():
+ if int(os.environ.get("SKLEARN_SKIP_NETWORK_TESTS", 0)):
+ raise SkipTest("Text tutorial requires large dataset download")
+
+
+def _delete_folder(folder_path, warn=False):
+ """Utility function to cleanup a temporary folder if still existing.
+
+ Copy from joblib.pool (for independence).
+ """
+ try:
+ if os.path.exists(folder_path):
+ # This can fail under windows,
+ # but will succeed when called by atexit
+ shutil.rmtree(folder_path)
+ except OSError:
+ if warn:
+ warnings.warn("Could not delete temporary folder %s" % folder_path)
+
+
+class TempMemmap:
+ """
+ Parameters
+ ----------
+ data
+ mmap_mode : str, default='r'
+ """
+
+ def __init__(self, data, mmap_mode="r"):
+ self.mmap_mode = mmap_mode
+ self.data = data
+
+ def __enter__(self):
+ data_read_only, self.temp_folder = create_memmap_backed_data(
+ self.data, mmap_mode=self.mmap_mode, return_folder=True
+ )
+ return data_read_only
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ _delete_folder(self.temp_folder)
+
+
+def create_memmap_backed_data(data, mmap_mode="r", return_folder=False):
+ """
+ Parameters
+ ----------
+ data
+ mmap_mode : str, default='r'
+ return_folder : bool, default=False
+ """
+ temp_folder = tempfile.mkdtemp(prefix="sklearn_testing_")
+ atexit.register(functools.partial(_delete_folder, temp_folder, warn=True))
+ filename = op.join(temp_folder, "data.pkl")
+ joblib.dump(data, filename)
+ memmap_backed_data = joblib.load(filename, mmap_mode=mmap_mode)
+ result = (
+ memmap_backed_data if not return_folder else (memmap_backed_data, temp_folder)
+ )
+ return result
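+
+
+# A minimal usage sketch: the data is dumped to a temporary folder with joblib
+# and loaded back as a read-only memory map.
+#
+#   >>> import numpy as np
+#   >>> X = np.arange(6).reshape(2, 3)
+#   >>> X_mm = create_memmap_backed_data(X)
+#   >>> isinstance(X_mm, np.memmap), X_mm.flags.writeable
+#   (True, False)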
+
+
+# Utils to test docstrings
+
+
+def _get_args(function, varargs=False):
+ """Helper to get function arguments."""
+
+ try:
+ params = signature(function).parameters
+ except ValueError:
+ # Error on builtin C function
+ return []
+ args = [
+ key
+ for key, param in params.items()
+ if param.kind not in (param.VAR_POSITIONAL, param.VAR_KEYWORD)
+ ]
+ if varargs:
+ varargs = [
+ param.name
+ for param in params.values()
+ if param.kind == param.VAR_POSITIONAL
+ ]
+ if len(varargs) == 0:
+ varargs = None
+ return args, varargs
+ else:
+ return args
+
+
+def _get_func_name(func):
+ """Get function full name.
+
+ Parameters
+ ----------
+ func : callable
+ The function object.
+
+ Returns
+ -------
+ name : str
+ The function name.
+ """
+ parts = []
+ module = inspect.getmodule(func)
+ if module:
+ parts.append(module.__name__)
+
+ qualname = func.__qualname__
+ if qualname != func.__name__:
+ parts.append(qualname[: qualname.find(".")])
+
+ parts.append(func.__name__)
+ return ".".join(parts)
+
+
+def check_docstring_parameters(func, doc=None, ignore=None):
+ """Helper to check docstring.
+
+ Parameters
+ ----------
+ func : callable
+ The function object to test.
+ doc : str, default=None
+ Docstring if it is passed manually to the test.
+ ignore : list, default=None
+ Parameters to ignore.
+
+ Returns
+ -------
+ incorrect : list
+ A list of string describing the incorrect results.
+ """
+ from numpydoc import docscrape
+
+ incorrect = []
+ ignore = [] if ignore is None else ignore
+
+ func_name = _get_func_name(func)
+ if not func_name.startswith("sklearn.") or func_name.startswith(
+ "sklearn.externals"
+ ):
+ return incorrect
+ # Don't check docstring for property-functions
+ if inspect.isdatadescriptor(func):
+ return incorrect
+ # Don't check docstring for setup / teardown pytest functions
+ if func_name.split(".")[-1] in ("setup_module", "teardown_module"):
+ return incorrect
+ # Dont check estimator_checks module
+ if func_name.split(".")[2] == "estimator_checks":
+ return incorrect
+ # Get the arguments from the function signature
+ param_signature = list(filter(lambda x: x not in ignore, _get_args(func)))
+ # drop self
+ if len(param_signature) > 0 and param_signature[0] == "self":
+ param_signature.remove("self")
+
+ # Analyze function's docstring
+ if doc is None:
+ records = []
+ with warnings.catch_warnings(record=True):
+ warnings.simplefilter("error", UserWarning)
+ try:
+ doc = docscrape.FunctionDoc(func)
+ except UserWarning as exp:
+ if "potentially wrong underline length" in str(exp):
+ # Catch warning raised as of numpydoc 1.2 when
+ # the underline length for a section of a docstring
+ # is not consistent.
+ message = str(exp).split("\n")[:3]
+ incorrect += [f"In function: {func_name}"] + message
+ return incorrect
+ records.append(str(exp))
+ except Exception as exp:
+ incorrect += [func_name + " parsing error: " + str(exp)]
+ return incorrect
+ if len(records):
+ raise RuntimeError("Error for %s:\n%s" % (func_name, records[0]))
+
+ param_docs = []
+ for name, type_definition, param_doc in doc["Parameters"]:
+ # Type hints are empty only if parameter name ended with :
+ if not type_definition.strip():
+ if ":" in name and name[: name.index(":")][-1:].strip():
+ incorrect += [
+ func_name
+ + " There was no space between the param name and colon (%r)" % name
+ ]
+ elif name.rstrip().endswith(":"):
+ incorrect += [
+ func_name
+ + " Parameter %r has an empty type spec. Remove the colon"
+ % (name.lstrip())
+ ]
+
+ # Create a list of parameters to compare with the parameters gotten
+ # from the func signature
+ if "*" not in name:
+ param_docs.append(name.split(":")[0].strip("` "))
+
+ # If one of the docstring's parameters had an error then return that
+ # incorrect message
+ if len(incorrect) > 0:
+ return incorrect
+
+ # Remove the parameters that should be ignored from list
+ param_docs = list(filter(lambda x: x not in ignore, param_docs))
+
+ # The following is derived from pytest, Copyright (c) 2004-2017 Holger
+ # Krekel and others, Licensed under MIT License. See
+ # https://github.com/pytest-dev/pytest
+
+ message = []
+ for i in range(min(len(param_docs), len(param_signature))):
+ if param_signature[i] != param_docs[i]:
+ message += [
+ "There's a parameter name mismatch in function"
+ " docstring w.r.t. function signature, at index %s"
+ " diff: %r != %r" % (i, param_signature[i], param_docs[i])
+ ]
+ break
+ if len(param_signature) > len(param_docs):
+ message += [
+ "Parameters in function docstring have less items w.r.t."
+ " function signature, first missing item: %s"
+ % param_signature[len(param_docs)]
+ ]
+
+ elif len(param_signature) < len(param_docs):
+ message += [
+ "Parameters in function docstring have more items w.r.t."
+ " function signature, first extra item: %s"
+ % param_docs[len(param_signature)]
+ ]
+
+ # If there wasn't any difference in the parameters themselves between
+ # docstring and signature including having the same length then return
+ # empty list
+ if len(message) == 0:
+ return []
+
+ import difflib
+ import pprint
+
+ param_docs_formatted = pprint.pformat(param_docs).splitlines()
+ param_signature_formatted = pprint.pformat(param_signature).splitlines()
+
+ message += ["Full diff:"]
+
+ message.extend(
+ line.strip()
+ for line in difflib.ndiff(param_signature_formatted, param_docs_formatted)
+ )
+
+ incorrect.extend(message)
+
+ # Prepend function name
+ incorrect = ["In function: " + func_name] + incorrect
+
+ return incorrect
+
+
+def assert_run_python_script_without_output(source_code, pattern=".+", timeout=60):
+ """Utility to check assertions in an independent Python subprocess.
+
+    The script provided in the source code should return 0 and the stdout +
+ stderr should not match the pattern `pattern`.
+
+ This is a port from cloudpickle https://github.com/cloudpipe/cloudpickle
+
+ Parameters
+ ----------
+ source_code : str
+ The Python source code to execute.
+ pattern : str
+ Pattern that the stdout + stderr should not match. By default, unless
+ stdout + stderr are both empty, an error will be raised.
+ timeout : int, default=60
+ Time in seconds before timeout.
+ """
+ fd, source_file = tempfile.mkstemp(suffix="_src_test_sklearn.py")
+ os.close(fd)
+ try:
+ with open(source_file, "wb") as f:
+ f.write(source_code.encode("utf-8"))
+ cmd = [sys.executable, source_file]
+ cwd = op.normpath(op.join(op.dirname(sklearn.__file__), ".."))
+ env = os.environ.copy()
+ try:
+ env["PYTHONPATH"] = os.pathsep.join([cwd, env["PYTHONPATH"]])
+ except KeyError:
+ env["PYTHONPATH"] = cwd
+ kwargs = {"cwd": cwd, "stderr": STDOUT, "env": env}
+ # If coverage is running, pass the config file to the subprocess
+ coverage_rc = os.environ.get("COVERAGE_PROCESS_START")
+ if coverage_rc:
+ kwargs["env"]["COVERAGE_PROCESS_START"] = coverage_rc
+
+ kwargs["timeout"] = timeout
+ try:
+ try:
+ out = check_output(cmd, **kwargs)
+ except CalledProcessError as e:
+ raise RuntimeError(
+ "script errored with output:\n%s" % e.output.decode("utf-8")
+ )
+
+ out = out.decode("utf-8")
+ if re.search(pattern, out):
+ if pattern == ".+":
+ expectation = "Expected no output"
+ else:
+ expectation = f"The output was not supposed to match {pattern!r}"
+
+ message = f"{expectation}, got the following output instead: {out!r}"
+ raise AssertionError(message)
+ except TimeoutExpired as e:
+ raise RuntimeError(
+ "script timeout, output so far:\n%s" % e.output.decode("utf-8")
+ )
+ finally:
+ os.unlink(source_file)
+
+
+def _convert_container(
+ container,
+ constructor_name,
+ columns_name=None,
+ dtype=None,
+ minversion=None,
+ categorical_feature_names=None,
+):
+ """Convert a given container to a specific array-like with a dtype.
+
+ Parameters
+ ----------
+ container : array-like
+ The container to convert.
+ constructor_name : {"list", "tuple", "array", "sparse", "dataframe", \
+ "series", "index", "slice", "sparse_csr", "sparse_csc"}
+ The type of the returned container.
+ columns_name : index or array-like, default=None
+ For pandas container supporting `columns_names`, it will affect
+ specific names.
+ dtype : dtype, default=None
+ Force the dtype of the container. Does not apply to `"slice"`
+ container.
+ minversion : str, default=None
+ Minimum version for package to install.
+ categorical_feature_names : list of str, default=None
+ List of column names to cast to categorical dtype.
+
+ Returns
+ -------
+ converted_container
+ """
+ if constructor_name == "list":
+ if dtype is None:
+ return list(container)
+ else:
+ return np.asarray(container, dtype=dtype).tolist()
+ elif constructor_name == "tuple":
+ if dtype is None:
+ return tuple(container)
+ else:
+ return tuple(np.asarray(container, dtype=dtype).tolist())
+ elif constructor_name == "array":
+ return np.asarray(container, dtype=dtype)
+ elif constructor_name in ("pandas", "dataframe"):
+ pd = pytest.importorskip("pandas", minversion=minversion)
+ result = pd.DataFrame(container, columns=columns_name, dtype=dtype, copy=False)
+ if categorical_feature_names is not None:
+ for col_name in categorical_feature_names:
+ result[col_name] = result[col_name].astype("category")
+ return result
+ elif constructor_name == "pyarrow":
+ pa = pytest.importorskip("pyarrow", minversion=minversion)
+ array = np.asarray(container)
+ if columns_name is None:
+ columns_name = [f"col{i}" for i in range(array.shape[1])]
+ data = {name: array[:, i] for i, name in enumerate(columns_name)}
+ result = pa.Table.from_pydict(data)
+ if categorical_feature_names is not None:
+ for col_idx, col_name in enumerate(result.column_names):
+ if col_name in categorical_feature_names:
+ result = result.set_column(
+ col_idx, col_name, result.column(col_name).dictionary_encode()
+ )
+ return result
+ elif constructor_name == "polars":
+ pl = pytest.importorskip("polars", minversion=minversion)
+ result = pl.DataFrame(container, schema=columns_name, orient="row")
+ if categorical_feature_names is not None:
+ for col_name in categorical_feature_names:
+ result = result.with_columns(pl.col(col_name).cast(pl.Categorical))
+ return result
+ elif constructor_name == "series":
+ pd = pytest.importorskip("pandas", minversion=minversion)
+ return pd.Series(container, dtype=dtype)
+ elif constructor_name == "index":
+ pd = pytest.importorskip("pandas", minversion=minversion)
+ return pd.Index(container, dtype=dtype)
+ elif constructor_name == "slice":
+ return slice(container[0], container[1])
+ elif "sparse" in constructor_name:
+ if not sp.sparse.issparse(container):
+ # For scipy >= 1.13, sparse array constructed from 1d array may be
+ # 1d or raise an exception. To avoid this, we make sure that the
+ # input container is 2d. For more details, see
+ # https://github.com/scipy/scipy/pull/18530#issuecomment-1878005149
+ container = np.atleast_2d(container)
+
+ if "array" in constructor_name and sp_version < parse_version("1.8"):
+ raise ValueError(
+ f"{constructor_name} is only available with scipy>=1.8.0, got "
+ f"{sp_version}"
+ )
+ if constructor_name in ("sparse", "sparse_csr"):
+ # sparse and sparse_csr are equivalent for legacy reasons
+ return sp.sparse.csr_matrix(container, dtype=dtype)
+ elif constructor_name == "sparse_csr_array":
+ return sp.sparse.csr_array(container, dtype=dtype)
+ elif constructor_name == "sparse_csc":
+ return sp.sparse.csc_matrix(container, dtype=dtype)
+ elif constructor_name == "sparse_csc_array":
+ return sp.sparse.csc_array(container, dtype=dtype)
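+
+
+# A minimal usage sketch: the same data can be materialised as different
+# container types for parametrized tests.
+#
+#   >>> _convert_container([1, 2, 3], "tuple")
+#   (1, 2, 3)
+#   >>> _convert_container([[1, 0], [0, 1]], "sparse_csr").format
+#   'csr'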
+
+
+def raises(expected_exc_type, match=None, may_pass=False, err_msg=None):
+ """Context manager to ensure exceptions are raised within a code block.
+
+    This is similar to and inspired by pytest.raises, but supports a few
+ other cases.
+
+ This is only intended to be used in estimator_checks.py where we don't
+ want to use pytest. In the rest of the code base, just use pytest.raises
+ instead.
+
+ Parameters
+ ----------
+    expected_exc_type : Exception or list of Exception
+ The exception that should be raised by the block. If a list, the block
+ should raise one of the exceptions.
+ match : str or list of str, default=None
+ A regex that the exception message should match. If a list, one of
+ the entries must match. If None, match isn't enforced.
+ may_pass : bool, default=False
+ If True, the block is allowed to not raise an exception. Useful in
+ cases where some estimators may support a feature but others must
+ fail with an appropriate error message. By default, the context
+ manager will raise an exception if the block does not raise an
+ exception.
+ err_msg : str, default=None
+ If the context manager fails (e.g. the block fails to raise the
+ proper exception, or fails to match), then an AssertionError is
+ raised with this message. By default, an AssertionError is raised
+ with a default error message (depends on the kind of failure). Use
+ this to indicate how users should fix their estimators to pass the
+ checks.
+
+ Attributes
+ ----------
+ raised_and_matched : bool
+ True if an exception was raised and a match was found, False otherwise.
+ """
+ return _Raises(expected_exc_type, match, may_pass, err_msg)
+
+
+class _Raises(contextlib.AbstractContextManager):
+ # see raises() for parameters
+ def __init__(self, expected_exc_type, match, may_pass, err_msg):
+ self.expected_exc_types = (
+ expected_exc_type
+ if isinstance(expected_exc_type, Iterable)
+ else [expected_exc_type]
+ )
+ self.matches = [match] if isinstance(match, str) else match
+ self.may_pass = may_pass
+ self.err_msg = err_msg
+ self.raised_and_matched = False
+
+ def __exit__(self, exc_type, exc_value, _):
+ # see
+ # https://docs.python.org/2.5/whatsnew/pep-343.html#SECTION000910000000000000000
+
+ if exc_type is None: # No exception was raised in the block
+ if self.may_pass:
+ return True # CM is happy
+ else:
+ err_msg = self.err_msg or f"Did not raise: {self.expected_exc_types}"
+ raise AssertionError(err_msg)
+
+ if not any(
+ issubclass(exc_type, expected_type)
+ for expected_type in self.expected_exc_types
+ ):
+ if self.err_msg is not None:
+ raise AssertionError(self.err_msg) from exc_value
+ else:
+ return False # will re-raise the original exception
+
+ if self.matches is not None:
+ err_msg = self.err_msg or (
+ "The error message should contain one of the following "
+ "patterns:\n{}\nGot {}".format("\n".join(self.matches), str(exc_value))
+ )
+ if not any(re.search(match, str(exc_value)) for match in self.matches):
+ raise AssertionError(err_msg) from exc_value
+ self.raised_and_matched = True
+
+ return True
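+
+
+# A minimal usage sketch of raises(): the block below passes because the
+# expected exception is raised with a matching message, and `may_pass=True`
+# allows a block that raises nothing.
+#
+#   >>> with raises(ValueError, match="invalid"):
+#   ...     raise ValueError("invalid parameter")
+#   >>> with raises(TypeError, may_pass=True):
+#   ...     pass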
+
+
+class MinimalClassifier:
+ """Minimal classifier implementation with inheriting from BaseEstimator.
+
+ This estimator should be tested with:
+
+ * `check_estimator` in `test_estimator_checks.py`;
+ * within a `Pipeline` in `test_pipeline.py`;
+ * within a `SearchCV` in `test_search.py`.
+ """
+
+ _estimator_type = "classifier"
+
+ def __init__(self, param=None):
+ self.param = param
+
+ def get_params(self, deep=True):
+ return {"param": self.param}
+
+ def set_params(self, **params):
+ for key, value in params.items():
+ setattr(self, key, value)
+ return self
+
+ def fit(self, X, y):
+ X, y = check_X_y(X, y)
+ check_classification_targets(y)
+ self.classes_, counts = np.unique(y, return_counts=True)
+ self._most_frequent_class_idx = counts.argmax()
+ return self
+
+ def predict_proba(self, X):
+ check_is_fitted(self)
+ X = check_array(X)
+ proba_shape = (X.shape[0], self.classes_.size)
+ y_proba = np.zeros(shape=proba_shape, dtype=np.float64)
+ y_proba[:, self._most_frequent_class_idx] = 1.0
+ return y_proba
+
+ def predict(self, X):
+ y_proba = self.predict_proba(X)
+ y_pred = y_proba.argmax(axis=1)
+ return self.classes_[y_pred]
+
+ def score(self, X, y):
+ from sklearn.metrics import accuracy_score
+
+ return accuracy_score(y, self.predict(X))
+
+
+class MinimalRegressor:
+ """Minimal regressor implementation with inheriting from BaseEstimator.
+
+ This estimator should be tested with:
+
+ * `check_estimator` in `test_estimator_checks.py`;
+ * within a `Pipeline` in `test_pipeline.py`;
+ * within a `SearchCV` in `test_search.py`.
+ """
+
+ _estimator_type = "regressor"
+
+ def __init__(self, param=None):
+ self.param = param
+
+ def get_params(self, deep=True):
+ return {"param": self.param}
+
+ def set_params(self, **params):
+ for key, value in params.items():
+ setattr(self, key, value)
+ return self
+
+ def fit(self, X, y):
+ X, y = check_X_y(X, y)
+ self.is_fitted_ = True
+ self._mean = np.mean(y)
+ return self
+
+ def predict(self, X):
+ check_is_fitted(self)
+ X = check_array(X)
+ return np.ones(shape=(X.shape[0],)) * self._mean
+
+ def score(self, X, y):
+ from sklearn.metrics import r2_score
+
+ return r2_score(y, self.predict(X))
+
+
+class MinimalTransformer:
+ """Minimal transformer implementation with inheriting from
+ BaseEstimator.
+
+ This estimator should be tested with:
+
+ * `check_estimator` in `test_estimator_checks.py`;
+ * within a `Pipeline` in `test_pipeline.py`;
+ * within a `SearchCV` in `test_search.py`.
+ """
+
+ def __init__(self, param=None):
+ self.param = param
+
+ def get_params(self, deep=True):
+ return {"param": self.param}
+
+ def set_params(self, **params):
+ for key, value in params.items():
+ setattr(self, key, value)
+ return self
+
+ def fit(self, X, y=None):
+ check_array(X)
+ self.is_fitted_ = True
+ return self
+
+ def transform(self, X, y=None):
+ check_is_fitted(self)
+ X = check_array(X)
+ return X
+
+ def fit_transform(self, X, y=None):
+ return self.fit(X, y).transform(X, y)
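+
+
+# A minimal usage sketch (assuming make_pipeline and make_classification from
+# scikit-learn): the minimal estimators above are meant to be composable with
+# standard scikit-learn tools even though they do not inherit from
+# BaseEstimator.
+#
+#   >>> from sklearn.datasets import make_classification
+#   >>> from sklearn.pipeline import make_pipeline
+#   >>> X, y = make_classification(random_state=0)
+#   >>> pipe = make_pipeline(MinimalTransformer(), MinimalClassifier()).fit(X, y)
+#   >>> pipe.predict(X).shape
+#   (100,)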
+
+
+def _array_api_for_tests(array_namespace, device):
+ try:
+ if array_namespace == "numpy.array_api":
+ # FIXME: once it is not experimental anymore
+ with ignore_warnings(category=UserWarning):
+ # UserWarning: numpy.array_api submodule is still experimental.
+ array_mod = importlib.import_module(array_namespace)
+ else:
+ array_mod = importlib.import_module(array_namespace)
+ except ModuleNotFoundError:
+ raise SkipTest(
+ f"{array_namespace} is not installed: not checking array_api input"
+ )
+ try:
+ import array_api_compat # noqa
+ except ImportError:
+ raise SkipTest(
+ "array_api_compat is not installed: not checking array_api input"
+ )
+
+ # First create an array using the chosen array module and then get the
+ # corresponding (compatibility wrapped) array namespace based on it.
+ # This is because `cupy` is not the same as the compatibility wrapped
+ # namespace of a CuPy array.
+ xp = array_api_compat.get_namespace(array_mod.asarray(1))
+ if (
+ array_namespace == "torch"
+ and device == "cuda"
+ and not xp.backends.cuda.is_built()
+ ):
+ raise SkipTest("PyTorch test requires cuda, which is not available")
+ elif array_namespace == "torch" and device == "mps":
+ if os.getenv("PYTORCH_ENABLE_MPS_FALLBACK") != "1":
+ # For now we need PYTORCH_ENABLE_MPS_FALLBACK=1 for all estimators to work
+ # when using the MPS device.
+ raise SkipTest(
+ "Skipping MPS device test because PYTORCH_ENABLE_MPS_FALLBACK is not "
+ "set."
+ )
+ if not xp.backends.mps.is_built():
+ raise SkipTest(
+ "MPS is not available because the current PyTorch install was not "
+ "built with MPS enabled."
+ )
+ elif array_namespace in {"cupy", "cupy.array_api"}: # pragma: nocover
+ import cupy
+
+ if cupy.cuda.runtime.getDeviceCount() == 0:
+ raise SkipTest("CuPy test requires cuda, which is not available")
+ return xp
+
+
+def _get_warnings_filters_info_list():
+ @dataclass
+ class WarningInfo:
+ action: "warnings._ActionKind"
+ message: str = ""
+ category: type[Warning] = Warning
+
+ def to_filterwarning_str(self):
+ if self.category.__module__ == "builtins":
+ category = self.category.__name__
+ else:
+ category = f"{self.category.__module__}.{self.category.__name__}"
+
+ return f"{self.action}:{self.message}:{category}"
+
+ return [
+ WarningInfo("error", category=DeprecationWarning),
+ WarningInfo("error", category=FutureWarning),
+ WarningInfo("error", category=VisibleDeprecationWarning),
+ # TODO: remove when pyamg > 5.0.1
+        # Avoid a deprecation warning due to pkg_resources usage in pyamg.
+ WarningInfo(
+ "ignore",
+ message="pkg_resources is deprecated as an API",
+ category=DeprecationWarning,
+ ),
+ WarningInfo(
+ "ignore",
+ message="Deprecated call to `pkg_resources",
+ category=DeprecationWarning,
+ ),
+ # pytest-cov issue https://github.com/pytest-dev/pytest-cov/issues/557 not
+ # fixed although it has been closed. https://github.com/pytest-dev/pytest-cov/pull/623
+ # would probably fix it.
+ WarningInfo(
+ "ignore",
+ message=(
+ "The --rsyncdir command line argument and rsyncdirs config variable are"
+ " deprecated"
+ ),
+ category=DeprecationWarning,
+ ),
+ # XXX: Easiest way to ignore pandas Pyarrow DeprecationWarning in the
+ # short-term. See https://github.com/pandas-dev/pandas/issues/54466 for
+ # more details.
+ WarningInfo(
+ "ignore",
+ message=r"\s*Pyarrow will become a required dependency",
+ category=DeprecationWarning,
+ ),
+ ]
+
+
+def get_pytest_filterwarning_lines():
+ warning_filters_info_list = _get_warnings_filters_info_list()
+ return [
+ warning_info.to_filterwarning_str()
+ for warning_info in warning_filters_info_list
+ ]
+
+
+def turn_warnings_into_errors():
+ warnings_filters_info_list = _get_warnings_filters_info_list()
+ for warning_info in warnings_filters_info_list:
+ warnings.filterwarnings(
+ warning_info.action,
+ message=warning_info.message,
+ category=warning_info.category,
+ )
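+
+
+# Illustrative usage sketch (not part of the upstream module): the helpers
+# above can feed pytest configuration or the warnings module directly, e.g.:
+#
+#     # each entry is an "action:message:category" filterwarnings line,
+#     # such as "error::DeprecationWarning"
+#     for line in get_pytest_filterwarning_lines():
+#         print(line)
+#
+#     # or install the same filters on the global warnings machinery
+#     turn_warnings_into_errors()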
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_weight_vector.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_weight_vector.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..a3346298925be5b3aea88e6a3bed670925bb90d7
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_weight_vector.cpython-310-x86_64-linux-gnu.so differ
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/deprecation.py b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/deprecation.py
new file mode 100644
index 0000000000000000000000000000000000000000..839bac125109682db8c6619466562cc18761d05a
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/deprecation.py
@@ -0,0 +1,116 @@
+import functools
+import warnings
+
+__all__ = ["deprecated"]
+
+
+class deprecated:
+ """Decorator to mark a function or class as deprecated.
+
+    Issue a warning when the function is called/the class is instantiated and
+    add a warning to the docstring.
+
+ The optional extra argument will be appended to the deprecation message
+    and the docstring. Note: to use this with the default value for extra, put
+    in an empty pair of parentheses:
+
+ Examples
+ --------
+ >>> from sklearn.utils import deprecated
+    >>> deprecated()
+    <sklearn.utils.deprecation.deprecated object at ...>
+
+ >>> @deprecated()
+ ... def some_function(): pass
+
+ Parameters
+ ----------
+ extra : str, default=''
+ To be added to the deprecation messages.
+ """
+
+ # Adapted from https://wiki.python.org/moin/PythonDecoratorLibrary,
+ # but with many changes.
+
+ def __init__(self, extra=""):
+ self.extra = extra
+
+ def __call__(self, obj):
+ """Call method
+
+ Parameters
+ ----------
+ obj : object
+ """
+ if isinstance(obj, type):
+ return self._decorate_class(obj)
+ elif isinstance(obj, property):
+ # Note that this is only triggered properly if the `property`
+ # decorator comes before the `deprecated` decorator, like so:
+ #
+ # @deprecated(msg)
+ # @property
+ # def deprecated_attribute_(self):
+ # ...
+ return self._decorate_property(obj)
+ else:
+ return self._decorate_fun(obj)
+
+ def _decorate_class(self, cls):
+ msg = "Class %s is deprecated" % cls.__name__
+ if self.extra:
+ msg += "; %s" % self.extra
+
+ new = cls.__new__
+
+ def wrapped(cls, *args, **kwargs):
+ warnings.warn(msg, category=FutureWarning)
+ if new is object.__new__:
+ return object.__new__(cls)
+ return new(cls, *args, **kwargs)
+
+ cls.__new__ = wrapped
+
+ wrapped.__name__ = "__new__"
+ wrapped.deprecated_original = new
+
+ return cls
+
+ def _decorate_fun(self, fun):
+ """Decorate function fun"""
+
+ msg = "Function %s is deprecated" % fun.__name__
+ if self.extra:
+ msg += "; %s" % self.extra
+
+ @functools.wraps(fun)
+ def wrapped(*args, **kwargs):
+ warnings.warn(msg, category=FutureWarning)
+ return fun(*args, **kwargs)
+
+ # Add a reference to the wrapped function so that we can introspect
+ # on function arguments in Python 2 (already works in Python 3)
+ wrapped.__wrapped__ = fun
+
+ return wrapped
+
+ def _decorate_property(self, prop):
+ msg = self.extra
+
+ @property
+ @functools.wraps(prop)
+ def wrapped(*args, **kwargs):
+ warnings.warn(msg, category=FutureWarning)
+ return prop.fget(*args, **kwargs)
+
+ return wrapped
+
+
+def _is_deprecated(func):
+ """Helper to check if func is wrapped by our deprecated decorator"""
+ closures = getattr(func, "__closure__", [])
+ if closures is None:
+ closures = []
+ is_deprecated = "deprecated" in "".join(
+ [c.cell_contents for c in closures if isinstance(c.cell_contents, str)]
+ )
+ return is_deprecated
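+
+
+# Illustrative usage sketch (not part of the upstream module): decorating a
+# function makes every call emit a FutureWarning, e.g.:
+#
+#     import warnings
+#
+#     @deprecated("use new_function instead")
+#     def old_function():
+#         return 42
+#
+#     with warnings.catch_warnings(record=True) as caught:
+#         warnings.simplefilter("always")
+#         old_function()
+#     assert any(issubclass(w.category, FutureWarning) for w in caught)
+#     # message: "Function old_function is deprecated; use new_function instead"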
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/discovery.py b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/discovery.py
new file mode 100644
index 0000000000000000000000000000000000000000..c1fdca3beafb27ce74432ab1170ae718d94eb8ac
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/discovery.py
@@ -0,0 +1,265 @@
+"""
+The :mod:`sklearn.utils.discovery` module includes utilities to discover
+objects (i.e. estimators, displays, functions) from the `sklearn` package.
+"""
+
+import inspect
+import pkgutil
+from importlib import import_module
+from operator import itemgetter
+from pathlib import Path
+
+_MODULE_TO_IGNORE = {
+ "tests",
+ "externals",
+ "setup",
+ "conftest",
+ "experimental",
+ "estimator_checks",
+}
+
+
+def all_estimators(type_filter=None):
+ """Get a list of all estimators from `sklearn`.
+
+ This function crawls the module and gets all classes that inherit
+ from BaseEstimator. Classes that are defined in test-modules are not
+ included.
+
+ Parameters
+ ----------
+ type_filter : {"classifier", "regressor", "cluster", "transformer"} \
+ or list of such str, default=None
+ Which kind of estimators should be returned. If None, no filter is
+ applied and all estimators are returned. Possible values are
+ 'classifier', 'regressor', 'cluster' and 'transformer' to get
+ estimators only of these specific types, or a list of these to
+ get the estimators that fit at least one of the types.
+
+ Returns
+ -------
+ estimators : list of tuples
+ List of (name, class), where ``name`` is the class name as string
+ and ``class`` is the actual type of the class.
+
+ Examples
+ --------
+ >>> from sklearn.utils.discovery import all_estimators
+ >>> estimators = all_estimators()
+    >>> type(estimators)
+    <class 'list'>
+    >>> type(estimators[0])
+    <class 'tuple'>
+    >>> estimators[:2]
+    [('ARDRegression', <class 'sklearn.linear_model._bayes.ARDRegression'>),
+     ('AdaBoostClassifier',
+      <class 'sklearn.ensemble._weight_boosting.AdaBoostClassifier'>)]
+    >>> classifiers = all_estimators(type_filter="classifier")
+    >>> classifiers[:2]
+    [('AdaBoostClassifier',
+      <class 'sklearn.ensemble._weight_boosting.AdaBoostClassifier'>),
+     ('BaggingClassifier', <class 'sklearn.ensemble._bagging.BaggingClassifier'>)]
+    >>> regressors = all_estimators(type_filter="regressor")
+    >>> regressors[:2]
+    [('ARDRegression', <class 'sklearn.linear_model._bayes.ARDRegression'>),
+     ('AdaBoostRegressor',
+      <class 'sklearn.ensemble._weight_boosting.AdaBoostRegressor'>)]
+    >>> both = all_estimators(type_filter=["classifier", "regressor"])
+    >>> both[:2]
+    [('ARDRegression', <class 'sklearn.linear_model._bayes.ARDRegression'>),
+     ('AdaBoostClassifier',
+      <class 'sklearn.ensemble._weight_boosting.AdaBoostClassifier'>)]
+ """
+ # lazy import to avoid circular imports from sklearn.base
+ from ..base import (
+ BaseEstimator,
+ ClassifierMixin,
+ ClusterMixin,
+ RegressorMixin,
+ TransformerMixin,
+ )
+ from . import IS_PYPY
+ from ._testing import ignore_warnings
+
+ def is_abstract(c):
+ if not (hasattr(c, "__abstractmethods__")):
+ return False
+ if not len(c.__abstractmethods__):
+ return False
+ return True
+
+ all_classes = []
+ root = str(Path(__file__).parent.parent) # sklearn package
+ # Ignore deprecation warnings triggered at import time and from walking
+ # packages
+ with ignore_warnings(category=FutureWarning):
+ for _, module_name, _ in pkgutil.walk_packages(path=[root], prefix="sklearn."):
+ module_parts = module_name.split(".")
+ if (
+ any(part in _MODULE_TO_IGNORE for part in module_parts)
+ or "._" in module_name
+ ):
+ continue
+ module = import_module(module_name)
+ classes = inspect.getmembers(module, inspect.isclass)
+ classes = [
+ (name, est_cls) for name, est_cls in classes if not name.startswith("_")
+ ]
+
+ # TODO: Remove when FeatureHasher is implemented in PYPY
+ # Skips FeatureHasher for PYPY
+ if IS_PYPY and "feature_extraction" in module_name:
+ classes = [
+ (name, est_cls)
+ for name, est_cls in classes
+ if name == "FeatureHasher"
+ ]
+
+ all_classes.extend(classes)
+
+ all_classes = set(all_classes)
+
+ estimators = [
+ c
+ for c in all_classes
+ if (issubclass(c[1], BaseEstimator) and c[0] != "BaseEstimator")
+ ]
+ # get rid of abstract base classes
+ estimators = [c for c in estimators if not is_abstract(c[1])]
+
+ if type_filter is not None:
+ if not isinstance(type_filter, list):
+ type_filter = [type_filter]
+ else:
+ type_filter = list(type_filter) # copy
+ filtered_estimators = []
+ filters = {
+ "classifier": ClassifierMixin,
+ "regressor": RegressorMixin,
+ "transformer": TransformerMixin,
+ "cluster": ClusterMixin,
+ }
+ for name, mixin in filters.items():
+ if name in type_filter:
+ type_filter.remove(name)
+ filtered_estimators.extend(
+ [est for est in estimators if issubclass(est[1], mixin)]
+ )
+ estimators = filtered_estimators
+ if type_filter:
+ raise ValueError(
+ "Parameter type_filter must be 'classifier', "
+ "'regressor', 'transformer', 'cluster' or "
+ "None, got"
+ f" {repr(type_filter)}."
+ )
+
+ # drop duplicates, sort for reproducibility
+ # itemgetter is used to ensure the sort does not extend to the 2nd item of
+ # the tuple
+ return sorted(set(estimators), key=itemgetter(0))
+
+
+def all_displays():
+ """Get a list of all displays from `sklearn`.
+
+ Returns
+ -------
+ displays : list of tuples
+ List of (name, class), where ``name`` is the display class name as
+ string and ``class`` is the actual type of the class.
+
+ Examples
+ --------
+ >>> from sklearn.utils.discovery import all_displays
+ >>> displays = all_displays()
+ >>> displays[0]
+    ('CalibrationDisplay', <class 'sklearn.calibration.CalibrationDisplay'>)
+ """
+ # lazy import to avoid circular imports from sklearn.base
+ from ._testing import ignore_warnings
+
+ all_classes = []
+ root = str(Path(__file__).parent.parent) # sklearn package
+ # Ignore deprecation warnings triggered at import time and from walking
+ # packages
+ with ignore_warnings(category=FutureWarning):
+ for _, module_name, _ in pkgutil.walk_packages(path=[root], prefix="sklearn."):
+ module_parts = module_name.split(".")
+ if (
+ any(part in _MODULE_TO_IGNORE for part in module_parts)
+ or "._" in module_name
+ ):
+ continue
+ module = import_module(module_name)
+ classes = inspect.getmembers(module, inspect.isclass)
+ classes = [
+ (name, display_class)
+ for name, display_class in classes
+ if not name.startswith("_") and name.endswith("Display")
+ ]
+ all_classes.extend(classes)
+
+ return sorted(set(all_classes), key=itemgetter(0))
+
+
+def _is_checked_function(item):
+ if not inspect.isfunction(item):
+ return False
+
+ if item.__name__.startswith("_"):
+ return False
+
+ mod = item.__module__
+ if not mod.startswith("sklearn.") or mod.endswith("estimator_checks"):
+ return False
+
+ return True
+
+
+def all_functions():
+ """Get a list of all functions from `sklearn`.
+
+ Returns
+ -------
+ functions : list of tuples
+ List of (name, function), where ``name`` is the function name as
+ string and ``function`` is the actual function.
+
+ Examples
+ --------
+ >>> from sklearn.utils.discovery import all_functions
+ >>> functions = all_functions()
+ >>> name, function = functions[0]
+ >>> name
+ 'accuracy_score'
+ """
+ # lazy import to avoid circular imports from sklearn.base
+ from ._testing import ignore_warnings
+
+ all_functions = []
+ root = str(Path(__file__).parent.parent) # sklearn package
+ # Ignore deprecation warnings triggered at import time and from walking
+ # packages
+ with ignore_warnings(category=FutureWarning):
+ for _, module_name, _ in pkgutil.walk_packages(path=[root], prefix="sklearn."):
+ module_parts = module_name.split(".")
+ if (
+ any(part in _MODULE_TO_IGNORE for part in module_parts)
+ or "._" in module_name
+ ):
+ continue
+
+ module = import_module(module_name)
+ functions = inspect.getmembers(module, _is_checked_function)
+ functions = [
+ (func.__name__, func)
+ for name, func in functions
+ if not name.startswith("_")
+ ]
+ all_functions.extend(functions)
+
+ # drop duplicates, sort for reproducibility
+ # itemgetter is used to ensure the sort does not extend to the 2nd item of
+ # the tuple
+ return sorted(set(all_functions), key=itemgetter(0))
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/metadata_routing.py b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/metadata_routing.py
new file mode 100644
index 0000000000000000000000000000000000000000..bb98d2f08b93e4498d55f813c460fb4cfffe26fc
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/metadata_routing.py
@@ -0,0 +1,22 @@
+"""
+The :mod:`sklearn.utils.metadata_routing` module includes utilities to route
+metadata within scikit-learn estimators.
+"""
+
+# This module is not a separate sub-folder since that would result in a circular
+# import issue.
+#
+# Author: Adrin Jalali
+# License: BSD 3 clause
+
+from ._metadata_requests import WARN, UNUSED, UNCHANGED # noqa
+from ._metadata_requests import get_routing_for_object # noqa
+from ._metadata_requests import MetadataRouter # noqa
+from ._metadata_requests import MetadataRequest # noqa
+from ._metadata_requests import MethodMapping # noqa
+from ._metadata_requests import process_routing # noqa
+from ._metadata_requests import _MetadataRequester # noqa
+from ._metadata_requests import _routing_enabled # noqa
+from ._metadata_requests import _raise_for_params # noqa
+from ._metadata_requests import _RoutingNotSupportedMixin # noqa
+from ._metadata_requests import _raise_for_unsupported_routing # noqa
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/optimize.py b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/optimize.py
new file mode 100644
index 0000000000000000000000000000000000000000..024b0bcaf95ee7bfdb0cf67047b84d68dbe24849
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/optimize.py
@@ -0,0 +1,302 @@
+"""
+Our own implementation of the Newton algorithm
+
+Unlike the scipy.optimize version, this version of the Newton conjugate
+gradient solver uses only one function call to retrieve the
+func value, the gradient value and a callable for the Hessian matvec
+product. If the function call is very expensive (e.g. for logistic
+regression with large design matrix), this approach gives very
+significant speedups.
+"""
+# This is a modified file from scipy.optimize
+# Original authors: Travis Oliphant, Eric Jones
+# Modifications by Gael Varoquaux, Mathieu Blondel and Tom Dupre la Tour
+# License: BSD
+
+import warnings
+
+import numpy as np
+import scipy
+
+from ..exceptions import ConvergenceWarning
+from .fixes import line_search_wolfe1, line_search_wolfe2
+
+
+class _LineSearchError(RuntimeError):
+ pass
+
+
+def _line_search_wolfe12(f, fprime, xk, pk, gfk, old_fval, old_old_fval, **kwargs):
+ """
+    Same as line_search_wolfe1, but fall back to line_search_wolfe2 if a
+    suitable step length is not found, and raise an exception if neither
+    line search finds a suitable step length.
+
+ Raises
+ ------
+ _LineSearchError
+ If no suitable step size is found.
+
+ """
+ ret = line_search_wolfe1(f, fprime, xk, pk, gfk, old_fval, old_old_fval, **kwargs)
+
+ if ret[0] is None:
+        # Have a look at the line_search method of our NewtonSolver class; we borrow
+        # the logic from there to deal with relative loss differences around machine
+        # precision.
+ args = kwargs.get("args", tuple())
+ fval = f(xk + pk, *args)
+ eps = 16 * np.finfo(np.asarray(old_fval).dtype).eps
+ tiny_loss = np.abs(old_fval * eps)
+ loss_improvement = fval - old_fval
+ check = np.abs(loss_improvement) <= tiny_loss
+ if check:
+ # 2.1 Check sum of absolute gradients as alternative condition.
+ sum_abs_grad_old = scipy.linalg.norm(gfk, ord=1)
+ grad = fprime(xk + pk, *args)
+ sum_abs_grad = scipy.linalg.norm(grad, ord=1)
+ check = sum_abs_grad < sum_abs_grad_old
+ if check:
+ ret = (
+ 1.0, # step size
+ ret[1] + 1, # number of function evaluations
+ ret[2] + 1, # number of gradient evaluations
+ fval,
+ old_fval,
+ grad,
+ )
+
+ if ret[0] is None:
+ # line search failed: try different one.
+ # TODO: It seems that the new check for the sum of absolute gradients above
+ # catches all cases that, earlier, ended up here. In fact, our tests never
+        # trigger this "if branch" here, so we could consider removing it.
+ ret = line_search_wolfe2(
+ f, fprime, xk, pk, gfk, old_fval, old_old_fval, **kwargs
+ )
+
+ if ret[0] is None:
+ raise _LineSearchError()
+
+ return ret
+
+
+def _cg(fhess_p, fgrad, maxiter, tol):
+ """
+    Iteratively solve the linear system 'fhess_p . xsupi = -fgrad'
+    with the conjugate gradient method.
+
+ Parameters
+ ----------
+ fhess_p : callable
+        Function that takes a vector as a parameter and returns the
+        matrix product of the Hessian and that vector.
+
+ fgrad : ndarray of shape (n_features,) or (n_features + 1,)
+ Gradient vector.
+
+ maxiter : int
+ Number of CG iterations.
+
+ tol : float
+ Stopping criterion.
+
+ Returns
+ -------
+ xsupi : ndarray of shape (n_features,) or (n_features + 1,)
+ Estimated solution.
+ """
+ xsupi = np.zeros(len(fgrad), dtype=fgrad.dtype)
+ ri = np.copy(fgrad)
+ psupi = -ri
+ i = 0
+ dri0 = np.dot(ri, ri)
+    # We also keep track of |p_i|^2.
+ psupi_norm2 = dri0
+
+ while i <= maxiter:
+ if np.sum(np.abs(ri)) <= tol:
+ break
+
+ Ap = fhess_p(psupi)
+ # check curvature
+ curv = np.dot(psupi, Ap)
+ if 0 <= curv <= 16 * np.finfo(np.float64).eps * psupi_norm2:
+ # See https://arxiv.org/abs/1803.02924, Algo 1 Capped Conjugate Gradient.
+ break
+ elif curv < 0:
+ if i > 0:
+ break
+ else:
+ # fall back to steepest descent direction
+ xsupi += dri0 / curv * psupi
+ break
+ alphai = dri0 / curv
+ xsupi += alphai * psupi
+ ri += alphai * Ap
+ dri1 = np.dot(ri, ri)
+ betai = dri1 / dri0
+ psupi = -ri + betai * psupi
+ # We use |p_i|^2 = |r_i|^2 + beta_i^2 |p_{i-1}|^2
+ psupi_norm2 = dri1 + betai**2 * psupi_norm2
+ i = i + 1
+ dri0 = dri1 # update np.dot(ri,ri) for next time.
+
+ return xsupi
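+
+
+# Illustrative usage sketch (not part of the upstream module): _cg returns an
+# approximate solution of ``fhess_p(x) = -fgrad``, i.e. a Newton direction for
+# a quadratic model, e.g.:
+#
+#     A = np.array([[3.0, 1.0], [1.0, 2.0]])    # SPD Hessian
+#     g = np.array([1.0, -1.0])                 # gradient at the current point
+#     direction = _cg(lambda p: A @ p, g, maxiter=20, tol=1e-12)
+#     # direction is close to np.linalg.solve(A, -g)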
+
+
+def _newton_cg(
+ grad_hess,
+ func,
+ grad,
+ x0,
+ args=(),
+ tol=1e-4,
+ maxiter=100,
+ maxinner=200,
+ line_search=True,
+ warn=True,
+):
+ """
+ Minimization of scalar function of one or more variables using the
+ Newton-CG algorithm.
+
+ Parameters
+ ----------
+ grad_hess : callable
+ Should return the gradient and a callable returning the matvec product
+ of the Hessian.
+
+ func : callable
+ Should return the value of the function.
+
+ grad : callable
+        Should return the gradient of the function. This is used
+        by the line search functions.
+
+ x0 : array of float
+ Initial guess.
+
+ args : tuple, default=()
+        Arguments passed to grad_hess, func and grad.
+
+ tol : float, default=1e-4
+ Stopping criterion. The iteration will stop when
+ ``max{|g_i | i = 1, ..., n} <= tol``
+ where ``g_i`` is the i-th component of the gradient.
+
+ maxiter : int, default=100
+ Number of Newton iterations.
+
+ maxinner : int, default=200
+ Number of CG iterations.
+
+ line_search : bool, default=True
+ Whether to use a line search or not.
+
+ warn : bool, default=True
+        Whether to warn when the optimizer did not converge.
+
+ Returns
+ -------
+ xk : ndarray of float
+ Estimated minimum.
+ """
+ x0 = np.asarray(x0).flatten()
+ xk = np.copy(x0)
+ k = 0
+
+ if line_search:
+ old_fval = func(x0, *args)
+ old_old_fval = None
+
+ # Outer loop: our Newton iteration
+ while k < maxiter:
+ # Compute a search direction pk by applying the CG method to
+ # del2 f(xk) p = - fgrad f(xk) starting from 0.
+ fgrad, fhess_p = grad_hess(xk, *args)
+
+ absgrad = np.abs(fgrad)
+ if np.max(absgrad) <= tol:
+ break
+
+ maggrad = np.sum(absgrad)
+ eta = min([0.5, np.sqrt(maggrad)])
+ termcond = eta * maggrad
+
+ # Inner loop: solve the Newton update by conjugate gradient, to
+ # avoid inverting the Hessian
+ xsupi = _cg(fhess_p, fgrad, maxiter=maxinner, tol=termcond)
+
+ alphak = 1.0
+
+ if line_search:
+ try:
+ alphak, fc, gc, old_fval, old_old_fval, gfkp1 = _line_search_wolfe12(
+ func, grad, xk, xsupi, fgrad, old_fval, old_old_fval, args=args
+ )
+ except _LineSearchError:
+ warnings.warn("Line Search failed")
+ break
+
+ xk += alphak * xsupi # upcast if necessary
+ k += 1
+
+ if warn and k >= maxiter:
+ warnings.warn(
+ "newton-cg failed to converge. Increase the number of iterations.",
+ ConvergenceWarning,
+ )
+ return xk, k
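+
+
+# Illustrative usage sketch (not part of the upstream module): minimizing the
+# strictly convex quadratic f(x) = 0.5 * x @ A @ x - b @ x, whose minimum is
+# the solution of A x = b, e.g.:
+#
+#     A = np.array([[3.0, 1.0], [1.0, 2.0]])
+#     b = np.array([1.0, -1.0])
+#     func = lambda x: 0.5 * x @ A @ x - b @ x
+#     grad = lambda x: A @ x - b
+#     grad_hess = lambda x: (A @ x - b, lambda p: A @ p)
+#     xk, n_iter = _newton_cg(grad_hess, func, grad, x0=np.zeros(2))
+#     # xk is close to np.linalg.solve(A, b)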
+
+
+def _check_optimize_result(solver, result, max_iter=None, extra_warning_msg=None):
+ """Check the OptimizeResult for successful convergence
+
+ Parameters
+ ----------
+ solver : str
+ Solver name. Currently only `lbfgs` is supported.
+
+ result : OptimizeResult
+ Result of the scipy.optimize.minimize function.
+
+ max_iter : int, default=None
+ Expected maximum number of iterations.
+
+ extra_warning_msg : str, default=None
+ Extra warning message.
+
+ Returns
+ -------
+ n_iter : int
+ Number of iterations.
+ """
+ # handle both scipy and scikit-learn solver names
+ if solver == "lbfgs":
+ if result.status != 0:
+ try:
+ # The message is already decoded in scipy>=1.6.0
+ result_message = result.message.decode("latin1")
+ except AttributeError:
+ result_message = result.message
+ warning_msg = (
+ "{} failed to converge (status={}):\n{}.\n\n"
+ "Increase the number of iterations (max_iter) "
+ "or scale the data as shown in:\n"
+ " https://scikit-learn.org/stable/modules/"
+ "preprocessing.html"
+ ).format(solver, result.status, result_message)
+ if extra_warning_msg is not None:
+ warning_msg += "\n" + extra_warning_msg
+ warnings.warn(warning_msg, ConvergenceWarning, stacklevel=2)
+ if max_iter is not None:
+ # In scipy <= 1.0.0, nit may exceed maxiter for lbfgs.
+ # See https://github.com/scipy/scipy/issues/7854
+ n_iter_i = min(result.nit, max_iter)
+ else:
+ n_iter_i = result.nit
+ else:
+ raise NotImplementedError
+
+ return n_iter_i
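+
+
+# Illustrative usage sketch (not part of the upstream module): the helper
+# post-processes a scipy L-BFGS result, e.g.:
+#
+#     from scipy.optimize import minimize
+#
+#     res = minimize(lambda x: (x[0] - 3.0) ** 2, x0=[0.0], method="L-BFGS-B",
+#                    options={"maxiter": 5})
+#     n_iter = _check_optimize_result("lbfgs", res, max_iter=5)
+#     # n_iter is min(res.nit, 5); a ConvergenceWarning is emitted when
+#     # res.status != 0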
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/random.py b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/random.py
new file mode 100644
index 0000000000000000000000000000000000000000..1dfe8d83a94b354d86ad9c7e6049d9940f13ec00
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/random.py
@@ -0,0 +1,103 @@
+"""
+The :mod:`sklearn.utils.random` module includes utilities for random sampling.
+"""
+
+# Author: Hamzeh Alsalhi
+#
+# License: BSD 3 clause
+import array
+
+import numpy as np
+import scipy.sparse as sp
+
+from . import check_random_state
+from ._random import sample_without_replacement
+
+__all__ = ["sample_without_replacement"]
+
+
+def _random_choice_csc(n_samples, classes, class_probability=None, random_state=None):
+ """Generate a sparse random matrix given column class distributions
+
+ Parameters
+ ----------
+ n_samples : int,
+ Number of samples to draw in each column.
+
+ classes : list of size n_outputs of arrays of size (n_classes,)
+ List of classes for each column.
+
+ class_probability : list of size n_outputs of arrays of \
+ shape (n_classes,), default=None
+ Class distribution of each column. If None, uniform distribution is
+ assumed.
+
+ random_state : int, RandomState instance or None, default=None
+ Controls the randomness of the sampled classes.
+        See :term:`Glossary <random_state>`.
+
+ Returns
+ -------
+ random_matrix : sparse csc matrix of size (n_samples, n_outputs)
+
+ """
+ data = array.array("i")
+ indices = array.array("i")
+ indptr = array.array("i", [0])
+
+ for j in range(len(classes)):
+ classes[j] = np.asarray(classes[j])
+ if classes[j].dtype.kind != "i":
+ raise ValueError("class dtype %s is not supported" % classes[j].dtype)
+ classes[j] = classes[j].astype(np.int64, copy=False)
+
+ # use uniform distribution if no class_probability is given
+ if class_probability is None:
+ class_prob_j = np.empty(shape=classes[j].shape[0])
+ class_prob_j.fill(1 / classes[j].shape[0])
+ else:
+ class_prob_j = np.asarray(class_probability[j])
+
+ if not np.isclose(np.sum(class_prob_j), 1.0):
+ raise ValueError(
+ "Probability array at index {0} does not sum to one".format(j)
+ )
+
+ if class_prob_j.shape[0] != classes[j].shape[0]:
+ raise ValueError(
+ "classes[{0}] (length {1}) and "
+ "class_probability[{0}] (length {2}) have "
+ "different length.".format(
+ j, classes[j].shape[0], class_prob_j.shape[0]
+ )
+ )
+
+ # If 0 is not present in the classes insert it with a probability 0.0
+ if 0 not in classes[j]:
+ classes[j] = np.insert(classes[j], 0, 0)
+ class_prob_j = np.insert(class_prob_j, 0, 0.0)
+
+ # If there are nonzero classes choose randomly using class_probability
+ rng = check_random_state(random_state)
+ if classes[j].shape[0] > 1:
+ index_class_0 = np.flatnonzero(classes[j] == 0).item()
+ p_nonzero = 1 - class_prob_j[index_class_0]
+ nnz = int(n_samples * p_nonzero)
+ ind_sample = sample_without_replacement(
+ n_population=n_samples, n_samples=nnz, random_state=random_state
+ )
+ indices.extend(ind_sample)
+
+ # Normalize probabilities for the nonzero elements
+ classes_j_nonzero = classes[j] != 0
+ class_probability_nz = class_prob_j[classes_j_nonzero]
+ class_probability_nz_norm = class_probability_nz / np.sum(
+ class_probability_nz
+ )
+ classes_ind = np.searchsorted(
+ class_probability_nz_norm.cumsum(), rng.uniform(size=nnz)
+ )
+ data.extend(classes[j][classes_j_nonzero][classes_ind])
+ indptr.append(len(indices))
+
+ return sp.csc_matrix((data, indices, indptr), (n_samples, len(classes)), dtype=int)
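+
+
+# Illustrative usage sketch (not part of the upstream module): drawing one
+# output column with classes {0, 1, 2} and given probabilities, e.g.:
+#
+#     classes = [np.array([0, 1, 2])]
+#     probabilities = [np.array([0.5, 0.3, 0.2])]
+#     Y = _random_choice_csc(10, classes, probabilities, random_state=0)
+#     # Y is a (10, 1) CSC matrix; class 0 stays implicit, so only the rows
+#     # drawn as class 1 or 2 are stored explicitly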
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/sparsefuncs.py b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/sparsefuncs.py
new file mode 100644
index 0000000000000000000000000000000000000000..a46e9e4d9ed934139a1308df8336d957f14be99b
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/sparsefuncs.py
@@ -0,0 +1,745 @@
+"""
+The :mod:`sklearn.utils.sparsefuncs` module includes a collection of utilities to
+work with sparse matrices and arrays.
+"""
+
+# Authors: Manoj Kumar
+# Thomas Unterthiner
+# Giorgio Patrini
+#
+# License: BSD 3 clause
+import numpy as np
+import scipy.sparse as sp
+from scipy.sparse.linalg import LinearOperator
+
+from ..utils.fixes import _sparse_min_max, _sparse_nan_min_max
+from ..utils.validation import _check_sample_weight
+from .sparsefuncs_fast import (
+ csc_mean_variance_axis0 as _csc_mean_var_axis0,
+)
+from .sparsefuncs_fast import (
+ csr_mean_variance_axis0 as _csr_mean_var_axis0,
+)
+from .sparsefuncs_fast import (
+ incr_mean_variance_axis0 as _incr_mean_var_axis0,
+)
+
+
+def _raise_typeerror(X):
+ """Raises a TypeError if X is not a CSR or CSC matrix"""
+ input_type = X.format if sp.issparse(X) else type(X)
+ err = "Expected a CSR or CSC sparse matrix, got %s." % input_type
+ raise TypeError(err)
+
+
+def _raise_error_wrong_axis(axis):
+ if axis not in (0, 1):
+ raise ValueError(
+ "Unknown axis value: %d. Use 0 for rows, or 1 for columns" % axis
+ )
+
+
+def inplace_csr_column_scale(X, scale):
+ """Inplace column scaling of a CSR matrix.
+
+ Scale each feature of the data matrix by multiplying with specific scale
+ provided by the caller assuming a (n_samples, n_features) shape.
+
+ Parameters
+ ----------
+ X : sparse matrix of shape (n_samples, n_features)
+ Matrix to normalize using the variance of the features.
+ It should be of CSR format.
+
+ scale : ndarray of shape (n_features,), dtype={np.float32, np.float64}
+ Array of precomputed feature-wise values to use for scaling.
+
+ Examples
+ --------
+ >>> from sklearn.utils import sparsefuncs
+ >>> from scipy import sparse
+ >>> import numpy as np
+ >>> indptr = np.array([0, 3, 4, 4, 4])
+ >>> indices = np.array([0, 1, 2, 2])
+ >>> data = np.array([8, 1, 2, 5])
+ >>> scale = np.array([2, 3, 2])
+ >>> csr = sparse.csr_matrix((data, indices, indptr))
+ >>> csr.todense()
+ matrix([[8, 1, 2],
+ [0, 0, 5],
+ [0, 0, 0],
+ [0, 0, 0]])
+ >>> sparsefuncs.inplace_csr_column_scale(csr, scale)
+ >>> csr.todense()
+ matrix([[16, 3, 4],
+ [ 0, 0, 10],
+ [ 0, 0, 0],
+ [ 0, 0, 0]])
+ """
+ assert scale.shape[0] == X.shape[1]
+ X.data *= scale.take(X.indices, mode="clip")
+
+
+def inplace_csr_row_scale(X, scale):
+ """Inplace row scaling of a CSR matrix.
+
+ Scale each sample of the data matrix by multiplying with specific scale
+ provided by the caller assuming a (n_samples, n_features) shape.
+
+ Parameters
+ ----------
+ X : sparse matrix of shape (n_samples, n_features)
+ Matrix to be scaled. It should be of CSR format.
+
+ scale : ndarray of float of shape (n_samples,)
+ Array of precomputed sample-wise values to use for scaling.
+ """
+ assert scale.shape[0] == X.shape[0]
+ X.data *= np.repeat(scale, np.diff(X.indptr))
+
+
+def mean_variance_axis(X, axis, weights=None, return_sum_weights=False):
+ """Compute mean and variance along an axis on a CSR or CSC matrix.
+
+ Parameters
+ ----------
+ X : sparse matrix of shape (n_samples, n_features)
+ Input data. It can be of CSR or CSC format.
+
+ axis : {0, 1}
+        Axis along which the mean and variance are computed.
+
+ weights : ndarray of shape (n_samples,) or (n_features,), default=None
+ If axis is set to 0 shape is (n_samples,) or
+ if axis is set to 1 shape is (n_features,).
+ If it is set to None, then samples are equally weighted.
+
+ .. versionadded:: 0.24
+
+ return_sum_weights : bool, default=False
+ If True, returns the sum of weights seen for each feature
+ if `axis=0` or each sample if `axis=1`.
+
+ .. versionadded:: 0.24
+
+ Returns
+ -------
+
+ means : ndarray of shape (n_features,), dtype=floating
+ Feature-wise means.
+
+ variances : ndarray of shape (n_features,), dtype=floating
+ Feature-wise variances.
+
+ sum_weights : ndarray of shape (n_features,), dtype=floating
+ Returned if `return_sum_weights` is `True`.
+
+ Examples
+ --------
+ >>> from sklearn.utils import sparsefuncs
+ >>> from scipy import sparse
+ >>> import numpy as np
+ >>> indptr = np.array([0, 3, 4, 4, 4])
+ >>> indices = np.array([0, 1, 2, 2])
+ >>> data = np.array([8, 1, 2, 5])
+ >>> scale = np.array([2, 3, 2])
+ >>> csr = sparse.csr_matrix((data, indices, indptr))
+ >>> csr.todense()
+ matrix([[8, 1, 2],
+ [0, 0, 5],
+ [0, 0, 0],
+ [0, 0, 0]])
+ >>> sparsefuncs.mean_variance_axis(csr, axis=0)
+ (array([2. , 0.25, 1.75]), array([12. , 0.1875, 4.1875]))
+ """
+ _raise_error_wrong_axis(axis)
+
+ if sp.issparse(X) and X.format == "csr":
+ if axis == 0:
+ return _csr_mean_var_axis0(
+ X, weights=weights, return_sum_weights=return_sum_weights
+ )
+ else:
+ return _csc_mean_var_axis0(
+ X.T, weights=weights, return_sum_weights=return_sum_weights
+ )
+ elif sp.issparse(X) and X.format == "csc":
+ if axis == 0:
+ return _csc_mean_var_axis0(
+ X, weights=weights, return_sum_weights=return_sum_weights
+ )
+ else:
+ return _csr_mean_var_axis0(
+ X.T, weights=weights, return_sum_weights=return_sum_weights
+ )
+ else:
+ _raise_typeerror(X)
+
+
+def incr_mean_variance_axis(X, *, axis, last_mean, last_var, last_n, weights=None):
+ """Compute incremental mean and variance along an axis on a CSR or CSC matrix.
+
+ last_mean, last_var are the statistics computed at the last step by this
+ function. Both must be initialized to 0-arrays of the proper size, i.e.
+ the number of features in X. last_n is the number of samples encountered
+ until now.
+
+ Parameters
+ ----------
+ X : CSR or CSC sparse matrix of shape (n_samples, n_features)
+ Input data.
+
+ axis : {0, 1}
+        Axis along which the mean and variance are computed.
+
+ last_mean : ndarray of shape (n_features,) or (n_samples,), dtype=floating
+ Array of means to update with the new data X.
+ Should be of shape (n_features,) if axis=0 or (n_samples,) if axis=1.
+
+ last_var : ndarray of shape (n_features,) or (n_samples,), dtype=floating
+ Array of variances to update with the new data X.
+ Should be of shape (n_features,) if axis=0 or (n_samples,) if axis=1.
+
+ last_n : float or ndarray of shape (n_features,) or (n_samples,), \
+ dtype=floating
+        Sum of the weights seen so far, excluding the current weights.
+ If not float, it should be of shape (n_features,) if
+ axis=0 or (n_samples,) if axis=1. If float it corresponds to
+ having same weights for all samples (or features).
+
+ weights : ndarray of shape (n_samples,) or (n_features,), default=None
+ If axis is set to 0 shape is (n_samples,) or
+ if axis is set to 1 shape is (n_features,).
+ If it is set to None, then samples are equally weighted.
+
+ .. versionadded:: 0.24
+
+ Returns
+ -------
+ means : ndarray of shape (n_features,) or (n_samples,), dtype=floating
+ Updated feature-wise means if axis = 0 or
+ sample-wise means if axis = 1.
+
+ variances : ndarray of shape (n_features,) or (n_samples,), dtype=floating
+ Updated feature-wise variances if axis = 0 or
+ sample-wise variances if axis = 1.
+
+ n : ndarray of shape (n_features,) or (n_samples,), dtype=integral
+ Updated number of seen samples per feature if axis=0
+ or number of seen features per sample if axis=1.
+
+ If weights is not None, n is a sum of the weights of the seen
+ samples or features instead of the actual number of seen
+ samples or features.
+
+ Notes
+ -----
+ NaNs are ignored in the algorithm.
+
+ Examples
+ --------
+ >>> from sklearn.utils import sparsefuncs
+ >>> from scipy import sparse
+ >>> import numpy as np
+ >>> indptr = np.array([0, 3, 4, 4, 4])
+ >>> indices = np.array([0, 1, 2, 2])
+ >>> data = np.array([8, 1, 2, 5])
+ >>> scale = np.array([2, 3, 2])
+ >>> csr = sparse.csr_matrix((data, indices, indptr))
+ >>> csr.todense()
+ matrix([[8, 1, 2],
+ [0, 0, 5],
+ [0, 0, 0],
+ [0, 0, 0]])
+ >>> sparsefuncs.incr_mean_variance_axis(
+ ... csr, axis=0, last_mean=np.zeros(3), last_var=np.zeros(3), last_n=2
+ ... )
+ (array([1.3..., 0.1..., 1.1...]), array([8.8..., 0.1..., 3.4...]),
+ array([6., 6., 6.]))
+ """
+ _raise_error_wrong_axis(axis)
+
+ if not (sp.issparse(X) and X.format in ("csc", "csr")):
+ _raise_typeerror(X)
+
+ if np.size(last_n) == 1:
+ last_n = np.full(last_mean.shape, last_n, dtype=last_mean.dtype)
+
+ if not (np.size(last_mean) == np.size(last_var) == np.size(last_n)):
+ raise ValueError("last_mean, last_var, last_n do not have the same shapes.")
+
+ if axis == 1:
+ if np.size(last_mean) != X.shape[0]:
+ raise ValueError(
+ "If axis=1, then last_mean, last_n, last_var should be of "
+ f"size n_samples {X.shape[0]} (Got {np.size(last_mean)})."
+ )
+ else: # axis == 0
+ if np.size(last_mean) != X.shape[1]:
+ raise ValueError(
+ "If axis=0, then last_mean, last_n, last_var should be of "
+ f"size n_features {X.shape[1]} (Got {np.size(last_mean)})."
+ )
+
+ X = X.T if axis == 1 else X
+
+ if weights is not None:
+ weights = _check_sample_weight(weights, X, dtype=X.dtype)
+
+ return _incr_mean_var_axis0(
+ X, last_mean=last_mean, last_var=last_var, last_n=last_n, weights=weights
+ )
+
+
+def inplace_column_scale(X, scale):
+ """Inplace column scaling of a CSC/CSR matrix.
+
+ Scale each feature of the data matrix by multiplying with specific scale
+ provided by the caller assuming a (n_samples, n_features) shape.
+
+ Parameters
+ ----------
+ X : sparse matrix of shape (n_samples, n_features)
+ Matrix to normalize using the variance of the features. It should be
+ of CSC or CSR format.
+
+ scale : ndarray of shape (n_features,), dtype={np.float32, np.float64}
+ Array of precomputed feature-wise values to use for scaling.
+
+ Examples
+ --------
+ >>> from sklearn.utils import sparsefuncs
+ >>> from scipy import sparse
+ >>> import numpy as np
+ >>> indptr = np.array([0, 3, 4, 4, 4])
+ >>> indices = np.array([0, 1, 2, 2])
+ >>> data = np.array([8, 1, 2, 5])
+ >>> scale = np.array([2, 3, 2])
+ >>> csr = sparse.csr_matrix((data, indices, indptr))
+ >>> csr.todense()
+ matrix([[8, 1, 2],
+ [0, 0, 5],
+ [0, 0, 0],
+ [0, 0, 0]])
+ >>> sparsefuncs.inplace_column_scale(csr, scale)
+ >>> csr.todense()
+ matrix([[16, 3, 4],
+ [ 0, 0, 10],
+ [ 0, 0, 0],
+ [ 0, 0, 0]])
+ """
+ if sp.issparse(X) and X.format == "csc":
+ inplace_csr_row_scale(X.T, scale)
+ elif sp.issparse(X) and X.format == "csr":
+ inplace_csr_column_scale(X, scale)
+ else:
+ _raise_typeerror(X)
+
+
+def inplace_row_scale(X, scale):
+ """Inplace row scaling of a CSR or CSC matrix.
+
+ Scale each row of the data matrix by multiplying with specific scale
+ provided by the caller assuming a (n_samples, n_features) shape.
+
+ Parameters
+ ----------
+ X : sparse matrix of shape (n_samples, n_features)
+ Matrix to be scaled. It should be of CSR or CSC format.
+
+    scale : ndarray of shape (n_samples,), dtype={np.float32, np.float64}
+ Array of precomputed sample-wise values to use for scaling.
+
+ Examples
+ --------
+ >>> from sklearn.utils import sparsefuncs
+ >>> from scipy import sparse
+ >>> import numpy as np
+ >>> indptr = np.array([0, 2, 3, 4, 5])
+ >>> indices = np.array([0, 1, 2, 3, 3])
+ >>> data = np.array([8, 1, 2, 5, 6])
+ >>> scale = np.array([2, 3, 4, 5])
+ >>> csr = sparse.csr_matrix((data, indices, indptr))
+ >>> csr.todense()
+ matrix([[8, 1, 0, 0],
+ [0, 0, 2, 0],
+ [0, 0, 0, 5],
+ [0, 0, 0, 6]])
+ >>> sparsefuncs.inplace_row_scale(csr, scale)
+ >>> csr.todense()
+ matrix([[16, 2, 0, 0],
+ [ 0, 0, 6, 0],
+ [ 0, 0, 0, 20],
+ [ 0, 0, 0, 30]])
+ """
+ if sp.issparse(X) and X.format == "csc":
+ inplace_csr_column_scale(X.T, scale)
+ elif sp.issparse(X) and X.format == "csr":
+ inplace_csr_row_scale(X, scale)
+ else:
+ _raise_typeerror(X)
+
+
+def inplace_swap_row_csc(X, m, n):
+ """Swap two rows of a CSC matrix in-place.
+
+ Parameters
+ ----------
+ X : sparse matrix of shape (n_samples, n_features)
+ Matrix whose two rows are to be swapped. It should be of
+ CSC format.
+
+ m : int
+ Index of the row of X to be swapped.
+
+ n : int
+ Index of the row of X to be swapped.
+ """
+ for t in [m, n]:
+ if isinstance(t, np.ndarray):
+ raise TypeError("m and n should be valid integers")
+
+ if m < 0:
+ m += X.shape[0]
+ if n < 0:
+ n += X.shape[0]
+
+ m_mask = X.indices == m
+ X.indices[X.indices == n] = m
+ X.indices[m_mask] = n
+
+
+def inplace_swap_row_csr(X, m, n):
+ """Swap two rows of a CSR matrix in-place.
+
+ Parameters
+ ----------
+ X : sparse matrix of shape (n_samples, n_features)
+ Matrix whose two rows are to be swapped. It should be of
+ CSR format.
+
+ m : int
+ Index of the row of X to be swapped.
+
+ n : int
+ Index of the row of X to be swapped.
+ """
+ for t in [m, n]:
+ if isinstance(t, np.ndarray):
+ raise TypeError("m and n should be valid integers")
+
+ if m < 0:
+ m += X.shape[0]
+ if n < 0:
+ n += X.shape[0]
+
+ # The following swapping makes life easier since m is assumed to be the
+ # smaller integer below.
+ if m > n:
+ m, n = n, m
+
+ indptr = X.indptr
+ m_start = indptr[m]
+ m_stop = indptr[m + 1]
+ n_start = indptr[n]
+ n_stop = indptr[n + 1]
+ nz_m = m_stop - m_start
+ nz_n = n_stop - n_start
+
+ if nz_m != nz_n:
+ # Modify indptr first
+ X.indptr[m + 2 : n] += nz_n - nz_m
+ X.indptr[m + 1] = m_start + nz_n
+ X.indptr[n] = n_stop - nz_m
+
+ X.indices = np.concatenate(
+ [
+ X.indices[:m_start],
+ X.indices[n_start:n_stop],
+ X.indices[m_stop:n_start],
+ X.indices[m_start:m_stop],
+ X.indices[n_stop:],
+ ]
+ )
+ X.data = np.concatenate(
+ [
+ X.data[:m_start],
+ X.data[n_start:n_stop],
+ X.data[m_stop:n_start],
+ X.data[m_start:m_stop],
+ X.data[n_stop:],
+ ]
+ )
+
+
+def inplace_swap_row(X, m, n):
+ """
+ Swap two rows of a CSC/CSR matrix in-place.
+
+ Parameters
+ ----------
+ X : sparse matrix of shape (n_samples, n_features)
+ Matrix whose two rows are to be swapped. It should be of CSR or
+ CSC format.
+
+ m : int
+ Index of the row of X to be swapped.
+
+ n : int
+ Index of the row of X to be swapped.
+
+ Examples
+ --------
+ >>> from sklearn.utils import sparsefuncs
+ >>> from scipy import sparse
+ >>> import numpy as np
+ >>> indptr = np.array([0, 2, 3, 3, 3])
+ >>> indices = np.array([0, 2, 2])
+ >>> data = np.array([8, 2, 5])
+ >>> csr = sparse.csr_matrix((data, indices, indptr))
+ >>> csr.todense()
+ matrix([[8, 0, 2],
+ [0, 0, 5],
+ [0, 0, 0],
+ [0, 0, 0]])
+ >>> sparsefuncs.inplace_swap_row(csr, 0, 1)
+ >>> csr.todense()
+ matrix([[0, 0, 5],
+ [8, 0, 2],
+ [0, 0, 0],
+ [0, 0, 0]])
+ """
+ if sp.issparse(X) and X.format == "csc":
+ inplace_swap_row_csc(X, m, n)
+ elif sp.issparse(X) and X.format == "csr":
+ inplace_swap_row_csr(X, m, n)
+ else:
+ _raise_typeerror(X)
+
+
+def inplace_swap_column(X, m, n):
+ """
+ Swap two columns of a CSC/CSR matrix in-place.
+
+ Parameters
+ ----------
+ X : sparse matrix of shape (n_samples, n_features)
+ Matrix whose two columns are to be swapped. It should be of
+ CSR or CSC format.
+
+ m : int
+ Index of the column of X to be swapped.
+
+ n : int
+ Index of the column of X to be swapped.
+
+ Examples
+ --------
+ >>> from sklearn.utils import sparsefuncs
+ >>> from scipy import sparse
+ >>> import numpy as np
+ >>> indptr = np.array([0, 2, 3, 3, 3])
+ >>> indices = np.array([0, 2, 2])
+ >>> data = np.array([8, 2, 5])
+ >>> csr = sparse.csr_matrix((data, indices, indptr))
+ >>> csr.todense()
+ matrix([[8, 0, 2],
+ [0, 0, 5],
+ [0, 0, 0],
+ [0, 0, 0]])
+ >>> sparsefuncs.inplace_swap_column(csr, 0, 1)
+ >>> csr.todense()
+ matrix([[0, 8, 2],
+ [0, 0, 5],
+ [0, 0, 0],
+ [0, 0, 0]])
+ """
+ if m < 0:
+ m += X.shape[1]
+ if n < 0:
+ n += X.shape[1]
+ if sp.issparse(X) and X.format == "csc":
+ inplace_swap_row_csr(X, m, n)
+ elif sp.issparse(X) and X.format == "csr":
+ inplace_swap_row_csc(X, m, n)
+ else:
+ _raise_typeerror(X)
+
+
+def min_max_axis(X, axis, ignore_nan=False):
+ """Compute minimum and maximum along an axis on a CSR or CSC matrix.
+
+ Optionally ignore NaN values.
+
+ Parameters
+ ----------
+ X : sparse matrix of shape (n_samples, n_features)
+ Input data. It should be of CSR or CSC format.
+
+ axis : {0, 1}
+        Axis along which the minima and maxima are computed.
+
+ ignore_nan : bool, default=False
+        Whether to ignore NaN values or pass them through.
+
+ .. versionadded:: 0.20
+
+ Returns
+ -------
+
+ mins : ndarray of shape (n_features,), dtype={np.float32, np.float64}
+ Feature-wise minima.
+
+ maxs : ndarray of shape (n_features,), dtype={np.float32, np.float64}
+ Feature-wise maxima.
+ """
+ if sp.issparse(X) and X.format in ("csr", "csc"):
+ if ignore_nan:
+ return _sparse_nan_min_max(X, axis=axis)
+ else:
+ return _sparse_min_max(X, axis=axis)
+ else:
+ _raise_typeerror(X)
+
+
+def count_nonzero(X, axis=None, sample_weight=None):
+ """A variant of X.getnnz() with extension to weighting on axis 0.
+
+ Useful in efficiently calculating multilabel metrics.
+
+ Parameters
+ ----------
+ X : sparse matrix of shape (n_samples, n_labels)
+ Input data. It should be of CSR format.
+
+ axis : {0, 1}, default=None
+ The axis on which the data is aggregated.
+
+ sample_weight : array-like of shape (n_samples,), default=None
+ Weight for each row of X.
+
+ Returns
+ -------
+ nnz : int, float, ndarray of shape (n_samples,) or ndarray of shape (n_features,)
+ Number of non-zero values in the array along a given axis. Otherwise,
+ the total number of non-zero values in the array is returned.
+ """
+ if axis == -1:
+ axis = 1
+ elif axis == -2:
+ axis = 0
+ elif X.format != "csr":
+ raise TypeError("Expected CSR sparse format, got {0}".format(X.format))
+
+ # We rely here on the fact that np.diff(Y.indptr) for a CSR
+ # will return the number of nonzero entries in each row.
+ # A bincount over Y.indices will return the number of nonzeros
+ # in each column. See ``csr_matrix.getnnz`` in scipy >= 0.14.
+ if axis is None:
+ if sample_weight is None:
+ return X.nnz
+ else:
+ return np.dot(np.diff(X.indptr), sample_weight)
+ elif axis == 1:
+ out = np.diff(X.indptr)
+ if sample_weight is None:
+ # astype here is for consistency with axis=0 dtype
+ return out.astype("intp")
+ return out * sample_weight
+ elif axis == 0:
+ if sample_weight is None:
+ return np.bincount(X.indices, minlength=X.shape[1])
+ else:
+ weights = np.repeat(sample_weight, np.diff(X.indptr))
+ return np.bincount(X.indices, minlength=X.shape[1], weights=weights)
+ else:
+ raise ValueError("Unsupported axis: {0}".format(axis))
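+
+
+# Illustrative usage sketch (not part of the upstream module): with sample
+# weights the counts become weighted sums over the rows, e.g.:
+#
+#     X = sp.csr_matrix(np.array([[1, 0], [2, 3], [0, 4]]))
+#     w = np.array([1.0, 2.0, 3.0])
+#     count_nonzero(X, axis=0, sample_weight=w)   # array([3., 5.])
+#     count_nonzero(X, axis=1, sample_weight=w)   # array([1., 4., 3.])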
+
+
+def _get_median(data, n_zeros):
+ """Compute the median of data with n_zeros additional zeros.
+
+ This function is used to support sparse matrices; it modifies data
+ in-place.
+ """
+ n_elems = len(data) + n_zeros
+ if not n_elems:
+ return np.nan
+ n_negative = np.count_nonzero(data < 0)
+ middle, is_odd = divmod(n_elems, 2)
+ data.sort()
+
+ if is_odd:
+ return _get_elem_at_rank(middle, data, n_negative, n_zeros)
+
+ return (
+ _get_elem_at_rank(middle - 1, data, n_negative, n_zeros)
+ + _get_elem_at_rank(middle, data, n_negative, n_zeros)
+ ) / 2.0
+
+
+def _get_elem_at_rank(rank, data, n_negative, n_zeros):
+ """Find the value in data augmented with n_zeros for the given rank"""
+ if rank < n_negative:
+ return data[rank]
+ if rank - n_negative < n_zeros:
+ return 0
+ return data[rank - n_zeros]
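+
+
+# Illustrative usage sketch (not part of the upstream module): the implicit
+# zeros participate in the ranking, e.g.:
+#
+#     data = np.array([1.0, 5.0])      # explicit (non-zero) column entries
+#     _get_median(data, n_zeros=2)     # median of [0, 0, 1, 5] -> 0.5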
+
+
+def csc_median_axis_0(X):
+ """Find the median across axis 0 of a CSC matrix.
+
+ It is equivalent to doing np.median(X, axis=0).
+
+ Parameters
+ ----------
+ X : sparse matrix of shape (n_samples, n_features)
+ Input data. It should be of CSC format.
+
+ Returns
+ -------
+ median : ndarray of shape (n_features,)
+ Median.
+ """
+ if not (sp.issparse(X) and X.format == "csc"):
+ raise TypeError("Expected matrix of CSC format, got %s" % X.format)
+
+ indptr = X.indptr
+ n_samples, n_features = X.shape
+ median = np.zeros(n_features)
+
+ for f_ind, (start, end) in enumerate(zip(indptr[:-1], indptr[1:])):
+ # Prevent modifying X in place
+ data = np.copy(X.data[start:end])
+ nz = n_samples - data.size
+ median[f_ind] = _get_median(data, nz)
+
+ return median
+
+
+def _implicit_column_offset(X, offset):
+ """Create an implicitly offset linear operator.
+
+ This is used by PCA on sparse data to avoid densifying the whole data
+ matrix.
+
+    Parameters
+    ----------
+ X : sparse matrix of shape (n_samples, n_features)
+ offset : ndarray of shape (n_features,)
+
+ Returns
+ -------
+ centered : LinearOperator
+ """
+ offset = offset[None, :]
+ XT = X.T
+ return LinearOperator(
+ matvec=lambda x: X @ x - offset @ x,
+ matmat=lambda x: X @ x - offset @ x,
+ rmatvec=lambda x: XT @ x - (offset * x.sum()),
+ rmatmat=lambda x: XT @ x - offset.T @ x.sum(axis=0)[None, :],
+ dtype=X.dtype,
+ shape=X.shape,
+ )
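+
+
+# Illustrative usage sketch (not part of the upstream module): the operator
+# acts like the densified, column-centered matrix without materializing it,
+# e.g.:
+#
+#     X = sp.random(50, 4, density=0.3, format="csr", random_state=0)
+#     offset = np.asarray(X.mean(axis=0)).ravel()
+#     op = _implicit_column_offset(X, offset)
+#     v = np.random.RandomState(0).uniform(size=4)
+#     np.allclose(op @ v, (X.toarray() - offset) @ v)   # True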
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/stats.py b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/stats.py
new file mode 100644
index 0000000000000000000000000000000000000000..d0e22ea3694f47a89f4c55f9da1dafdc9f54b815
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/stats.py
@@ -0,0 +1,69 @@
+import numpy as np
+
+from .extmath import stable_cumsum
+
+
+def _weighted_percentile(array, sample_weight, percentile=50):
+ """Compute weighted percentile
+
+ Computes lower weighted percentile. If `array` is a 2D array, the
+    `percentile` is computed along axis 0.
+
+ .. versionchanged:: 0.24
+ Accepts 2D `array`.
+
+ Parameters
+ ----------
+ array : 1D or 2D array
+ Values to take the weighted percentile of.
+
+    sample_weight : 1D or 2D array
+ Weights for each value in `array`. Must be same shape as `array` or
+ of shape `(array.shape[0],)`.
+
+    percentile : int or float, default=50
+ Percentile to compute. Must be value between 0 and 100.
+
+ Returns
+ -------
+ percentile : int if `array` 1D, ndarray if `array` 2D
+ Weighted percentile.
+ """
+ n_dim = array.ndim
+ if n_dim == 0:
+ return array[()]
+ if array.ndim == 1:
+ array = array.reshape((-1, 1))
+ # When sample_weight 1D, repeat for each array.shape[1]
+ if array.shape != sample_weight.shape and array.shape[0] == sample_weight.shape[0]:
+ sample_weight = np.tile(sample_weight, (array.shape[1], 1)).T
+ sorted_idx = np.argsort(array, axis=0)
+ sorted_weights = np.take_along_axis(sample_weight, sorted_idx, axis=0)
+
+ # Find index of median prediction for each sample
+ weight_cdf = stable_cumsum(sorted_weights, axis=0)
+ adjusted_percentile = percentile / 100 * weight_cdf[-1]
+
+ # For percentile=0, ignore leading observations with sample_weight=0. GH20528
+ mask = adjusted_percentile == 0
+ adjusted_percentile[mask] = np.nextafter(
+ adjusted_percentile[mask], adjusted_percentile[mask] + 1
+ )
+
+ percentile_idx = np.array(
+ [
+ np.searchsorted(weight_cdf[:, i], adjusted_percentile[i])
+ for i in range(weight_cdf.shape[1])
+ ]
+ )
+ percentile_idx = np.array(percentile_idx)
+    # In rare cases, percentile_idx equals sorted_idx.shape[0].
+ max_idx = sorted_idx.shape[0] - 1
+ percentile_idx = np.apply_along_axis(
+ lambda x: np.clip(x, 0, max_idx), axis=0, arr=percentile_idx
+ )
+
+ col_index = np.arange(array.shape[1])
+ percentile_in_sorted = sorted_idx[percentile_idx, col_index]
+ percentile = array[percentile_in_sorted, col_index]
+ return percentile[0] if n_dim == 1 else percentile
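+
+
+# Illustrative usage sketch (not part of the upstream module): with equal
+# weights the lower weighted median of [1, 2, 3, 4] is 2, and heavier weights
+# pull the result towards the heavily weighted values, e.g.:
+#
+#     values = np.array([1.0, 2.0, 3.0, 4.0])
+#     _weighted_percentile(values, np.ones(4))                       # 2.0
+#     _weighted_percentile(values, np.array([1.0, 1.0, 1.0, 5.0]))   # 4.0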
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__init__.py b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/test_array_api.py b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/test_array_api.py
new file mode 100644
index 0000000000000000000000000000000000000000..1df81cf823bd696703712ab7e2713cfa54b6f510
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/test_array_api.py
@@ -0,0 +1,331 @@
+from functools import partial
+
+import numpy
+import pytest
+from numpy.testing import assert_allclose
+
+from sklearn._config import config_context
+from sklearn.base import BaseEstimator
+from sklearn.utils._array_api import (
+ _ArrayAPIWrapper,
+ _asarray_with_order,
+ _atol_for_type,
+ _convert_to_numpy,
+ _estimator_with_converted_arrays,
+ _nanmax,
+ _nanmin,
+ _NumPyAPIWrapper,
+ _weighted_sum,
+ get_namespace,
+ supported_float_dtypes,
+ yield_namespace_device_dtype_combinations,
+)
+from sklearn.utils._testing import (
+ _array_api_for_tests,
+ skip_if_array_api_compat_not_configured,
+)
+
+pytestmark = pytest.mark.filterwarnings(
+ "ignore:The numpy.array_api submodule:UserWarning"
+)
+
+
+@pytest.mark.parametrize("X", [numpy.asarray([1, 2, 3]), [1, 2, 3]])
+def test_get_namespace_ndarray_default(X):
+ """Check that get_namespace returns NumPy wrapper"""
+ xp_out, is_array_api_compliant = get_namespace(X)
+ assert isinstance(xp_out, _NumPyAPIWrapper)
+ assert not is_array_api_compliant
+
+
+def test_get_namespace_ndarray_creation_device():
+ """Check expected behavior with device and creation functions."""
+ X = numpy.asarray([1, 2, 3])
+ xp_out, _ = get_namespace(X)
+
+ full_array = xp_out.full(10, fill_value=2.0, device="cpu")
+ assert_allclose(full_array, [2.0] * 10)
+
+ with pytest.raises(ValueError, match="Unsupported device"):
+ xp_out.zeros(10, device="cuda")
+
+
+@skip_if_array_api_compat_not_configured
+def test_get_namespace_ndarray_with_dispatch():
+ """Test get_namespace on NumPy ndarrays."""
+ array_api_compat = pytest.importorskip("array_api_compat")
+
+ X_np = numpy.asarray([[1, 2, 3]])
+
+ with config_context(array_api_dispatch=True):
+ xp_out, is_array_api_compliant = get_namespace(X_np)
+ assert is_array_api_compliant
+ assert xp_out is array_api_compat.numpy
+
+
+@skip_if_array_api_compat_not_configured
+def test_get_namespace_array_api():
+ """Test get_namespace for ArrayAPI arrays."""
+ xp = pytest.importorskip("numpy.array_api")
+
+ X_np = numpy.asarray([[1, 2, 3]])
+ X_xp = xp.asarray(X_np)
+ with config_context(array_api_dispatch=True):
+ xp_out, is_array_api_compliant = get_namespace(X_xp)
+ assert is_array_api_compliant
+ assert isinstance(xp_out, _ArrayAPIWrapper)
+
+ with pytest.raises(TypeError):
+ xp_out, is_array_api_compliant = get_namespace(X_xp, X_np)
+
+
+class _AdjustableNameAPITestWrapper(_ArrayAPIWrapper):
+ """API wrapper that has an adjustable name. Used for testing."""
+
+ def __init__(self, array_namespace, name):
+ super().__init__(array_namespace=array_namespace)
+ self.__name__ = name
+
+
+def test_array_api_wrapper_astype():
+    """Test _ArrayAPIWrapper for Array API namespaces other than NumPy."""
+ numpy_array_api = pytest.importorskip("numpy.array_api")
+ xp_ = _AdjustableNameAPITestWrapper(numpy_array_api, "wrapped_numpy.array_api")
+ xp = _ArrayAPIWrapper(xp_)
+
+ X = xp.asarray(([[1, 2, 3], [3, 4, 5]]), dtype=xp.float64)
+ X_converted = xp.astype(X, xp.float32)
+ assert X_converted.dtype == xp.float32
+
+ X_converted = xp.asarray(X, dtype=xp.float32)
+ assert X_converted.dtype == xp.float32
+
+
+@pytest.mark.parametrize("array_api", ["numpy", "numpy.array_api"])
+def test_asarray_with_order(array_api):
+ """Test _asarray_with_order passes along order for NumPy arrays."""
+ xp = pytest.importorskip(array_api)
+
+ X = xp.asarray([1.2, 3.4, 5.1])
+ X_new = _asarray_with_order(X, order="F", xp=xp)
+
+ X_new_np = numpy.asarray(X_new)
+ assert X_new_np.flags["F_CONTIGUOUS"]
+
+
+def test_asarray_with_order_ignored():
+ """Test _asarray_with_order ignores order for Generic ArrayAPI."""
+ xp = pytest.importorskip("numpy.array_api")
+ xp_ = _AdjustableNameAPITestWrapper(xp, "wrapped.array_api")
+
+ X = numpy.asarray([[1.2, 3.4, 5.1], [3.4, 5.5, 1.2]], order="C")
+ X = xp_.asarray(X)
+
+ X_new = _asarray_with_order(X, order="F", xp=xp_)
+
+ X_new_np = numpy.asarray(X_new)
+ assert X_new_np.flags["C_CONTIGUOUS"]
+ assert not X_new_np.flags["F_CONTIGUOUS"]
+
+
+@pytest.mark.parametrize(
+ "array_namespace, device, dtype_name", yield_namespace_device_dtype_combinations()
+)
+@pytest.mark.parametrize(
+ "sample_weight, normalize, expected",
+ [
+ (None, False, 10.0),
+ (None, True, 2.5),
+ ([0.4, 0.4, 0.5, 0.7], False, 5.5),
+ ([0.4, 0.4, 0.5, 0.7], True, 2.75),
+ ([1, 2, 3, 4], False, 30.0),
+ ([1, 2, 3, 4], True, 3.0),
+ ],
+)
+def test_weighted_sum(
+ array_namespace, device, dtype_name, sample_weight, normalize, expected
+):
+ xp = _array_api_for_tests(array_namespace, device)
+ sample_score = numpy.asarray([1, 2, 3, 4], dtype=dtype_name)
+ sample_score = xp.asarray(sample_score, device=device)
+ if sample_weight is not None:
+ sample_weight = numpy.asarray(sample_weight, dtype=dtype_name)
+ sample_weight = xp.asarray(sample_weight, device=device)
+
+ with config_context(array_api_dispatch=True):
+ result = _weighted_sum(sample_score, sample_weight, normalize)
+
+ assert isinstance(result, float)
+ assert_allclose(result, expected, atol=_atol_for_type(dtype_name))
+
+
+@skip_if_array_api_compat_not_configured
+@pytest.mark.parametrize(
+ "library", ["numpy", "numpy.array_api", "cupy", "cupy.array_api", "torch"]
+)
+@pytest.mark.parametrize(
+ "X,reduction,expected",
+ [
+ ([1, 2, numpy.nan], _nanmin, 1),
+ ([1, -2, -numpy.nan], _nanmin, -2),
+ ([numpy.inf, numpy.inf], _nanmin, numpy.inf),
+ (
+ [[1, 2, 3], [numpy.nan, numpy.nan, numpy.nan], [4, 5, 6.0]],
+ partial(_nanmin, axis=0),
+ [1.0, 2.0, 3.0],
+ ),
+ (
+ [[1, 2, 3], [numpy.nan, numpy.nan, numpy.nan], [4, 5, 6.0]],
+ partial(_nanmin, axis=1),
+ [1.0, numpy.nan, 4.0],
+ ),
+ ([1, 2, numpy.nan], _nanmax, 2),
+ ([1, 2, numpy.nan], _nanmax, 2),
+ ([-numpy.inf, -numpy.inf], _nanmax, -numpy.inf),
+ (
+ [[1, 2, 3], [numpy.nan, numpy.nan, numpy.nan], [4, 5, 6.0]],
+ partial(_nanmax, axis=0),
+ [4.0, 5.0, 6.0],
+ ),
+ (
+ [[1, 2, 3], [numpy.nan, numpy.nan, numpy.nan], [4, 5, 6.0]],
+ partial(_nanmax, axis=1),
+ [3.0, numpy.nan, 6.0],
+ ),
+ ],
+)
+def test_nan_reductions(library, X, reduction, expected):
+ """Check NaN reductions like _nanmin and _nanmax"""
+ xp = pytest.importorskip(library)
+
+ if isinstance(expected, list):
+ expected = xp.asarray(expected)
+
+ with config_context(array_api_dispatch=True):
+ result = reduction(xp.asarray(X))
+
+ assert_allclose(result, expected)
+
+
+@skip_if_array_api_compat_not_configured
+@pytest.mark.parametrize("library", ["cupy", "torch", "cupy.array_api"])
+def test_convert_to_numpy_gpu(library): # pragma: nocover
+ """Check convert_to_numpy for GPU backed libraries."""
+ xp = pytest.importorskip(library)
+
+ if library == "torch":
+ if not xp.backends.cuda.is_built():
+ pytest.skip("test requires cuda")
+ X_gpu = xp.asarray([1.0, 2.0, 3.0], device="cuda")
+ else:
+ X_gpu = xp.asarray([1.0, 2.0, 3.0])
+
+ X_cpu = _convert_to_numpy(X_gpu, xp=xp)
+ expected_output = numpy.asarray([1.0, 2.0, 3.0])
+ assert_allclose(X_cpu, expected_output)
+
+
+def test_convert_to_numpy_cpu():
+ """Check convert_to_numpy for PyTorch CPU arrays."""
+ torch = pytest.importorskip("torch")
+ X_torch = torch.asarray([1.0, 2.0, 3.0], device="cpu")
+
+ X_cpu = _convert_to_numpy(X_torch, xp=torch)
+ expected_output = numpy.asarray([1.0, 2.0, 3.0])
+ assert_allclose(X_cpu, expected_output)
+
+
+class SimpleEstimator(BaseEstimator):
+ def fit(self, X, y=None):
+ self.X_ = X
+ self.n_features_ = X.shape[0]
+ return self
+
+
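+ # Each converter below turns the library's arrays back into NumPy ndarrays:
+ # torch tensors via .cpu().numpy(), numpy.array_api arrays via numpy.asarray,
+ # and cupy.array_api arrays via the underlying CuPy array's .get().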
+@skip_if_array_api_compat_not_configured
+@pytest.mark.parametrize(
+ "array_namespace, converter",
+ [
+ ("torch", lambda array: array.cpu().numpy()),
+ ("numpy.array_api", lambda array: numpy.asarray(array)),
+ ("cupy.array_api", lambda array: array._array.get()),
+ ],
+)
+def test_convert_estimator_to_ndarray(array_namespace, converter):
+ """Convert estimator attributes to ndarray."""
+ xp = pytest.importorskip(array_namespace)
+
+ X = xp.asarray([[1.3, 4.5]])
+ est = SimpleEstimator().fit(X)
+
+ new_est = _estimator_with_converted_arrays(est, converter)
+ assert isinstance(new_est.X_, numpy.ndarray)
+
+
+@skip_if_array_api_compat_not_configured
+def test_convert_estimator_to_array_api():
+ """Convert estimator attributes to ArrayAPI arrays."""
+ xp = pytest.importorskip("numpy.array_api")
+
+ X_np = numpy.asarray([[1.3, 4.5]])
+ est = SimpleEstimator().fit(X_np)
+
+ new_est = _estimator_with_converted_arrays(est, lambda array: xp.asarray(array))
+ assert hasattr(new_est.X_, "__array_namespace__")
+
+
+def test_reshape_behavior():
+ """Check reshape behavior with copy and is strict with non-tuple shape."""
+ xp = _NumPyAPIWrapper()
+ X = xp.asarray([[1, 2, 3], [3, 4, 5]])
+
+ X_no_copy = xp.reshape(X, (-1,), copy=False)
+ assert X_no_copy.base is X
+
+ X_copy = xp.reshape(X, (6, 1), copy=True)
+ assert X_copy.base is not X.base
+
+ with pytest.raises(TypeError, match="shape must be a tuple"):
+ xp.reshape(X, -1)
+
+
+@pytest.mark.parametrize("wrapper", [_ArrayAPIWrapper, _NumPyAPIWrapper])
+def test_get_namespace_array_api_isdtype(wrapper):
+ """Test isdtype implementation from _ArrayAPIWrapper and _NumPyAPIWrapper."""
+
+ if wrapper == _ArrayAPIWrapper:
+ xp_ = pytest.importorskip("numpy.array_api")
+ xp = _ArrayAPIWrapper(xp_)
+ else:
+ xp = _NumPyAPIWrapper()
+
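+ # isdtype accepts either a concrete dtype or a dtype-kind string from the
+ # Array API specification (such as "bool", "signed integer",
+ # "unsigned integer", "real floating", "complex floating", "numeric");
+ # both forms are checked below.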
+ assert xp.isdtype(xp.float32, xp.float32)
+ assert xp.isdtype(xp.float32, "real floating")
+ assert xp.isdtype(xp.float64, "real floating")
+ assert not xp.isdtype(xp.int32, "real floating")
+
+ for dtype in supported_float_dtypes(xp):
+ assert xp.isdtype(dtype, "real floating")
+
+ assert xp.isdtype(xp.bool, "bool")
+ assert not xp.isdtype(xp.float32, "bool")
+
+ assert xp.isdtype(xp.int16, "signed integer")
+ assert not xp.isdtype(xp.uint32, "signed integer")
+
+ assert xp.isdtype(xp.uint16, "unsigned integer")
+ assert not xp.isdtype(xp.int64, "unsigned integer")
+
+ assert xp.isdtype(xp.int64, "numeric")
+ assert xp.isdtype(xp.float32, "numeric")
+ assert xp.isdtype(xp.uint32, "numeric")
+
+ assert not xp.isdtype(xp.float32, "complex floating")
+
+ if wrapper == _NumPyAPIWrapper:
+ assert not xp.isdtype(xp.int8, "complex floating")
+ assert xp.isdtype(xp.complex64, "complex floating")
+ assert xp.isdtype(xp.complex128, "complex floating")
+
+ with pytest.raises(ValueError, match="Unrecognized data type"):
+ assert xp.isdtype(xp.int16, "unknown")
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/test_arrayfuncs.py b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/test_arrayfuncs.py
new file mode 100644
index 0000000000000000000000000000000000000000..4a80a4c1edefd9df99eeed570133998a27e985d9
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/test_arrayfuncs.py
@@ -0,0 +1,38 @@
+import numpy as np
+import pytest
+
+from sklearn.utils._testing import assert_allclose
+from sklearn.utils.arrayfuncs import _all_with_any_reduction_axis_1, min_pos
+
+
+def test_min_pos():
+ # Check that min_pos returns a positive value and that it's consistent
+ # between float and double
+ X = np.random.RandomState(0).randn(100)
+
+ min_double = min_pos(X)
+ min_float = min_pos(X.astype(np.float32))
+
+ assert_allclose(min_double, min_float)
+ assert min_double >= 0
+
+
+@pytest.mark.parametrize("dtype", [np.float32, np.float64])
+def test_min_pos_no_positive(dtype):
+ # Check that the return value of min_pos is the maximum representable
+ # value of the input dtype when all input elements are <= 0 (#19328)
+ X = np.full(100, -1.0).astype(dtype, copy=False)
+
+ assert min_pos(X) == np.finfo(dtype).max
+
+
+@pytest.mark.parametrize("dtype", [np.int16, np.int32, np.float32, np.float64])
+@pytest.mark.parametrize("value", [0, 1.5, -1])
+def test_all_with_any_reduction_axis_1(dtype, value):
+ # Check that the return value is False when no row of X is entirely equal to `value`
+ X = np.arange(12, dtype=dtype).reshape(3, 4)
+ assert not _all_with_any_reduction_axis_1(X, value=value)
+
+ # Make a row equal to `value`
+ X[1, :] = value
+ assert _all_with_any_reduction_axis_1(X, value=value)
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/test_bunch.py b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/test_bunch.py
new file mode 100644
index 0000000000000000000000000000000000000000..15463475747f4229ed7ab320d8f5be8005f9cf0a
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/test_bunch.py
@@ -0,0 +1,32 @@
+import warnings
+
+import numpy as np
+import pytest
+
+from sklearn.utils import Bunch
+
+
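+ # Note: Bunch._set_deprecated registers a deprecated alias for a key; reading
+ # the old key is expected to emit a FutureWarning while the new key stays
+ # silent, which is what the test below relies on.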
+def test_bunch_attribute_deprecation():
+ """Check that bunch raises deprecation message with `__getattr__`."""
+ bunch = Bunch()
+ values = np.asarray([1, 2, 3])
+ msg = (
+ "Key: 'values', is deprecated in 1.3 and will be "
+ "removed in 1.5. Please use 'grid_values' instead"
+ )
+ bunch._set_deprecated(
+ values, new_key="grid_values", deprecated_key="values", warning_message=msg
+ )
+
+ with warnings.catch_warnings():
+ # Does not warn for "grid_values"
+ warnings.simplefilter("error")
+ v = bunch["grid_values"]
+
+ assert v is values
+
+ with pytest.warns(FutureWarning, match=msg):
+ # Warns for "values"
+ v = bunch["values"]
+
+ assert v is values
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/test_cython_blas.py b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/test_cython_blas.py
new file mode 100644
index 0000000000000000000000000000000000000000..e57bfc3ec5a9c58c8202ad06cf9ede7f65673788
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/test_cython_blas.py
@@ -0,0 +1,234 @@
+import numpy as np
+import pytest
+
+from sklearn.utils._cython_blas import (
+ ColMajor,
+ NoTrans,
+ RowMajor,
+ Trans,
+ _asum_memview,
+ _axpy_memview,
+ _copy_memview,
+ _dot_memview,
+ _gemm_memview,
+ _gemv_memview,
+ _ger_memview,
+ _nrm2_memview,
+ _rot_memview,
+ _rotg_memview,
+ _scal_memview,
+)
+from sklearn.utils._testing import assert_allclose
+
+
+def _numpy_to_cython(dtype):
+ cython = pytest.importorskip("cython")
+ if dtype == np.float32:
+ return cython.float
+ elif dtype == np.float64:
+ return cython.double
+
+
+RTOL = {np.float32: 1e-6, np.float64: 1e-12}
+ORDER = {RowMajor: "C", ColMajor: "F"}
+
+
+def _no_op(x):
+ return x
+
+
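+ # The *_memview helpers are Cython fused-type functions: indexing them with
+ # the Cython scalar type returned by _numpy_to_cython selects the float or
+ # double specialization matching the NumPy dtype under test.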
+@pytest.mark.parametrize("dtype", [np.float32, np.float64])
+def test_dot(dtype):
+ dot = _dot_memview[_numpy_to_cython(dtype)]
+
+ rng = np.random.RandomState(0)
+ x = rng.random_sample(10).astype(dtype, copy=False)
+ y = rng.random_sample(10).astype(dtype, copy=False)
+
+ expected = x.dot(y)
+ actual = dot(x, y)
+
+ assert_allclose(actual, expected, rtol=RTOL[dtype])
+
+
+@pytest.mark.parametrize("dtype", [np.float32, np.float64])
+def test_asum(dtype):
+ asum = _asum_memview[_numpy_to_cython(dtype)]
+
+ rng = np.random.RandomState(0)
+ x = rng.random_sample(10).astype(dtype, copy=False)
+
+ expected = np.abs(x).sum()
+ actual = asum(x)
+
+ assert_allclose(actual, expected, rtol=RTOL[dtype])
+
+
+@pytest.mark.parametrize("dtype", [np.float32, np.float64])
+def test_axpy(dtype):
+ axpy = _axpy_memview[_numpy_to_cython(dtype)]
+
+ rng = np.random.RandomState(0)
+ x = rng.random_sample(10).astype(dtype, copy=False)
+ y = rng.random_sample(10).astype(dtype, copy=False)
+ alpha = 2.5
+
+ expected = alpha * x + y
+ axpy(alpha, x, y)
+
+ assert_allclose(y, expected, rtol=RTOL[dtype])
+
+
+@pytest.mark.parametrize("dtype", [np.float32, np.float64])
+def test_nrm2(dtype):
+ nrm2 = _nrm2_memview[_numpy_to_cython(dtype)]
+
+ rng = np.random.RandomState(0)
+ x = rng.random_sample(10).astype(dtype, copy=False)
+
+ expected = np.linalg.norm(x)
+ actual = nrm2(x)
+
+ assert_allclose(actual, expected, rtol=RTOL[dtype])
+
+
+@pytest.mark.parametrize("dtype", [np.float32, np.float64])
+def test_copy(dtype):
+ copy = _copy_memview[_numpy_to_cython(dtype)]
+
+ rng = np.random.RandomState(0)
+ x = rng.random_sample(10).astype(dtype, copy=False)
+ y = np.empty_like(x)
+
+ expected = x.copy()
+ copy(x, y)
+
+ assert_allclose(y, expected, rtol=RTOL[dtype])
+
+
+@pytest.mark.parametrize("dtype", [np.float32, np.float64])
+def test_scal(dtype):
+ scal = _scal_memview[_numpy_to_cython(dtype)]
+
+ rng = np.random.RandomState(0)
+ x = rng.random_sample(10).astype(dtype, copy=False)
+ alpha = 2.5
+
+ expected = alpha * x
+ scal(alpha, x)
+
+ assert_allclose(x, expected, rtol=RTOL[dtype])
+
+
+@pytest.mark.parametrize("dtype", [np.float32, np.float64])
+def test_rotg(dtype):
+ rotg = _rotg_memview[_numpy_to_cython(dtype)]
+
+ rng = np.random.RandomState(0)
+ a = dtype(rng.randn())
+ b = dtype(rng.randn())
+ c, s = 0.0, 0.0
+
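+ # Reference behavior of BLAS rotg: it computes a Givens rotation (c, s) plus
+ # r and z such that [[c, s], [-s, c]] @ [a, b] == [r, 0], with the sign of r
+ # taken from whichever of a, b has the larger magnitude; z encodes the
+ # rotation so it can be reconstructed later. The pure-Python helper below is
+ # intended to mirror that convention.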
+ def expected_rotg(a, b):
+ roe = a if abs(a) > abs(b) else b
+ if a == 0 and b == 0:
+ c, s, r, z = (1, 0, 0, 0)
+ else:
+ r = np.sqrt(a**2 + b**2) * (1 if roe >= 0 else -1)
+ c, s = a / r, b / r
+ z = s if roe == a else (1 if c == 0 else 1 / c)
+ return r, z, c, s
+
+ expected = expected_rotg(a, b)
+ actual = rotg(a, b, c, s)
+
+ assert_allclose(actual, expected, rtol=RTOL[dtype])
+
+
+@pytest.mark.parametrize("dtype", [np.float32, np.float64])
+def test_rot(dtype):
+ rot = _rot_memview[_numpy_to_cython(dtype)]
+
+ rng = np.random.RandomState(0)
+ x = rng.random_sample(10).astype(dtype, copy=False)
+ y = rng.random_sample(10).astype(dtype, copy=False)
+ c = dtype(rng.randn())
+ s = dtype(rng.randn())
+
+ expected_x = c * x + s * y
+ expected_y = c * y - s * x
+
+ rot(x, y, c, s)
+
+ assert_allclose(x, expected_x)
+ assert_allclose(y, expected_y)
+
+
+@pytest.mark.parametrize("dtype", [np.float32, np.float64])
+@pytest.mark.parametrize(
+ "opA, transA", [(_no_op, NoTrans), (np.transpose, Trans)], ids=["NoTrans", "Trans"]
+)
+@pytest.mark.parametrize("order", [RowMajor, ColMajor], ids=["RowMajor", "ColMajor"])
+def test_gemv(dtype, opA, transA, order):
+ gemv = _gemv_memview[_numpy_to_cython(dtype)]
+
+ rng = np.random.RandomState(0)
+ A = np.asarray(
+ opA(rng.random_sample((20, 10)).astype(dtype, copy=False)), order=ORDER[order]
+ )
+ x = rng.random_sample(10).astype(dtype, copy=False)
+ y = rng.random_sample(20).astype(dtype, copy=False)
+ alpha, beta = 2.5, -0.5
+
+ expected = alpha * opA(A).dot(x) + beta * y
+ gemv(transA, alpha, A, x, beta, y)
+
+ assert_allclose(y, expected, rtol=RTOL[dtype])
+
+
+@pytest.mark.parametrize("dtype", [np.float32, np.float64])
+@pytest.mark.parametrize("order", [RowMajor, ColMajor], ids=["RowMajor", "ColMajor"])
+def test_ger(dtype, order):
+ ger = _ger_memview[_numpy_to_cython(dtype)]
+
+ rng = np.random.RandomState(0)
+ x = rng.random_sample(10).astype(dtype, copy=False)
+ y = rng.random_sample(20).astype(dtype, copy=False)
+ A = np.asarray(
+ rng.random_sample((10, 20)).astype(dtype, copy=False), order=ORDER[order]
+ )
+ alpha = 2.5
+
+ expected = alpha * np.outer(x, y) + A
+ ger(alpha, x, y, A)
+
+ assert_allclose(A, expected, rtol=RTOL[dtype])
+
+
+@pytest.mark.parametrize("dtype", [np.float32, np.float64])
+@pytest.mark.parametrize(
+ "opB, transB", [(_no_op, NoTrans), (np.transpose, Trans)], ids=["NoTrans", "Trans"]
+)
+@pytest.mark.parametrize(
+ "opA, transA", [(_no_op, NoTrans), (np.transpose, Trans)], ids=["NoTrans", "Trans"]
+)
+@pytest.mark.parametrize("order", [RowMajor, ColMajor], ids=["RowMajor", "ColMajor"])
+def test_gemm(dtype, opA, transA, opB, transB, order):
+ gemm = _gemm_memview[_numpy_to_cython(dtype)]
+
+ rng = np.random.RandomState(0)
+ A = np.asarray(
+ opA(rng.random_sample((30, 10)).astype(dtype, copy=False)), order=ORDER[order]
+ )
+ B = np.asarray(
+ opB(rng.random_sample((10, 20)).astype(dtype, copy=False)), order=ORDER[order]
+ )
+ C = np.asarray(
+ rng.random_sample((30, 20)).astype(dtype, copy=False), order=ORDER[order]
+ )
+ alpha, beta = 2.5, -0.5
+
+ expected = alpha * opA(A).dot(opB(B)) + beta * C
+ gemm(transA, transB, alpha, A, B, beta, C)
+
+ assert_allclose(C, expected, rtol=RTOL[dtype])
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/test_cython_templating.py b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/test_cython_templating.py
new file mode 100644
index 0000000000000000000000000000000000000000..f5c9fa7a9087e81dcf8bd01f155458b5cb1a723f
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/test_cython_templating.py
@@ -0,0 +1,22 @@
+import pathlib
+
+import pytest
+
+import sklearn
+
+
+def test_files_generated_by_templates_are_git_ignored():
+ """Check the consistence of the files generated from template files."""
+ gitignore_file = pathlib.Path(sklearn.__file__).parent.parent / ".gitignore"
+ if not gitignore_file.exists():
+ pytest.skip("Tests are not run from the source folder")
+
+ base_dir = pathlib.Path(sklearn.__file__).parent
+ ignored_files = gitignore_file.read_text().split("\n")
+ ignored_files = [pathlib.Path(line) for line in ignored_files]
+
+ for filename in base_dir.glob("**/*.tp"):
+ filename = filename.relative_to(base_dir.parent)
+ # From "path/to/template.p??.tp" to "path/to/template.p??"
+ filename_wo_tempita_suffix = filename.with_suffix("")
+ assert filename_wo_tempita_suffix in ignored_files
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/test_estimator_html_repr.py b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/test_estimator_html_repr.py
new file mode 100644
index 0000000000000000000000000000000000000000..d59658998432da760acd0611774106bf85c2284e
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/test_estimator_html_repr.py
@@ -0,0 +1,518 @@
+import html
+import locale
+import re
+from contextlib import closing
+from io import StringIO
+from unittest.mock import patch
+
+import pytest
+
+from sklearn import config_context
+from sklearn.base import BaseEstimator
+from sklearn.cluster import AgglomerativeClustering, Birch
+from sklearn.compose import ColumnTransformer, make_column_transformer
+from sklearn.datasets import load_iris
+from sklearn.decomposition import PCA, TruncatedSVD
+from sklearn.ensemble import StackingClassifier, StackingRegressor, VotingClassifier
+from sklearn.feature_selection import SelectPercentile
+from sklearn.gaussian_process.kernels import ExpSineSquared
+from sklearn.impute import SimpleImputer
+from sklearn.kernel_ridge import KernelRidge
+from sklearn.linear_model import LogisticRegression
+from sklearn.model_selection import RandomizedSearchCV
+from sklearn.multiclass import OneVsOneClassifier
+from sklearn.neural_network import MLPClassifier
+from sklearn.pipeline import FeatureUnion, Pipeline, make_pipeline
+from sklearn.preprocessing import OneHotEncoder, StandardScaler
+from sklearn.svm import LinearSVC, LinearSVR
+from sklearn.tree import DecisionTreeClassifier
+from sklearn.utils._estimator_html_repr import (
+ _get_css_style,
+ _get_visual_block,
+ _HTMLDocumentationLinkMixin,
+ _write_label_html,
+ estimator_html_repr,
+)
+from sklearn.utils.fixes import parse_version
+
+
+@pytest.mark.parametrize("checked", [True, False])
+def test_write_label_html(checked):
+ # Test checking logic and labeling
+ name = "LogisticRegression"
+ tool_tip = "hello-world"
+
+ with closing(StringIO()) as out:
+ _write_label_html(out, name, tool_tip, checked=checked)
+ html_label = out.getvalue()
+
+ p = (
+ r'