diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/_build_utils/__init__.py b/llmeval-env/lib/python3.10/site-packages/sklearn/_build_utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a8ced8aa9d29242760ae40c0062bd79432d55675 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/_build_utils/__init__.py @@ -0,0 +1,115 @@ +""" +Utilities useful during the build. +""" +# author: Andy Mueller, Gael Varoquaux +# license: BSD + + +import contextlib +import os + +import sklearn + +from .._min_dependencies import CYTHON_MIN_VERSION +from ..externals._packaging.version import parse +from .openmp_helpers import check_openmp_support +from .pre_build_helpers import basic_check_build + +DEFAULT_ROOT = "sklearn" + + +def _check_cython_version(): + message = ( + "Please install Cython with a version >= {0} in order " + "to build a scikit-learn from source." + ).format(CYTHON_MIN_VERSION) + try: + import Cython + except ModuleNotFoundError as e: + # Re-raise with more informative error message instead: + raise ModuleNotFoundError(message) from e + + if parse(Cython.__version__) < parse(CYTHON_MIN_VERSION): + message += " The current version of Cython is {} installed in {}.".format( + Cython.__version__, Cython.__path__ + ) + raise ValueError(message) + + +def cythonize_extensions(extension): + """Check that a recent Cython is available and cythonize extensions""" + _check_cython_version() + from Cython.Build import cythonize + + # Fast fail before cythonization if compiler fails compiling basic test + # code even without OpenMP + basic_check_build() + + # check simple compilation with OpenMP. If it fails scikit-learn will be + # built without OpenMP and the test test_openmp_supported in the test suite + # will fail. + # `check_openmp_support` compiles a small test program to see if the + # compilers are properly configured to build with OpenMP. This is expensive + # and we only want to call this function once. + # The result of this check is cached as a private attribute on the sklearn + # module (only at build-time) to be used in the build_ext subclass defined + # in the top-level setup.py file to actually build the compiled extensions + # with OpenMP flags if needed. + sklearn._OPENMP_SUPPORTED = check_openmp_support() + + n_jobs = 1 + with contextlib.suppress(ImportError): + import joblib + + n_jobs = joblib.cpu_count() + + # Additional checks for Cython + cython_enable_debug_directives = ( + os.environ.get("SKLEARN_ENABLE_DEBUG_CYTHON_DIRECTIVES", "0") != "0" + ) + + compiler_directives = { + "language_level": 3, + "boundscheck": cython_enable_debug_directives, + "wraparound": False, + "initializedcheck": False, + "nonecheck": False, + "cdivision": True, + "profile": False, + } + + return cythonize( + extension, + nthreads=n_jobs, + compiler_directives=compiler_directives, + annotate=False, + ) + + +def gen_from_templates(templates): + """Generate cython files from a list of templates""" + # Lazy import because cython is not a runtime dependency. 
+ from Cython import Tempita + + for template in templates: + outfile = template.replace(".tp", "") + + # if the template is not updated, no need to output the cython file + if not ( + os.path.exists(outfile) + and os.stat(template).st_mtime < os.stat(outfile).st_mtime + ): + with open(template, "r") as f: + tmpl = f.read() + + tmpl_ = Tempita.sub(tmpl) + + warn_msg = ( + "# WARNING: Do not edit this file directly.\n" + f"# It is automatically generated from {template!r}.\n" + "# Changes must be made there.\n\n" + ) + + with open(outfile, "w") as f: + f.write(warn_msg) + f.write(tmpl_) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..727756c4c27ee40f181a6c1c69dd05f8a343b01a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/openmp_helpers.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/openmp_helpers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2230fec16bfa763c0b332ffaf0e1c948bfb8ec3b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/openmp_helpers.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/pre_build_helpers.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/pre_build_helpers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a62084ef1d104de45696c63174ef53b845958050 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/pre_build_helpers.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/tempita.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/tempita.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0f0cb5cd0f0c401d01e1bee06057c5e42439bbdf Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/tempita.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/version.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/version.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8ec8b88e50f6669bc41841a1afd0ce70af33ba6b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/version.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/_build_utils/openmp_helpers.py b/llmeval-env/lib/python3.10/site-packages/sklearn/_build_utils/openmp_helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..9172d40830bb970488431e31e4d11ef474dfe349 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/_build_utils/openmp_helpers.py @@ -0,0 +1,123 @@ +"""Helpers for OpenMP support during the build.""" + +# This code is adapted for a large part from the astropy openmp helpers, which +# can be found at: 
https://github.com/astropy/extension-helpers/blob/master/extension_helpers/_openmp_helpers.py # noqa + + +import os +import sys +import textwrap +import warnings + +from .pre_build_helpers import compile_test_program + + +def get_openmp_flag(): + if sys.platform == "win32": + return ["/openmp"] + elif sys.platform == "darwin" and "openmp" in os.getenv("CPPFLAGS", ""): + # -fopenmp can't be passed as compile flag when using Apple-clang. + # OpenMP support has to be enabled during preprocessing. + # + # For example, our macOS wheel build jobs use the following environment + # variables to build with Apple-clang and the brew installed "libomp": + # + # export CPPFLAGS="$CPPFLAGS -Xpreprocessor -fopenmp" + # export CFLAGS="$CFLAGS -I/usr/local/opt/libomp/include" + # export CXXFLAGS="$CXXFLAGS -I/usr/local/opt/libomp/include" + # export LDFLAGS="$LDFLAGS -Wl,-rpath,/usr/local/opt/libomp/lib + # -L/usr/local/opt/libomp/lib -lomp" + return [] + # Default flag for GCC and clang: + return ["-fopenmp"] + + +def check_openmp_support(): + """Check whether OpenMP test code can be compiled and run""" + if "PYODIDE_PACKAGE_ABI" in os.environ: + # Pyodide doesn't support OpenMP + return False + + code = textwrap.dedent("""\ + #include + #include + int main(void) { + #pragma omp parallel + printf("nthreads=%d\\n", omp_get_num_threads()); + return 0; + } + """) + + extra_preargs = os.getenv("LDFLAGS", None) + if extra_preargs is not None: + extra_preargs = extra_preargs.strip().split(" ") + # FIXME: temporary fix to link against system libraries on linux + # "-Wl,--sysroot=/" should be removed + extra_preargs = [ + flag + for flag in extra_preargs + if flag.startswith(("-L", "-Wl,-rpath", "-l", "-Wl,--sysroot=/")) + ] + + extra_postargs = get_openmp_flag() + + openmp_exception = None + try: + output = compile_test_program( + code, extra_preargs=extra_preargs, extra_postargs=extra_postargs + ) + + if output and "nthreads=" in output[0]: + nthreads = int(output[0].strip().split("=")[1]) + openmp_supported = len(output) == nthreads + elif "PYTHON_CROSSENV" in os.environ: + # Since we can't run the test program when cross-compiling + # assume that openmp is supported if the program can be + # compiled. + openmp_supported = True + else: + openmp_supported = False + + except Exception as exception: + # We could be more specific and only catch: CompileError, LinkError, + # and subprocess.CalledProcessError. + # setuptools introduced CompileError and LinkError, but that requires + # version 61.1. Even the latest version of Ubuntu (22.04LTS) only + # ships with 59.6. So for now we catch all exceptions and reraise a + # generic exception with the original error message instead: + openmp_supported = False + openmp_exception = exception + + if not openmp_supported: + if os.getenv("SKLEARN_FAIL_NO_OPENMP"): + raise Exception( + "Failed to build scikit-learn with OpenMP support" + ) from openmp_exception + else: + message = textwrap.dedent(""" + + *********** + * WARNING * + *********** + + It seems that scikit-learn cannot be built with OpenMP. + + - Make sure you have followed the installation instructions: + + https://scikit-learn.org/dev/developers/advanced_installation.html + + - If your compiler supports OpenMP but you still see this + message, please submit a bug report at: + + https://github.com/scikit-learn/scikit-learn/issues + + - The build will continue with OpenMP-based parallelism + disabled. Note however that some estimators will run in + sequential mode instead of leveraging thread-based + parallelism. 
+ + *** + """) + warnings.warn(message) + + return openmp_supported diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/_build_utils/pre_build_helpers.py b/llmeval-env/lib/python3.10/site-packages/sklearn/_build_utils/pre_build_helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..f3eb054bb037ec5b884b471fdcd148484f88c3f2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/_build_utils/pre_build_helpers.py @@ -0,0 +1,73 @@ +"""Helpers to check build environment before actual build of scikit-learn""" + +import glob +import os +import subprocess +import sys +import tempfile +import textwrap + +from setuptools.command.build_ext import customize_compiler, new_compiler + + +def compile_test_program(code, extra_preargs=None, extra_postargs=None): + """Check that some C code can be compiled and run""" + ccompiler = new_compiler() + customize_compiler(ccompiler) + + start_dir = os.path.abspath(".") + + with tempfile.TemporaryDirectory() as tmp_dir: + try: + os.chdir(tmp_dir) + + # Write test program + with open("test_program.c", "w") as f: + f.write(code) + + os.mkdir("objects") + + # Compile, test program + ccompiler.compile( + ["test_program.c"], output_dir="objects", extra_postargs=extra_postargs + ) + + # Link test program + objects = glob.glob(os.path.join("objects", "*" + ccompiler.obj_extension)) + ccompiler.link_executable( + objects, + "test_program", + extra_preargs=extra_preargs, + extra_postargs=extra_postargs, + ) + + if "PYTHON_CROSSENV" not in os.environ: + # Run test program if not cross compiling + # will raise a CalledProcessError if return code was non-zero + output = subprocess.check_output("./test_program") + output = output.decode(sys.stdout.encoding or "utf-8").splitlines() + else: + # Return an empty output if we are cross compiling + # as we cannot run the test_program + output = [] + except Exception: + raise + finally: + os.chdir(start_dir) + + return output + + +def basic_check_build(): + """Check basic compilation and linking of C code""" + if "PYODIDE_PACKAGE_ABI" in os.environ: + # The following check won't work in pyodide + return + + code = textwrap.dedent("""\ + #include + int main(void) { + return 0; + } + """) + compile_test_program(code) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/_build_utils/tempita.py b/llmeval-env/lib/python3.10/site-packages/sklearn/_build_utils/tempita.py new file mode 100644 index 0000000000000000000000000000000000000000..8da4b9c0e7ace17320687fa88534143530381ea9 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/_build_utils/tempita.py @@ -0,0 +1,57 @@ +import argparse +import os + +from Cython import Tempita as tempita + +# XXX: If this import ever fails (does it really?), vendor either +# cython.tempita or numpy/npy_tempita. + + +def process_tempita(fromfile, outfile=None): + """Process tempita templated file and write out the result. + + The template file is expected to end in `.c.tp` or `.pyx.tp`: + E.g. processing `template.c.in` generates `template.c`. 
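The `process_tempita` helper above expands Cython's bundled Tempita templates. As a hedged illustration (not part of the patch; the template text below is invented), the substitution it relies on looks roughly like this:

# Illustrative sketch only -- the template text is hypothetical.
from Cython import Tempita

template_text = """\
{{for dtype in ['float32', 'float64']}}
def clip_{{dtype}}(value, lo, hi):
    # generated for {{dtype}}
    return min(max(value, lo), hi)
{{endfor}}
"""

# Tempita expands the {{for}} loop at template time, producing plain Python
# source; process_tempita() writes the equivalent result to the output file.
print(Tempita.sub(template_text))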
+ + """ + with open(fromfile, "r", encoding="utf-8") as f: + template_content = f.read() + + template = tempita.Template(template_content) + content = template.substitute() + + with open(outfile, "w", encoding="utf-8") as f: + f.write(content) + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument("infile", type=str, help="Path to the input file") + parser.add_argument("-o", "--outdir", type=str, help="Path to the output directory") + parser.add_argument( + "-i", + "--ignore", + type=str, + help=( + "An ignored input - may be useful to add a " + "dependency between custom targets" + ), + ) + args = parser.parse_args() + + if not args.infile.endswith(".tp"): + raise ValueError(f"Unexpected extension: {args.infile}") + + if not args.outdir: + raise ValueError("Missing `--outdir` argument to tempita.py") + + outdir_abs = os.path.join(os.getcwd(), args.outdir) + outfile = os.path.join( + outdir_abs, os.path.splitext(os.path.split(args.infile)[1])[0] + ) + + process_tempita(args.infile, outfile) + + +if __name__ == "__main__": + main() diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/_build_utils/version.py b/llmeval-env/lib/python3.10/site-packages/sklearn/_build_utils/version.py new file mode 100644 index 0000000000000000000000000000000000000000..1f8688a008e9d13851333e8c336a93692d4d4419 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/_build_utils/version.py @@ -0,0 +1,14 @@ +#!/usr/bin/env python +""" Extract version number from __init__.py +""" + +import os + +sklearn_init = os.path.join(os.path.dirname(__file__), "../__init__.py") + +data = open(sklearn_init).readlines() +version_line = next(line for line in data if line.startswith("__version__")) + +version = version_line.strip().split(" = ")[1].replace('"', "").replace("'", "") + +print(version) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..68c4542f17c659fc35b108f717b4ee6778242885 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_affinity_propagation.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_affinity_propagation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b01cc0ac174d5a47916887aca580a82373eedb8a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_affinity_propagation.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_agglomerative.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_agglomerative.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e5d31459e0cbf23d2895b07df961db728e14b2f8 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_agglomerative.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_bicluster.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_bicluster.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1397db07a4dbab83a744b23c52369a75b06bce5d Binary files 
/dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_bicluster.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_birch.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_birch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..04f18980f84421cad9baea05bde9c0a35860317e Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_birch.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_bisect_k_means.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_bisect_k_means.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cc391edbbe3afc06fceb5defc2fd0690176a15c6 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_bisect_k_means.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_dbscan.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_dbscan.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d860231ad997422a328a724d2d460c33f14a7940 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_dbscan.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_feature_agglomeration.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_feature_agglomeration.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..afdd0a5d0706aa5139273cf4d0e81f6a0b57a3f3 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_feature_agglomeration.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_kmeans.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_kmeans.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ee37dcf1fcb5fba294c011457ac1d01dbd4e8928 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_kmeans.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_mean_shift.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_mean_shift.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c68291ba164b5e862c2311d86af37eb40543bf9a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_mean_shift.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_optics.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_optics.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..151f8a3fce1ce4543e8a0ff59c195c375b52d748 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_optics.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_spectral.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_spectral.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..5c05759c62eb71395f6551ad3ba79a8f729bd977 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_spectral.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..39b48badae34538956d74a8fa441cdb1fd9b8151 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/common.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6101978366373efb071477ddc98f0101777c08c1 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/common.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_affinity_propagation.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_affinity_propagation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ed6f8f6a50229a908a5093a03b4eed2310d2189d Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_affinity_propagation.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_bicluster.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_bicluster.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..df9e5d3d482418f230db3762a07a9dcd0bf216a9 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_bicluster.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_birch.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_birch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..010d47febc5975a28f9766fa07c82c1e9dafbf0b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_birch.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_bisect_k_means.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_bisect_k_means.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..377865afe3e5e9bd8a38a972f0d9c8e4e0a8e0af Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_bisect_k_means.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_feature_agglomeration.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_feature_agglomeration.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ff4493b58abf376b742aadf1537476ca08047694 Binary files /dev/null and 
b/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_feature_agglomeration.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_hierarchical.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_hierarchical.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2a03dcad0a09bfb16593578e014045987b797c54 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_hierarchical.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_k_means.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_k_means.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..40c7d86ddb23a1078da840f64259026b4d206500 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_k_means.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_mean_shift.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_mean_shift.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a75a41bad54c3e91ee6d45277b768f2259277248 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_mean_shift.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_optics.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_optics.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a83da958824c4e70a994f14fba18674b59b5a839 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_optics.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/compose/__init__.py b/llmeval-env/lib/python3.10/site-packages/sklearn/compose/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7b137cdf9e07f2f275c5c78c7ab6ab289c23413f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/compose/__init__.py @@ -0,0 +1,20 @@ +"""Meta-estimators for building composite models with transformers + +In addition to its current contents, this module will eventually be home to +refurbished versions of Pipeline and FeatureUnion. 
+ +""" + +from ._column_transformer import ( + ColumnTransformer, + make_column_selector, + make_column_transformer, +) +from ._target import TransformedTargetRegressor + +__all__ = [ + "ColumnTransformer", + "make_column_transformer", + "TransformedTargetRegressor", + "make_column_selector", +] diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/compose/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/compose/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..af5e712ce6d54023692efbd61aa856f37a83d41a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/compose/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/compose/__pycache__/_column_transformer.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/compose/__pycache__/_column_transformer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1199ed29e7f739a0c49613869aaf6f8afccbb099 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/compose/__pycache__/_column_transformer.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/compose/__pycache__/_target.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/compose/__pycache__/_target.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d5a08bc28102c3c0df52dee258dfb50c95d08516 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/compose/__pycache__/_target.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/compose/_column_transformer.py b/llmeval-env/lib/python3.10/site-packages/sklearn/compose/_column_transformer.py new file mode 100644 index 0000000000000000000000000000000000000000..78b66df28c94c4d6c147d40b918824f02c317345 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/compose/_column_transformer.py @@ -0,0 +1,1463 @@ +""" +The :mod:`sklearn.compose._column_transformer` module implements utilities +to work with heterogeneous data and to apply different transformers to +different columns. 
+""" + +# Author: Andreas Mueller +# Joris Van den Bossche +# License: BSD +import warnings +from collections import Counter +from itertools import chain +from numbers import Integral, Real + +import numpy as np +from scipy import sparse + +from ..base import TransformerMixin, _fit_context, clone +from ..pipeline import _fit_transform_one, _name_estimators, _transform_one +from ..preprocessing import FunctionTransformer +from ..utils import Bunch, _get_column_indices, _safe_indexing +from ..utils._estimator_html_repr import _VisualBlock +from ..utils._metadata_requests import METHODS +from ..utils._param_validation import HasMethods, Hidden, Interval, StrOptions +from ..utils._set_output import ( + _get_container_adapter, + _get_output_config, + _safe_set_output, +) +from ..utils.metadata_routing import ( + MetadataRouter, + MethodMapping, + _raise_for_params, + _routing_enabled, + process_routing, +) +from ..utils.metaestimators import _BaseComposition +from ..utils.parallel import Parallel, delayed +from ..utils.validation import ( + _check_feature_names_in, + _get_feature_names, + _is_pandas_df, + _num_samples, + check_array, + check_is_fitted, +) + +__all__ = ["ColumnTransformer", "make_column_transformer", "make_column_selector"] + + +_ERR_MSG_1DCOLUMN = ( + "1D data passed to a transformer that expects 2D data. " + "Try to specify the column selection as a list of one " + "item instead of a scalar." +) + + +class ColumnTransformer(TransformerMixin, _BaseComposition): + """Applies transformers to columns of an array or pandas DataFrame. + + This estimator allows different columns or column subsets of the input + to be transformed separately and the features generated by each transformer + will be concatenated to form a single feature space. + This is useful for heterogeneous or columnar data, to combine several + feature extraction mechanisms or transformations into a single transformer. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.20 + + Parameters + ---------- + transformers : list of tuples + List of (name, transformer, columns) tuples specifying the + transformer objects to be applied to subsets of the data. + + name : str + Like in Pipeline and FeatureUnion, this allows the transformer and + its parameters to be set using ``set_params`` and searched in grid + search. + transformer : {'drop', 'passthrough'} or estimator + Estimator must support :term:`fit` and :term:`transform`. + Special-cased strings 'drop' and 'passthrough' are accepted as + well, to indicate to drop the columns or to pass them through + untransformed, respectively. + columns : str, array-like of str, int, array-like of int, \ + array-like of bool, slice or callable + Indexes the data on its second axis. Integers are interpreted as + positional columns, while strings can reference DataFrame columns + by name. A scalar string or int should be used where + ``transformer`` expects X to be a 1d array-like (vector), + otherwise a 2d array will be passed to the transformer. + A callable is passed the input data `X` and can return any of the + above. To select multiple columns by name or dtype, you can use + :obj:`make_column_selector`. + + remainder : {'drop', 'passthrough'} or estimator, default='drop' + By default, only the specified columns in `transformers` are + transformed and combined in the output, and the non-specified + columns are dropped. (default of ``'drop'``). 
+ By specifying ``remainder='passthrough'``, all remaining columns that + were not specified in `transformers`, but present in the data passed + to `fit` will be automatically passed through. This subset of columns + is concatenated with the output of the transformers. For dataframes, + extra columns not seen during `fit` will be excluded from the output + of `transform`. + By setting ``remainder`` to be an estimator, the remaining + non-specified columns will use the ``remainder`` estimator. The + estimator must support :term:`fit` and :term:`transform`. + Note that using this feature requires that the DataFrame columns + input at :term:`fit` and :term:`transform` have identical order. + + sparse_threshold : float, default=0.3 + If the output of the different transformers contains sparse matrices, + these will be stacked as a sparse matrix if the overall density is + lower than this value. Use ``sparse_threshold=0`` to always return + dense. When the transformed output consists of all dense data, the + stacked result will be dense, and this keyword will be ignored. + + n_jobs : int, default=None + Number of jobs to run in parallel. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + transformer_weights : dict, default=None + Multiplicative weights for features per transformer. The output of the + transformer is multiplied by these weights. Keys are transformer names, + values the weights. + + verbose : bool, default=False + If True, the time elapsed while fitting each transformer will be + printed as it is completed. + + verbose_feature_names_out : bool, default=True + If True, :meth:`ColumnTransformer.get_feature_names_out` will prefix + all feature names with the name of the transformer that generated that + feature. + If False, :meth:`ColumnTransformer.get_feature_names_out` will not + prefix any feature names and will error if feature names are not + unique. + + .. versionadded:: 1.0 + + Attributes + ---------- + transformers_ : list + The collection of fitted transformers as tuples of (name, + fitted_transformer, column). `fitted_transformer` can be an estimator, + or `'drop'`; `'passthrough'` is replaced with an equivalent + :class:`~sklearn.preprocessing.FunctionTransformer`. In case there were + no columns selected, this will be the unfitted transformer. If there + are remaining columns, the final element is a tuple of the form: + ('remainder', transformer, remaining_columns) corresponding to the + ``remainder`` parameter. If there are remaining columns, then + ``len(transformers_)==len(transformers)+1``, otherwise + ``len(transformers_)==len(transformers)``. + + named_transformers_ : :class:`~sklearn.utils.Bunch` + Read-only attribute to access any transformer by given name. + Keys are transformer names and values are the fitted transformer + objects. + + sparse_output_ : bool + Boolean flag indicating whether the output of ``transform`` is a + sparse matrix or a dense numpy array, which depends on the output + of the individual transformers and the `sparse_threshold` keyword. + + output_indices_ : dict + A dictionary from each transformer name to a slice, where the slice + corresponds to indices in the transformed output. This is useful to + inspect which transformer is responsible for which transformed + feature(s). + + .. versionadded:: 1.0 + + n_features_in_ : int + Number of features seen during :term:`fit`. 
Only defined if the + underlying transformers expose such an attribute when fit. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + make_column_transformer : Convenience function for + combining the outputs of multiple transformer objects applied to + column subsets of the original feature space. + make_column_selector : Convenience function for selecting + columns based on datatype or the columns name with a regex pattern. + + Notes + ----- + The order of the columns in the transformed feature matrix follows the + order of how the columns are specified in the `transformers` list. + Columns of the original feature matrix that are not specified are + dropped from the resulting transformed feature matrix, unless specified + in the `passthrough` keyword. Those columns specified with `passthrough` + are added at the right to the output of the transformers. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.compose import ColumnTransformer + >>> from sklearn.preprocessing import Normalizer + >>> ct = ColumnTransformer( + ... [("norm1", Normalizer(norm='l1'), [0, 1]), + ... ("norm2", Normalizer(norm='l1'), slice(2, 4))]) + >>> X = np.array([[0., 1., 2., 2.], + ... [1., 1., 0., 1.]]) + >>> # Normalizer scales each row of X to unit norm. A separate scaling + >>> # is applied for the two first and two last elements of each + >>> # row independently. + >>> ct.fit_transform(X) + array([[0. , 1. , 0.5, 0.5], + [0.5, 0.5, 0. , 1. ]]) + + :class:`ColumnTransformer` can be configured with a transformer that requires + a 1d array by setting the column to a string: + + >>> from sklearn.feature_extraction.text import CountVectorizer + >>> from sklearn.preprocessing import MinMaxScaler + >>> import pandas as pd # doctest: +SKIP + >>> X = pd.DataFrame({ + ... "documents": ["First item", "second one here", "Is this the last?"], + ... "width": [3, 4, 5], + ... }) # doctest: +SKIP + >>> # "documents" is a string which configures ColumnTransformer to + >>> # pass the documents column as a 1d array to the CountVectorizer + >>> ct = ColumnTransformer( + ... [("text_preprocess", CountVectorizer(), "documents"), + ... ("num_preprocess", MinMaxScaler(), ["width"])]) + >>> X_trans = ct.fit_transform(X) # doctest: +SKIP + + For a more detailed example of usage, see + :ref:`sphx_glr_auto_examples_compose_plot_column_transformer_mixed_types.py`. 
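Two features described above but not shown in the docstring examples are column selection by dtype and `remainder='passthrough'`. A hedged sketch (invented data; pandas assumed):

# Illustrative sketch only; not part of the patch.
import pandas as pd
from sklearn.compose import ColumnTransformer, make_column_selector
from sklearn.preprocessing import OneHotEncoder

X = pd.DataFrame({"city": ["Paris", "Oslo", "Paris"], "age": [25, 31, 47]})
ct = ColumnTransformer(
    [("onehot", OneHotEncoder(sparse_output=False),
      make_column_selector(dtype_include=object))],
    remainder="passthrough",  # 'age' is appended, untransformed, on the right
)
X_trans = ct.fit_transform(X)  # shape (3, 3): two one-hot columns plus 'age'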
+ """ + + _required_parameters = ["transformers"] + + _parameter_constraints: dict = { + "transformers": [list, Hidden(tuple)], + "remainder": [ + StrOptions({"drop", "passthrough"}), + HasMethods(["fit", "transform"]), + HasMethods(["fit_transform", "transform"]), + ], + "sparse_threshold": [Interval(Real, 0, 1, closed="both")], + "n_jobs": [Integral, None], + "transformer_weights": [dict, None], + "verbose": ["verbose"], + "verbose_feature_names_out": ["boolean"], + } + + def __init__( + self, + transformers, + *, + remainder="drop", + sparse_threshold=0.3, + n_jobs=None, + transformer_weights=None, + verbose=False, + verbose_feature_names_out=True, + ): + self.transformers = transformers + self.remainder = remainder + self.sparse_threshold = sparse_threshold + self.n_jobs = n_jobs + self.transformer_weights = transformer_weights + self.verbose = verbose + self.verbose_feature_names_out = verbose_feature_names_out + + @property + def _transformers(self): + """ + Internal list of transformer only containing the name and + transformers, dropping the columns. + + DO NOT USE: This is for the implementation of get_params via + BaseComposition._get_params which expects lists of tuples of len 2. + + To iterate through the transformers, use ``self._iter`` instead. + """ + try: + return [(name, trans) for name, trans, _ in self.transformers] + except (TypeError, ValueError): + return self.transformers + + @_transformers.setter + def _transformers(self, value): + """DO NOT USE: This is for the implementation of set_params via + BaseComposition._get_params which gives lists of tuples of len 2. + """ + try: + self.transformers = [ + (name, trans, col) + for ((name, trans), (_, _, col)) in zip(value, self.transformers) + ] + except (TypeError, ValueError): + self.transformers = value + + def set_output(self, *, transform=None): + """Set the output container when `"transform"` and `"fit_transform"` are called. + + Calling `set_output` will set the output of all estimators in `transformers` + and `transformers_`. + + Parameters + ---------- + transform : {"default", "pandas"}, default=None + Configure output of `transform` and `fit_transform`. + + - `"default"`: Default output format of a transformer + - `"pandas"`: DataFrame output + - `"polars"`: Polars output + - `None`: Transform configuration is unchanged + + .. versionadded:: 1.4 + `"polars"` option was added. + + Returns + ------- + self : estimator instance + Estimator instance. + """ + super().set_output(transform=transform) + + transformers = ( + trans + for _, trans, _ in chain( + self.transformers, getattr(self, "transformers_", []) + ) + if trans not in {"passthrough", "drop"} + ) + for trans in transformers: + _safe_set_output(trans, transform=transform) + + if self.remainder not in {"passthrough", "drop"}: + _safe_set_output(self.remainder, transform=transform) + + return self + + def get_params(self, deep=True): + """Get parameters for this estimator. + + Returns the parameters given in the constructor as well as the + estimators contained within the `transformers` of the + `ColumnTransformer`. + + Parameters + ---------- + deep : bool, default=True + If True, will return the parameters for this estimator and + contained subobjects that are estimators. + + Returns + ------- + params : dict + Parameter names mapped to their values. + """ + return self._get_params("_transformers", deep=deep) + + def set_params(self, **kwargs): + """Set the parameters of this estimator. + + Valid parameter keys can be listed with ``get_params()``. 
Note that you + can directly set the parameters of the estimators contained in + `transformers` of `ColumnTransformer`. + + Parameters + ---------- + **kwargs : dict + Estimator parameters. + + Returns + ------- + self : ColumnTransformer + This estimator. + """ + self._set_params("_transformers", **kwargs) + return self + + def _iter(self, fitted, column_as_labels, skip_drop, skip_empty_columns): + """ + Generate (name, trans, column, weight) tuples. + + + Parameters + ---------- + fitted : bool + If True, use the fitted transformers (``self.transformers_``) to + iterate through transformers, else use the transformers passed by + the user (``self.transformers``). + + column_as_labels : bool + If True, columns are returned as string labels. If False, columns + are returned as they were given by the user. This can only be True + if the ``ColumnTransformer`` is already fitted. + + skip_drop : bool + If True, 'drop' transformers are filtered out. + + skip_empty_columns : bool + If True, transformers with empty selected columns are filtered out. + + Yields + ------ + A generator of tuples containing: + - name : the name of the transformer + - transformer : the transformer object + - columns : the columns for that transformer + - weight : the weight of the transformer + """ + if fitted: + transformers = self.transformers_ + else: + # interleave the validated column specifiers + transformers = [ + (name, trans, column) + for (name, trans, _), column in zip(self.transformers, self._columns) + ] + # add transformer tuple for remainder + if self._remainder[2]: + transformers = chain(transformers, [self._remainder]) + get_weight = (self.transformer_weights or {}).get + + for name, trans, columns in transformers: + if skip_drop and trans == "drop": + continue + if skip_empty_columns and _is_empty_column_selection(columns): + continue + + if column_as_labels: + # Convert all columns to using their string labels + columns_is_scalar = np.isscalar(columns) + + indices = self._transformer_to_input_indices[name] + columns = self.feature_names_in_[indices] + + if columns_is_scalar: + # selection is done with one dimension + columns = columns[0] + + yield (name, trans, columns, get_weight(name)) + + def _validate_transformers(self): + """Validate names of transformers and the transformers themselves. + + This checks whether given transformers have the required methods, i.e. + `fit` or `fit_transform` and `transform` implemented. + """ + if not self.transformers: + return + + names, transformers, _ = zip(*self.transformers) + + # validate names + self._validate_names(names) + + # validate estimators + for t in transformers: + if t in ("drop", "passthrough"): + continue + if not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not hasattr( + t, "transform" + ): + # Used to validate the transformers in the `transformers` list + raise TypeError( + "All estimators should implement fit and " + "transform, or can be 'drop' or 'passthrough' " + "specifiers. '%s' (type %s) doesn't." % (t, type(t)) + ) + + def _validate_column_callables(self, X): + """ + Converts callable column specifications. + + This stores a dictionary of the form `{step_name: column_indices}` and + calls the `columns` on `X` if `columns` is a callable for a given + transformer. + + The results are then stored in `self._transformer_to_input_indices`. 
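A hedged sketch of a callable column specification being resolved at fit time (invented data; pandas assumed):

# Illustrative sketch only; not part of the patch.
import pandas as pd
from sklearn.compose import ColumnTransformer, make_column_selector
from sklearn.preprocessing import StandardScaler

X = pd.DataFrame({"height": [1.7, 1.8], "city": ["Paris", "Oslo"]})
ct = ColumnTransformer(
    [("scale", StandardScaler(), make_column_selector(dtype_include="number"))]
)
ct.fit(X)
print(ct.transformers_[0][2])  # the selector was called on X, e.g. ['height']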
+ """ + all_columns = [] + transformer_to_input_indices = {} + for name, _, columns in self.transformers: + if callable(columns): + columns = columns(X) + all_columns.append(columns) + transformer_to_input_indices[name] = _get_column_indices(X, columns) + + self._columns = all_columns + self._transformer_to_input_indices = transformer_to_input_indices + + def _validate_remainder(self, X): + """ + Validates ``remainder`` and defines ``_remainder`` targeting + the remaining columns. + """ + cols = set(chain(*self._transformer_to_input_indices.values())) + remaining = sorted(set(range(self.n_features_in_)) - cols) + self._remainder = ("remainder", self.remainder, remaining) + self._transformer_to_input_indices["remainder"] = remaining + + @property + def named_transformers_(self): + """Access the fitted transformer by name. + + Read-only attribute to access any transformer by given name. + Keys are transformer names and values are the fitted transformer + objects. + """ + # Use Bunch object to improve autocomplete + return Bunch(**{name: trans for name, trans, _ in self.transformers_}) + + def _get_feature_name_out_for_transformer(self, name, trans, feature_names_in): + """Gets feature names of transformer. + + Used in conjunction with self._iter(fitted=True) in get_feature_names_out. + """ + column_indices = self._transformer_to_input_indices[name] + names = feature_names_in[column_indices] + # An actual transformer + if not hasattr(trans, "get_feature_names_out"): + raise AttributeError( + f"Transformer {name} (type {type(trans).__name__}) does " + "not provide get_feature_names_out." + ) + return trans.get_feature_names_out(names) + + def get_feature_names_out(self, input_features=None): + """Get output feature names for transformation. + + Parameters + ---------- + input_features : array-like of str or None, default=None + Input features. + + - If `input_features` is `None`, then `feature_names_in_` is + used as feature names in. If `feature_names_in_` is not defined, + then the following input feature names are generated: + `["x0", "x1", ..., "x(n_features_in_ - 1)"]`. + - If `input_features` is an array-like, then `input_features` must + match `feature_names_in_` if `feature_names_in_` is defined. + + Returns + ------- + feature_names_out : ndarray of str objects + Transformed feature names. + """ + check_is_fitted(self) + input_features = _check_feature_names_in(self, input_features) + + # List of tuples (name, feature_names_out) + transformer_with_feature_names_out = [] + for name, trans, *_ in self._iter( + fitted=True, + column_as_labels=False, + skip_empty_columns=True, + skip_drop=True, + ): + feature_names_out = self._get_feature_name_out_for_transformer( + name, trans, input_features + ) + if feature_names_out is None: + continue + transformer_with_feature_names_out.append((name, feature_names_out)) + + if not transformer_with_feature_names_out: + # No feature names + return np.array([], dtype=object) + + return self._add_prefix_for_feature_names_out( + transformer_with_feature_names_out + ) + + def _add_prefix_for_feature_names_out(self, transformer_with_feature_names_out): + """Add prefix for feature names out that includes the transformer names. + + Parameters + ---------- + transformer_with_feature_names_out : list of tuples of (str, array-like of str) + The tuple consistent of the transformer's name and its feature names out. + + Returns + ------- + feature_names_out : ndarray of shape (n_features,), dtype=str + Transformed feature names. 
+ """ + if self.verbose_feature_names_out: + # Prefix the feature names out with the transformers name + names = list( + chain.from_iterable( + (f"{name}__{i}" for i in feature_names_out) + for name, feature_names_out in transformer_with_feature_names_out + ) + ) + return np.asarray(names, dtype=object) + + # verbose_feature_names_out is False + # Check that names are all unique without a prefix + feature_names_count = Counter( + chain.from_iterable(s for _, s in transformer_with_feature_names_out) + ) + top_6_overlap = [ + name for name, count in feature_names_count.most_common(6) if count > 1 + ] + top_6_overlap.sort() + if top_6_overlap: + if len(top_6_overlap) == 6: + # There are more than 5 overlapping names, we only show the 5 + # of the feature names + names_repr = str(top_6_overlap[:5])[:-1] + ", ...]" + else: + names_repr = str(top_6_overlap) + raise ValueError( + f"Output feature names: {names_repr} are not unique. Please set " + "verbose_feature_names_out=True to add prefixes to feature names" + ) + + return np.concatenate( + [name for _, name in transformer_with_feature_names_out], + ) + + def _update_fitted_transformers(self, transformers): + """Set self.transformers_ from given transformers. + + Parameters + ---------- + transformers : list of estimators + The fitted estimators as the output of + `self._call_func_on_transformers(func=_fit_transform_one, ...)`. + That function doesn't include 'drop' or transformers for which no + column is selected. 'drop' is kept as is, and for the no-column + transformers the unfitted transformer is put in + `self.transformers_`. + """ + # transformers are fitted; excludes 'drop' cases + fitted_transformers = iter(transformers) + transformers_ = [] + + for name, old, column, _ in self._iter( + fitted=False, + column_as_labels=False, + skip_drop=False, + skip_empty_columns=False, + ): + if old == "drop": + trans = "drop" + elif _is_empty_column_selection(column): + trans = old + else: + trans = next(fitted_transformers) + transformers_.append((name, trans, column)) + + # sanity check that transformers is exhausted + assert not list(fitted_transformers) + self.transformers_ = transformers_ + + def _validate_output(self, result): + """ + Ensure that the output of each transformer is 2D. Otherwise + hstack can raise an error or produce incorrect results. + """ + names = [ + name + for name, _, _, _ in self._iter( + fitted=True, + column_as_labels=False, + skip_drop=True, + skip_empty_columns=True, + ) + ] + for Xs, name in zip(result, names): + if not getattr(Xs, "ndim", 0) == 2 and not hasattr(Xs, "__dataframe__"): + raise ValueError( + "The output of the '{0}' transformer should be 2D (numpy array, " + "scipy sparse array, dataframe).".format(name) + ) + if _get_output_config("transform", self)["dense"] == "pandas": + return + try: + import pandas as pd + except ImportError: + return + for Xs, name in zip(result, names): + if not _is_pandas_df(Xs): + continue + for col_name, dtype in Xs.dtypes.to_dict().items(): + if getattr(dtype, "na_value", None) is not pd.NA: + continue + if pd.NA not in Xs[col_name].values: + continue + class_name = self.__class__.__name__ + # TODO(1.6): replace warning with ValueError + warnings.warn( + ( + f"The output of the '{name}' transformer for column" + f" '{col_name}' has dtype {dtype} and uses pandas.NA to" + " represent null values. Storing this output in a numpy array" + " can cause errors in downstream scikit-learn estimators, and" + " inefficiencies. 
Starting with scikit-learn version 1.6, this" + " will raise a ValueError. To avoid this problem you can (i)" + " store the output in a pandas DataFrame by using" + f" {class_name}.set_output(transform='pandas') or (ii) modify" + f" the input data or the '{name}' transformer to avoid the" + " presence of pandas.NA (for example by using" + " pandas.DataFrame.astype)." + ), + FutureWarning, + ) + + def _record_output_indices(self, Xs): + """ + Record which transformer produced which column. + """ + idx = 0 + self.output_indices_ = {} + + for transformer_idx, (name, _, _, _) in enumerate( + self._iter( + fitted=True, + column_as_labels=False, + skip_drop=True, + skip_empty_columns=True, + ) + ): + n_columns = Xs[transformer_idx].shape[1] + self.output_indices_[name] = slice(idx, idx + n_columns) + idx += n_columns + + # `_iter` only generates transformers that have a non empty + # selection. Here we set empty slices for transformers that + # generate no output, which are safe for indexing + all_names = [t[0] for t in self.transformers] + ["remainder"] + for name in all_names: + if name not in self.output_indices_: + self.output_indices_[name] = slice(0, 0) + + def _log_message(self, name, idx, total): + if not self.verbose: + return None + return "(%d of %d) Processing %s" % (idx, total, name) + + def _call_func_on_transformers(self, X, y, func, column_as_labels, routed_params): + """ + Private function to fit and/or transform on demand. + + Parameters + ---------- + X : {array-like, dataframe} of shape (n_samples, n_features) + The data to be used in fit and/or transform. + + y : array-like of shape (n_samples,) + Targets. + + func : callable + Function to call, which can be _fit_transform_one or + _transform_one. + + column_as_labels : bool + Used to iterate through transformers. If True, columns are returned + as strings. If False, columns are returned as they were given by + the user. Can be True only if the ``ColumnTransformer`` is already + fitted. + + routed_params : dict + The routed parameters as the output from ``process_routing``. + + Returns + ------- + Return value (transformers and/or transformed X data) depends + on the passed function. + """ + if func is _fit_transform_one: + fitted = False + else: # func is _transform_one + fitted = True + + transformers = list( + self._iter( + fitted=fitted, + column_as_labels=column_as_labels, + skip_drop=True, + skip_empty_columns=True, + ) + ) + try: + jobs = [] + for idx, (name, trans, column, weight) in enumerate(transformers, start=1): + if func is _fit_transform_one: + if trans == "passthrough": + output_config = _get_output_config("transform", self) + trans = FunctionTransformer( + accept_sparse=True, + check_inverse=False, + feature_names_out="one-to-one", + ).set_output(transform=output_config["dense"]) + + extra_args = dict( + message_clsname="ColumnTransformer", + message=self._log_message(name, idx, len(transformers)), + ) + else: # func is _transform_one + extra_args = {} + jobs.append( + delayed(func)( + transformer=clone(trans) if not fitted else trans, + X=_safe_indexing(X, column, axis=1), + y=y, + weight=weight, + **extra_args, + params=routed_params[name], + ) + ) + + return Parallel(n_jobs=self.n_jobs)(jobs) + + except ValueError as e: + if "Expected 2D array, got 1D array instead" in str(e): + raise ValueError(_ERR_MSG_1DCOLUMN) from e + else: + raise + + def fit(self, X, y=None, **params): + """Fit all transformers using X. 
+ + Parameters + ---------- + X : {array-like, dataframe} of shape (n_samples, n_features) + Input data, of which specified subsets are used to fit the + transformers. + + y : array-like of shape (n_samples,...), default=None + Targets for supervised learning. + + **params : dict, default=None + Parameters to be passed to the underlying transformers' ``fit`` and + ``transform`` methods. + + You can only pass this if metadata routing is enabled, which you + can enable using ``sklearn.set_config(enable_metadata_routing=True)``. + + .. versionadded:: 1.4 + + Returns + ------- + self : ColumnTransformer + This estimator. + """ + _raise_for_params(params, self, "fit") + # we use fit_transform to make sure to set sparse_output_ (for which we + # need the transformed data) to have consistent output type in predict + self.fit_transform(X, y=y, **params) + return self + + @_fit_context( + # estimators in ColumnTransformer.transformers are not validated yet + prefer_skip_nested_validation=False + ) + def fit_transform(self, X, y=None, **params): + """Fit all transformers, transform the data and concatenate results. + + Parameters + ---------- + X : {array-like, dataframe} of shape (n_samples, n_features) + Input data, of which specified subsets are used to fit the + transformers. + + y : array-like of shape (n_samples,), default=None + Targets for supervised learning. + + **params : dict, default=None + Parameters to be passed to the underlying transformers' ``fit`` and + ``transform`` methods. + + You can only pass this if metadata routing is enabled, which you + can enable using ``sklearn.set_config(enable_metadata_routing=True)``. + + .. versionadded:: 1.4 + + Returns + ------- + X_t : {array-like, sparse matrix} of \ + shape (n_samples, sum_n_components) + Horizontally stacked results of transformers. sum_n_components is the + sum of n_components (output dimension) over transformers. If + any result is a sparse matrix, everything will be converted to + sparse matrices. + """ + _raise_for_params(params, self, "fit_transform") + self._check_feature_names(X, reset=True) + + X = _check_X(X) + # set n_features_in_ attribute + self._check_n_features(X, reset=True) + self._validate_transformers() + n_samples = _num_samples(X) + + self._validate_column_callables(X) + self._validate_remainder(X) + + if _routing_enabled(): + routed_params = process_routing(self, "fit_transform", **params) + else: + routed_params = self._get_empty_routing() + + result = self._call_func_on_transformers( + X, + y, + _fit_transform_one, + column_as_labels=False, + routed_params=routed_params, + ) + + if not result: + self._update_fitted_transformers([]) + # All transformers are None + return np.zeros((n_samples, 0)) + + Xs, transformers = zip(*result) + + # determine if concatenated output will be sparse or not + if any(sparse.issparse(X) for X in Xs): + nnz = sum(X.nnz if sparse.issparse(X) else X.size for X in Xs) + total = sum( + X.shape[0] * X.shape[1] if sparse.issparse(X) else X.size for X in Xs + ) + density = nnz / total + self.sparse_output_ = density < self.sparse_threshold + else: + self.sparse_output_ = False + + self._update_fitted_transformers(transformers) + self._validate_output(Xs) + self._record_output_indices(Xs) + + return self._hstack(list(Xs), n_samples=n_samples) + + def transform(self, X, **params): + """Transform X separately by each transformer, concatenate results. + + Parameters + ---------- + X : {array-like, dataframe} of shape (n_samples, n_features) + The data to be transformed by subset. 
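A hedged sketch of the dataframe-in/dataframe-out behaviour implemented just below: when both `fit` and `transform` receive dataframes, selection is done by column name, so extra columns and a different column order at transform time are tolerated (invented data; default `remainder='drop'` assumed):

# Illustrative sketch only; not part of the patch.
import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import StandardScaler

X_fit = pd.DataFrame({"a": [1.0, 2.0], "b": [3.0, 4.0]})
ct = ColumnTransformer([("scale", StandardScaler(), ["a"])]).fit(X_fit)

# Extra column and shuffled order: columns are matched by name.
X_new = pd.DataFrame({"extra": [0, 0], "b": [5.0, 6.0], "a": [7.0, 8.0]})
X_trans = ct.transform(X_new)  # only the scaled 'a' column is returned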
+ + **params : dict, default=None + Parameters to be passed to the underlying transformers' ``transform`` + method. + + You can only pass this if metadata routing is enabled, which you + can enable using ``sklearn.set_config(enable_metadata_routing=True)``. + + .. versionadded:: 1.4 + + Returns + ------- + X_t : {array-like, sparse matrix} of \ + shape (n_samples, sum_n_components) + Horizontally stacked results of transformers. sum_n_components is the + sum of n_components (output dimension) over transformers. If + any result is a sparse matrix, everything will be converted to + sparse matrices. + """ + _raise_for_params(params, self, "transform") + check_is_fitted(self) + X = _check_X(X) + + # If ColumnTransformer is fit using a dataframe, and now a dataframe is + # passed to be transformed, we select columns by name instead. This + # enables the user to pass X at transform time with extra columns which + # were not present in fit time, and the order of the columns doesn't + # matter. + fit_dataframe_and_transform_dataframe = hasattr(self, "feature_names_in_") and ( + _is_pandas_df(X) or hasattr(X, "__dataframe__") + ) + + n_samples = _num_samples(X) + column_names = _get_feature_names(X) + + if fit_dataframe_and_transform_dataframe: + named_transformers = self.named_transformers_ + # check that all names seen in fit are in transform, unless + # they were dropped + non_dropped_indices = [ + ind + for name, ind in self._transformer_to_input_indices.items() + if name in named_transformers and named_transformers[name] != "drop" + ] + + all_indices = set(chain(*non_dropped_indices)) + all_names = set(self.feature_names_in_[ind] for ind in all_indices) + + diff = all_names - set(column_names) + if diff: + raise ValueError(f"columns are missing: {diff}") + else: + # ndarray was used for fitting or transforming, thus we only + # check that n_features_in_ is consistent + self._check_n_features(X, reset=False) + + if _routing_enabled(): + routed_params = process_routing(self, "transform", **params) + else: + routed_params = self._get_empty_routing() + + Xs = self._call_func_on_transformers( + X, + None, + _transform_one, + column_as_labels=fit_dataframe_and_transform_dataframe, + routed_params=routed_params, + ) + self._validate_output(Xs) + + if not Xs: + # All transformers are None + return np.zeros((n_samples, 0)) + + return self._hstack(list(Xs), n_samples=n_samples) + + def _hstack(self, Xs, *, n_samples): + """Stacks Xs horizontally. + + This allows subclasses to control the stacking behavior, while reusing + everything else from ColumnTransformer. + + Parameters + ---------- + Xs : list of {array-like, sparse matrix, dataframe} + The container to concatenate. + n_samples : int + The number of samples in the input data to checking the transformation + consistency. + """ + if self.sparse_output_: + try: + # since all columns should be numeric before stacking them + # in a sparse matrix, `check_array` is used for the + # dtype conversion if necessary. + converted_Xs = [ + check_array(X, accept_sparse=True, force_all_finite=False) + for X in Xs + ] + except ValueError as e: + raise ValueError( + "For a sparse output, all columns should " + "be a numeric or convertible to a numeric." 
+ ) from e + + return sparse.hstack(converted_Xs).tocsr() + else: + Xs = [f.toarray() if sparse.issparse(f) else f for f in Xs] + adapter = _get_container_adapter("transform", self) + if adapter and all(adapter.is_supported_container(X) for X in Xs): + # rename before stacking as it avoids to error on temporary duplicated + # columns + transformer_names = [ + t[0] + for t in self._iter( + fitted=True, + column_as_labels=False, + skip_drop=True, + skip_empty_columns=True, + ) + ] + feature_names_outs = [X.columns for X in Xs if X.shape[1] != 0] + if self.verbose_feature_names_out: + # `_add_prefix_for_feature_names_out` takes care about raising + # an error if there are duplicated columns. + feature_names_outs = self._add_prefix_for_feature_names_out( + list(zip(transformer_names, feature_names_outs)) + ) + else: + # check for duplicated columns and raise if any + feature_names_outs = list(chain.from_iterable(feature_names_outs)) + feature_names_count = Counter(feature_names_outs) + if any(count > 1 for count in feature_names_count.values()): + duplicated_feature_names = sorted( + name + for name, count in feature_names_count.items() + if count > 1 + ) + err_msg = ( + "Duplicated feature names found before concatenating the" + " outputs of the transformers:" + f" {duplicated_feature_names}.\n" + ) + for transformer_name, X in zip(transformer_names, Xs): + if X.shape[1] == 0: + continue + dup_cols_in_transformer = sorted( + set(X.columns).intersection(duplicated_feature_names) + ) + if len(dup_cols_in_transformer): + err_msg += ( + f"Transformer {transformer_name} has conflicting " + f"columns names: {dup_cols_in_transformer}.\n" + ) + raise ValueError( + err_msg + + "Either make sure that the transformers named above " + "do not generate columns with conflicting names or set " + "verbose_feature_names_out=True to automatically " + "prefix to the output feature names with the name " + "of the transformer to prevent any conflicting " + "names." + ) + + names_idx = 0 + for X in Xs: + if X.shape[1] == 0: + continue + names_out = feature_names_outs[names_idx : names_idx + X.shape[1]] + adapter.rename_columns(X, names_out) + names_idx += X.shape[1] + + output = adapter.hstack(Xs) + output_samples = output.shape[0] + if output_samples != n_samples: + raise ValueError( + "Concatenating DataFrames from the transformer's output lead to" + " an inconsistent number of samples. The output may have Pandas" + " Indexes that do not match, or that transformers are returning" + " number of samples which are not the same as the number input" + " samples." + ) + + return output + + return np.hstack(Xs) + + def _sk_visual_block_(self): + if isinstance(self.remainder, str) and self.remainder == "drop": + transformers = self.transformers + elif hasattr(self, "_remainder"): + remainder_columns = self._remainder[2] + if ( + hasattr(self, "feature_names_in_") + and remainder_columns + and not all(isinstance(col, str) for col in remainder_columns) + ): + remainder_columns = self.feature_names_in_[remainder_columns].tolist() + transformers = chain( + self.transformers, [("remainder", self.remainder, remainder_columns)] + ) + else: + transformers = chain(self.transformers, [("remainder", self.remainder, "")]) + + names, transformers, name_details = zip(*transformers) + return _VisualBlock( + "parallel", transformers, names=names, name_details=name_details + ) + + def _get_empty_routing(self): + """Return empty routing. + + Used while routing can be disabled. 
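+
+ The returned :class:`~sklearn.utils.Bunch` maps each non-dropped
+ transformer name to a Bunch of empty parameter dicts, one per method in
+ ``METHODS``.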
+ + TODO: Remove when ``set_config(enable_metadata_routing=False)`` is no + more an option. + """ + return Bunch( + **{ + name: Bunch(**{method: {} for method in METHODS}) + for name, step, _, _ in self._iter( + fitted=False, + column_as_labels=False, + skip_drop=True, + skip_empty_columns=True, + ) + } + ) + + def get_metadata_routing(self): + """Get metadata routing of this object. + + Please check :ref:`User Guide ` on how the routing + mechanism works. + + .. versionadded:: 1.4 + + Returns + ------- + routing : MetadataRouter + A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating + routing information. + """ + router = MetadataRouter(owner=self.__class__.__name__) + # Here we don't care about which columns are used for which + # transformers, and whether or not a transformer is used at all, which + # might happen if no columns are selected for that transformer. We + # request all metadata requested by all transformers. + transformers = chain(self.transformers, [("remainder", self.remainder, None)]) + for name, step, _ in transformers: + method_mapping = MethodMapping() + if hasattr(step, "fit_transform"): + ( + method_mapping.add(caller="fit", callee="fit_transform").add( + caller="fit_transform", callee="fit_transform" + ) + ) + else: + ( + method_mapping.add(caller="fit", callee="fit") + .add(caller="fit", callee="transform") + .add(caller="fit_transform", callee="fit") + .add(caller="fit_transform", callee="transform") + ) + method_mapping.add(caller="transform", callee="transform") + router.add(method_mapping=method_mapping, **{name: step}) + + return router + + +def _check_X(X): + """Use check_array only when necessary, e.g. on lists and other non-array-likes.""" + if hasattr(X, "__array__") or hasattr(X, "__dataframe__") or sparse.issparse(X): + return X + return check_array(X, force_all_finite="allow-nan", dtype=object) + + +def _is_empty_column_selection(column): + """ + Return True if the column selection is empty (empty list or all-False + boolean array). + + """ + if hasattr(column, "dtype") and np.issubdtype(column.dtype, np.bool_): + return not column.any() + elif hasattr(column, "__len__"): + return ( + len(column) == 0 + or all(isinstance(col, bool) for col in column) + and not any(column) + ) + else: + return False + + +def _get_transformer_list(estimators): + """ + Construct (name, trans, column) tuples from list + + """ + transformers, columns = zip(*estimators) + names, _ = zip(*_name_estimators(transformers)) + + transformer_list = list(zip(names, transformers, columns)) + return transformer_list + + +# This function is not validated using validate_params because +# it's just a factory for ColumnTransformer. +def make_column_transformer( + *transformers, + remainder="drop", + sparse_threshold=0.3, + n_jobs=None, + verbose=False, + verbose_feature_names_out=True, +): + """Construct a ColumnTransformer from the given transformers. + + This is a shorthand for the ColumnTransformer constructor; it does not + require, and does not permit, naming the transformers. Instead, they will + be given names automatically based on their types. It also does not allow + weighting with ``transformer_weights``. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + *transformers : tuples + Tuples of the form (transformer, columns) specifying the + transformer objects to be applied to subsets of the data. + + transformer : {'drop', 'passthrough'} or estimator + Estimator must support :term:`fit` and :term:`transform`. 
+ Special-cased strings 'drop' and 'passthrough' are accepted as + well, to indicate to drop the columns or to pass them through + untransformed, respectively. + columns : str, array-like of str, int, array-like of int, slice, \ + array-like of bool or callable + Indexes the data on its second axis. Integers are interpreted as + positional columns, while strings can reference DataFrame columns + by name. A scalar string or int should be used where + ``transformer`` expects X to be a 1d array-like (vector), + otherwise a 2d array will be passed to the transformer. + A callable is passed the input data `X` and can return any of the + above. To select multiple columns by name or dtype, you can use + :obj:`make_column_selector`. + + remainder : {'drop', 'passthrough'} or estimator, default='drop' + By default, only the specified columns in `transformers` are + transformed and combined in the output, and the non-specified + columns are dropped. (default of ``'drop'``). + By specifying ``remainder='passthrough'``, all remaining columns that + were not specified in `transformers` will be automatically passed + through. This subset of columns is concatenated with the output of + the transformers. + By setting ``remainder`` to be an estimator, the remaining + non-specified columns will use the ``remainder`` estimator. The + estimator must support :term:`fit` and :term:`transform`. + + sparse_threshold : float, default=0.3 + If the transformed output consists of a mix of sparse and dense data, + it will be stacked as a sparse matrix if the density is lower than this + value. Use ``sparse_threshold=0`` to always return dense. + When the transformed output consists of all sparse or all dense data, + the stacked result will be sparse or dense, respectively, and this + keyword will be ignored. + + n_jobs : int, default=None + Number of jobs to run in parallel. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + verbose : bool, default=False + If True, the time elapsed while fitting each transformer will be + printed as it is completed. + + verbose_feature_names_out : bool, default=True + If True, :meth:`ColumnTransformer.get_feature_names_out` will prefix + all feature names with the name of the transformer that generated that + feature. + If False, :meth:`ColumnTransformer.get_feature_names_out` will not + prefix any feature names and will error if feature names are not + unique. + + .. versionadded:: 1.0 + + Returns + ------- + ct : ColumnTransformer + Returns a :class:`ColumnTransformer` object. + + See Also + -------- + ColumnTransformer : Class that allows combining the + outputs of multiple transformer objects used on column subsets + of the data into a single feature space. + + Examples + -------- + >>> from sklearn.preprocessing import StandardScaler, OneHotEncoder + >>> from sklearn.compose import make_column_transformer + >>> make_column_transformer( + ... (StandardScaler(), ['numerical_column']), + ... 
(OneHotEncoder(), ['categorical_column'])) + ColumnTransformer(transformers=[('standardscaler', StandardScaler(...), + ['numerical_column']), + ('onehotencoder', OneHotEncoder(...), + ['categorical_column'])]) + """ + # transformer_weights keyword is not passed through because the user + # would need to know the automatically generated names of the transformers + transformer_list = _get_transformer_list(transformers) + return ColumnTransformer( + transformer_list, + n_jobs=n_jobs, + remainder=remainder, + sparse_threshold=sparse_threshold, + verbose=verbose, + verbose_feature_names_out=verbose_feature_names_out, + ) + + +class make_column_selector: + """Create a callable to select columns to be used with + :class:`ColumnTransformer`. + + :func:`make_column_selector` can select columns based on datatype or the + columns name with a regex. When using multiple selection criteria, **all** + criteria must match for a column to be selected. + + For an example of how to use :func:`make_column_selector` within a + :class:`ColumnTransformer` to select columns based on data type (i.e. + `dtype`), refer to + :ref:`sphx_glr_auto_examples_compose_plot_column_transformer_mixed_types.py`. + + Parameters + ---------- + pattern : str, default=None + Name of columns containing this regex pattern will be included. If + None, column selection will not be selected based on pattern. + + dtype_include : column dtype or list of column dtypes, default=None + A selection of dtypes to include. For more details, see + :meth:`pandas.DataFrame.select_dtypes`. + + dtype_exclude : column dtype or list of column dtypes, default=None + A selection of dtypes to exclude. For more details, see + :meth:`pandas.DataFrame.select_dtypes`. + + Returns + ------- + selector : callable + Callable for column selection to be used by a + :class:`ColumnTransformer`. + + See Also + -------- + ColumnTransformer : Class that allows combining the + outputs of multiple transformer objects used on column subsets + of the data into a single feature space. + + Examples + -------- + >>> from sklearn.preprocessing import StandardScaler, OneHotEncoder + >>> from sklearn.compose import make_column_transformer + >>> from sklearn.compose import make_column_selector + >>> import numpy as np + >>> import pandas as pd # doctest: +SKIP + >>> X = pd.DataFrame({'city': ['London', 'London', 'Paris', 'Sallisaw'], + ... 'rating': [5, 3, 4, 5]}) # doctest: +SKIP + >>> ct = make_column_transformer( + ... (StandardScaler(), + ... make_column_selector(dtype_include=np.number)), # rating + ... (OneHotEncoder(), + ... make_column_selector(dtype_include=object))) # city + >>> ct.fit_transform(X) # doctest: +SKIP + array([[ 0.90453403, 1. , 0. , 0. ], + [-1.50755672, 1. , 0. , 0. ], + [-0.30151134, 0. , 1. , 0. ], + [ 0.90453403, 0. , 0. , 1. ]]) + """ + + def __init__(self, pattern=None, *, dtype_include=None, dtype_exclude=None): + self.pattern = pattern + self.dtype_include = dtype_include + self.dtype_exclude = dtype_exclude + + def __call__(self, df): + """Callable for column selection to be used by a + :class:`ColumnTransformer`. + + Parameters + ---------- + df : dataframe of shape (n_features, n_samples) + DataFrame to select columns from. 
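+
+ Returns
+ -------
+ cols : list of str
+ Names of the columns selected from ``df``.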
+ """ + if not hasattr(df, "iloc"): + raise ValueError( + "make_column_selector can only be applied to pandas dataframes" + ) + df_row = df.iloc[:1] + if self.dtype_include is not None or self.dtype_exclude is not None: + df_row = df_row.select_dtypes( + include=self.dtype_include, exclude=self.dtype_exclude + ) + cols = df_row.columns + if self.pattern is not None: + cols = cols[cols.str.contains(self.pattern, regex=True)] + return cols.tolist() diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/compose/_target.py b/llmeval-env/lib/python3.10/site-packages/sklearn/compose/_target.py new file mode 100644 index 0000000000000000000000000000000000000000..b90d235ac758bb31717b96cf82a04d92ac614599 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/compose/_target.py @@ -0,0 +1,342 @@ +# Authors: Andreas Mueller +# Guillaume Lemaitre +# License: BSD 3 clause + +import warnings + +import numpy as np + +from ..base import BaseEstimator, RegressorMixin, _fit_context, clone +from ..exceptions import NotFittedError +from ..preprocessing import FunctionTransformer +from ..utils import _safe_indexing, check_array +from ..utils._param_validation import HasMethods +from ..utils._tags import _safe_tags +from ..utils.metadata_routing import ( + _raise_for_unsupported_routing, + _RoutingNotSupportedMixin, +) +from ..utils.validation import check_is_fitted + +__all__ = ["TransformedTargetRegressor"] + + +class TransformedTargetRegressor( + _RoutingNotSupportedMixin, RegressorMixin, BaseEstimator +): + """Meta-estimator to regress on a transformed target. + + Useful for applying a non-linear transformation to the target `y` in + regression problems. This transformation can be given as a Transformer + such as the :class:`~sklearn.preprocessing.QuantileTransformer` or as a + function and its inverse such as `np.log` and `np.exp`. + + The computation during :meth:`fit` is:: + + regressor.fit(X, func(y)) + + or:: + + regressor.fit(X, transformer.transform(y)) + + The computation during :meth:`predict` is:: + + inverse_func(regressor.predict(X)) + + or:: + + transformer.inverse_transform(regressor.predict(X)) + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.20 + + Parameters + ---------- + regressor : object, default=None + Regressor object such as derived from + :class:`~sklearn.base.RegressorMixin`. This regressor will + automatically be cloned each time prior to fitting. If `regressor is + None`, :class:`~sklearn.linear_model.LinearRegression` is created and used. + + transformer : object, default=None + Estimator object such as derived from + :class:`~sklearn.base.TransformerMixin`. Cannot be set at the same time + as `func` and `inverse_func`. If `transformer is None` as well as + `func` and `inverse_func`, the transformer will be an identity + transformer. Note that the transformer will be cloned during fitting. + Also, the transformer is restricting `y` to be a numpy array. + + func : function, default=None + Function to apply to `y` before passing to :meth:`fit`. Cannot be set + at the same time as `transformer`. The function needs to return a + 2-dimensional array. If `func is None`, the function used will be the + identity function. + + inverse_func : function, default=None + Function to apply to the prediction of the regressor. Cannot be set at + the same time as `transformer`. The function needs to return a + 2-dimensional array. The inverse function is used to return + predictions to the same space of the original training labels. 
+ + check_inverse : bool, default=True + Whether to check that `transform` followed by `inverse_transform` + or `func` followed by `inverse_func` leads to the original targets. + + Attributes + ---------- + regressor_ : object + Fitted regressor. + + transformer_ : object + Transformer used in :meth:`fit` and :meth:`predict`. + + n_features_in_ : int + Number of features seen during :term:`fit`. Only defined if the + underlying regressor exposes such an attribute when fit. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + sklearn.preprocessing.FunctionTransformer : Construct a transformer from an + arbitrary callable. + + Notes + ----- + Internally, the target `y` is always converted into a 2-dimensional array + to be used by scikit-learn transformers. At the time of prediction, the + output will be reshaped to a have the same number of dimensions as `y`. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.linear_model import LinearRegression + >>> from sklearn.compose import TransformedTargetRegressor + >>> tt = TransformedTargetRegressor(regressor=LinearRegression(), + ... func=np.log, inverse_func=np.exp) + >>> X = np.arange(4).reshape(-1, 1) + >>> y = np.exp(2 * X).ravel() + >>> tt.fit(X, y) + TransformedTargetRegressor(...) + >>> tt.score(X, y) + 1.0 + >>> tt.regressor_.coef_ + array([2.]) + + For a more detailed example use case refer to + :ref:`sphx_glr_auto_examples_compose_plot_transformed_target.py`. + """ + + _parameter_constraints: dict = { + "regressor": [HasMethods(["fit", "predict"]), None], + "transformer": [HasMethods("transform"), None], + "func": [callable, None], + "inverse_func": [callable, None], + "check_inverse": ["boolean"], + } + + def __init__( + self, + regressor=None, + *, + transformer=None, + func=None, + inverse_func=None, + check_inverse=True, + ): + self.regressor = regressor + self.transformer = transformer + self.func = func + self.inverse_func = inverse_func + self.check_inverse = check_inverse + + def _fit_transformer(self, y): + """Check transformer and fit transformer. + + Create the default transformer, fit it and make additional inverse + check on a subset (optional). + + """ + if self.transformer is not None and ( + self.func is not None or self.inverse_func is not None + ): + raise ValueError( + "'transformer' and functions 'func'/'inverse_func' cannot both be set." + ) + elif self.transformer is not None: + self.transformer_ = clone(self.transformer) + else: + if self.func is not None and self.inverse_func is None: + raise ValueError( + "When 'func' is provided, 'inverse_func' must also be provided" + ) + self.transformer_ = FunctionTransformer( + func=self.func, + inverse_func=self.inverse_func, + validate=True, + check_inverse=self.check_inverse, + ) + # XXX: sample_weight is not currently passed to the + # transformer. However, if transformer starts using sample_weight, the + # code should be modified accordingly. At the time to consider the + # sample_prop feature, it is also a good use case to be considered. 
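+ # Fit the transformer on the 2D target; when check_inverse is enabled,
+ # a round-trip check (transform then inverse_transform) is run below on
+ # roughly ten evenly spaced samples and a warning is issued on mismatch.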
+ self.transformer_.fit(y) + if self.check_inverse: + idx_selected = slice(None, None, max(1, y.shape[0] // 10)) + y_sel = _safe_indexing(y, idx_selected) + y_sel_t = self.transformer_.transform(y_sel) + if not np.allclose(y_sel, self.transformer_.inverse_transform(y_sel_t)): + warnings.warn( + ( + "The provided functions or transformer are" + " not strictly inverse of each other. If" + " you are sure you want to proceed regardless" + ", set 'check_inverse=False'" + ), + UserWarning, + ) + + @_fit_context( + # TransformedTargetRegressor.regressor/transformer are not validated yet. + prefer_skip_nested_validation=False + ) + def fit(self, X, y, **fit_params): + """Fit the model according to the given training data. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples and + `n_features` is the number of features. + + y : array-like of shape (n_samples,) + Target values. + + **fit_params : dict + Parameters passed to the `fit` method of the underlying + regressor. + + Returns + ------- + self : object + Fitted estimator. + """ + _raise_for_unsupported_routing(self, "fit", **fit_params) + if y is None: + raise ValueError( + f"This {self.__class__.__name__} estimator " + "requires y to be passed, but the target y is None." + ) + y = check_array( + y, + input_name="y", + accept_sparse=False, + force_all_finite=True, + ensure_2d=False, + dtype="numeric", + allow_nd=True, + ) + + # store the number of dimension of the target to predict an array of + # similar shape at predict + self._training_dim = y.ndim + + # transformers are designed to modify X which is 2d dimensional, we + # need to modify y accordingly. + if y.ndim == 1: + y_2d = y.reshape(-1, 1) + else: + y_2d = y + self._fit_transformer(y_2d) + + # transform y and convert back to 1d array if needed + y_trans = self.transformer_.transform(y_2d) + # FIXME: a FunctionTransformer can return a 1D array even when validate + # is set to True. Therefore, we need to check the number of dimension + # first. + if y_trans.ndim == 2 and y_trans.shape[1] == 1: + y_trans = y_trans.squeeze(axis=1) + + if self.regressor is None: + from ..linear_model import LinearRegression + + self.regressor_ = LinearRegression() + else: + self.regressor_ = clone(self.regressor) + + self.regressor_.fit(X, y_trans, **fit_params) + + if hasattr(self.regressor_, "feature_names_in_"): + self.feature_names_in_ = self.regressor_.feature_names_in_ + + return self + + def predict(self, X, **predict_params): + """Predict using the base regressor, applying inverse. + + The regressor is used to predict and the `inverse_func` or + `inverse_transform` is applied before returning the prediction. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Samples. + + **predict_params : dict of str -> object + Parameters passed to the `predict` method of the underlying + regressor. + + Returns + ------- + y_hat : ndarray of shape (n_samples,) + Predicted values. 
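+
+ Notes
+ -----
+ The predictions are reshaped to have the same number of dimensions as
+ the target ``y`` seen during :meth:`fit`.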
+ """ + check_is_fitted(self) + pred = self.regressor_.predict(X, **predict_params) + if pred.ndim == 1: + pred_trans = self.transformer_.inverse_transform(pred.reshape(-1, 1)) + else: + pred_trans = self.transformer_.inverse_transform(pred) + if ( + self._training_dim == 1 + and pred_trans.ndim == 2 + and pred_trans.shape[1] == 1 + ): + pred_trans = pred_trans.squeeze(axis=1) + + return pred_trans + + def _more_tags(self): + regressor = self.regressor + if regressor is None: + from ..linear_model import LinearRegression + + regressor = LinearRegression() + + return { + "poor_score": True, + "multioutput": _safe_tags(regressor, key="multioutput"), + } + + @property + def n_features_in_(self): + """Number of features seen during :term:`fit`.""" + # For consistency with other estimators we raise a AttributeError so + # that hasattr() returns False the estimator isn't fitted. + try: + check_is_fitted(self) + except NotFittedError as nfe: + raise AttributeError( + "{} object has no n_features_in_ attribute.".format( + self.__class__.__name__ + ) + ) from nfe + + return self.regressor_.n_features_in_ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/compose/tests/__init__.py b/llmeval-env/lib/python3.10/site-packages/sklearn/compose/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/compose/tests/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/compose/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8c696e4a12f32848c943470d044dad94315b7e96 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/compose/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/compose/tests/__pycache__/test_column_transformer.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/compose/tests/__pycache__/test_column_transformer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..759f0635391b7b681dd974af282b6fa424632be9 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/compose/tests/__pycache__/test_column_transformer.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/compose/tests/__pycache__/test_target.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/compose/tests/__pycache__/test_target.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8b27ca722472d4b2288db09d163fe404b12a2384 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/compose/tests/__pycache__/test_target.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/compose/tests/test_column_transformer.py b/llmeval-env/lib/python3.10/site-packages/sklearn/compose/tests/test_column_transformer.py new file mode 100644 index 0000000000000000000000000000000000000000..e21c1a17010efcba076b4758eb024e0ef760b073 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/compose/tests/test_column_transformer.py @@ -0,0 +1,2582 @@ +""" +Test the ColumnTransformer. 
+""" + +import pickle +import re +import warnings + +import numpy as np +import pytest +from numpy.testing import assert_allclose +from scipy import sparse + +from sklearn.base import BaseEstimator, TransformerMixin +from sklearn.compose import ( + ColumnTransformer, + make_column_selector, + make_column_transformer, +) +from sklearn.exceptions import NotFittedError +from sklearn.feature_selection import VarianceThreshold +from sklearn.preprocessing import ( + FunctionTransformer, + Normalizer, + OneHotEncoder, + StandardScaler, +) +from sklearn.tests.metadata_routing_common import ( + ConsumingTransformer, + _Registry, + check_recorded_metadata, +) +from sklearn.utils._testing import ( + _convert_container, + assert_allclose_dense_sparse, + assert_almost_equal, + assert_array_equal, +) +from sklearn.utils.fixes import CSR_CONTAINERS + + +class Trans(TransformerMixin, BaseEstimator): + def fit(self, X, y=None): + return self + + def transform(self, X, y=None): + # 1D Series -> 2D DataFrame + if hasattr(X, "to_frame"): + return X.to_frame() + # 1D array -> 2D array + if getattr(X, "ndim", 2) == 1: + return np.atleast_2d(X).T + return X + + +class DoubleTrans(BaseEstimator): + def fit(self, X, y=None): + return self + + def transform(self, X): + return 2 * X + + +class SparseMatrixTrans(BaseEstimator): + def __init__(self, csr_container): + self.csr_container = csr_container + + def fit(self, X, y=None): + return self + + def transform(self, X, y=None): + n_samples = len(X) + return self.csr_container(sparse.eye(n_samples, n_samples)) + + +class TransNo2D(BaseEstimator): + def fit(self, X, y=None): + return self + + def transform(self, X, y=None): + return X + + +class TransRaise(BaseEstimator): + def fit(self, X, y=None): + raise ValueError("specific message") + + def transform(self, X, y=None): + raise ValueError("specific message") + + +def test_column_transformer(): + X_array = np.array([[0, 1, 2], [2, 4, 6]]).T + + X_res_first1D = np.array([0, 1, 2]) + X_res_second1D = np.array([2, 4, 6]) + X_res_first = X_res_first1D.reshape(-1, 1) + X_res_both = X_array + + cases = [ + # single column 1D / 2D + (0, X_res_first), + ([0], X_res_first), + # list-like + ([0, 1], X_res_both), + (np.array([0, 1]), X_res_both), + # slice + (slice(0, 1), X_res_first), + (slice(0, 2), X_res_both), + # boolean mask + (np.array([True, False]), X_res_first), + ([True, False], X_res_first), + (np.array([True, True]), X_res_both), + ([True, True], X_res_both), + ] + + for selection, res in cases: + ct = ColumnTransformer([("trans", Trans(), selection)], remainder="drop") + assert_array_equal(ct.fit_transform(X_array), res) + assert_array_equal(ct.fit(X_array).transform(X_array), res) + + # callable that returns any of the allowed specifiers + ct = ColumnTransformer( + [("trans", Trans(), lambda x: selection)], remainder="drop" + ) + assert_array_equal(ct.fit_transform(X_array), res) + assert_array_equal(ct.fit(X_array).transform(X_array), res) + + ct = ColumnTransformer([("trans1", Trans(), [0]), ("trans2", Trans(), [1])]) + assert_array_equal(ct.fit_transform(X_array), X_res_both) + assert_array_equal(ct.fit(X_array).transform(X_array), X_res_both) + assert len(ct.transformers_) == 2 + + # test with transformer_weights + transformer_weights = {"trans1": 0.1, "trans2": 10} + both = ColumnTransformer( + [("trans1", Trans(), [0]), ("trans2", Trans(), [1])], + transformer_weights=transformer_weights, + ) + res = np.vstack( + [ + transformer_weights["trans1"] * X_res_first1D, + transformer_weights["trans2"] * 
X_res_second1D, + ] + ).T + assert_array_equal(both.fit_transform(X_array), res) + assert_array_equal(both.fit(X_array).transform(X_array), res) + assert len(both.transformers_) == 2 + + both = ColumnTransformer( + [("trans", Trans(), [0, 1])], transformer_weights={"trans": 0.1} + ) + assert_array_equal(both.fit_transform(X_array), 0.1 * X_res_both) + assert_array_equal(both.fit(X_array).transform(X_array), 0.1 * X_res_both) + assert len(both.transformers_) == 1 + + +def test_column_transformer_tuple_transformers_parameter(): + X_array = np.array([[0, 1, 2], [2, 4, 6]]).T + + transformers = [("trans1", Trans(), [0]), ("trans2", Trans(), [1])] + + ct_with_list = ColumnTransformer(transformers) + ct_with_tuple = ColumnTransformer(tuple(transformers)) + + assert_array_equal( + ct_with_list.fit_transform(X_array), ct_with_tuple.fit_transform(X_array) + ) + assert_array_equal( + ct_with_list.fit(X_array).transform(X_array), + ct_with_tuple.fit(X_array).transform(X_array), + ) + + +@pytest.mark.parametrize("constructor_name", ["dataframe", "polars"]) +def test_column_transformer_dataframe(constructor_name): + if constructor_name == "dataframe": + dataframe_lib = pytest.importorskip("pandas") + else: + dataframe_lib = pytest.importorskip(constructor_name) + + X_array = np.array([[0, 1, 2], [2, 4, 6]]).T + X_df = _convert_container( + X_array, constructor_name, columns_name=["first", "second"] + ) + + X_res_first = np.array([0, 1, 2]).reshape(-1, 1) + X_res_both = X_array + + cases = [ + # String keys: label based + # list + (["first"], X_res_first), + (["first", "second"], X_res_both), + # slice + (slice("first", "second"), X_res_both), + # int keys: positional + # list + ([0], X_res_first), + ([0, 1], X_res_both), + (np.array([0, 1]), X_res_both), + # slice + (slice(0, 1), X_res_first), + (slice(0, 2), X_res_both), + # boolean mask + (np.array([True, False]), X_res_first), + ([True, False], X_res_first), + ] + if constructor_name == "dataframe": + # Scalars are only supported for pandas dataframes. 
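+ # A scalar selects a single column as a 1D pandas Series; Trans()
+ # converts it back to a 2D container, so the expected result is the
+ # same single-column array.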
+ cases.extend( + [ + # scalar + (0, X_res_first), + ("first", X_res_first), + ( + dataframe_lib.Series([True, False], index=["first", "second"]), + X_res_first, + ), + ] + ) + + for selection, res in cases: + ct = ColumnTransformer([("trans", Trans(), selection)], remainder="drop") + assert_array_equal(ct.fit_transform(X_df), res) + assert_array_equal(ct.fit(X_df).transform(X_df), res) + + # callable that returns any of the allowed specifiers + ct = ColumnTransformer( + [("trans", Trans(), lambda X: selection)], remainder="drop" + ) + assert_array_equal(ct.fit_transform(X_df), res) + assert_array_equal(ct.fit(X_df).transform(X_df), res) + + ct = ColumnTransformer( + [("trans1", Trans(), ["first"]), ("trans2", Trans(), ["second"])] + ) + assert_array_equal(ct.fit_transform(X_df), X_res_both) + assert_array_equal(ct.fit(X_df).transform(X_df), X_res_both) + assert len(ct.transformers_) == 2 + assert ct.transformers_[-1][0] != "remainder" + + ct = ColumnTransformer([("trans1", Trans(), [0]), ("trans2", Trans(), [1])]) + assert_array_equal(ct.fit_transform(X_df), X_res_both) + assert_array_equal(ct.fit(X_df).transform(X_df), X_res_both) + assert len(ct.transformers_) == 2 + assert ct.transformers_[-1][0] != "remainder" + + # test with transformer_weights + transformer_weights = {"trans1": 0.1, "trans2": 10} + both = ColumnTransformer( + [("trans1", Trans(), ["first"]), ("trans2", Trans(), ["second"])], + transformer_weights=transformer_weights, + ) + res = np.vstack( + [ + transformer_weights["trans1"] * X_df["first"], + transformer_weights["trans2"] * X_df["second"], + ] + ).T + assert_array_equal(both.fit_transform(X_df), res) + assert_array_equal(both.fit(X_df).transform(X_df), res) + assert len(both.transformers_) == 2 + assert both.transformers_[-1][0] != "remainder" + + # test multiple columns + both = ColumnTransformer( + [("trans", Trans(), ["first", "second"])], transformer_weights={"trans": 0.1} + ) + assert_array_equal(both.fit_transform(X_df), 0.1 * X_res_both) + assert_array_equal(both.fit(X_df).transform(X_df), 0.1 * X_res_both) + assert len(both.transformers_) == 1 + assert both.transformers_[-1][0] != "remainder" + + both = ColumnTransformer( + [("trans", Trans(), [0, 1])], transformer_weights={"trans": 0.1} + ) + assert_array_equal(both.fit_transform(X_df), 0.1 * X_res_both) + assert_array_equal(both.fit(X_df).transform(X_df), 0.1 * X_res_both) + assert len(both.transformers_) == 1 + assert both.transformers_[-1][0] != "remainder" + + # ensure pandas object is passed through + + class TransAssert(BaseEstimator): + def __init__(self, expected_type_transform): + self.expected_type_transform = expected_type_transform + + def fit(self, X, y=None): + return self + + def transform(self, X, y=None): + assert isinstance(X, self.expected_type_transform) + if isinstance(X, dataframe_lib.Series): + X = X.to_frame() + return X + + ct = ColumnTransformer( + [ + ( + "trans", + TransAssert(expected_type_transform=dataframe_lib.DataFrame), + ["first", "second"], + ) + ] + ) + ct.fit_transform(X_df) + + if constructor_name == "dataframe": + # DataFrame protocol does not have 1d columns, so we only test on Pandas + # dataframes. 
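+ # With a scalar column name, the selected data reaches the transformer
+ # as a 1D pandas Series rather than a DataFrame.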
+ ct = ColumnTransformer( + [ + ( + "trans", + TransAssert(expected_type_transform=dataframe_lib.Series), + "first", + ) + ], + remainder="drop", + ) + ct.fit_transform(X_df) + + # Only test on pandas because the dataframe protocol requires string column + # names + # integer column spec + integer column names -> still use positional + X_df2 = X_df.copy() + X_df2.columns = [1, 0] + ct = ColumnTransformer([("trans", Trans(), 0)], remainder="drop") + assert_array_equal(ct.fit_transform(X_df2), X_res_first) + assert_array_equal(ct.fit(X_df2).transform(X_df2), X_res_first) + + assert len(ct.transformers_) == 2 + assert ct.transformers_[-1][0] == "remainder" + assert ct.transformers_[-1][1] == "drop" + assert_array_equal(ct.transformers_[-1][2], [1]) + + +@pytest.mark.parametrize("pandas", [True, False], ids=["pandas", "numpy"]) +@pytest.mark.parametrize( + "column_selection", + [[], np.array([False, False]), [False, False]], + ids=["list", "bool", "bool_int"], +) +@pytest.mark.parametrize("callable_column", [False, True]) +def test_column_transformer_empty_columns(pandas, column_selection, callable_column): + # test case that ensures that the column transformer does also work when + # a given transformer doesn't have any columns to work on + X_array = np.array([[0, 1, 2], [2, 4, 6]]).T + X_res_both = X_array + + if pandas: + pd = pytest.importorskip("pandas") + X = pd.DataFrame(X_array, columns=["first", "second"]) + else: + X = X_array + + if callable_column: + column = lambda X: column_selection # noqa + else: + column = column_selection + + ct = ColumnTransformer( + [("trans1", Trans(), [0, 1]), ("trans2", TransRaise(), column)] + ) + assert_array_equal(ct.fit_transform(X), X_res_both) + assert_array_equal(ct.fit(X).transform(X), X_res_both) + assert len(ct.transformers_) == 2 + assert isinstance(ct.transformers_[1][1], TransRaise) + + ct = ColumnTransformer( + [("trans1", TransRaise(), column), ("trans2", Trans(), [0, 1])] + ) + assert_array_equal(ct.fit_transform(X), X_res_both) + assert_array_equal(ct.fit(X).transform(X), X_res_both) + assert len(ct.transformers_) == 2 + assert isinstance(ct.transformers_[0][1], TransRaise) + + ct = ColumnTransformer([("trans", TransRaise(), column)], remainder="passthrough") + assert_array_equal(ct.fit_transform(X), X_res_both) + assert_array_equal(ct.fit(X).transform(X), X_res_both) + assert len(ct.transformers_) == 2 # including remainder + assert isinstance(ct.transformers_[0][1], TransRaise) + + fixture = np.array([[], [], []]) + ct = ColumnTransformer([("trans", TransRaise(), column)], remainder="drop") + assert_array_equal(ct.fit_transform(X), fixture) + assert_array_equal(ct.fit(X).transform(X), fixture) + assert len(ct.transformers_) == 2 # including remainder + assert isinstance(ct.transformers_[0][1], TransRaise) + + +def test_column_transformer_output_indices(): + # Checks for the output_indices_ attribute + X_array = np.arange(6).reshape(3, 2) + + ct = ColumnTransformer([("trans1", Trans(), [0]), ("trans2", Trans(), [1])]) + X_trans = ct.fit_transform(X_array) + assert ct.output_indices_ == { + "trans1": slice(0, 1), + "trans2": slice(1, 2), + "remainder": slice(0, 0), + } + assert_array_equal(X_trans[:, [0]], X_trans[:, ct.output_indices_["trans1"]]) + assert_array_equal(X_trans[:, [1]], X_trans[:, ct.output_indices_["trans2"]]) + + # test with transformer_weights and multiple columns + ct = ColumnTransformer( + [("trans", Trans(), [0, 1])], transformer_weights={"trans": 0.1} + ) + X_trans = ct.fit_transform(X_array) + assert 
ct.output_indices_ == {"trans": slice(0, 2), "remainder": slice(0, 0)} + assert_array_equal(X_trans[:, [0, 1]], X_trans[:, ct.output_indices_["trans"]]) + assert_array_equal(X_trans[:, []], X_trans[:, ct.output_indices_["remainder"]]) + + # test case that ensures that the attribute does also work when + # a given transformer doesn't have any columns to work on + ct = ColumnTransformer([("trans1", Trans(), [0, 1]), ("trans2", TransRaise(), [])]) + X_trans = ct.fit_transform(X_array) + assert ct.output_indices_ == { + "trans1": slice(0, 2), + "trans2": slice(0, 0), + "remainder": slice(0, 0), + } + assert_array_equal(X_trans[:, [0, 1]], X_trans[:, ct.output_indices_["trans1"]]) + assert_array_equal(X_trans[:, []], X_trans[:, ct.output_indices_["trans2"]]) + assert_array_equal(X_trans[:, []], X_trans[:, ct.output_indices_["remainder"]]) + + ct = ColumnTransformer([("trans", TransRaise(), [])], remainder="passthrough") + X_trans = ct.fit_transform(X_array) + assert ct.output_indices_ == {"trans": slice(0, 0), "remainder": slice(0, 2)} + assert_array_equal(X_trans[:, []], X_trans[:, ct.output_indices_["trans"]]) + assert_array_equal(X_trans[:, [0, 1]], X_trans[:, ct.output_indices_["remainder"]]) + + +def test_column_transformer_output_indices_df(): + # Checks for the output_indices_ attribute with data frames + pd = pytest.importorskip("pandas") + + X_df = pd.DataFrame(np.arange(6).reshape(3, 2), columns=["first", "second"]) + + ct = ColumnTransformer( + [("trans1", Trans(), ["first"]), ("trans2", Trans(), ["second"])] + ) + X_trans = ct.fit_transform(X_df) + assert ct.output_indices_ == { + "trans1": slice(0, 1), + "trans2": slice(1, 2), + "remainder": slice(0, 0), + } + assert_array_equal(X_trans[:, [0]], X_trans[:, ct.output_indices_["trans1"]]) + assert_array_equal(X_trans[:, [1]], X_trans[:, ct.output_indices_["trans2"]]) + assert_array_equal(X_trans[:, []], X_trans[:, ct.output_indices_["remainder"]]) + + ct = ColumnTransformer([("trans1", Trans(), [0]), ("trans2", Trans(), [1])]) + X_trans = ct.fit_transform(X_df) + assert ct.output_indices_ == { + "trans1": slice(0, 1), + "trans2": slice(1, 2), + "remainder": slice(0, 0), + } + assert_array_equal(X_trans[:, [0]], X_trans[:, ct.output_indices_["trans1"]]) + assert_array_equal(X_trans[:, [1]], X_trans[:, ct.output_indices_["trans2"]]) + assert_array_equal(X_trans[:, []], X_trans[:, ct.output_indices_["remainder"]]) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_column_transformer_sparse_array(csr_container): + X_sparse = csr_container(sparse.eye(3, 2)) + + # no distinction between 1D and 2D + X_res_first = X_sparse[:, [0]] + X_res_both = X_sparse + + for col in [(0,), [0], slice(0, 1)]: + for remainder, res in [("drop", X_res_first), ("passthrough", X_res_both)]: + ct = ColumnTransformer( + [("trans", Trans(), col)], remainder=remainder, sparse_threshold=0.8 + ) + assert sparse.issparse(ct.fit_transform(X_sparse)) + assert_allclose_dense_sparse(ct.fit_transform(X_sparse), res) + assert_allclose_dense_sparse(ct.fit(X_sparse).transform(X_sparse), res) + + for col in [[0, 1], slice(0, 2)]: + ct = ColumnTransformer([("trans", Trans(), col)], sparse_threshold=0.8) + assert sparse.issparse(ct.fit_transform(X_sparse)) + assert_allclose_dense_sparse(ct.fit_transform(X_sparse), X_res_both) + assert_allclose_dense_sparse(ct.fit(X_sparse).transform(X_sparse), X_res_both) + + +def test_column_transformer_list(): + X_list = [[1, float("nan"), "a"], [0, 0, "b"]] + expected_result = np.array( + [ + [1, float("nan"), 1, 0], + 
[-1, 0, 0, 1], + ] + ) + + ct = ColumnTransformer( + [ + ("numerical", StandardScaler(), [0, 1]), + ("categorical", OneHotEncoder(), [2]), + ] + ) + + assert_array_equal(ct.fit_transform(X_list), expected_result) + assert_array_equal(ct.fit(X_list).transform(X_list), expected_result) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_column_transformer_sparse_stacking(csr_container): + X_array = np.array([[0, 1, 2], [2, 4, 6]]).T + col_trans = ColumnTransformer( + [("trans1", Trans(), [0]), ("trans2", SparseMatrixTrans(csr_container), 1)], + sparse_threshold=0.8, + ) + col_trans.fit(X_array) + X_trans = col_trans.transform(X_array) + assert sparse.issparse(X_trans) + assert X_trans.shape == (X_trans.shape[0], X_trans.shape[0] + 1) + assert_array_equal(X_trans.toarray()[:, 1:], np.eye(X_trans.shape[0])) + assert len(col_trans.transformers_) == 2 + assert col_trans.transformers_[-1][0] != "remainder" + + col_trans = ColumnTransformer( + [("trans1", Trans(), [0]), ("trans2", SparseMatrixTrans(csr_container), 1)], + sparse_threshold=0.1, + ) + col_trans.fit(X_array) + X_trans = col_trans.transform(X_array) + assert not sparse.issparse(X_trans) + assert X_trans.shape == (X_trans.shape[0], X_trans.shape[0] + 1) + assert_array_equal(X_trans[:, 1:], np.eye(X_trans.shape[0])) + + +def test_column_transformer_mixed_cols_sparse(): + df = np.array([["a", 1, True], ["b", 2, False]], dtype="O") + + ct = make_column_transformer( + (OneHotEncoder(), [0]), ("passthrough", [1, 2]), sparse_threshold=1.0 + ) + + # this shouldn't fail, since boolean can be coerced into a numeric + # See: https://github.com/scikit-learn/scikit-learn/issues/11912 + X_trans = ct.fit_transform(df) + assert X_trans.getformat() == "csr" + assert_array_equal(X_trans.toarray(), np.array([[1, 0, 1, 1], [0, 1, 2, 0]])) + + ct = make_column_transformer( + (OneHotEncoder(), [0]), ("passthrough", [0]), sparse_threshold=1.0 + ) + with pytest.raises(ValueError, match="For a sparse output, all columns should"): + # this fails since strings `a` and `b` cannot be + # coerced into a numeric. 
+ ct.fit_transform(df) + + +def test_column_transformer_sparse_threshold(): + X_array = np.array([["a", "b"], ["A", "B"]], dtype=object).T + # above data has sparsity of 4 / 8 = 0.5 + + # apply threshold even if all sparse + col_trans = ColumnTransformer( + [("trans1", OneHotEncoder(), [0]), ("trans2", OneHotEncoder(), [1])], + sparse_threshold=0.2, + ) + res = col_trans.fit_transform(X_array) + assert not sparse.issparse(res) + assert not col_trans.sparse_output_ + + # mixed -> sparsity of (4 + 2) / 8 = 0.75 + for thres in [0.75001, 1]: + col_trans = ColumnTransformer( + [ + ("trans1", OneHotEncoder(sparse_output=True), [0]), + ("trans2", OneHotEncoder(sparse_output=False), [1]), + ], + sparse_threshold=thres, + ) + res = col_trans.fit_transform(X_array) + assert sparse.issparse(res) + assert col_trans.sparse_output_ + + for thres in [0.75, 0]: + col_trans = ColumnTransformer( + [ + ("trans1", OneHotEncoder(sparse_output=True), [0]), + ("trans2", OneHotEncoder(sparse_output=False), [1]), + ], + sparse_threshold=thres, + ) + res = col_trans.fit_transform(X_array) + assert not sparse.issparse(res) + assert not col_trans.sparse_output_ + + # if nothing is sparse -> no sparse + for thres in [0.33, 0, 1]: + col_trans = ColumnTransformer( + [ + ("trans1", OneHotEncoder(sparse_output=False), [0]), + ("trans2", OneHotEncoder(sparse_output=False), [1]), + ], + sparse_threshold=thres, + ) + res = col_trans.fit_transform(X_array) + assert not sparse.issparse(res) + assert not col_trans.sparse_output_ + + +def test_column_transformer_error_msg_1D(): + X_array = np.array([[0.0, 1.0, 2.0], [2.0, 4.0, 6.0]]).T + + col_trans = ColumnTransformer([("trans", StandardScaler(), 0)]) + msg = "1D data passed to a transformer" + with pytest.raises(ValueError, match=msg): + col_trans.fit(X_array) + + with pytest.raises(ValueError, match=msg): + col_trans.fit_transform(X_array) + + col_trans = ColumnTransformer([("trans", TransRaise(), 0)]) + for func in [col_trans.fit, col_trans.fit_transform]: + with pytest.raises(ValueError, match="specific message"): + func(X_array) + + +def test_2D_transformer_output(): + X_array = np.array([[0, 1, 2], [2, 4, 6]]).T + + # if one transformer is dropped, test that name is still correct + ct = ColumnTransformer([("trans1", "drop", 0), ("trans2", TransNo2D(), 1)]) + + msg = "the 'trans2' transformer should be 2D" + with pytest.raises(ValueError, match=msg): + ct.fit_transform(X_array) + # because fit is also doing transform, this raises already on fit + with pytest.raises(ValueError, match=msg): + ct.fit(X_array) + + +def test_2D_transformer_output_pandas(): + pd = pytest.importorskip("pandas") + + X_array = np.array([[0, 1, 2], [2, 4, 6]]).T + X_df = pd.DataFrame(X_array, columns=["col1", "col2"]) + + # if one transformer is dropped, test that name is still correct + ct = ColumnTransformer([("trans1", TransNo2D(), "col1")]) + msg = "the 'trans1' transformer should be 2D" + with pytest.raises(ValueError, match=msg): + ct.fit_transform(X_df) + # because fit is also doing transform, this raises already on fit + with pytest.raises(ValueError, match=msg): + ct.fit(X_df) + + +@pytest.mark.parametrize("remainder", ["drop", "passthrough"]) +def test_column_transformer_invalid_columns(remainder): + X_array = np.array([[0, 1, 2], [2, 4, 6]]).T + + # general invalid + for col in [1.5, ["string", 1], slice(1, "s"), np.array([1.0])]: + ct = ColumnTransformer([("trans", Trans(), col)], remainder=remainder) + with pytest.raises(ValueError, match="No valid specification"): + ct.fit(X_array) + + 
# invalid for arrays + for col in ["string", ["string", "other"], slice("a", "b")]: + ct = ColumnTransformer([("trans", Trans(), col)], remainder=remainder) + with pytest.raises(ValueError, match="Specifying the columns"): + ct.fit(X_array) + + # transformed n_features does not match fitted n_features + col = [0, 1] + ct = ColumnTransformer([("trans", Trans(), col)], remainder=remainder) + ct.fit(X_array) + X_array_more = np.array([[0, 1, 2], [2, 4, 6], [3, 6, 9]]).T + msg = "X has 3 features, but ColumnTransformer is expecting 2 features as input." + with pytest.raises(ValueError, match=msg): + ct.transform(X_array_more) + X_array_fewer = np.array( + [ + [0, 1, 2], + ] + ).T + err_msg = ( + "X has 1 features, but ColumnTransformer is expecting 2 features as input." + ) + with pytest.raises(ValueError, match=err_msg): + ct.transform(X_array_fewer) + + +def test_column_transformer_invalid_transformer(): + class NoTrans(BaseEstimator): + def fit(self, X, y=None): + return self + + def predict(self, X): + return X + + X_array = np.array([[0, 1, 2], [2, 4, 6]]).T + ct = ColumnTransformer([("trans", NoTrans(), [0])]) + msg = "All estimators should implement fit and transform" + with pytest.raises(TypeError, match=msg): + ct.fit(X_array) + + +def test_make_column_transformer(): + scaler = StandardScaler() + norm = Normalizer() + ct = make_column_transformer((scaler, "first"), (norm, ["second"])) + names, transformers, columns = zip(*ct.transformers) + assert names == ("standardscaler", "normalizer") + assert transformers == (scaler, norm) + assert columns == ("first", ["second"]) + + +def test_make_column_transformer_pandas(): + pd = pytest.importorskip("pandas") + X_array = np.array([[0, 1, 2], [2, 4, 6]]).T + X_df = pd.DataFrame(X_array, columns=["first", "second"]) + norm = Normalizer() + ct1 = ColumnTransformer([("norm", Normalizer(), X_df.columns)]) + ct2 = make_column_transformer((norm, X_df.columns)) + assert_almost_equal(ct1.fit_transform(X_df), ct2.fit_transform(X_df)) + + +def test_make_column_transformer_kwargs(): + scaler = StandardScaler() + norm = Normalizer() + ct = make_column_transformer( + (scaler, "first"), + (norm, ["second"]), + n_jobs=3, + remainder="drop", + sparse_threshold=0.5, + ) + assert ( + ct.transformers + == make_column_transformer((scaler, "first"), (norm, ["second"])).transformers + ) + assert ct.n_jobs == 3 + assert ct.remainder == "drop" + assert ct.sparse_threshold == 0.5 + # invalid keyword parameters should raise an error message + msg = re.escape( + "make_column_transformer() got an unexpected " + "keyword argument 'transformer_weights'" + ) + with pytest.raises(TypeError, match=msg): + make_column_transformer( + (scaler, "first"), + (norm, ["second"]), + transformer_weights={"pca": 10, "Transf": 1}, + ) + + +def test_make_column_transformer_remainder_transformer(): + scaler = StandardScaler() + norm = Normalizer() + remainder = StandardScaler() + ct = make_column_transformer( + (scaler, "first"), (norm, ["second"]), remainder=remainder + ) + assert ct.remainder == remainder + + +def test_column_transformer_get_set_params(): + ct = ColumnTransformer( + [("trans1", StandardScaler(), [0]), ("trans2", StandardScaler(), [1])] + ) + + exp = { + "n_jobs": None, + "remainder": "drop", + "sparse_threshold": 0.3, + "trans1": ct.transformers[0][1], + "trans1__copy": True, + "trans1__with_mean": True, + "trans1__with_std": True, + "trans2": ct.transformers[1][1], + "trans2__copy": True, + "trans2__with_mean": True, + "trans2__with_std": True, + "transformers": 
ct.transformers, + "transformer_weights": None, + "verbose_feature_names_out": True, + "verbose": False, + } + + assert ct.get_params() == exp + + ct.set_params(trans1__with_mean=False) + assert not ct.get_params()["trans1__with_mean"] + + ct.set_params(trans1="passthrough") + exp = { + "n_jobs": None, + "remainder": "drop", + "sparse_threshold": 0.3, + "trans1": "passthrough", + "trans2": ct.transformers[1][1], + "trans2__copy": True, + "trans2__with_mean": True, + "trans2__with_std": True, + "transformers": ct.transformers, + "transformer_weights": None, + "verbose_feature_names_out": True, + "verbose": False, + } + + assert ct.get_params() == exp + + +def test_column_transformer_named_estimators(): + X_array = np.array([[0.0, 1.0, 2.0], [2.0, 4.0, 6.0]]).T + ct = ColumnTransformer( + [ + ("trans1", StandardScaler(), [0]), + ("trans2", StandardScaler(with_std=False), [1]), + ] + ) + assert not hasattr(ct, "transformers_") + ct.fit(X_array) + assert hasattr(ct, "transformers_") + assert isinstance(ct.named_transformers_["trans1"], StandardScaler) + assert isinstance(ct.named_transformers_.trans1, StandardScaler) + assert isinstance(ct.named_transformers_["trans2"], StandardScaler) + assert isinstance(ct.named_transformers_.trans2, StandardScaler) + assert not ct.named_transformers_.trans2.with_std + # check it are fitted transformers + assert ct.named_transformers_.trans1.mean_ == 1.0 + + +def test_column_transformer_cloning(): + X_array = np.array([[0.0, 1.0, 2.0], [2.0, 4.0, 6.0]]).T + + ct = ColumnTransformer([("trans", StandardScaler(), [0])]) + ct.fit(X_array) + assert not hasattr(ct.transformers[0][1], "mean_") + assert hasattr(ct.transformers_[0][1], "mean_") + + ct = ColumnTransformer([("trans", StandardScaler(), [0])]) + ct.fit_transform(X_array) + assert not hasattr(ct.transformers[0][1], "mean_") + assert hasattr(ct.transformers_[0][1], "mean_") + + +def test_column_transformer_get_feature_names(): + X_array = np.array([[0.0, 1.0, 2.0], [2.0, 4.0, 6.0]]).T + ct = ColumnTransformer([("trans", Trans(), [0, 1])]) + # raise correct error when not fitted + with pytest.raises(NotFittedError): + ct.get_feature_names_out() + # raise correct error when no feature names are available + ct.fit(X_array) + msg = re.escape( + "Transformer trans (type Trans) does not provide get_feature_names_out" + ) + with pytest.raises(AttributeError, match=msg): + ct.get_feature_names_out() + + +def test_column_transformer_special_strings(): + # one 'drop' -> ignore + X_array = np.array([[0.0, 1.0, 2.0], [2.0, 4.0, 6.0]]).T + ct = ColumnTransformer([("trans1", Trans(), [0]), ("trans2", "drop", [1])]) + exp = np.array([[0.0], [1.0], [2.0]]) + assert_array_equal(ct.fit_transform(X_array), exp) + assert_array_equal(ct.fit(X_array).transform(X_array), exp) + assert len(ct.transformers_) == 2 + assert ct.transformers_[-1][0] != "remainder" + + # all 'drop' -> return shape 0 array + ct = ColumnTransformer([("trans1", "drop", [0]), ("trans2", "drop", [1])]) + assert_array_equal(ct.fit(X_array).transform(X_array).shape, (3, 0)) + assert_array_equal(ct.fit_transform(X_array).shape, (3, 0)) + assert len(ct.transformers_) == 2 + assert ct.transformers_[-1][0] != "remainder" + + # 'passthrough' + X_array = np.array([[0.0, 1.0, 2.0], [2.0, 4.0, 6.0]]).T + ct = ColumnTransformer([("trans1", Trans(), [0]), ("trans2", "passthrough", [1])]) + exp = X_array + assert_array_equal(ct.fit_transform(X_array), exp) + assert_array_equal(ct.fit(X_array).transform(X_array), exp) + assert len(ct.transformers_) == 2 + assert 
ct.transformers_[-1][0] != "remainder" + + +def test_column_transformer_remainder(): + X_array = np.array([[0, 1, 2], [2, 4, 6]]).T + + X_res_first = np.array([0, 1, 2]).reshape(-1, 1) + X_res_second = np.array([2, 4, 6]).reshape(-1, 1) + X_res_both = X_array + + # default drop + ct = ColumnTransformer([("trans1", Trans(), [0])]) + assert_array_equal(ct.fit_transform(X_array), X_res_first) + assert_array_equal(ct.fit(X_array).transform(X_array), X_res_first) + assert len(ct.transformers_) == 2 + assert ct.transformers_[-1][0] == "remainder" + assert ct.transformers_[-1][1] == "drop" + assert_array_equal(ct.transformers_[-1][2], [1]) + + # specify passthrough + ct = ColumnTransformer([("trans", Trans(), [0])], remainder="passthrough") + assert_array_equal(ct.fit_transform(X_array), X_res_both) + assert_array_equal(ct.fit(X_array).transform(X_array), X_res_both) + assert len(ct.transformers_) == 2 + assert ct.transformers_[-1][0] == "remainder" + assert isinstance(ct.transformers_[-1][1], FunctionTransformer) + assert_array_equal(ct.transformers_[-1][2], [1]) + + # column order is not preserved (passed through added to end) + ct = ColumnTransformer([("trans1", Trans(), [1])], remainder="passthrough") + assert_array_equal(ct.fit_transform(X_array), X_res_both[:, ::-1]) + assert_array_equal(ct.fit(X_array).transform(X_array), X_res_both[:, ::-1]) + assert len(ct.transformers_) == 2 + assert ct.transformers_[-1][0] == "remainder" + assert isinstance(ct.transformers_[-1][1], FunctionTransformer) + assert_array_equal(ct.transformers_[-1][2], [0]) + + # passthrough when all actual transformers are skipped + ct = ColumnTransformer([("trans1", "drop", [0])], remainder="passthrough") + assert_array_equal(ct.fit_transform(X_array), X_res_second) + assert_array_equal(ct.fit(X_array).transform(X_array), X_res_second) + assert len(ct.transformers_) == 2 + assert ct.transformers_[-1][0] == "remainder" + assert isinstance(ct.transformers_[-1][1], FunctionTransformer) + assert_array_equal(ct.transformers_[-1][2], [1]) + + # check default for make_column_transformer + ct = make_column_transformer((Trans(), [0])) + assert ct.remainder == "drop" + + +@pytest.mark.parametrize( + "key", [[0], np.array([0]), slice(0, 1), np.array([True, False])] +) +def test_column_transformer_remainder_numpy(key): + # test different ways that columns are specified with passthrough + X_array = np.array([[0, 1, 2], [2, 4, 6]]).T + X_res_both = X_array + + ct = ColumnTransformer([("trans1", Trans(), key)], remainder="passthrough") + assert_array_equal(ct.fit_transform(X_array), X_res_both) + assert_array_equal(ct.fit(X_array).transform(X_array), X_res_both) + assert len(ct.transformers_) == 2 + assert ct.transformers_[-1][0] == "remainder" + assert isinstance(ct.transformers_[-1][1], FunctionTransformer) + assert_array_equal(ct.transformers_[-1][2], [1]) + + +@pytest.mark.parametrize( + "key", + [ + [0], + slice(0, 1), + np.array([True, False]), + ["first"], + "pd-index", + np.array(["first"]), + np.array(["first"], dtype=object), + slice(None, "first"), + slice("first", "first"), + ], +) +def test_column_transformer_remainder_pandas(key): + # test different ways that columns are specified with passthrough + pd = pytest.importorskip("pandas") + if isinstance(key, str) and key == "pd-index": + key = pd.Index(["first"]) + + X_array = np.array([[0, 1, 2], [2, 4, 6]]).T + X_df = pd.DataFrame(X_array, columns=["first", "second"]) + X_res_both = X_array + + ct = ColumnTransformer([("trans1", Trans(), key)], remainder="passthrough") + 
assert_array_equal(ct.fit_transform(X_df), X_res_both) + assert_array_equal(ct.fit(X_df).transform(X_df), X_res_both) + assert len(ct.transformers_) == 2 + assert ct.transformers_[-1][0] == "remainder" + assert isinstance(ct.transformers_[-1][1], FunctionTransformer) + assert_array_equal(ct.transformers_[-1][2], [1]) + + +@pytest.mark.parametrize( + "key", [[0], np.array([0]), slice(0, 1), np.array([True, False, False])] +) +def test_column_transformer_remainder_transformer(key): + X_array = np.array([[0, 1, 2], [2, 4, 6], [8, 6, 4]]).T + X_res_both = X_array.copy() + + # second and third columns are doubled when remainder = DoubleTrans + X_res_both[:, 1:3] *= 2 + + ct = ColumnTransformer([("trans1", Trans(), key)], remainder=DoubleTrans()) + + assert_array_equal(ct.fit_transform(X_array), X_res_both) + assert_array_equal(ct.fit(X_array).transform(X_array), X_res_both) + assert len(ct.transformers_) == 2 + assert ct.transformers_[-1][0] == "remainder" + assert isinstance(ct.transformers_[-1][1], DoubleTrans) + assert_array_equal(ct.transformers_[-1][2], [1, 2]) + + +def test_column_transformer_no_remaining_remainder_transformer(): + X_array = np.array([[0, 1, 2], [2, 4, 6], [8, 6, 4]]).T + + ct = ColumnTransformer([("trans1", Trans(), [0, 1, 2])], remainder=DoubleTrans()) + + assert_array_equal(ct.fit_transform(X_array), X_array) + assert_array_equal(ct.fit(X_array).transform(X_array), X_array) + assert len(ct.transformers_) == 1 + assert ct.transformers_[-1][0] != "remainder" + + +def test_column_transformer_drops_all_remainder_transformer(): + X_array = np.array([[0, 1, 2], [2, 4, 6], [8, 6, 4]]).T + + # columns are doubled when remainder = DoubleTrans + X_res_both = 2 * X_array.copy()[:, 1:3] + + ct = ColumnTransformer([("trans1", "drop", [0])], remainder=DoubleTrans()) + + assert_array_equal(ct.fit_transform(X_array), X_res_both) + assert_array_equal(ct.fit(X_array).transform(X_array), X_res_both) + assert len(ct.transformers_) == 2 + assert ct.transformers_[-1][0] == "remainder" + assert isinstance(ct.transformers_[-1][1], DoubleTrans) + assert_array_equal(ct.transformers_[-1][2], [1, 2]) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_column_transformer_sparse_remainder_transformer(csr_container): + X_array = np.array([[0, 1, 2], [2, 4, 6], [8, 6, 4]]).T + + ct = ColumnTransformer( + [("trans1", Trans(), [0])], + remainder=SparseMatrixTrans(csr_container), + sparse_threshold=0.8, + ) + + X_trans = ct.fit_transform(X_array) + assert sparse.issparse(X_trans) + # SparseMatrixTrans creates 3 features for each column. 
There is + # one column in ``transformers``, thus: + assert X_trans.shape == (3, 3 + 1) + + exp_array = np.hstack((X_array[:, 0].reshape(-1, 1), np.eye(3))) + assert_array_equal(X_trans.toarray(), exp_array) + assert len(ct.transformers_) == 2 + assert ct.transformers_[-1][0] == "remainder" + assert isinstance(ct.transformers_[-1][1], SparseMatrixTrans) + assert_array_equal(ct.transformers_[-1][2], [1, 2]) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_column_transformer_drop_all_sparse_remainder_transformer(csr_container): + X_array = np.array([[0, 1, 2], [2, 4, 6], [8, 6, 4]]).T + ct = ColumnTransformer( + [("trans1", "drop", [0])], + remainder=SparseMatrixTrans(csr_container), + sparse_threshold=0.8, + ) + + X_trans = ct.fit_transform(X_array) + assert sparse.issparse(X_trans) + + # SparseMatrixTrans creates 3 features for each column, thus: + assert X_trans.shape == (3, 3) + assert_array_equal(X_trans.toarray(), np.eye(3)) + assert len(ct.transformers_) == 2 + assert ct.transformers_[-1][0] == "remainder" + assert isinstance(ct.transformers_[-1][1], SparseMatrixTrans) + assert_array_equal(ct.transformers_[-1][2], [1, 2]) + + +def test_column_transformer_get_set_params_with_remainder(): + ct = ColumnTransformer( + [("trans1", StandardScaler(), [0])], remainder=StandardScaler() + ) + + exp = { + "n_jobs": None, + "remainder": ct.remainder, + "remainder__copy": True, + "remainder__with_mean": True, + "remainder__with_std": True, + "sparse_threshold": 0.3, + "trans1": ct.transformers[0][1], + "trans1__copy": True, + "trans1__with_mean": True, + "trans1__with_std": True, + "transformers": ct.transformers, + "transformer_weights": None, + "verbose_feature_names_out": True, + "verbose": False, + } + + assert ct.get_params() == exp + + ct.set_params(remainder__with_std=False) + assert not ct.get_params()["remainder__with_std"] + + ct.set_params(trans1="passthrough") + exp = { + "n_jobs": None, + "remainder": ct.remainder, + "remainder__copy": True, + "remainder__with_mean": True, + "remainder__with_std": False, + "sparse_threshold": 0.3, + "trans1": "passthrough", + "transformers": ct.transformers, + "transformer_weights": None, + "verbose_feature_names_out": True, + "verbose": False, + } + assert ct.get_params() == exp + + +def test_column_transformer_no_estimators(): + X_array = np.array([[0, 1, 2], [2, 4, 6], [8, 6, 4]]).astype("float").T + ct = ColumnTransformer([], remainder=StandardScaler()) + + params = ct.get_params() + assert params["remainder__with_mean"] + + X_trans = ct.fit_transform(X_array) + assert X_trans.shape == X_array.shape + assert len(ct.transformers_) == 1 + assert ct.transformers_[-1][0] == "remainder" + assert ct.transformers_[-1][2] == [0, 1, 2] + + +@pytest.mark.parametrize( + ["est", "pattern"], + [ + ( + ColumnTransformer( + [("trans1", Trans(), [0]), ("trans2", Trans(), [1])], + remainder=DoubleTrans(), + ), + ( + r"\[ColumnTransformer\].*\(1 of 3\) Processing trans1.* total=.*\n" + r"\[ColumnTransformer\].*\(2 of 3\) Processing trans2.* total=.*\n" + r"\[ColumnTransformer\].*\(3 of 3\) Processing remainder.* total=.*\n$" + ), + ), + ( + ColumnTransformer( + [("trans1", Trans(), [0]), ("trans2", Trans(), [1])], + remainder="passthrough", + ), + ( + r"\[ColumnTransformer\].*\(1 of 3\) Processing trans1.* total=.*\n" + r"\[ColumnTransformer\].*\(2 of 3\) Processing trans2.* total=.*\n" + r"\[ColumnTransformer\].*\(3 of 3\) Processing remainder.* total=.*\n$" + ), + ), + ( + ColumnTransformer( + [("trans1", Trans(), [0]), ("trans2", "drop", 
[1])], + remainder="passthrough", + ), + ( + r"\[ColumnTransformer\].*\(1 of 2\) Processing trans1.* total=.*\n" + r"\[ColumnTransformer\].*\(2 of 2\) Processing remainder.* total=.*\n$" + ), + ), + ( + ColumnTransformer( + [("trans1", Trans(), [0]), ("trans2", "passthrough", [1])], + remainder="passthrough", + ), + ( + r"\[ColumnTransformer\].*\(1 of 3\) Processing trans1.* total=.*\n" + r"\[ColumnTransformer\].*\(2 of 3\) Processing trans2.* total=.*\n" + r"\[ColumnTransformer\].*\(3 of 3\) Processing remainder.* total=.*\n$" + ), + ), + ( + ColumnTransformer([("trans1", Trans(), [0])], remainder="passthrough"), + ( + r"\[ColumnTransformer\].*\(1 of 2\) Processing trans1.* total=.*\n" + r"\[ColumnTransformer\].*\(2 of 2\) Processing remainder.* total=.*\n$" + ), + ), + ( + ColumnTransformer( + [("trans1", Trans(), [0]), ("trans2", Trans(), [1])], remainder="drop" + ), + ( + r"\[ColumnTransformer\].*\(1 of 2\) Processing trans1.* total=.*\n" + r"\[ColumnTransformer\].*\(2 of 2\) Processing trans2.* total=.*\n$" + ), + ), + ( + ColumnTransformer([("trans1", Trans(), [0])], remainder="drop"), + r"\[ColumnTransformer\].*\(1 of 1\) Processing trans1.* total=.*\n$", + ), + ], +) +@pytest.mark.parametrize("method", ["fit", "fit_transform"]) +def test_column_transformer_verbose(est, pattern, method, capsys): + X_array = np.array([[0, 1, 2], [2, 4, 6], [8, 6, 4]]).T + + func = getattr(est, method) + est.set_params(verbose=False) + func(X_array) + assert not capsys.readouterr().out, "Got output for verbose=False" + + est.set_params(verbose=True) + func(X_array) + assert re.match(pattern, capsys.readouterr()[0]) + + +def test_column_transformer_no_estimators_set_params(): + ct = ColumnTransformer([]).set_params(n_jobs=2) + assert ct.n_jobs == 2 + + +def test_column_transformer_callable_specifier(): + # assert that function gets the full array + X_array = np.array([[0, 1, 2], [2, 4, 6]]).T + X_res_first = np.array([[0, 1, 2]]).T + + def func(X): + assert_array_equal(X, X_array) + return [0] + + ct = ColumnTransformer([("trans", Trans(), func)], remainder="drop") + assert_array_equal(ct.fit_transform(X_array), X_res_first) + assert_array_equal(ct.fit(X_array).transform(X_array), X_res_first) + assert callable(ct.transformers[0][2]) + assert ct.transformers_[0][2] == [0] + + +def test_column_transformer_callable_specifier_dataframe(): + # assert that function gets the full dataframe + pd = pytest.importorskip("pandas") + X_array = np.array([[0, 1, 2], [2, 4, 6]]).T + X_res_first = np.array([[0, 1, 2]]).T + + X_df = pd.DataFrame(X_array, columns=["first", "second"]) + + def func(X): + assert_array_equal(X.columns, X_df.columns) + assert_array_equal(X.values, X_df.values) + return ["first"] + + ct = ColumnTransformer([("trans", Trans(), func)], remainder="drop") + assert_array_equal(ct.fit_transform(X_df), X_res_first) + assert_array_equal(ct.fit(X_df).transform(X_df), X_res_first) + assert callable(ct.transformers[0][2]) + assert ct.transformers_[0][2] == ["first"] + + +def test_column_transformer_negative_column_indexes(): + X = np.random.randn(2, 2) + X_categories = np.array([[1], [2]]) + X = np.concatenate([X, X_categories], axis=1) + + ohe = OneHotEncoder() + + tf_1 = ColumnTransformer([("ohe", ohe, [-1])], remainder="passthrough") + tf_2 = ColumnTransformer([("ohe", ohe, [2])], remainder="passthrough") + assert_array_equal(tf_1.fit_transform(X), tf_2.fit_transform(X)) + + +@pytest.mark.parametrize("array_type", [np.asarray, *CSR_CONTAINERS]) +def test_column_transformer_mask_indexing(array_type): 
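+    """Boolean masks must select the same columns for dense and sparse inputs."""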
+ # Regression test for #14510 + # Boolean array-like does not behave as boolean array with sparse matrices. + X = np.transpose([[1, 2, 3], [4, 5, 6], [5, 6, 7], [8, 9, 10]]) + X = array_type(X) + column_transformer = ColumnTransformer( + [("identity", FunctionTransformer(), [False, True, False, True])] + ) + X_trans = column_transformer.fit_transform(X) + assert X_trans.shape == (3, 2) + + +def test_n_features_in(): + # make sure n_features_in is what is passed as input to the column + # transformer. + + X = [[1, 2], [3, 4], [5, 6]] + ct = ColumnTransformer([("a", DoubleTrans(), [0]), ("b", DoubleTrans(), [1])]) + assert not hasattr(ct, "n_features_in_") + ct.fit(X) + assert ct.n_features_in_ == 2 + + +@pytest.mark.parametrize( + "cols, pattern, include, exclude", + [ + (["col_int", "col_float"], None, np.number, None), + (["col_int", "col_float"], None, None, object), + (["col_int", "col_float"], None, [int, float], None), + (["col_str"], None, [object], None), + (["col_str"], None, object, None), + (["col_float"], None, float, None), + (["col_float"], "at$", [np.number], None), + (["col_int"], None, [int], None), + (["col_int"], "^col_int", [np.number], None), + (["col_float", "col_str"], "float|str", None, None), + (["col_str"], "^col_s", None, [int]), + ([], "str$", float, None), + (["col_int", "col_float", "col_str"], None, [np.number, object], None), + ], +) +def test_make_column_selector_with_select_dtypes(cols, pattern, include, exclude): + pd = pytest.importorskip("pandas") + + X_df = pd.DataFrame( + { + "col_int": np.array([0, 1, 2], dtype=int), + "col_float": np.array([0.0, 1.0, 2.0], dtype=float), + "col_str": ["one", "two", "three"], + }, + columns=["col_int", "col_float", "col_str"], + ) + + selector = make_column_selector( + dtype_include=include, dtype_exclude=exclude, pattern=pattern + ) + + assert_array_equal(selector(X_df), cols) + + +def test_column_transformer_with_make_column_selector(): + # Functional test for column transformer + column selector + pd = pytest.importorskip("pandas") + X_df = pd.DataFrame( + { + "col_int": np.array([0, 1, 2], dtype=int), + "col_float": np.array([0.0, 1.0, 2.0], dtype=float), + "col_cat": ["one", "two", "one"], + "col_str": ["low", "middle", "high"], + }, + columns=["col_int", "col_float", "col_cat", "col_str"], + ) + X_df["col_str"] = X_df["col_str"].astype("category") + + cat_selector = make_column_selector(dtype_include=["category", object]) + num_selector = make_column_selector(dtype_include=np.number) + + ohe = OneHotEncoder() + scaler = StandardScaler() + + ct_selector = make_column_transformer((ohe, cat_selector), (scaler, num_selector)) + ct_direct = make_column_transformer( + (ohe, ["col_cat", "col_str"]), (scaler, ["col_float", "col_int"]) + ) + + X_selector = ct_selector.fit_transform(X_df) + X_direct = ct_direct.fit_transform(X_df) + + assert_allclose(X_selector, X_direct) + + +def test_make_column_selector_error(): + selector = make_column_selector(dtype_include=np.number) + X = np.array([[0.1, 0.2]]) + msg = "make_column_selector can only be applied to pandas dataframes" + with pytest.raises(ValueError, match=msg): + selector(X) + + +def test_make_column_selector_pickle(): + pd = pytest.importorskip("pandas") + + X_df = pd.DataFrame( + { + "col_int": np.array([0, 1, 2], dtype=int), + "col_float": np.array([0.0, 1.0, 2.0], dtype=float), + "col_str": ["one", "two", "three"], + }, + columns=["col_int", "col_float", "col_str"], + ) + + selector = make_column_selector(dtype_include=[object]) + selector_picked = 
pickle.loads(pickle.dumps(selector)) + + assert_array_equal(selector(X_df), selector_picked(X_df)) + + +@pytest.mark.parametrize( + "empty_col", + [[], np.array([], dtype=int), lambda x: []], + ids=["list", "array", "callable"], +) +def test_feature_names_empty_columns(empty_col): + pd = pytest.importorskip("pandas") + + df = pd.DataFrame({"col1": ["a", "a", "b"], "col2": ["z", "z", "z"]}) + + ct = ColumnTransformer( + transformers=[ + ("ohe", OneHotEncoder(), ["col1", "col2"]), + ("empty_features", OneHotEncoder(), empty_col), + ], + ) + + ct.fit(df) + assert_array_equal( + ct.get_feature_names_out(), ["ohe__col1_a", "ohe__col1_b", "ohe__col2_z"] + ) + + +@pytest.mark.parametrize( + "selector", + [ + [1], + lambda x: [1], + ["col2"], + lambda x: ["col2"], + [False, True], + lambda x: [False, True], + ], +) +def test_feature_names_out_pandas(selector): + """Checks name when selecting only the second column""" + pd = pytest.importorskip("pandas") + df = pd.DataFrame({"col1": ["a", "a", "b"], "col2": ["z", "z", "z"]}) + ct = ColumnTransformer([("ohe", OneHotEncoder(), selector)]) + ct.fit(df) + + assert_array_equal(ct.get_feature_names_out(), ["ohe__col2_z"]) + + +@pytest.mark.parametrize( + "selector", [[1], lambda x: [1], [False, True], lambda x: [False, True]] +) +def test_feature_names_out_non_pandas(selector): + """Checks name when selecting the second column with numpy array""" + X = [["a", "z"], ["a", "z"], ["b", "z"]] + ct = ColumnTransformer([("ohe", OneHotEncoder(), selector)]) + ct.fit(X) + + assert_array_equal(ct.get_feature_names_out(), ["ohe__x1_z"]) + + +@pytest.mark.parametrize("remainder", ["passthrough", StandardScaler()]) +def test_sk_visual_block_remainder(remainder): + # remainder='passthrough' or an estimator will be shown in repr_html + ohe = OneHotEncoder() + ct = ColumnTransformer( + transformers=[("ohe", ohe, ["col1", "col2"])], remainder=remainder + ) + visual_block = ct._sk_visual_block_() + assert visual_block.names == ("ohe", "remainder") + assert visual_block.name_details == (["col1", "col2"], "") + assert visual_block.estimators == (ohe, remainder) + + +def test_sk_visual_block_remainder_drop(): + # remainder='drop' is not shown in repr_html + ohe = OneHotEncoder() + ct = ColumnTransformer(transformers=[("ohe", ohe, ["col1", "col2"])]) + visual_block = ct._sk_visual_block_() + assert visual_block.names == ("ohe",) + assert visual_block.name_details == (["col1", "col2"],) + assert visual_block.estimators == (ohe,) + + +@pytest.mark.parametrize("remainder", ["passthrough", StandardScaler()]) +def test_sk_visual_block_remainder_fitted_pandas(remainder): + # Remainder shows the columns after fitting + pd = pytest.importorskip("pandas") + ohe = OneHotEncoder() + ct = ColumnTransformer( + transformers=[("ohe", ohe, ["col1", "col2"])], remainder=remainder + ) + df = pd.DataFrame( + { + "col1": ["a", "b", "c"], + "col2": ["z", "z", "z"], + "col3": [1, 2, 3], + "col4": [3, 4, 5], + } + ) + ct.fit(df) + visual_block = ct._sk_visual_block_() + assert visual_block.names == ("ohe", "remainder") + assert visual_block.name_details == (["col1", "col2"], ["col3", "col4"]) + assert visual_block.estimators == (ohe, remainder) + + +@pytest.mark.parametrize("remainder", ["passthrough", StandardScaler()]) +def test_sk_visual_block_remainder_fitted_numpy(remainder): + # Remainder shows the indices after fitting + X = np.array([[1, 2, 3], [4, 5, 6]], dtype=float) + scaler = StandardScaler() + ct = ColumnTransformer( + transformers=[("scale", scaler, [0, 2])], remainder=remainder + ) 
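+    # Column 1 is the only column left for the remainder, so after fitting the
+    # visual block should report its selection as the index list [1].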
+ ct.fit(X) + visual_block = ct._sk_visual_block_() + assert visual_block.names == ("scale", "remainder") + assert visual_block.name_details == ([0, 2], [1]) + assert visual_block.estimators == (scaler, remainder) + + +@pytest.mark.parametrize("explicit_colname", ["first", "second", 0, 1]) +@pytest.mark.parametrize("remainder", [Trans(), "passthrough", "drop"]) +def test_column_transformer_reordered_column_names_remainder( + explicit_colname, remainder +): + """Test the interaction between remainder and column transformer""" + pd = pytest.importorskip("pandas") + + X_fit_array = np.array([[0, 1, 2], [2, 4, 6]]).T + X_fit_df = pd.DataFrame(X_fit_array, columns=["first", "second"]) + + X_trans_array = np.array([[2, 4, 6], [0, 1, 2]]).T + X_trans_df = pd.DataFrame(X_trans_array, columns=["second", "first"]) + + tf = ColumnTransformer([("bycol", Trans(), explicit_colname)], remainder=remainder) + + tf.fit(X_fit_df) + X_fit_trans = tf.transform(X_fit_df) + + # Changing the order still works + X_trans = tf.transform(X_trans_df) + assert_allclose(X_trans, X_fit_trans) + + # extra columns are ignored + X_extended_df = X_fit_df.copy() + X_extended_df["third"] = [3, 6, 9] + X_trans = tf.transform(X_extended_df) + assert_allclose(X_trans, X_fit_trans) + + if isinstance(explicit_colname, str): + # Raise error if columns are specified by names but input only allows + # to specify by position, e.g. numpy array instead of a pandas df. + X_array = X_fit_array.copy() + err_msg = "Specifying the columns" + with pytest.raises(ValueError, match=err_msg): + tf.transform(X_array) + + +def test_feature_name_validation_missing_columns_drop_passthough(): + """Test the interaction between {'drop', 'passthrough'} and + missing column names.""" + pd = pytest.importorskip("pandas") + + X = np.ones(shape=(3, 4)) + df = pd.DataFrame(X, columns=["a", "b", "c", "d"]) + + df_dropped = df.drop("c", axis=1) + + # with remainder='passthrough', all columns seen during `fit` must be + # present + tf = ColumnTransformer([("bycol", Trans(), [1])], remainder="passthrough") + tf.fit(df) + msg = r"columns are missing: {'c'}" + with pytest.raises(ValueError, match=msg): + tf.transform(df_dropped) + + # with remainder='drop', it is allowed to have column 'c' missing + tf = ColumnTransformer([("bycol", Trans(), [1])], remainder="drop") + tf.fit(df) + + df_dropped_trans = tf.transform(df_dropped) + df_fit_trans = tf.transform(df) + assert_allclose(df_dropped_trans, df_fit_trans) + + # bycol drops 'c', thus it is allowed for 'c' to be missing + tf = ColumnTransformer([("bycol", "drop", ["c"])], remainder="passthrough") + tf.fit(df) + df_dropped_trans = tf.transform(df_dropped) + df_fit_trans = tf.transform(df) + assert_allclose(df_dropped_trans, df_fit_trans) + + +def test_feature_names_in_(): + """Feature names are stored in column transformer. + + Column transformer deliberately does not check for column name consistency. + It only checks that the non-dropped names seen in `fit` are seen + in `transform`. 
This behavior is already tested in + `test_feature_name_validation_missing_columns_drop_passthough`""" + + pd = pytest.importorskip("pandas") + + feature_names = ["a", "c", "d"] + df = pd.DataFrame([[1, 2, 3]], columns=feature_names) + ct = ColumnTransformer([("bycol", Trans(), ["a", "d"])], remainder="passthrough") + + ct.fit(df) + assert_array_equal(ct.feature_names_in_, feature_names) + assert isinstance(ct.feature_names_in_, np.ndarray) + assert ct.feature_names_in_.dtype == object + + +class TransWithNames(Trans): + def __init__(self, feature_names_out=None): + self.feature_names_out = feature_names_out + + def get_feature_names_out(self, input_features=None): + if self.feature_names_out is not None: + return np.asarray(self.feature_names_out, dtype=object) + return input_features + + +@pytest.mark.parametrize( + "transformers, remainder, expected_names", + [ + ( + [ + ("bycol1", TransWithNames(), ["d", "c"]), + ("bycol2", "passthrough", ["d"]), + ], + "passthrough", + ["bycol1__d", "bycol1__c", "bycol2__d", "remainder__a", "remainder__b"], + ), + ( + [ + ("bycol1", TransWithNames(), ["d", "c"]), + ("bycol2", "passthrough", ["d"]), + ], + "drop", + ["bycol1__d", "bycol1__c", "bycol2__d"], + ), + ( + [ + ("bycol1", TransWithNames(), ["b"]), + ("bycol2", "drop", ["d"]), + ], + "passthrough", + ["bycol1__b", "remainder__a", "remainder__c"], + ), + ( + [ + ("bycol1", TransWithNames(["pca1", "pca2"]), ["a", "b", "d"]), + ], + "passthrough", + ["bycol1__pca1", "bycol1__pca2", "remainder__c"], + ), + ( + [ + ("bycol1", TransWithNames(["a", "b"]), ["d"]), + ("bycol2", "passthrough", ["b"]), + ], + "drop", + ["bycol1__a", "bycol1__b", "bycol2__b"], + ), + ( + [ + ("bycol1", TransWithNames([f"pca{i}" for i in range(2)]), ["b"]), + ("bycol2", TransWithNames([f"pca{i}" for i in range(2)]), ["b"]), + ], + "passthrough", + [ + "bycol1__pca0", + "bycol1__pca1", + "bycol2__pca0", + "bycol2__pca1", + "remainder__a", + "remainder__c", + "remainder__d", + ], + ), + ( + [ + ("bycol1", "drop", ["d"]), + ], + "drop", + [], + ), + ( + [ + ("bycol1", TransWithNames(), slice(1, 3)), + ], + "drop", + ["bycol1__b", "bycol1__c"], + ), + ( + [ + ("bycol1", TransWithNames(), ["b"]), + ("bycol2", "drop", slice(3, 4)), + ], + "passthrough", + ["bycol1__b", "remainder__a", "remainder__c"], + ), + ( + [ + ("bycol1", TransWithNames(), ["d", "c"]), + ("bycol2", "passthrough", slice(3, 4)), + ], + "passthrough", + ["bycol1__d", "bycol1__c", "bycol2__d", "remainder__a", "remainder__b"], + ), + ( + [ + ("bycol1", TransWithNames(), slice("b", "c")), + ], + "drop", + ["bycol1__b", "bycol1__c"], + ), + ( + [ + ("bycol1", TransWithNames(), ["b"]), + ("bycol2", "drop", slice("c", "d")), + ], + "passthrough", + ["bycol1__b", "remainder__a"], + ), + ( + [ + ("bycol1", TransWithNames(), ["d", "c"]), + ("bycol2", "passthrough", slice("c", "d")), + ], + "passthrough", + [ + "bycol1__d", + "bycol1__c", + "bycol2__c", + "bycol2__d", + "remainder__a", + "remainder__b", + ], + ), + ], +) +def test_verbose_feature_names_out_true(transformers, remainder, expected_names): + """Check feature_names_out for verbose_feature_names_out=True (default)""" + pd = pytest.importorskip("pandas") + df = pd.DataFrame([[1, 2, 3, 4]], columns=["a", "b", "c", "d"]) + ct = ColumnTransformer( + transformers, + remainder=remainder, + ) + ct.fit(df) + + names = ct.get_feature_names_out() + assert isinstance(names, np.ndarray) + assert names.dtype == object + assert_array_equal(names, expected_names) + + +@pytest.mark.parametrize( + "transformers, remainder, 
expected_names", + [ + ( + [ + ("bycol1", TransWithNames(), ["d", "c"]), + ("bycol2", "passthrough", ["a"]), + ], + "passthrough", + ["d", "c", "a", "b"], + ), + ( + [ + ("bycol1", TransWithNames(["a"]), ["d", "c"]), + ("bycol2", "passthrough", ["d"]), + ], + "drop", + ["a", "d"], + ), + ( + [ + ("bycol1", TransWithNames(), ["b"]), + ("bycol2", "drop", ["d"]), + ], + "passthrough", + ["b", "a", "c"], + ), + ( + [ + ("bycol1", TransWithNames(["pca1", "pca2"]), ["a", "b", "d"]), + ], + "passthrough", + ["pca1", "pca2", "c"], + ), + ( + [ + ("bycol1", TransWithNames(["a", "c"]), ["d"]), + ("bycol2", "passthrough", ["d"]), + ], + "drop", + ["a", "c", "d"], + ), + ( + [ + ("bycol1", TransWithNames([f"pca{i}" for i in range(2)]), ["b"]), + ("bycol2", TransWithNames([f"kpca{i}" for i in range(2)]), ["b"]), + ], + "passthrough", + ["pca0", "pca1", "kpca0", "kpca1", "a", "c", "d"], + ), + ( + [ + ("bycol1", "drop", ["d"]), + ], + "drop", + [], + ), + ( + [ + ("bycol1", TransWithNames(), slice(1, 2)), + ("bycol2", "drop", ["d"]), + ], + "passthrough", + ["b", "a", "c"], + ), + ( + [ + ("bycol1", TransWithNames(), ["b"]), + ("bycol2", "drop", slice(3, 4)), + ], + "passthrough", + ["b", "a", "c"], + ), + ( + [ + ("bycol1", TransWithNames(), ["d", "c"]), + ("bycol2", "passthrough", slice(0, 2)), + ], + "drop", + ["d", "c", "a", "b"], + ), + ( + [ + ("bycol1", TransWithNames(), slice("a", "b")), + ("bycol2", "drop", ["d"]), + ], + "passthrough", + ["a", "b", "c"], + ), + ( + [ + ("bycol1", TransWithNames(), ["b"]), + ("bycol2", "drop", slice("c", "d")), + ], + "passthrough", + ["b", "a"], + ), + ( + [ + ("bycol1", TransWithNames(), ["d", "c"]), + ("bycol2", "passthrough", slice("a", "b")), + ], + "drop", + ["d", "c", "a", "b"], + ), + ( + [ + ("bycol1", TransWithNames(), ["d", "c"]), + ("bycol2", "passthrough", slice("b", "b")), + ], + "drop", + ["d", "c", "b"], + ), + ], +) +def test_verbose_feature_names_out_false(transformers, remainder, expected_names): + """Check feature_names_out for verbose_feature_names_out=False""" + pd = pytest.importorskip("pandas") + df = pd.DataFrame([[1, 2, 3, 4]], columns=["a", "b", "c", "d"]) + ct = ColumnTransformer( + transformers, + remainder=remainder, + verbose_feature_names_out=False, + ) + ct.fit(df) + + names = ct.get_feature_names_out() + assert isinstance(names, np.ndarray) + assert names.dtype == object + assert_array_equal(names, expected_names) + + +@pytest.mark.parametrize( + "transformers, remainder, colliding_columns", + [ + ( + [ + ("bycol1", TransWithNames(), ["b"]), + ("bycol2", "passthrough", ["b"]), + ], + "drop", + "['b']", + ), + ( + [ + ("bycol1", TransWithNames(["c", "d"]), ["c"]), + ("bycol2", "passthrough", ["c"]), + ], + "drop", + "['c']", + ), + ( + [ + ("bycol1", TransWithNames(["a"]), ["b"]), + ("bycol2", "passthrough", ["b"]), + ], + "passthrough", + "['a']", + ), + ( + [ + ("bycol1", TransWithNames(["a"]), ["b"]), + ("bycol2", "drop", ["b"]), + ], + "passthrough", + "['a']", + ), + ( + [ + ("bycol1", TransWithNames(["c", "b"]), ["b"]), + ("bycol2", "passthrough", ["c", "b"]), + ], + "drop", + "['b', 'c']", + ), + ( + [ + ("bycol1", TransWithNames(["a"]), ["b"]), + ("bycol2", "passthrough", ["a"]), + ("bycol3", TransWithNames(["a"]), ["b"]), + ], + "passthrough", + "['a']", + ), + ( + [ + ("bycol1", TransWithNames(["a", "b"]), ["b"]), + ("bycol2", "passthrough", ["a"]), + ("bycol3", TransWithNames(["b"]), ["c"]), + ], + "passthrough", + "['a', 'b']", + ), + ( + [ + ("bycol1", TransWithNames([f"pca{i}" for i in range(6)]), ["b"]), + 
("bycol2", TransWithNames([f"pca{i}" for i in range(6)]), ["b"]), + ], + "passthrough", + "['pca0', 'pca1', 'pca2', 'pca3', 'pca4', ...]", + ), + ( + [ + ("bycol1", TransWithNames(["a", "b"]), slice(1, 2)), + ("bycol2", "passthrough", ["a"]), + ("bycol3", TransWithNames(["b"]), ["c"]), + ], + "passthrough", + "['a', 'b']", + ), + ( + [ + ("bycol1", TransWithNames(["a", "b"]), ["b"]), + ("bycol2", "passthrough", slice(0, 1)), + ("bycol3", TransWithNames(["b"]), ["c"]), + ], + "passthrough", + "['a', 'b']", + ), + ( + [ + ("bycol1", TransWithNames(["a", "b"]), slice("b", "c")), + ("bycol2", "passthrough", ["a"]), + ("bycol3", TransWithNames(["b"]), ["c"]), + ], + "passthrough", + "['a', 'b']", + ), + ( + [ + ("bycol1", TransWithNames(["a", "b"]), ["b"]), + ("bycol2", "passthrough", slice("a", "a")), + ("bycol3", TransWithNames(["b"]), ["c"]), + ], + "passthrough", + "['a', 'b']", + ), + ], +) +def test_verbose_feature_names_out_false_errors( + transformers, remainder, colliding_columns +): + """Check feature_names_out for verbose_feature_names_out=False""" + + pd = pytest.importorskip("pandas") + df = pd.DataFrame([[1, 2, 3, 4]], columns=["a", "b", "c", "d"]) + ct = ColumnTransformer( + transformers, + remainder=remainder, + verbose_feature_names_out=False, + ) + ct.fit(df) + + msg = re.escape( + f"Output feature names: {colliding_columns} are not unique. Please set " + "verbose_feature_names_out=True to add prefixes to feature names" + ) + with pytest.raises(ValueError, match=msg): + ct.get_feature_names_out() + + +@pytest.mark.parametrize("verbose_feature_names_out", [True, False]) +@pytest.mark.parametrize("remainder", ["drop", "passthrough"]) +def test_column_transformer_set_output(verbose_feature_names_out, remainder): + """Check column transformer behavior with set_output.""" + pd = pytest.importorskip("pandas") + df = pd.DataFrame([[1, 2, 3, 4]], columns=["a", "b", "c", "d"], index=[10]) + ct = ColumnTransformer( + [("first", TransWithNames(), ["a", "c"]), ("second", TransWithNames(), ["d"])], + remainder=remainder, + verbose_feature_names_out=verbose_feature_names_out, + ) + X_trans = ct.fit_transform(df) + assert isinstance(X_trans, np.ndarray) + + ct.set_output(transform="pandas") + + df_test = pd.DataFrame([[1, 2, 3, 4]], columns=df.columns, index=[20]) + X_trans = ct.transform(df_test) + assert isinstance(X_trans, pd.DataFrame) + + feature_names_out = ct.get_feature_names_out() + assert_array_equal(X_trans.columns, feature_names_out) + assert_array_equal(X_trans.index, df_test.index) + + +@pytest.mark.parametrize("remainder", ["drop", "passthrough"]) +@pytest.mark.parametrize("fit_transform", [True, False]) +def test_column_transform_set_output_mixed(remainder, fit_transform): + """Check ColumnTransformer outputs mixed types correctly.""" + pd = pytest.importorskip("pandas") + df = pd.DataFrame( + { + "pet": pd.Series(["dog", "cat", "snake"], dtype="category"), + "color": pd.Series(["green", "blue", "red"], dtype="object"), + "age": [1.4, 2.1, 4.4], + "height": [20, 40, 10], + "distance": pd.Series([20, pd.NA, 100], dtype="Int32"), + } + ) + ct = ColumnTransformer( + [ + ( + "color_encode", + OneHotEncoder(sparse_output=False, dtype="int8"), + ["color"], + ), + ("age", StandardScaler(), ["age"]), + ], + remainder=remainder, + verbose_feature_names_out=False, + ).set_output(transform="pandas") + if fit_transform: + X_trans = ct.fit_transform(df) + else: + X_trans = ct.fit(df).transform(df) + + assert isinstance(X_trans, pd.DataFrame) + assert_array_equal(X_trans.columns, 
ct.get_feature_names_out()) + + expected_dtypes = { + "color_blue": "int8", + "color_green": "int8", + "color_red": "int8", + "age": "float64", + "pet": "category", + "height": "int64", + "distance": "Int32", + } + for col, dtype in X_trans.dtypes.items(): + assert dtype == expected_dtypes[col] + + +@pytest.mark.parametrize("remainder", ["drop", "passthrough"]) +def test_column_transform_set_output_after_fitting(remainder): + pd = pytest.importorskip("pandas") + df = pd.DataFrame( + { + "pet": pd.Series(["dog", "cat", "snake"], dtype="category"), + "age": [1.4, 2.1, 4.4], + "height": [20, 40, 10], + } + ) + ct = ColumnTransformer( + [ + ( + "color_encode", + OneHotEncoder(sparse_output=False, dtype="int16"), + ["pet"], + ), + ("age", StandardScaler(), ["age"]), + ], + remainder=remainder, + verbose_feature_names_out=False, + ) + + # fit without calling set_output + X_trans = ct.fit_transform(df) + assert isinstance(X_trans, np.ndarray) + assert X_trans.dtype == "float64" + + ct.set_output(transform="pandas") + X_trans_df = ct.transform(df) + expected_dtypes = { + "pet_cat": "int16", + "pet_dog": "int16", + "pet_snake": "int16", + "height": "int64", + "age": "float64", + } + for col, dtype in X_trans_df.dtypes.items(): + assert dtype == expected_dtypes[col] + + +# PandasOutTransformer that does not define get_feature_names_out and always expects +# the input to be a DataFrame. +class PandasOutTransformer(BaseEstimator): + def __init__(self, offset=1.0): + self.offset = offset + + def fit(self, X, y=None): + pd = pytest.importorskip("pandas") + assert isinstance(X, pd.DataFrame) + return self + + def transform(self, X, y=None): + pd = pytest.importorskip("pandas") + assert isinstance(X, pd.DataFrame) + return X - self.offset + + def set_output(self, transform=None): + # This transformer will always output a DataFrame regardless of the + # configuration. + return self + + +@pytest.mark.parametrize( + "trans_1, expected_verbose_names, expected_non_verbose_names", + [ + ( + PandasOutTransformer(offset=2.0), + ["trans_0__feat1", "trans_1__feat0"], + ["feat1", "feat0"], + ), + ( + "drop", + ["trans_0__feat1"], + ["feat1"], + ), + ( + "passthrough", + ["trans_0__feat1", "trans_1__feat0"], + ["feat1", "feat0"], + ), + ], +) +def test_transformers_with_pandas_out_but_not_feature_names_out( + trans_1, expected_verbose_names, expected_non_verbose_names +): + """Check that set_config(transform="pandas") is compatible with more transformers. + + Specifically, if transformers returns a DataFrame, but does not define + `get_feature_names_out`. + """ + pd = pytest.importorskip("pandas") + + X_df = pd.DataFrame({"feat0": [1.0, 2.0, 3.0], "feat1": [2.0, 3.0, 4.0]}) + ct = ColumnTransformer( + [ + ("trans_0", PandasOutTransformer(offset=3.0), ["feat1"]), + ("trans_1", trans_1, ["feat0"]), + ] + ) + X_trans_np = ct.fit_transform(X_df) + assert isinstance(X_trans_np, np.ndarray) + + # `ct` does not have `get_feature_names_out` because `PandasOutTransformer` does + # not define the method. 
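+    # Requesting the output feature names is therefore expected to raise.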
+ with pytest.raises(AttributeError, match="not provide get_feature_names_out"): + ct.get_feature_names_out() + + # The feature names are prefixed because verbose_feature_names_out=True is default + ct.set_output(transform="pandas") + X_trans_df0 = ct.fit_transform(X_df) + assert_array_equal(X_trans_df0.columns, expected_verbose_names) + + ct.set_params(verbose_feature_names_out=False) + X_trans_df1 = ct.fit_transform(X_df) + assert_array_equal(X_trans_df1.columns, expected_non_verbose_names) + + +@pytest.mark.parametrize( + "empty_selection", + [[], np.array([False, False]), [False, False]], + ids=["list", "bool", "bool_int"], +) +def test_empty_selection_pandas_output(empty_selection): + """Check that pandas output works when there is an empty selection. + + Non-regression test for gh-25487 + """ + pd = pytest.importorskip("pandas") + + X = pd.DataFrame([[1.0, 2.2], [3.0, 1.0]], columns=["a", "b"]) + ct = ColumnTransformer( + [ + ("categorical", "passthrough", empty_selection), + ("numerical", StandardScaler(), ["a", "b"]), + ], + verbose_feature_names_out=True, + ) + ct.set_output(transform="pandas") + X_out = ct.fit_transform(X) + assert_array_equal(X_out.columns, ["numerical__a", "numerical__b"]) + + ct.set_params(verbose_feature_names_out=False) + X_out = ct.fit_transform(X) + assert_array_equal(X_out.columns, ["a", "b"]) + + +def test_raise_error_if_index_not_aligned(): + """Check column transformer raises error if indices are not aligned. + + Non-regression test for gh-26210. + """ + pd = pytest.importorskip("pandas") + + X = pd.DataFrame([[1.0, 2.2], [3.0, 1.0]], columns=["a", "b"], index=[8, 3]) + reset_index_transformer = FunctionTransformer( + lambda x: x.reset_index(drop=True), feature_names_out="one-to-one" + ) + + ct = ColumnTransformer( + [ + ("num1", "passthrough", ["a"]), + ("num2", reset_index_transformer, ["b"]), + ], + ) + ct.set_output(transform="pandas") + msg = ( + "Concatenating DataFrames from the transformer's output lead to" + " an inconsistent number of samples. The output may have Pandas" + " Indexes that do not match." + ) + with pytest.raises(ValueError, match=msg): + ct.fit_transform(X) + + +def test_remainder_set_output(): + """Check that the output is set for the remainder. + + Non-regression test for #26306. + """ + + pd = pytest.importorskip("pandas") + df = pd.DataFrame({"a": [True, False, True], "b": [1, 2, 3]}) + + ct = make_column_transformer( + (VarianceThreshold(), make_column_selector(dtype_include=bool)), + remainder=VarianceThreshold(), + verbose_feature_names_out=False, + ) + ct.set_output(transform="pandas") + + out = ct.fit_transform(df) + pd.testing.assert_frame_equal(out, df) + + ct.set_output(transform="default") + out = ct.fit_transform(df) + assert isinstance(out, np.ndarray) + + +# TODO(1.6): replace the warning by a ValueError exception +def test_transform_pd_na(): + """Check behavior when a tranformer's output contains pandas.NA + + It should emit a warning unless the output config is set to 'pandas'. 
+ """ + pd = pytest.importorskip("pandas") + if not hasattr(pd, "Float64Dtype"): + pytest.skip( + "The issue with pd.NA tested here does not happen in old versions that do" + " not have the extension dtypes" + ) + df = pd.DataFrame({"a": [1.5, None]}) + ct = make_column_transformer(("passthrough", ["a"])) + # No warning with non-extension dtypes and np.nan + with warnings.catch_warnings(): + warnings.simplefilter("error") + ct.fit_transform(df) + df = df.convert_dtypes() + # Error with extension dtype and pd.NA + with pytest.warns(FutureWarning, match=r"set_output\(transform='pandas'\)"): + ct.fit_transform(df) + # No warning when output is set to pandas + with warnings.catch_warnings(): + warnings.simplefilter("error") + ct.set_output(transform="pandas") + ct.fit_transform(df) + ct.set_output(transform="default") + # No warning when there are no pd.NA + with warnings.catch_warnings(): + warnings.simplefilter("error") + ct.fit_transform(df.fillna(-1.0)) + + +def test_dataframe_different_dataframe_libraries(): + """Check fitting and transforming on pandas and polars dataframes.""" + pd = pytest.importorskip("pandas") + pl = pytest.importorskip("polars") + X_train_np = np.array([[0, 1], [2, 4], [4, 5]]) + X_test_np = np.array([[1, 2], [1, 3], [2, 3]]) + + # Fit on pandas and transform on polars + X_train_pd = pd.DataFrame(X_train_np, columns=["a", "b"]) + X_test_pl = pl.DataFrame(X_test_np, schema=["a", "b"]) + + ct = make_column_transformer((Trans(), [0, 1])) + ct.fit(X_train_pd) + + out_pl_in = ct.transform(X_test_pl) + assert_array_equal(out_pl_in, X_test_np) + + # Fit on polars and transform on pandas + X_train_pl = pl.DataFrame(X_train_np, schema=["a", "b"]) + X_test_pd = pd.DataFrame(X_test_np, columns=["a", "b"]) + ct.fit(X_train_pl) + + out_pd_in = ct.transform(X_test_pd) + assert_array_equal(out_pd_in, X_test_np) + + +@pytest.mark.parametrize("transform_output", ["default", "pandas"]) +def test_column_transformer_remainder_passthrough_naming_consistency(transform_output): + """Check that when `remainder="passthrough"`, inconsistent naming is handled + correctly by the underlying `FunctionTransformer`. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/28232 + """ + pd = pytest.importorskip("pandas") + X = pd.DataFrame(np.random.randn(10, 4)) + + preprocessor = ColumnTransformer( + transformers=[("scaler", StandardScaler(), [0, 1])], + remainder="passthrough", + ).set_output(transform=transform_output) + X_trans = preprocessor.fit_transform(X) + assert X_trans.shape == X.shape + + expected_column_names = [ + "scaler__x0", + "scaler__x1", + "remainder__x2", + "remainder__x3", + ] + if hasattr(X_trans, "columns"): + assert X_trans.columns.tolist() == expected_column_names + assert preprocessor.get_feature_names_out().tolist() == expected_column_names + + +@pytest.mark.parametrize("dataframe_lib", ["pandas", "polars"]) +def test_column_transformer_column_renaming(dataframe_lib): + """Check that we properly rename columns when using `ColumnTransformer` and + selected columns are redundant between transformers. 
+ + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/28260 + """ + lib = pytest.importorskip(dataframe_lib) + + df = lib.DataFrame({"x1": [1, 2, 3], "x2": [10, 20, 30], "x3": [100, 200, 300]}) + + transformer = ColumnTransformer( + transformers=[ + ("A", "passthrough", ["x1", "x2", "x3"]), + ("B", FunctionTransformer(), ["x1", "x2"]), + ("C", StandardScaler(), ["x1", "x3"]), + # special case of empty transformer + ("D", FunctionTransformer(lambda x: x[[]]), ["x1", "x2", "x3"]), + ], + verbose_feature_names_out=True, + ).set_output(transform=dataframe_lib) + df_trans = transformer.fit_transform(df) + assert list(df_trans.columns) == [ + "A__x1", + "A__x2", + "A__x3", + "B__x1", + "B__x2", + "C__x1", + "C__x3", + ] + + +@pytest.mark.parametrize("dataframe_lib", ["pandas", "polars"]) +def test_column_transformer_error_with_duplicated_columns(dataframe_lib): + """Check that we raise an error when using `ColumnTransformer` and + the columns names are duplicated between transformers.""" + lib = pytest.importorskip(dataframe_lib) + + df = lib.DataFrame({"x1": [1, 2, 3], "x2": [10, 20, 30], "x3": [100, 200, 300]}) + + transformer = ColumnTransformer( + transformers=[ + ("A", "passthrough", ["x1", "x2", "x3"]), + ("B", FunctionTransformer(), ["x1", "x2"]), + ("C", StandardScaler(), ["x1", "x3"]), + # special case of empty transformer + ("D", FunctionTransformer(lambda x: x[[]]), ["x1", "x2", "x3"]), + ], + verbose_feature_names_out=False, + ).set_output(transform=dataframe_lib) + err_msg = re.escape( + "Duplicated feature names found before concatenating the outputs of the " + "transformers: ['x1', 'x2', 'x3'].\n" + "Transformer A has conflicting columns names: ['x1', 'x2', 'x3'].\n" + "Transformer B has conflicting columns names: ['x1', 'x2'].\n" + "Transformer C has conflicting columns names: ['x1', 'x3'].\n" + ) + with pytest.raises(ValueError, match=err_msg): + transformer.fit_transform(df) + + +# Metadata Routing Tests +# ====================== + + +@pytest.mark.parametrize("method", ["transform", "fit_transform", "fit"]) +def test_routing_passed_metadata_not_supported(method): + """Test that the right error message is raised when metadata is passed while + not supported when `enable_metadata_routing=False`.""" + + X = np.array([[0, 1, 2], [2, 4, 6]]).T + y = [1, 2, 3] + trs = ColumnTransformer([("trans", Trans(), [0])]).fit(X, y) + + with pytest.raises( + ValueError, match="is only supported if enable_metadata_routing=True" + ): + getattr(trs, method)([[1]], sample_weight=[1], prop="a") + + +@pytest.mark.usefixtures("enable_slep006") +@pytest.mark.parametrize("method", ["transform", "fit_transform", "fit"]) +def test_metadata_routing_for_column_transformer(method): + """Test that metadata is routed correctly for column transformer.""" + X = np.array([[0, 1, 2], [2, 4, 6]]).T + y = [1, 2, 3] + registry = _Registry() + sample_weight, metadata = [1], "a" + trs = ColumnTransformer( + [ + ( + "trans", + ConsumingTransformer(registry=registry) + .set_fit_request(sample_weight=True, metadata=True) + .set_transform_request(sample_weight=True, metadata=True), + [0], + ) + ] + ) + + if method == "transform": + trs.fit(X, y) + trs.transform(X, sample_weight=sample_weight, metadata=metadata) + else: + getattr(trs, method)(X, y, sample_weight=sample_weight, metadata=metadata) + + assert len(registry) + for _trs in registry: + check_recorded_metadata( + obj=_trs, method=method, sample_weight=sample_weight, metadata=metadata + ) + + +@pytest.mark.usefixtures("enable_slep006") 
+def test_metadata_routing_no_fit_transform(): + """Test metadata routing when the sub-estimator doesn't implement + ``fit_transform``.""" + + class NoFitTransform(BaseEstimator): + def fit(self, X, y=None, sample_weight=None, metadata=None): + assert sample_weight + assert metadata + return self + + def transform(self, X, sample_weight=None, metadata=None): + assert sample_weight + assert metadata + return X + + X = np.array([[0, 1, 2], [2, 4, 6]]).T + y = [1, 2, 3] + _Registry() + sample_weight, metadata = [1], "a" + trs = ColumnTransformer( + [ + ( + "trans", + NoFitTransform() + .set_fit_request(sample_weight=True, metadata=True) + .set_transform_request(sample_weight=True, metadata=True), + [0], + ) + ] + ) + + trs.fit(X, y, sample_weight=sample_weight, metadata=metadata) + trs.fit_transform(X, y, sample_weight=sample_weight, metadata=metadata) + + +@pytest.mark.usefixtures("enable_slep006") +@pytest.mark.parametrize("method", ["transform", "fit_transform", "fit"]) +def test_metadata_routing_error_for_column_transformer(method): + """Test that the right error is raised when metadata is not requested.""" + X = np.array([[0, 1, 2], [2, 4, 6]]).T + y = [1, 2, 3] + sample_weight, metadata = [1], "a" + trs = ColumnTransformer([("trans", ConsumingTransformer(), [0])]) + + error_message = ( + "[sample_weight, metadata] are passed but are not explicitly set as requested" + f" or not for ConsumingTransformer.{method}" + ) + with pytest.raises(ValueError, match=re.escape(error_message)): + if method == "transform": + trs.fit(X, y) + trs.transform(X, sample_weight=sample_weight, metadata=metadata) + else: + getattr(trs, method)(X, y, sample_weight=sample_weight, metadata=metadata) + + +@pytest.mark.usefixtures("enable_slep006") +def test_get_metadata_routing_works_without_fit(): + # Regression test for https://github.com/scikit-learn/scikit-learn/issues/28186 + # Make sure ct.get_metadata_routing() works w/o having called fit. + ct = ColumnTransformer([("trans", ConsumingTransformer(), [0])]) + ct.get_metadata_routing() + + +@pytest.mark.usefixtures("enable_slep006") +def test_remainder_request_always_present(): + # Test that remainder request is always present. + ct = ColumnTransformer( + [("trans", StandardScaler(), [0])], + remainder=ConsumingTransformer() + .set_fit_request(metadata=True) + .set_transform_request(metadata=True), + ) + router = ct.get_metadata_routing() + assert router.consumes("fit", ["metadata"]) == set(["metadata"]) + + +@pytest.mark.usefixtures("enable_slep006") +def test_unused_transformer_request_present(): + # Test that the request of a transformer is always present even when not + # used due to no selected columns. 
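+    # The column selector below is a callable returning an empty list, so the
+    # transformer never receives any columns, yet its metadata request must
+    # still be exposed via ``get_metadata_routing``.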
+ ct = ColumnTransformer( + [ + ( + "trans", + ConsumingTransformer() + .set_fit_request(metadata=True) + .set_transform_request(metadata=True), + lambda X: [], + ) + ] + ) + router = ct.get_metadata_routing() + assert router.consumes("fit", ["metadata"]) == set(["metadata"]) + + +# End of Metadata Routing Tests +# ============================= diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/compose/tests/test_target.py b/llmeval-env/lib/python3.10/site-packages/sklearn/compose/tests/test_target.py new file mode 100644 index 0000000000000000000000000000000000000000..53242b7e0277be30a9ebc1406dd8965e6bbcd96b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/compose/tests/test_target.py @@ -0,0 +1,387 @@ +import numpy as np +import pytest + +from sklearn import datasets +from sklearn.base import BaseEstimator, TransformerMixin, clone +from sklearn.compose import TransformedTargetRegressor +from sklearn.dummy import DummyRegressor +from sklearn.linear_model import LinearRegression, OrthogonalMatchingPursuit +from sklearn.pipeline import Pipeline +from sklearn.preprocessing import FunctionTransformer, StandardScaler +from sklearn.utils._testing import assert_allclose, assert_no_warnings + +friedman = datasets.make_friedman1(random_state=0) + + +def test_transform_target_regressor_error(): + X, y = friedman + # provide a transformer and functions at the same time + regr = TransformedTargetRegressor( + regressor=LinearRegression(), + transformer=StandardScaler(), + func=np.exp, + inverse_func=np.log, + ) + with pytest.raises( + ValueError, + match="'transformer' and functions 'func'/'inverse_func' cannot both be set.", + ): + regr.fit(X, y) + # fit with sample_weight with a regressor which does not support it + sample_weight = np.ones((y.shape[0],)) + regr = TransformedTargetRegressor( + regressor=OrthogonalMatchingPursuit(), transformer=StandardScaler() + ) + with pytest.raises( + TypeError, + match=r"fit\(\) got an unexpected " "keyword argument 'sample_weight'", + ): + regr.fit(X, y, sample_weight=sample_weight) + # func is given but inverse_func is not + regr = TransformedTargetRegressor(func=np.exp) + with pytest.raises( + ValueError, + match="When 'func' is provided, 'inverse_func' must also be provided", + ): + regr.fit(X, y) + + +def test_transform_target_regressor_invertible(): + X, y = friedman + regr = TransformedTargetRegressor( + regressor=LinearRegression(), + func=np.sqrt, + inverse_func=np.log, + check_inverse=True, + ) + with pytest.warns( + UserWarning, + match=( + "The provided functions or" + " transformer are not strictly inverse of each other." 
+ ), + ): + regr.fit(X, y) + regr = TransformedTargetRegressor( + regressor=LinearRegression(), func=np.sqrt, inverse_func=np.log + ) + regr.set_params(check_inverse=False) + assert_no_warnings(regr.fit, X, y) + + +def _check_standard_scaled(y, y_pred): + y_mean = np.mean(y, axis=0) + y_std = np.std(y, axis=0) + assert_allclose((y - y_mean) / y_std, y_pred) + + +def _check_shifted_by_one(y, y_pred): + assert_allclose(y + 1, y_pred) + + +def test_transform_target_regressor_functions(): + X, y = friedman + regr = TransformedTargetRegressor( + regressor=LinearRegression(), func=np.log, inverse_func=np.exp + ) + y_pred = regr.fit(X, y).predict(X) + # check the transformer output + y_tran = regr.transformer_.transform(y.reshape(-1, 1)).squeeze() + assert_allclose(np.log(y), y_tran) + assert_allclose( + y, regr.transformer_.inverse_transform(y_tran.reshape(-1, 1)).squeeze() + ) + assert y.shape == y_pred.shape + assert_allclose(y_pred, regr.inverse_func(regr.regressor_.predict(X))) + # check the regressor output + lr = LinearRegression().fit(X, regr.func(y)) + assert_allclose(regr.regressor_.coef_.ravel(), lr.coef_.ravel()) + + +def test_transform_target_regressor_functions_multioutput(): + X = friedman[0] + y = np.vstack((friedman[1], friedman[1] ** 2 + 1)).T + regr = TransformedTargetRegressor( + regressor=LinearRegression(), func=np.log, inverse_func=np.exp + ) + y_pred = regr.fit(X, y).predict(X) + # check the transformer output + y_tran = regr.transformer_.transform(y) + assert_allclose(np.log(y), y_tran) + assert_allclose(y, regr.transformer_.inverse_transform(y_tran)) + assert y.shape == y_pred.shape + assert_allclose(y_pred, regr.inverse_func(regr.regressor_.predict(X))) + # check the regressor output + lr = LinearRegression().fit(X, regr.func(y)) + assert_allclose(regr.regressor_.coef_.ravel(), lr.coef_.ravel()) + + +@pytest.mark.parametrize( + "X,y", [friedman, (friedman[0], np.vstack((friedman[1], friedman[1] ** 2 + 1)).T)] +) +def test_transform_target_regressor_1d_transformer(X, y): + # All transformer in scikit-learn expect 2D data. FunctionTransformer with + # validate=False lift this constraint without checking that the input is a + # 2D vector. We check the consistency of the data shape using a 1D and 2D y + # array. + transformer = FunctionTransformer( + func=lambda x: x + 1, inverse_func=lambda x: x - 1 + ) + regr = TransformedTargetRegressor( + regressor=LinearRegression(), transformer=transformer + ) + y_pred = regr.fit(X, y).predict(X) + assert y.shape == y_pred.shape + # consistency forward transform + y_tran = regr.transformer_.transform(y) + _check_shifted_by_one(y, y_tran) + assert y.shape == y_pred.shape + # consistency inverse transform + assert_allclose(y, regr.transformer_.inverse_transform(y_tran).squeeze()) + # consistency of the regressor + lr = LinearRegression() + transformer2 = clone(transformer) + lr.fit(X, transformer2.fit_transform(y)) + y_lr_pred = lr.predict(X) + assert_allclose(y_pred, transformer2.inverse_transform(y_lr_pred)) + assert_allclose(regr.regressor_.coef_, lr.coef_) + + +@pytest.mark.parametrize( + "X,y", [friedman, (friedman[0], np.vstack((friedman[1], friedman[1] ** 2 + 1)).T)] +) +def test_transform_target_regressor_2d_transformer(X, y): + # Check consistency with transformer accepting only 2D array and a 1D/2D y + # array. 
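+    # StandardScaler only accepts 2D input, so a 1D target is reshaped to
+    # (-1, 1) before transforming and squeezed back for the comparisons below.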
+ transformer = StandardScaler() + regr = TransformedTargetRegressor( + regressor=LinearRegression(), transformer=transformer + ) + y_pred = regr.fit(X, y).predict(X) + assert y.shape == y_pred.shape + # consistency forward transform + if y.ndim == 1: # create a 2D array and squeeze results + y_tran = regr.transformer_.transform(y.reshape(-1, 1)) + else: + y_tran = regr.transformer_.transform(y) + _check_standard_scaled(y, y_tran.squeeze()) + assert y.shape == y_pred.shape + # consistency inverse transform + assert_allclose(y, regr.transformer_.inverse_transform(y_tran).squeeze()) + # consistency of the regressor + lr = LinearRegression() + transformer2 = clone(transformer) + if y.ndim == 1: # create a 2D array and squeeze results + lr.fit(X, transformer2.fit_transform(y.reshape(-1, 1)).squeeze()) + y_lr_pred = lr.predict(X).reshape(-1, 1) + y_pred2 = transformer2.inverse_transform(y_lr_pred).squeeze() + else: + lr.fit(X, transformer2.fit_transform(y)) + y_lr_pred = lr.predict(X) + y_pred2 = transformer2.inverse_transform(y_lr_pred) + + assert_allclose(y_pred, y_pred2) + assert_allclose(regr.regressor_.coef_, lr.coef_) + + +def test_transform_target_regressor_2d_transformer_multioutput(): + # Check consistency with transformer accepting only 2D array and a 2D y + # array. + X = friedman[0] + y = np.vstack((friedman[1], friedman[1] ** 2 + 1)).T + transformer = StandardScaler() + regr = TransformedTargetRegressor( + regressor=LinearRegression(), transformer=transformer + ) + y_pred = regr.fit(X, y).predict(X) + assert y.shape == y_pred.shape + # consistency forward transform + y_tran = regr.transformer_.transform(y) + _check_standard_scaled(y, y_tran) + assert y.shape == y_pred.shape + # consistency inverse transform + assert_allclose(y, regr.transformer_.inverse_transform(y_tran).squeeze()) + # consistency of the regressor + lr = LinearRegression() + transformer2 = clone(transformer) + lr.fit(X, transformer2.fit_transform(y)) + y_lr_pred = lr.predict(X) + assert_allclose(y_pred, transformer2.inverse_transform(y_lr_pred)) + assert_allclose(regr.regressor_.coef_, lr.coef_) + + +def test_transform_target_regressor_3d_target(): + # Non-regression test for: + # https://github.com/scikit-learn/scikit-learn/issues/18866 + # Check with a 3D target with a transformer that reshapes the target + X = friedman[0] + y = np.tile(friedman[1].reshape(-1, 1, 1), [1, 3, 2]) + + def flatten_data(data): + return data.reshape(data.shape[0], -1) + + def unflatten_data(data): + return data.reshape(data.shape[0], -1, 2) + + transformer = FunctionTransformer(func=flatten_data, inverse_func=unflatten_data) + regr = TransformedTargetRegressor( + regressor=LinearRegression(), transformer=transformer + ) + y_pred = regr.fit(X, y).predict(X) + assert y.shape == y_pred.shape + + +def test_transform_target_regressor_multi_to_single(): + X = friedman[0] + y = np.transpose([friedman[1], (friedman[1] ** 2 + 1)]) + + def func(y): + out = np.sqrt(y[:, 0] ** 2 + y[:, 1] ** 2) + return out[:, np.newaxis] + + def inverse_func(y): + return y + + tt = TransformedTargetRegressor( + func=func, inverse_func=inverse_func, check_inverse=False + ) + tt.fit(X, y) + y_pred_2d_func = tt.predict(X) + assert y_pred_2d_func.shape == (100, 1) + + # force that the function only return a 1D array + def func(y): + return np.sqrt(y[:, 0] ** 2 + y[:, 1] ** 2) + + tt = TransformedTargetRegressor( + func=func, inverse_func=inverse_func, check_inverse=False + ) + tt.fit(X, y) + y_pred_1d_func = tt.predict(X) + assert y_pred_1d_func.shape == (100, 1) + + 
assert_allclose(y_pred_1d_func, y_pred_2d_func)
+
+
+class DummyCheckerArrayTransformer(TransformerMixin, BaseEstimator):
+    def fit(self, X, y=None):
+        assert isinstance(X, np.ndarray)
+        return self
+
+    def transform(self, X):
+        assert isinstance(X, np.ndarray)
+        return X
+
+    def inverse_transform(self, X):
+        assert isinstance(X, np.ndarray)
+        return X
+
+
+class DummyCheckerListRegressor(DummyRegressor):
+    def fit(self, X, y, sample_weight=None):
+        assert isinstance(X, list)
+        return super().fit(X, y, sample_weight)
+
+    def predict(self, X):
+        assert isinstance(X, list)
+        return super().predict(X)
+
+
+def test_transform_target_regressor_ensure_y_array():
+    # check that the target ``y`` passed to the transformer is always a numpy
+    # array. Similarly, if ``X`` is passed as a list, check that the regressor
+    # receives it as is.
+    X, y = friedman
+    tt = TransformedTargetRegressor(
+        transformer=DummyCheckerArrayTransformer(),
+        regressor=DummyCheckerListRegressor(),
+        check_inverse=False,
+    )
+    tt.fit(X.tolist(), y.tolist())
+    tt.predict(X.tolist())
+    with pytest.raises(AssertionError):
+        tt.fit(X, y.tolist())
+    with pytest.raises(AssertionError):
+        tt.predict(X)
+
+
+class DummyTransformer(TransformerMixin, BaseEstimator):
+    """Dummy transformer which counts how many times fit was called."""
+
+    def __init__(self, fit_counter=0):
+        self.fit_counter = fit_counter
+
+    def fit(self, X, y=None):
+        self.fit_counter += 1
+        return self
+
+    def transform(self, X):
+        return X
+
+    def inverse_transform(self, X):
+        return X
+
+
+@pytest.mark.parametrize("check_inverse", [False, True])
+def test_transform_target_regressor_count_fit(check_inverse):
+    # regression test for gh-issue #11618
+    # check that fit is called only a single time on the transformer
+    X, y = friedman
+    ttr = TransformedTargetRegressor(
+        transformer=DummyTransformer(), check_inverse=check_inverse
+    )
+    ttr.fit(X, y)
+    assert ttr.transformer_.fit_counter == 1
+
+
+class DummyRegressorWithExtraFitParams(DummyRegressor):
+    def fit(self, X, y, sample_weight=None, check_input=True):
+        # In the test below we force this to False and make sure it is
+        # actually passed to the regressor.
+        assert not check_input
+        return super().fit(X, y, sample_weight)
+
+
+def test_transform_target_regressor_pass_fit_parameters():
+    X, y = friedman
+    regr = TransformedTargetRegressor(
+        regressor=DummyRegressorWithExtraFitParams(), transformer=DummyTransformer()
+    )
+
+    regr.fit(X, y, check_input=False)
+    assert regr.transformer_.fit_counter == 1
+
+
+def test_transform_target_regressor_route_pipeline():
+    X, y = friedman
+
+    regr = TransformedTargetRegressor(
+        regressor=DummyRegressorWithExtraFitParams(), transformer=DummyTransformer()
+    )
+    estimators = [("normalize", StandardScaler()), ("est", regr)]
+
+    pip = Pipeline(estimators)
+    pip.fit(X, y, **{"est__check_input": False})
+
+    assert regr.transformer_.fit_counter == 1
+
+
+class DummyRegressorWithExtraPredictParams(DummyRegressor):
+    def predict(self, X, check_input=True):
+        # In the test below we make sure that the `check_input` parameter is
+        # passed as False.
+        self.predict_called = True
+        assert not check_input
+        return super().predict(X)
+
+
+def test_transform_target_regressor_pass_extra_predict_parameters():
+    # Checks that predict kwargs are passed to the regressor.
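+    # DummyRegressorWithExtraPredictParams asserts that `check_input` is False and
+    # records the call in `predict_called`, so a successful predict below shows the
+    # kwarg was routed through TransformedTargetRegressor to the underlying regressor.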
+ X, y = friedman + regr = TransformedTargetRegressor( + regressor=DummyRegressorWithExtraPredictParams(), transformer=DummyTransformer() + ) + + regr.fit(X, y) + regr.predict(X, check_input=False) + assert regr.regressor_.predict_called diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/api-v1-jdf-292.json.gz b/llmeval-env/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/api-v1-jdf-292.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..b3c915315eff5a266c715e3f99584b16ec06ea8f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/api-v1-jdf-292.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:255c16f33ed2967fe100cd8011a7e69f789603724b1ec2ecf91dfeb72067c190 +size 306 diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/api-v1-jdf-40981.json.gz b/llmeval-env/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/api-v1-jdf-40981.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..b3c915315eff5a266c715e3f99584b16ec06ea8f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/api-v1-jdf-40981.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:255c16f33ed2967fe100cd8011a7e69f789603724b1ec2ecf91dfeb72067c190 +size 306 diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/api-v1-jdl-dn-australian-l-2-dv-1-s-dact.json.gz b/llmeval-env/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/api-v1-jdl-dn-australian-l-2-dv-1-s-dact.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..9c2f6f263750517c2b3ca25942cc4a426bc72de0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/api-v1-jdl-dn-australian-l-2-dv-1-s-dact.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8ef6025425fdfc5f736555ea385252af5bcbf62383615db82489366d4f96a0a7 +size 327 diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/data-v1-dl-49822.arff.gz b/llmeval-env/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/data-v1-dl-49822.arff.gz new file mode 100644 index 0000000000000000000000000000000000000000..7bdb62f1628f096b9f91eb2e94ffc413bab4696c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/data-v1-dl-49822.arff.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b7ee24adabd4aaed6419b43fe9d3f86d55fcf4bee0f1698ae21d86c2701314e3 +size 2532 diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/api-v1-jd-561.json.gz b/llmeval-env/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/api-v1-jd-561.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..92ba4281fe86b5273792d24afaabb04eef03199d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/api-v1-jd-561.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a1d38fdd601b67bb9c6d16152f53ddf166a0cfcfef4fa86438e899bfe449226c +size 1798 diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/api-v1-jdl-dn-cpu-l-2-dv-1.json.gz 
b/llmeval-env/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/api-v1-jdl-dn-cpu-l-2-dv-1.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..52ae92392967d187709107d1c1bc9709c085b519 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/api-v1-jdl-dn-cpu-l-2-dv-1.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0703b0ae20b9ff75087dc601640ee58f1c2ad6768858ea21a245151da9ba8e4c +size 301 diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/api-v1-jdl-dn-cpu-l-2-s-act-.json.gz b/llmeval-env/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/api-v1-jdl-dn-cpu-l-2-s-act-.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..6bde2de0c6047726f26476a514d27a0d03c7d4b5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/api-v1-jdl-dn-cpu-l-2-s-act-.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:70d4596ad879547863109da8675c2b789d07df66b526d7ebcbce9616c4c9b94c +size 347 diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/api-v1-jdq-561.json.gz b/llmeval-env/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/api-v1-jdq-561.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..911f6823bb1bf0d9de5120e23e902e9a0a39a2bc --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/api-v1-jdq-561.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8743b2d93d2c62a82fb47e1fbc002b97e25adcfb5bf1fcb26b58ad0bed15bd48 +size 1074 diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/__init__.py b/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1e8d96c7cf94bbcba0adf28ea3138823468d2691 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/__init__.py @@ -0,0 +1,21 @@ +""" +The :mod:`sklearn.manifold` module implements data embedding techniques. 
+""" + +from ._isomap import Isomap +from ._locally_linear import LocallyLinearEmbedding, locally_linear_embedding +from ._mds import MDS, smacof +from ._spectral_embedding import SpectralEmbedding, spectral_embedding +from ._t_sne import TSNE, trustworthiness + +__all__ = [ + "locally_linear_embedding", + "LocallyLinearEmbedding", + "Isomap", + "MDS", + "smacof", + "SpectralEmbedding", + "spectral_embedding", + "TSNE", + "trustworthiness", +] diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..db1367dacb78c4597fb245ff0fa4df543124cf18 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/__pycache__/_isomap.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/__pycache__/_isomap.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0f71f1cc456aa49e00676d2e0f20b787b0f8f346 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/__pycache__/_isomap.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/__pycache__/_locally_linear.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/__pycache__/_locally_linear.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..58080233c33b3400e40a5174590937e44461a0c6 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/__pycache__/_locally_linear.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/__pycache__/_mds.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/__pycache__/_mds.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e0d2231ceaefb2aade17699311b7d6602cdb168d Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/__pycache__/_mds.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/__pycache__/_spectral_embedding.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/__pycache__/_spectral_embedding.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..60a97694d614c39284355f6a29f010ab0f1dbc0e Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/__pycache__/_spectral_embedding.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/__pycache__/_t_sne.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/__pycache__/_t_sne.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0a796befc74a5d175c13e8ca001a7670899b3564 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/__pycache__/_t_sne.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/_barnes_hut_tsne.cpython-310-x86_64-linux-gnu.so b/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/_barnes_hut_tsne.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..9397320f0065fb36c810edcee91b79605425af23 Binary files /dev/null and 
b/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/_barnes_hut_tsne.cpython-310-x86_64-linux-gnu.so differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/_isomap.py b/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/_isomap.py new file mode 100644 index 0000000000000000000000000000000000000000..c6e8bfdc4268534cca3409b8469edc09e8138cdb --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/_isomap.py @@ -0,0 +1,438 @@ +"""Isomap for manifold learning""" + +# Author: Jake Vanderplas -- +# License: BSD 3 clause (C) 2011 +import warnings +from numbers import Integral, Real + +import numpy as np +from scipy.sparse import issparse +from scipy.sparse.csgraph import connected_components, shortest_path + +from ..base import ( + BaseEstimator, + ClassNamePrefixFeaturesOutMixin, + TransformerMixin, + _fit_context, +) +from ..decomposition import KernelPCA +from ..metrics.pairwise import _VALID_METRICS +from ..neighbors import NearestNeighbors, kneighbors_graph, radius_neighbors_graph +from ..preprocessing import KernelCenterer +from ..utils._param_validation import Interval, StrOptions +from ..utils.graph import _fix_connected_components +from ..utils.validation import check_is_fitted + + +class Isomap(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator): + """Isomap Embedding. + + Non-linear dimensionality reduction through Isometric Mapping + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_neighbors : int or None, default=5 + Number of neighbors to consider for each point. If `n_neighbors` is an int, + then `radius` must be `None`. + + radius : float or None, default=None + Limiting distance of neighbors to return. If `radius` is a float, + then `n_neighbors` must be set to `None`. + + .. versionadded:: 1.1 + + n_components : int, default=2 + Number of coordinates for the manifold. + + eigen_solver : {'auto', 'arpack', 'dense'}, default='auto' + 'auto' : Attempt to choose the most efficient solver + for the given problem. + + 'arpack' : Use Arnoldi decomposition to find the eigenvalues + and eigenvectors. + + 'dense' : Use a direct solver (i.e. LAPACK) + for the eigenvalue decomposition. + + tol : float, default=0 + Convergence tolerance passed to arpack or lobpcg. + not used if eigen_solver == 'dense'. + + max_iter : int, default=None + Maximum number of iterations for the arpack solver. + not used if eigen_solver == 'dense'. + + path_method : {'auto', 'FW', 'D'}, default='auto' + Method to use in finding shortest path. + + 'auto' : attempt to choose the best algorithm automatically. + + 'FW' : Floyd-Warshall algorithm. + + 'D' : Dijkstra's algorithm. + + neighbors_algorithm : {'auto', 'brute', 'kd_tree', 'ball_tree'}, \ + default='auto' + Algorithm to use for nearest neighbors search, + passed to neighbors.NearestNeighbors instance. + + n_jobs : int or None, default=None + The number of parallel jobs to run. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + metric : str, or callable, default="minkowski" + The metric to use when calculating distance between instances in a + feature array. If metric is a string or callable, it must be one of + the options allowed by :func:`sklearn.metrics.pairwise_distances` for + its metric parameter. + If metric is "precomputed", X is assumed to be a distance matrix and + must be square. X may be a :term:`Glossary `. + + .. 
versionadded:: 0.22 + + p : float, default=2 + Parameter for the Minkowski metric from + sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is + equivalent to using manhattan_distance (l1), and euclidean_distance + (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used. + + .. versionadded:: 0.22 + + metric_params : dict, default=None + Additional keyword arguments for the metric function. + + .. versionadded:: 0.22 + + Attributes + ---------- + embedding_ : array-like, shape (n_samples, n_components) + Stores the embedding vectors. + + kernel_pca_ : object + :class:`~sklearn.decomposition.KernelPCA` object used to implement the + embedding. + + nbrs_ : sklearn.neighbors.NearestNeighbors instance + Stores nearest neighbors instance, including BallTree or KDtree + if applicable. + + dist_matrix_ : array-like, shape (n_samples, n_samples) + Stores the geodesic distance matrix of training data. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + sklearn.decomposition.PCA : Principal component analysis that is a linear + dimensionality reduction method. + sklearn.decomposition.KernelPCA : Non-linear dimensionality reduction using + kernels and PCA. + MDS : Manifold learning using multidimensional scaling. + TSNE : T-distributed Stochastic Neighbor Embedding. + LocallyLinearEmbedding : Manifold learning using Locally Linear Embedding. + SpectralEmbedding : Spectral embedding for non-linear dimensionality. + + References + ---------- + + .. [1] Tenenbaum, J.B.; De Silva, V.; & Langford, J.C. A global geometric + framework for nonlinear dimensionality reduction. 
Science 290 (5500) + + Examples + -------- + >>> from sklearn.datasets import load_digits + >>> from sklearn.manifold import Isomap + >>> X, _ = load_digits(return_X_y=True) + >>> X.shape + (1797, 64) + >>> embedding = Isomap(n_components=2) + >>> X_transformed = embedding.fit_transform(X[:100]) + >>> X_transformed.shape + (100, 2) + """ + + _parameter_constraints: dict = { + "n_neighbors": [Interval(Integral, 1, None, closed="left"), None], + "radius": [Interval(Real, 0, None, closed="both"), None], + "n_components": [Interval(Integral, 1, None, closed="left")], + "eigen_solver": [StrOptions({"auto", "arpack", "dense"})], + "tol": [Interval(Real, 0, None, closed="left")], + "max_iter": [Interval(Integral, 1, None, closed="left"), None], + "path_method": [StrOptions({"auto", "FW", "D"})], + "neighbors_algorithm": [StrOptions({"auto", "brute", "kd_tree", "ball_tree"})], + "n_jobs": [Integral, None], + "p": [Interval(Real, 1, None, closed="left")], + "metric": [StrOptions(set(_VALID_METRICS) | {"precomputed"}), callable], + "metric_params": [dict, None], + } + + def __init__( + self, + *, + n_neighbors=5, + radius=None, + n_components=2, + eigen_solver="auto", + tol=0, + max_iter=None, + path_method="auto", + neighbors_algorithm="auto", + n_jobs=None, + metric="minkowski", + p=2, + metric_params=None, + ): + self.n_neighbors = n_neighbors + self.radius = radius + self.n_components = n_components + self.eigen_solver = eigen_solver + self.tol = tol + self.max_iter = max_iter + self.path_method = path_method + self.neighbors_algorithm = neighbors_algorithm + self.n_jobs = n_jobs + self.metric = metric + self.p = p + self.metric_params = metric_params + + def _fit_transform(self, X): + if self.n_neighbors is not None and self.radius is not None: + raise ValueError( + "Both n_neighbors and radius are provided. Use" + f" Isomap(radius={self.radius}, n_neighbors=None) if intended to use" + " radius-based neighbors" + ) + + self.nbrs_ = NearestNeighbors( + n_neighbors=self.n_neighbors, + radius=self.radius, + algorithm=self.neighbors_algorithm, + metric=self.metric, + p=self.p, + metric_params=self.metric_params, + n_jobs=self.n_jobs, + ) + self.nbrs_.fit(X) + self.n_features_in_ = self.nbrs_.n_features_in_ + if hasattr(self.nbrs_, "feature_names_in_"): + self.feature_names_in_ = self.nbrs_.feature_names_in_ + + self.kernel_pca_ = KernelPCA( + n_components=self.n_components, + kernel="precomputed", + eigen_solver=self.eigen_solver, + tol=self.tol, + max_iter=self.max_iter, + n_jobs=self.n_jobs, + ).set_output(transform="default") + + if self.n_neighbors is not None: + nbg = kneighbors_graph( + self.nbrs_, + self.n_neighbors, + metric=self.metric, + p=self.p, + metric_params=self.metric_params, + mode="distance", + n_jobs=self.n_jobs, + ) + else: + nbg = radius_neighbors_graph( + self.nbrs_, + radius=self.radius, + metric=self.metric, + p=self.p, + metric_params=self.metric_params, + mode="distance", + n_jobs=self.n_jobs, + ) + + # Compute the number of connected components, and connect the different + # components to be able to compute a shortest path between all pairs + # of samples in the graph. + # Similar fix to cluster._agglomerative._fix_connectivity. + n_connected_components, labels = connected_components(nbg) + if n_connected_components > 1: + if self.metric == "precomputed" and issparse(X): + raise RuntimeError( + "The number of connected components of the neighbors graph" + f" is {n_connected_components} > 1. 
The graph cannot be " + "completed with metric='precomputed', and Isomap cannot be" + "fitted. Increase the number of neighbors to avoid this " + "issue, or precompute the full distance matrix instead " + "of passing a sparse neighbors graph." + ) + warnings.warn( + ( + "The number of connected components of the neighbors graph " + f"is {n_connected_components} > 1. Completing the graph to fit" + " Isomap might be slow. Increase the number of neighbors to " + "avoid this issue." + ), + stacklevel=2, + ) + + # use array validated by NearestNeighbors + nbg = _fix_connected_components( + X=self.nbrs_._fit_X, + graph=nbg, + n_connected_components=n_connected_components, + component_labels=labels, + mode="distance", + metric=self.nbrs_.effective_metric_, + **self.nbrs_.effective_metric_params_, + ) + + self.dist_matrix_ = shortest_path(nbg, method=self.path_method, directed=False) + + if self.nbrs_._fit_X.dtype == np.float32: + self.dist_matrix_ = self.dist_matrix_.astype( + self.nbrs_._fit_X.dtype, copy=False + ) + + G = self.dist_matrix_**2 + G *= -0.5 + + self.embedding_ = self.kernel_pca_.fit_transform(G) + self._n_features_out = self.embedding_.shape[1] + + def reconstruction_error(self): + """Compute the reconstruction error for the embedding. + + Returns + ------- + reconstruction_error : float + Reconstruction error. + + Notes + ----- + The cost function of an isomap embedding is + + ``E = frobenius_norm[K(D) - K(D_fit)] / n_samples`` + + Where D is the matrix of distances for the input data X, + D_fit is the matrix of distances for the output embedding X_fit, + and K is the isomap kernel: + + ``K(D) = -0.5 * (I - 1/n_samples) * D^2 * (I - 1/n_samples)`` + """ + G = -0.5 * self.dist_matrix_**2 + G_center = KernelCenterer().fit_transform(G) + evals = self.kernel_pca_.eigenvalues_ + return np.sqrt(np.sum(G_center**2) - np.sum(evals**2)) / G.shape[0] + + @_fit_context( + # Isomap.metric is not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y=None): + """Compute the embedding vectors for data X. + + Parameters + ---------- + X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors} + Sample data, shape = (n_samples, n_features), in the form of a + numpy array, sparse matrix, precomputed tree, or NearestNeighbors + object. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : object + Returns a fitted instance of self. + """ + self._fit_transform(X) + return self + + @_fit_context( + # Isomap.metric is not validated yet + prefer_skip_nested_validation=False + ) + def fit_transform(self, X, y=None): + """Fit the model from data in X and transform X. + + Parameters + ---------- + X : {array-like, sparse matrix, BallTree, KDTree} + Training vector, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + X_new : array-like, shape (n_samples, n_components) + X transformed in the new space. + """ + self._fit_transform(X) + return self.embedding_ + + def transform(self, X): + """Transform X. + + This is implemented by linking the points X into the graph of geodesic + distances of the training data. First the `n_neighbors` nearest + neighbors of X are found in the training data, and from these the + shortest geodesic distances from each point in X to each point in + the training data are computed in order to construct the kernel. 
+ The embedding of X is the projection of this kernel onto the + embedding vectors of the training set. + + Parameters + ---------- + X : {array-like, sparse matrix}, shape (n_queries, n_features) + If neighbors_algorithm='precomputed', X is assumed to be a + distance matrix or a sparse graph of shape + (n_queries, n_samples_fit). + + Returns + ------- + X_new : array-like, shape (n_queries, n_components) + X transformed in the new space. + """ + check_is_fitted(self) + if self.n_neighbors is not None: + distances, indices = self.nbrs_.kneighbors(X, return_distance=True) + else: + distances, indices = self.nbrs_.radius_neighbors(X, return_distance=True) + + # Create the graph of shortest distances from X to + # training data via the nearest neighbors of X. + # This can be done as a single array operation, but it potentially + # takes a lot of memory. To avoid that, use a loop: + + n_samples_fit = self.nbrs_.n_samples_fit_ + n_queries = distances.shape[0] + + if hasattr(X, "dtype") and X.dtype == np.float32: + dtype = np.float32 + else: + dtype = np.float64 + + G_X = np.zeros((n_queries, n_samples_fit), dtype) + for i in range(n_queries): + G_X[i] = np.min(self.dist_matrix_[indices[i]] + distances[i][:, None], 0) + + G_X **= 2 + G_X *= -0.5 + + return self.kernel_pca_.transform(G_X) + + def _more_tags(self): + return {"preserves_dtype": [np.float64, np.float32]} diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/_locally_linear.py b/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/_locally_linear.py new file mode 100644 index 0000000000000000000000000000000000000000..41d0c233b8f764e4e3b4f5cef88006d7a77a160b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/_locally_linear.py @@ -0,0 +1,841 @@ +"""Locally Linear Embedding""" + +# Author: Fabian Pedregosa -- +# Jake Vanderplas -- +# License: BSD 3 clause (C) INRIA 2011 + +from numbers import Integral, Real + +import numpy as np +from scipy.linalg import eigh, qr, solve, svd +from scipy.sparse import csr_matrix, eye +from scipy.sparse.linalg import eigsh + +from ..base import ( + BaseEstimator, + ClassNamePrefixFeaturesOutMixin, + TransformerMixin, + _fit_context, + _UnstableArchMixin, +) +from ..neighbors import NearestNeighbors +from ..utils import check_array, check_random_state +from ..utils._arpack import _init_arpack_v0 +from ..utils._param_validation import Interval, StrOptions +from ..utils.extmath import stable_cumsum +from ..utils.validation import FLOAT_DTYPES, check_is_fitted + + +def barycenter_weights(X, Y, indices, reg=1e-3): + """Compute barycenter weights of X from Y along the first axis + + We estimate the weights to assign to each point in Y[indices] to recover + the point X[i]. The barycenter weights sum to 1. + + Parameters + ---------- + X : array-like, shape (n_samples, n_dim) + + Y : array-like, shape (n_samples, n_dim) + + indices : array-like, shape (n_samples, n_dim) + Indices of the points in Y used to compute the barycenter + + reg : float, default=1e-3 + Amount of regularization to add for the problem to be + well-posed in the case of n_neighbors > n_dim + + Returns + ------- + B : array-like, shape (n_samples, n_neighbors) + + Notes + ----- + See developers note for more information. 
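+
+    Examples
+    --------
+    A minimal sketch on toy data: two neighbors placed symmetrically around
+    the query point receive equal barycenter weights.
+
+    >>> import numpy as np
+    >>> from sklearn.manifold._locally_linear import barycenter_weights
+    >>> X = np.array([[0.0, 0.0]])
+    >>> Y = np.array([[1.0, 0.0], [0.0, 1.0]])
+    >>> indices = np.array([[0, 1]])
+    >>> barycenter_weights(X, Y, indices).round(3)
+    array([[0.5, 0.5]])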
+ """ + X = check_array(X, dtype=FLOAT_DTYPES) + Y = check_array(Y, dtype=FLOAT_DTYPES) + indices = check_array(indices, dtype=int) + + n_samples, n_neighbors = indices.shape + assert X.shape[0] == n_samples + + B = np.empty((n_samples, n_neighbors), dtype=X.dtype) + v = np.ones(n_neighbors, dtype=X.dtype) + + # this might raise a LinalgError if G is singular and has trace + # zero + for i, ind in enumerate(indices): + A = Y[ind] + C = A - X[i] # broadcasting + G = np.dot(C, C.T) + trace = np.trace(G) + if trace > 0: + R = reg * trace + else: + R = reg + G.flat[:: n_neighbors + 1] += R + w = solve(G, v, assume_a="pos") + B[i, :] = w / np.sum(w) + return B + + +def barycenter_kneighbors_graph(X, n_neighbors, reg=1e-3, n_jobs=None): + """Computes the barycenter weighted graph of k-Neighbors for points in X + + Parameters + ---------- + X : {array-like, NearestNeighbors} + Sample data, shape = (n_samples, n_features), in the form of a + numpy array or a NearestNeighbors object. + + n_neighbors : int + Number of neighbors for each sample. + + reg : float, default=1e-3 + Amount of regularization when solving the least-squares + problem. Only relevant if mode='barycenter'. If None, use the + default. + + n_jobs : int or None, default=None + The number of parallel jobs to run for neighbors search. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + Returns + ------- + A : sparse matrix in CSR format, shape = [n_samples, n_samples] + A[i, j] is assigned the weight of edge that connects i to j. + + See Also + -------- + sklearn.neighbors.kneighbors_graph + sklearn.neighbors.radius_neighbors_graph + """ + knn = NearestNeighbors(n_neighbors=n_neighbors + 1, n_jobs=n_jobs).fit(X) + X = knn._fit_X + n_samples = knn.n_samples_fit_ + ind = knn.kneighbors(X, return_distance=False)[:, 1:] + data = barycenter_weights(X, X, ind, reg=reg) + indptr = np.arange(0, n_samples * n_neighbors + 1, n_neighbors) + return csr_matrix((data.ravel(), ind.ravel(), indptr), shape=(n_samples, n_samples)) + + +def null_space( + M, k, k_skip=1, eigen_solver="arpack", tol=1e-6, max_iter=100, random_state=None +): + """ + Find the null space of a matrix M. + + Parameters + ---------- + M : {array, matrix, sparse matrix, LinearOperator} + Input covariance matrix: should be symmetric positive semi-definite + + k : int + Number of eigenvalues/vectors to return + + k_skip : int, default=1 + Number of low eigenvalues to skip. + + eigen_solver : {'auto', 'arpack', 'dense'}, default='arpack' + auto : algorithm will attempt to choose the best method for input data + arpack : use arnoldi iteration in shift-invert mode. + For this method, M may be a dense matrix, sparse matrix, + or general linear operator. + Warning: ARPACK can be unstable for some problems. It is + best to try several random seeds in order to check results. + dense : use standard dense matrix operations for the eigenvalue + decomposition. For this method, M must be an array + or matrix type. This method should be avoided for + large problems. + + tol : float, default=1e-6 + Tolerance for 'arpack' method. + Not used if eigen_solver=='dense'. + + max_iter : int, default=100 + Maximum number of iterations for 'arpack' method. + Not used if eigen_solver=='dense' + + random_state : int, RandomState instance, default=None + Determines the random number generator when ``solver`` == 'arpack'. + Pass an int for reproducible results across multiple function calls. 
+ See :term:`Glossary `. + """ + if eigen_solver == "auto": + if M.shape[0] > 200 and k + k_skip < 10: + eigen_solver = "arpack" + else: + eigen_solver = "dense" + + if eigen_solver == "arpack": + v0 = _init_arpack_v0(M.shape[0], random_state) + try: + eigen_values, eigen_vectors = eigsh( + M, k + k_skip, sigma=0.0, tol=tol, maxiter=max_iter, v0=v0 + ) + except RuntimeError as e: + raise ValueError( + "Error in determining null-space with ARPACK. Error message: " + "'%s'. Note that eigen_solver='arpack' can fail when the " + "weight matrix is singular or otherwise ill-behaved. In that " + "case, eigen_solver='dense' is recommended. See online " + "documentation for more information." % e + ) from e + + return eigen_vectors[:, k_skip:], np.sum(eigen_values[k_skip:]) + elif eigen_solver == "dense": + if hasattr(M, "toarray"): + M = M.toarray() + eigen_values, eigen_vectors = eigh( + M, subset_by_index=(k_skip, k + k_skip - 1), overwrite_a=True + ) + index = np.argsort(np.abs(eigen_values)) + return eigen_vectors[:, index], np.sum(eigen_values) + else: + raise ValueError("Unrecognized eigen_solver '%s'" % eigen_solver) + + +def locally_linear_embedding( + X, + *, + n_neighbors, + n_components, + reg=1e-3, + eigen_solver="auto", + tol=1e-6, + max_iter=100, + method="standard", + hessian_tol=1e-4, + modified_tol=1e-12, + random_state=None, + n_jobs=None, +): + """Perform a Locally Linear Embedding analysis on the data. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : {array-like, NearestNeighbors} + Sample data, shape = (n_samples, n_features), in the form of a + numpy array or a NearestNeighbors object. + + n_neighbors : int + Number of neighbors to consider for each point. + + n_components : int + Number of coordinates for the manifold. + + reg : float, default=1e-3 + Regularization constant, multiplies the trace of the local covariance + matrix of the distances. + + eigen_solver : {'auto', 'arpack', 'dense'}, default='auto' + auto : algorithm will attempt to choose the best method for input data + + arpack : use arnoldi iteration in shift-invert mode. + For this method, M may be a dense matrix, sparse matrix, + or general linear operator. + Warning: ARPACK can be unstable for some problems. It is + best to try several random seeds in order to check results. + + dense : use standard dense matrix operations for the eigenvalue + decomposition. For this method, M must be an array + or matrix type. This method should be avoided for + large problems. + + tol : float, default=1e-6 + Tolerance for 'arpack' method + Not used if eigen_solver=='dense'. + + max_iter : int, default=100 + Maximum number of iterations for the arpack solver. + + method : {'standard', 'hessian', 'modified', 'ltsa'}, default='standard' + standard : use the standard locally linear embedding algorithm. + see reference [1]_ + hessian : use the Hessian eigenmap method. This method requires + n_neighbors > n_components * (1 + (n_components + 1) / 2. + see reference [2]_ + modified : use the modified locally linear embedding algorithm. + see reference [3]_ + ltsa : use local tangent space alignment algorithm + see reference [4]_ + + hessian_tol : float, default=1e-4 + Tolerance for Hessian eigenmapping method. + Only used if method == 'hessian'. + + modified_tol : float, default=1e-12 + Tolerance for modified LLE method. + Only used if method == 'modified'. + + random_state : int, RandomState instance, default=None + Determines the random number generator when ``solver`` == 'arpack'. 
+ Pass an int for reproducible results across multiple function calls. + See :term:`Glossary `. + + n_jobs : int or None, default=None + The number of parallel jobs to run for neighbors search. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + Returns + ------- + Y : array-like, shape [n_samples, n_components] + Embedding vectors. + + squared_error : float + Reconstruction error for the embedding vectors. Equivalent to + ``norm(Y - W Y, 'fro')**2``, where W are the reconstruction weights. + + References + ---------- + + .. [1] Roweis, S. & Saul, L. Nonlinear dimensionality reduction + by locally linear embedding. Science 290:2323 (2000). + .. [2] Donoho, D. & Grimes, C. Hessian eigenmaps: Locally + linear embedding techniques for high-dimensional data. + Proc Natl Acad Sci U S A. 100:5591 (2003). + .. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear + Embedding Using Multiple Weights. + `_ + .. [4] Zhang, Z. & Zha, H. Principal manifolds and nonlinear + dimensionality reduction via tangent space alignment. + Journal of Shanghai Univ. 8:406 (2004) + + Examples + -------- + >>> from sklearn.datasets import load_digits + >>> from sklearn.manifold import locally_linear_embedding + >>> X, _ = load_digits(return_X_y=True) + >>> X.shape + (1797, 64) + >>> embedding, _ = locally_linear_embedding(X[:100],n_neighbors=5, n_components=2) + >>> embedding.shape + (100, 2) + """ + if eigen_solver not in ("auto", "arpack", "dense"): + raise ValueError("unrecognized eigen_solver '%s'" % eigen_solver) + + if method not in ("standard", "hessian", "modified", "ltsa"): + raise ValueError("unrecognized method '%s'" % method) + + nbrs = NearestNeighbors(n_neighbors=n_neighbors + 1, n_jobs=n_jobs) + nbrs.fit(X) + X = nbrs._fit_X + + N, d_in = X.shape + + if n_components > d_in: + raise ValueError( + "output dimension must be less than or equal to input dimension" + ) + if n_neighbors >= N: + raise ValueError( + "Expected n_neighbors <= n_samples, but n_samples = %d, n_neighbors = %d" + % (N, n_neighbors) + ) + + if n_neighbors <= 0: + raise ValueError("n_neighbors must be positive") + + M_sparse = eigen_solver != "dense" + + if method == "standard": + W = barycenter_kneighbors_graph( + nbrs, n_neighbors=n_neighbors, reg=reg, n_jobs=n_jobs + ) + + # we'll compute M = (I-W)'(I-W) + # depending on the solver, we'll do this differently + if M_sparse: + M = eye(*W.shape, format=W.format) - W + M = (M.T * M).tocsr() + else: + M = (W.T * W - W.T - W).toarray() + M.flat[:: M.shape[0] + 1] += 1 # W = W - I = W - I + + elif method == "hessian": + dp = n_components * (n_components + 1) // 2 + + if n_neighbors <= n_components + dp: + raise ValueError( + "for method='hessian', n_neighbors must be " + "greater than " + "[n_components * (n_components + 3) / 2]" + ) + + neighbors = nbrs.kneighbors( + X, n_neighbors=n_neighbors + 1, return_distance=False + ) + neighbors = neighbors[:, 1:] + + Yi = np.empty((n_neighbors, 1 + n_components + dp), dtype=np.float64) + Yi[:, 0] = 1 + + M = np.zeros((N, N), dtype=np.float64) + + use_svd = n_neighbors > d_in + + for i in range(N): + Gi = X[neighbors[i]] + Gi -= Gi.mean(0) + + # build Hessian estimator + if use_svd: + U = svd(Gi, full_matrices=0)[0] + else: + Ci = np.dot(Gi, Gi.T) + U = eigh(Ci)[1][:, ::-1] + + Yi[:, 1 : 1 + n_components] = U[:, :n_components] + + j = 1 + n_components + for k in range(n_components): + Yi[:, j : j + n_components - k] = U[:, k : k + 1] * U[:, 
k:n_components] + j += n_components - k + + Q, R = qr(Yi) + + w = Q[:, n_components + 1 :] + S = w.sum(0) + + S[np.where(abs(S) < hessian_tol)] = 1 + w /= S + + nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i]) + M[nbrs_x, nbrs_y] += np.dot(w, w.T) + + if M_sparse: + M = csr_matrix(M) + + elif method == "modified": + if n_neighbors < n_components: + raise ValueError("modified LLE requires n_neighbors >= n_components") + + neighbors = nbrs.kneighbors( + X, n_neighbors=n_neighbors + 1, return_distance=False + ) + neighbors = neighbors[:, 1:] + + # find the eigenvectors and eigenvalues of each local covariance + # matrix. We want V[i] to be a [n_neighbors x n_neighbors] matrix, + # where the columns are eigenvectors + V = np.zeros((N, n_neighbors, n_neighbors)) + nev = min(d_in, n_neighbors) + evals = np.zeros([N, nev]) + + # choose the most efficient way to find the eigenvectors + use_svd = n_neighbors > d_in + + if use_svd: + for i in range(N): + X_nbrs = X[neighbors[i]] - X[i] + V[i], evals[i], _ = svd(X_nbrs, full_matrices=True) + evals **= 2 + else: + for i in range(N): + X_nbrs = X[neighbors[i]] - X[i] + C_nbrs = np.dot(X_nbrs, X_nbrs.T) + evi, vi = eigh(C_nbrs) + evals[i] = evi[::-1] + V[i] = vi[:, ::-1] + + # find regularized weights: this is like normal LLE. + # because we've already computed the SVD of each covariance matrix, + # it's faster to use this rather than np.linalg.solve + reg = 1e-3 * evals.sum(1) + + tmp = np.dot(V.transpose(0, 2, 1), np.ones(n_neighbors)) + tmp[:, :nev] /= evals + reg[:, None] + tmp[:, nev:] /= reg[:, None] + + w_reg = np.zeros((N, n_neighbors)) + for i in range(N): + w_reg[i] = np.dot(V[i], tmp[i]) + w_reg /= w_reg.sum(1)[:, None] + + # calculate eta: the median of the ratio of small to large eigenvalues + # across the points. This is used to determine s_i, below + rho = evals[:, n_components:].sum(1) / evals[:, :n_components].sum(1) + eta = np.median(rho) + + # find s_i, the size of the "almost null space" for each point: + # this is the size of the largest set of eigenvalues + # such that Sum[v; v in set]/Sum[v; v not in set] < eta + s_range = np.zeros(N, dtype=int) + evals_cumsum = stable_cumsum(evals, 1) + eta_range = evals_cumsum[:, -1:] / evals_cumsum[:, :-1] - 1 + for i in range(N): + s_range[i] = np.searchsorted(eta_range[i, ::-1], eta) + s_range += n_neighbors - nev # number of zero eigenvalues + + # Now calculate M. 
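+        # M plays the role of (I - W)' (I - W) from standard LLE, assembled here
+        # sample by sample from the modified weight matrices Wi computed below.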
+ # This is the [N x N] matrix whose null space is the desired embedding + M = np.zeros((N, N), dtype=np.float64) + for i in range(N): + s_i = s_range[i] + + # select bottom s_i eigenvectors and calculate alpha + Vi = V[i, :, n_neighbors - s_i :] + alpha_i = np.linalg.norm(Vi.sum(0)) / np.sqrt(s_i) + + # compute Householder matrix which satisfies + # Hi*Vi.T*ones(n_neighbors) = alpha_i*ones(s) + # using prescription from paper + h = np.full(s_i, alpha_i) - np.dot(Vi.T, np.ones(n_neighbors)) + + norm_h = np.linalg.norm(h) + if norm_h < modified_tol: + h *= 0 + else: + h /= norm_h + + # Householder matrix is + # >> Hi = np.identity(s_i) - 2*np.outer(h,h) + # Then the weight matrix is + # >> Wi = np.dot(Vi,Hi) + (1-alpha_i) * w_reg[i,:,None] + # We do this much more efficiently: + Wi = Vi - 2 * np.outer(np.dot(Vi, h), h) + (1 - alpha_i) * w_reg[i, :, None] + + # Update M as follows: + # >> W_hat = np.zeros( (N,s_i) ) + # >> W_hat[neighbors[i],:] = Wi + # >> W_hat[i] -= 1 + # >> M += np.dot(W_hat,W_hat.T) + # We can do this much more efficiently: + nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i]) + M[nbrs_x, nbrs_y] += np.dot(Wi, Wi.T) + Wi_sum1 = Wi.sum(1) + M[i, neighbors[i]] -= Wi_sum1 + M[neighbors[i], i] -= Wi_sum1 + M[i, i] += s_i + + if M_sparse: + M = csr_matrix(M) + + elif method == "ltsa": + neighbors = nbrs.kneighbors( + X, n_neighbors=n_neighbors + 1, return_distance=False + ) + neighbors = neighbors[:, 1:] + + M = np.zeros((N, N)) + + use_svd = n_neighbors > d_in + + for i in range(N): + Xi = X[neighbors[i]] + Xi -= Xi.mean(0) + + # compute n_components largest eigenvalues of Xi * Xi^T + if use_svd: + v = svd(Xi, full_matrices=True)[0] + else: + Ci = np.dot(Xi, Xi.T) + v = eigh(Ci)[1][:, ::-1] + + Gi = np.zeros((n_neighbors, n_components + 1)) + Gi[:, 1:] = v[:, :n_components] + Gi[:, 0] = 1.0 / np.sqrt(n_neighbors) + + GiGiT = np.dot(Gi, Gi.T) + + nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i]) + M[nbrs_x, nbrs_y] -= GiGiT + M[neighbors[i], neighbors[i]] += 1 + + return null_space( + M, + n_components, + k_skip=1, + eigen_solver=eigen_solver, + tol=tol, + max_iter=max_iter, + random_state=random_state, + ) + + +class LocallyLinearEmbedding( + ClassNamePrefixFeaturesOutMixin, + TransformerMixin, + _UnstableArchMixin, + BaseEstimator, +): + """Locally Linear Embedding. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_neighbors : int, default=5 + Number of neighbors to consider for each point. + + n_components : int, default=2 + Number of coordinates for the manifold. + + reg : float, default=1e-3 + Regularization constant, multiplies the trace of the local covariance + matrix of the distances. + + eigen_solver : {'auto', 'arpack', 'dense'}, default='auto' + The solver used to compute the eigenvectors. The available options are: + + - `'auto'` : algorithm will attempt to choose the best method for input + data. + - `'arpack'` : use arnoldi iteration in shift-invert mode. For this + method, M may be a dense matrix, sparse matrix, or general linear + operator. + - `'dense'` : use standard dense matrix operations for the eigenvalue + decomposition. For this method, M must be an array or matrix type. + This method should be avoided for large problems. + + .. warning:: + ARPACK can be unstable for some problems. It is best to try several + random seeds in order to check results. + + tol : float, default=1e-6 + Tolerance for 'arpack' method + Not used if eigen_solver=='dense'. 
+ + max_iter : int, default=100 + Maximum number of iterations for the arpack solver. + Not used if eigen_solver=='dense'. + + method : {'standard', 'hessian', 'modified', 'ltsa'}, default='standard' + - `standard`: use the standard locally linear embedding algorithm. see + reference [1]_ + - `hessian`: use the Hessian eigenmap method. This method requires + ``n_neighbors > n_components * (1 + (n_components + 1) / 2``. see + reference [2]_ + - `modified`: use the modified locally linear embedding algorithm. + see reference [3]_ + - `ltsa`: use local tangent space alignment algorithm. see + reference [4]_ + + hessian_tol : float, default=1e-4 + Tolerance for Hessian eigenmapping method. + Only used if ``method == 'hessian'``. + + modified_tol : float, default=1e-12 + Tolerance for modified LLE method. + Only used if ``method == 'modified'``. + + neighbors_algorithm : {'auto', 'brute', 'kd_tree', 'ball_tree'}, \ + default='auto' + Algorithm to use for nearest neighbors search, passed to + :class:`~sklearn.neighbors.NearestNeighbors` instance. + + random_state : int, RandomState instance, default=None + Determines the random number generator when + ``eigen_solver`` == 'arpack'. Pass an int for reproducible results + across multiple function calls. See :term:`Glossary `. + + n_jobs : int or None, default=None + The number of parallel jobs to run. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + Attributes + ---------- + embedding_ : array-like, shape [n_samples, n_components] + Stores the embedding vectors + + reconstruction_error_ : float + Reconstruction error associated with `embedding_` + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + nbrs_ : NearestNeighbors object + Stores nearest neighbors instance, including BallTree or KDtree + if applicable. + + See Also + -------- + SpectralEmbedding : Spectral embedding for non-linear dimensionality + reduction. + TSNE : Distributed Stochastic Neighbor Embedding. + + References + ---------- + + .. [1] Roweis, S. & Saul, L. Nonlinear dimensionality reduction + by locally linear embedding. Science 290:2323 (2000). + .. [2] Donoho, D. & Grimes, C. Hessian eigenmaps: Locally + linear embedding techniques for high-dimensional data. + Proc Natl Acad Sci U S A. 100:5591 (2003). + .. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear + Embedding Using Multiple Weights. + `_ + .. [4] Zhang, Z. & Zha, H. Principal manifolds and nonlinear + dimensionality reduction via tangent space alignment. + Journal of Shanghai Univ. 
8:406 (2004) + + Examples + -------- + >>> from sklearn.datasets import load_digits + >>> from sklearn.manifold import LocallyLinearEmbedding + >>> X, _ = load_digits(return_X_y=True) + >>> X.shape + (1797, 64) + >>> embedding = LocallyLinearEmbedding(n_components=2) + >>> X_transformed = embedding.fit_transform(X[:100]) + >>> X_transformed.shape + (100, 2) + """ + + _parameter_constraints: dict = { + "n_neighbors": [Interval(Integral, 1, None, closed="left")], + "n_components": [Interval(Integral, 1, None, closed="left")], + "reg": [Interval(Real, 0, None, closed="left")], + "eigen_solver": [StrOptions({"auto", "arpack", "dense"})], + "tol": [Interval(Real, 0, None, closed="left")], + "max_iter": [Interval(Integral, 1, None, closed="left")], + "method": [StrOptions({"standard", "hessian", "modified", "ltsa"})], + "hessian_tol": [Interval(Real, 0, None, closed="left")], + "modified_tol": [Interval(Real, 0, None, closed="left")], + "neighbors_algorithm": [StrOptions({"auto", "brute", "kd_tree", "ball_tree"})], + "random_state": ["random_state"], + "n_jobs": [None, Integral], + } + + def __init__( + self, + *, + n_neighbors=5, + n_components=2, + reg=1e-3, + eigen_solver="auto", + tol=1e-6, + max_iter=100, + method="standard", + hessian_tol=1e-4, + modified_tol=1e-12, + neighbors_algorithm="auto", + random_state=None, + n_jobs=None, + ): + self.n_neighbors = n_neighbors + self.n_components = n_components + self.reg = reg + self.eigen_solver = eigen_solver + self.tol = tol + self.max_iter = max_iter + self.method = method + self.hessian_tol = hessian_tol + self.modified_tol = modified_tol + self.random_state = random_state + self.neighbors_algorithm = neighbors_algorithm + self.n_jobs = n_jobs + + def _fit_transform(self, X): + self.nbrs_ = NearestNeighbors( + n_neighbors=self.n_neighbors, + algorithm=self.neighbors_algorithm, + n_jobs=self.n_jobs, + ) + + random_state = check_random_state(self.random_state) + X = self._validate_data(X, dtype=float) + self.nbrs_.fit(X) + self.embedding_, self.reconstruction_error_ = locally_linear_embedding( + X=self.nbrs_, + n_neighbors=self.n_neighbors, + n_components=self.n_components, + eigen_solver=self.eigen_solver, + tol=self.tol, + max_iter=self.max_iter, + method=self.method, + hessian_tol=self.hessian_tol, + modified_tol=self.modified_tol, + random_state=random_state, + reg=self.reg, + n_jobs=self.n_jobs, + ) + self._n_features_out = self.embedding_.shape[1] + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Compute the embedding vectors for data X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training set. + + y : Ignored + Not used, present here for API consistency by convention. + + Returns + ------- + self : object + Fitted `LocallyLinearEmbedding` class instance. + """ + self._fit_transform(X) + return self + + @_fit_context(prefer_skip_nested_validation=True) + def fit_transform(self, X, y=None): + """Compute the embedding vectors for data X and transform X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training set. + + y : Ignored + Not used, present here for API consistency by convention. + + Returns + ------- + X_new : array-like, shape (n_samples, n_components) + Returns the instance itself. + """ + self._fit_transform(X) + return self.embedding_ + + def transform(self, X): + """ + Transform new points into embedding space. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training set. 
+ + Returns + ------- + X_new : ndarray of shape (n_samples, n_components) + Returns the instance itself. + + Notes + ----- + Because of scaling performed by this method, it is discouraged to use + it together with methods that are not scale-invariant (like SVMs). + """ + check_is_fitted(self) + + X = self._validate_data(X, reset=False) + ind = self.nbrs_.kneighbors( + X, n_neighbors=self.n_neighbors, return_distance=False + ) + weights = barycenter_weights(X, self.nbrs_._fit_X, ind, reg=self.reg) + X_new = np.empty((X.shape[0], self.n_components)) + for i in range(X.shape[0]): + X_new[i] = np.dot(self.embedding_[ind[i]].T, weights[i]) + return X_new diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/_mds.py b/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/_mds.py new file mode 100644 index 0000000000000000000000000000000000000000..760336da52e9f2487d9a5d86bb69972b0c964cfd --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/_mds.py @@ -0,0 +1,653 @@ +""" +Multi-dimensional Scaling (MDS). +""" + +# author: Nelle Varoquaux +# License: BSD + +import warnings +from numbers import Integral, Real + +import numpy as np +from joblib import effective_n_jobs + +from ..base import BaseEstimator, _fit_context +from ..isotonic import IsotonicRegression +from ..metrics import euclidean_distances +from ..utils import check_array, check_random_state, check_symmetric +from ..utils._param_validation import Interval, StrOptions, validate_params +from ..utils.parallel import Parallel, delayed + + +def _smacof_single( + dissimilarities, + metric=True, + n_components=2, + init=None, + max_iter=300, + verbose=0, + eps=1e-3, + random_state=None, + normalized_stress=False, +): + """Computes multidimensional scaling using SMACOF algorithm. + + Parameters + ---------- + dissimilarities : ndarray of shape (n_samples, n_samples) + Pairwise dissimilarities between the points. Must be symmetric. + + metric : bool, default=True + Compute metric or nonmetric SMACOF algorithm. + When ``False`` (i.e. non-metric MDS), dissimilarities with 0 are considered as + missing values. + + n_components : int, default=2 + Number of dimensions in which to immerse the dissimilarities. If an + ``init`` array is provided, this option is overridden and the shape of + ``init`` is used to determine the dimensionality of the embedding + space. + + init : ndarray of shape (n_samples, n_components), default=None + Starting configuration of the embedding to initialize the algorithm. By + default, the algorithm is initialized with a randomly chosen array. + + max_iter : int, default=300 + Maximum number of iterations of the SMACOF algorithm for a single run. + + verbose : int, default=0 + Level of verbosity. + + eps : float, default=1e-3 + Relative tolerance with respect to stress at which to declare + convergence. The value of `eps` should be tuned separately depending + on whether or not `normalized_stress` is being used. + + random_state : int, RandomState instance or None, default=None + Determines the random number generator used to initialize the centers. + Pass an int for reproducible results across multiple function calls. + See :term:`Glossary `. + + normalized_stress : bool, default=False + Whether use and return normed stress value (Stress-1) instead of raw + stress calculated by default. Only supported in non-metric MDS. The + caller must ensure that if `normalized_stress=True` then `metric=False` + + .. 
versionadded:: 1.2 + + Returns + ------- + X : ndarray of shape (n_samples, n_components) + Coordinates of the points in a ``n_components``-space. + + stress : float + The final value of the stress (sum of squared distance of the + disparities and the distances for all constrained points). + If `normalized_stress=True`, and `metric=False` returns Stress-1. + A value of 0 indicates "perfect" fit, 0.025 excellent, 0.05 good, + 0.1 fair, and 0.2 poor [1]_. + + n_iter : int + The number of iterations corresponding to the best stress. + + References + ---------- + .. [1] "Nonmetric multidimensional scaling: a numerical method" Kruskal, J. + Psychometrika, 29 (1964) + + .. [2] "Multidimensional scaling by optimizing goodness of fit to a nonmetric + hypothesis" Kruskal, J. Psychometrika, 29, (1964) + + .. [3] "Modern Multidimensional Scaling - Theory and Applications" Borg, I.; + Groenen P. Springer Series in Statistics (1997) + """ + dissimilarities = check_symmetric(dissimilarities, raise_exception=True) + + n_samples = dissimilarities.shape[0] + random_state = check_random_state(random_state) + + sim_flat = ((1 - np.tri(n_samples)) * dissimilarities).ravel() + sim_flat_w = sim_flat[sim_flat != 0] + if init is None: + # Randomly choose initial configuration + X = random_state.uniform(size=n_samples * n_components) + X = X.reshape((n_samples, n_components)) + else: + # overrides the parameter p + n_components = init.shape[1] + if n_samples != init.shape[0]: + raise ValueError( + "init matrix should be of shape (%d, %d)" % (n_samples, n_components) + ) + X = init + + old_stress = None + ir = IsotonicRegression() + for it in range(max_iter): + # Compute distance and monotonic regression + dis = euclidean_distances(X) + + if metric: + disparities = dissimilarities + else: + dis_flat = dis.ravel() + # dissimilarities with 0 are considered as missing values + dis_flat_w = dis_flat[sim_flat != 0] + + # Compute the disparities using a monotonic regression + disparities_flat = ir.fit_transform(sim_flat_w, dis_flat_w) + disparities = dis_flat.copy() + disparities[sim_flat != 0] = disparities_flat + disparities = disparities.reshape((n_samples, n_samples)) + disparities *= np.sqrt( + (n_samples * (n_samples - 1) / 2) / (disparities**2).sum() + ) + + # Compute stress + stress = ((dis.ravel() - disparities.ravel()) ** 2).sum() / 2 + if normalized_stress: + stress = np.sqrt(stress / ((disparities.ravel() ** 2).sum() / 2)) + # Update X using the Guttman transform + dis[dis == 0] = 1e-5 + ratio = disparities / dis + B = -ratio + B[np.arange(len(B)), np.arange(len(B))] += ratio.sum(axis=1) + X = 1.0 / n_samples * np.dot(B, X) + + dis = np.sqrt((X**2).sum(axis=1)).sum() + if verbose >= 2: + print("it: %d, stress %s" % (it, stress)) + if old_stress is not None: + if (old_stress - stress / dis) < eps: + if verbose: + print("breaking at iteration %d with stress %s" % (it, stress)) + break + old_stress = stress / dis + + return X, stress, it + 1 + + +@validate_params( + { + "dissimilarities": ["array-like"], + "metric": ["boolean"], + "n_components": [Interval(Integral, 1, None, closed="left")], + "init": ["array-like", None], + "n_init": [Interval(Integral, 1, None, closed="left")], + "n_jobs": [Integral, None], + "max_iter": [Interval(Integral, 1, None, closed="left")], + "verbose": ["verbose"], + "eps": [Interval(Real, 0, None, closed="left")], + "random_state": ["random_state"], + "return_n_iter": ["boolean"], + "normalized_stress": ["boolean", StrOptions({"auto"})], + }, + prefer_skip_nested_validation=True, +) 
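+# The `validate_params` decorator applied above checks the arguments of the public
+# `smacof` function against this constraint dict at call time, before the body runs.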
+def smacof( + dissimilarities, + *, + metric=True, + n_components=2, + init=None, + n_init=8, + n_jobs=None, + max_iter=300, + verbose=0, + eps=1e-3, + random_state=None, + return_n_iter=False, + normalized_stress="auto", +): + """Compute multidimensional scaling using the SMACOF algorithm. + + The SMACOF (Scaling by MAjorizing a COmplicated Function) algorithm is a + multidimensional scaling algorithm which minimizes an objective function + (the *stress*) using a majorization technique. Stress majorization, also + known as the Guttman Transform, guarantees a monotone convergence of + stress, and is more powerful than traditional techniques such as gradient + descent. + + The SMACOF algorithm for metric MDS can be summarized by the following + steps: + + 1. Set an initial start configuration, randomly or not. + 2. Compute the stress + 3. Compute the Guttman Transform + 4. Iterate 2 and 3 until convergence. + + The nonmetric algorithm adds a monotonic regression step before computing + the stress. + + Parameters + ---------- + dissimilarities : array-like of shape (n_samples, n_samples) + Pairwise dissimilarities between the points. Must be symmetric. + + metric : bool, default=True + Compute metric or nonmetric SMACOF algorithm. + When ``False`` (i.e. non-metric MDS), dissimilarities with 0 are considered as + missing values. + + n_components : int, default=2 + Number of dimensions in which to immerse the dissimilarities. If an + ``init`` array is provided, this option is overridden and the shape of + ``init`` is used to determine the dimensionality of the embedding + space. + + init : array-like of shape (n_samples, n_components), default=None + Starting configuration of the embedding to initialize the algorithm. By + default, the algorithm is initialized with a randomly chosen array. + + n_init : int, default=8 + Number of times the SMACOF algorithm will be run with different + initializations. The final results will be the best output of the runs, + determined by the run with the smallest final stress. If ``init`` is + provided, this option is overridden and a single run is performed. + + n_jobs : int, default=None + The number of jobs to use for the computation. If multiple + initializations are used (``n_init``), each run of the algorithm is + computed in parallel. + + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + max_iter : int, default=300 + Maximum number of iterations of the SMACOF algorithm for a single run. + + verbose : int, default=0 + Level of verbosity. + + eps : float, default=1e-3 + Relative tolerance with respect to stress at which to declare + convergence. The value of `eps` should be tuned separately depending + on whether or not `normalized_stress` is being used. + + random_state : int, RandomState instance or None, default=None + Determines the random number generator used to initialize the centers. + Pass an int for reproducible results across multiple function calls. + See :term:`Glossary `. + + return_n_iter : bool, default=False + Whether or not to return the number of iterations. + + normalized_stress : bool or "auto" default="auto" + Whether use and return normed stress value (Stress-1) instead of raw + stress calculated by default. Only supported in non-metric MDS. + + .. versionadded:: 1.2 + + .. versionchanged:: 1.4 + The default value changed from `False` to `"auto"` in version 1.4. 
+ + Returns + ------- + X : ndarray of shape (n_samples, n_components) + Coordinates of the points in a ``n_components``-space. + + stress : float + The final value of the stress (sum of squared distance of the + disparities and the distances for all constrained points). + If `normalized_stress=True`, and `metric=False` returns Stress-1. + A value of 0 indicates "perfect" fit, 0.025 excellent, 0.05 good, + 0.1 fair, and 0.2 poor [1]_. + + n_iter : int + The number of iterations corresponding to the best stress. Returned + only if ``return_n_iter`` is set to ``True``. + + References + ---------- + .. [1] "Nonmetric multidimensional scaling: a numerical method" Kruskal, J. + Psychometrika, 29 (1964) + + .. [2] "Multidimensional scaling by optimizing goodness of fit to a nonmetric + hypothesis" Kruskal, J. Psychometrika, 29, (1964) + + .. [3] "Modern Multidimensional Scaling - Theory and Applications" Borg, I.; + Groenen P. Springer Series in Statistics (1997) + + Examples + -------- + >>> import numpy as np + >>> from sklearn.manifold import smacof + >>> from sklearn.metrics import euclidean_distances + >>> X = np.array([[0, 1, 2], [1, 0, 3],[2, 3, 0]]) + >>> dissimilarities = euclidean_distances(X) + >>> mds_result, stress = smacof(dissimilarities, n_components=2, random_state=42) + >>> mds_result + array([[ 0.05... -1.07... ], + [ 1.74..., -0.75...], + [-1.79..., 1.83...]]) + >>> stress + 0.0012... + """ + + dissimilarities = check_array(dissimilarities) + random_state = check_random_state(random_state) + + if normalized_stress == "auto": + normalized_stress = not metric + + if normalized_stress and metric: + raise ValueError( + "Normalized stress is not supported for metric MDS. Either set" + " `normalized_stress=False` or use `metric=False`." + ) + if hasattr(init, "__array__"): + init = np.asarray(init).copy() + if not n_init == 1: + warnings.warn( + "Explicit initial positions passed: " + "performing only one init of the MDS instead of %d" % n_init + ) + n_init = 1 + + best_pos, best_stress = None, None + + if effective_n_jobs(n_jobs) == 1: + for it in range(n_init): + pos, stress, n_iter_ = _smacof_single( + dissimilarities, + metric=metric, + n_components=n_components, + init=init, + max_iter=max_iter, + verbose=verbose, + eps=eps, + random_state=random_state, + normalized_stress=normalized_stress, + ) + if best_stress is None or stress < best_stress: + best_stress = stress + best_pos = pos.copy() + best_iter = n_iter_ + else: + seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init) + results = Parallel(n_jobs=n_jobs, verbose=max(verbose - 1, 0))( + delayed(_smacof_single)( + dissimilarities, + metric=metric, + n_components=n_components, + init=init, + max_iter=max_iter, + verbose=verbose, + eps=eps, + random_state=seed, + normalized_stress=normalized_stress, + ) + for seed in seeds + ) + positions, stress, n_iters = zip(*results) + best = np.argmin(stress) + best_stress = stress[best] + best_pos = positions[best] + best_iter = n_iters[best] + + if return_n_iter: + return best_pos, best_stress, best_iter + else: + return best_pos, best_stress + + +class MDS(BaseEstimator): + """Multidimensional scaling. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_components : int, default=2 + Number of dimensions in which to immerse the dissimilarities. + + metric : bool, default=True + If ``True``, perform metric MDS; otherwise, perform nonmetric MDS. + When ``False`` (i.e. non-metric MDS), dissimilarities with 0 are considered as + missing values. 
+ + n_init : int, default=4 + Number of times the SMACOF algorithm will be run with different + initializations. The final results will be the best output of the runs, + determined by the run with the smallest final stress. + + max_iter : int, default=300 + Maximum number of iterations of the SMACOF algorithm for a single run. + + verbose : int, default=0 + Level of verbosity. + + eps : float, default=1e-3 + Relative tolerance with respect to stress at which to declare + convergence. The value of `eps` should be tuned separately depending + on whether or not `normalized_stress` is being used. + + n_jobs : int, default=None + The number of jobs to use for the computation. If multiple + initializations are used (``n_init``), each run of the algorithm is + computed in parallel. + + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + random_state : int, RandomState instance or None, default=None + Determines the random number generator used to initialize the centers. + Pass an int for reproducible results across multiple function calls. + See :term:`Glossary `. + + dissimilarity : {'euclidean', 'precomputed'}, default='euclidean' + Dissimilarity measure to use: + + - 'euclidean': + Pairwise Euclidean distances between points in the dataset. + + - 'precomputed': + Pre-computed dissimilarities are passed directly to ``fit`` and + ``fit_transform``. + + normalized_stress : bool or "auto" default="auto" + Whether use and return normed stress value (Stress-1) instead of raw + stress calculated by default. Only supported in non-metric MDS. + + .. versionadded:: 1.2 + + .. versionchanged:: 1.4 + The default value changed from `False` to `"auto"` in version 1.4. + + Attributes + ---------- + embedding_ : ndarray of shape (n_samples, n_components) + Stores the position of the dataset in the embedding space. + + stress_ : float + The final value of the stress (sum of squared distance of the + disparities and the distances for all constrained points). + If `normalized_stress=True`, and `metric=False` returns Stress-1. + A value of 0 indicates "perfect" fit, 0.025 excellent, 0.05 good, + 0.1 fair, and 0.2 poor [1]_. + + dissimilarity_matrix_ : ndarray of shape (n_samples, n_samples) + Pairwise dissimilarities between the points. Symmetric matrix that: + + - either uses a custom dissimilarity matrix by setting `dissimilarity` + to 'precomputed'; + - or constructs a dissimilarity matrix from data using + Euclidean distances. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_iter_ : int + The number of iterations corresponding to the best stress. + + See Also + -------- + sklearn.decomposition.PCA : Principal component analysis that is a linear + dimensionality reduction method. + sklearn.decomposition.KernelPCA : Non-linear dimensionality reduction using + kernels and PCA. + TSNE : T-distributed Stochastic Neighbor Embedding. + Isomap : Manifold learning based on Isometric Mapping. + LocallyLinearEmbedding : Manifold learning using Locally Linear Embedding. + SpectralEmbedding : Spectral embedding for non-linear dimensionality. + + References + ---------- + .. [1] "Nonmetric multidimensional scaling: a numerical method" Kruskal, J. 
+ Psychometrika, 29 (1964) + + .. [2] "Multidimensional scaling by optimizing goodness of fit to a nonmetric + hypothesis" Kruskal, J. Psychometrika, 29, (1964) + + .. [3] "Modern Multidimensional Scaling - Theory and Applications" Borg, I.; + Groenen P. Springer Series in Statistics (1997) + + Examples + -------- + >>> from sklearn.datasets import load_digits + >>> from sklearn.manifold import MDS + >>> X, _ = load_digits(return_X_y=True) + >>> X.shape + (1797, 64) + >>> embedding = MDS(n_components=2, normalized_stress='auto') + >>> X_transformed = embedding.fit_transform(X[:100]) + >>> X_transformed.shape + (100, 2) + + For a more detailed example of usage, see: + :ref:`sphx_glr_auto_examples_manifold_plot_mds.py` + """ + + _parameter_constraints: dict = { + "n_components": [Interval(Integral, 1, None, closed="left")], + "metric": ["boolean"], + "n_init": [Interval(Integral, 1, None, closed="left")], + "max_iter": [Interval(Integral, 1, None, closed="left")], + "verbose": ["verbose"], + "eps": [Interval(Real, 0.0, None, closed="left")], + "n_jobs": [None, Integral], + "random_state": ["random_state"], + "dissimilarity": [StrOptions({"euclidean", "precomputed"})], + "normalized_stress": ["boolean", StrOptions({"auto"})], + } + + def __init__( + self, + n_components=2, + *, + metric=True, + n_init=4, + max_iter=300, + verbose=0, + eps=1e-3, + n_jobs=None, + random_state=None, + dissimilarity="euclidean", + normalized_stress="auto", + ): + self.n_components = n_components + self.dissimilarity = dissimilarity + self.metric = metric + self.n_init = n_init + self.max_iter = max_iter + self.eps = eps + self.verbose = verbose + self.n_jobs = n_jobs + self.random_state = random_state + self.normalized_stress = normalized_stress + + def _more_tags(self): + return {"pairwise": self.dissimilarity == "precomputed"} + + def fit(self, X, y=None, init=None): + """ + Compute the position of the points in the embedding space. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) or \ + (n_samples, n_samples) + Input data. If ``dissimilarity=='precomputed'``, the input should + be the dissimilarity matrix. + + y : Ignored + Not used, present for API consistency by convention. + + init : ndarray of shape (n_samples, n_components), default=None + Starting configuration of the embedding to initialize the SMACOF + algorithm. By default, the algorithm is initialized with a randomly + chosen array. + + Returns + ------- + self : object + Fitted estimator. + """ + self.fit_transform(X, init=init) + return self + + @_fit_context(prefer_skip_nested_validation=True) + def fit_transform(self, X, y=None, init=None): + """ + Fit the data from `X`, and returns the embedded coordinates. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) or \ + (n_samples, n_samples) + Input data. If ``dissimilarity=='precomputed'``, the input should + be the dissimilarity matrix. + + y : Ignored + Not used, present for API consistency by convention. + + init : ndarray of shape (n_samples, n_components), default=None + Starting configuration of the embedding to initialize the SMACOF + algorithm. By default, the algorithm is initialized with a randomly + chosen array. + + Returns + ------- + X_new : ndarray of shape (n_samples, n_components) + X transformed in the new space. + """ + X = self._validate_data(X) + if X.shape[0] == X.shape[1] and self.dissimilarity != "precomputed": + warnings.warn( + "The MDS API has changed. ``fit`` now constructs an" + " dissimilarity matrix from data. 
To use a custom " + "dissimilarity matrix, set " + "``dissimilarity='precomputed'``." + ) + + if self.dissimilarity == "precomputed": + self.dissimilarity_matrix_ = X + elif self.dissimilarity == "euclidean": + self.dissimilarity_matrix_ = euclidean_distances(X) + + self.embedding_, self.stress_, self.n_iter_ = smacof( + self.dissimilarity_matrix_, + metric=self.metric, + n_components=self.n_components, + init=init, + n_init=self.n_init, + n_jobs=self.n_jobs, + max_iter=self.max_iter, + verbose=self.verbose, + eps=self.eps, + random_state=self.random_state, + return_n_iter=True, + normalized_stress=self.normalized_stress, + ) + + return self.embedding_ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/_spectral_embedding.py b/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/_spectral_embedding.py new file mode 100644 index 0000000000000000000000000000000000000000..a2839954c117ad283c125bacc8f3b5e1f6483969 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/_spectral_embedding.py @@ -0,0 +1,749 @@ +"""Spectral Embedding.""" + +# Author: Gael Varoquaux +# Wei LI +# License: BSD 3 clause + + +import warnings +from numbers import Integral, Real + +import numpy as np +from scipy import sparse +from scipy.linalg import eigh +from scipy.sparse.csgraph import connected_components +from scipy.sparse.linalg import eigsh, lobpcg + +from ..base import BaseEstimator, _fit_context +from ..metrics.pairwise import rbf_kernel +from ..neighbors import NearestNeighbors, kneighbors_graph +from ..utils import ( + check_array, + check_random_state, + check_symmetric, +) +from ..utils._arpack import _init_arpack_v0 +from ..utils._param_validation import Interval, StrOptions +from ..utils.extmath import _deterministic_vector_sign_flip +from ..utils.fixes import laplacian as csgraph_laplacian +from ..utils.fixes import parse_version, sp_version + + +def _graph_connected_component(graph, node_id): + """Find the largest graph connected components that contains one + given node. + + Parameters + ---------- + graph : array-like of shape (n_samples, n_samples) + Adjacency matrix of the graph, non-zero weight means an edge + between the nodes. + + node_id : int + The index of the query node of the graph. + + Returns + ------- + connected_components_matrix : array-like of shape (n_samples,) + An array of bool value indicating the indexes of the nodes + belonging to the largest connected components of the given query + node. + """ + n_node = graph.shape[0] + if sparse.issparse(graph): + # speed up row-wise access to boolean connection mask + graph = graph.tocsr() + connected_nodes = np.zeros(n_node, dtype=bool) + nodes_to_explore = np.zeros(n_node, dtype=bool) + nodes_to_explore[node_id] = True + for _ in range(n_node): + last_num_component = connected_nodes.sum() + np.logical_or(connected_nodes, nodes_to_explore, out=connected_nodes) + if last_num_component >= connected_nodes.sum(): + break + indices = np.where(nodes_to_explore)[0] + nodes_to_explore.fill(False) + for i in indices: + if sparse.issparse(graph): + # scipy not yet implemented 1D sparse slices; can be changed back to + # `neighbors = graph[i].toarray().ravel()` once implemented + neighbors = graph[[i], :].toarray().ravel() + else: + neighbors = graph[i] + np.logical_or(nodes_to_explore, neighbors, out=nodes_to_explore) + return connected_nodes + + +def _graph_is_connected(graph): + """Return whether the graph is connected (True) or Not (False). 
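+
+    A minimal illustration of the expected behaviour on tiny dense adjacency
+    matrices (a sketch; ``bool`` is only used to keep the output stable):
+
+    >>> import numpy as np
+    >>> bool(_graph_is_connected(np.array([[1.0, 1.0], [1.0, 1.0]])))
+    True
+    >>> bool(_graph_is_connected(np.array([[1.0, 0.0], [0.0, 1.0]])))
+    False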
+ + Parameters + ---------- + graph : {array-like, sparse matrix} of shape (n_samples, n_samples) + Adjacency matrix of the graph, non-zero weight means an edge + between the nodes. + + Returns + ------- + is_connected : bool + True means the graph is fully connected and False means not. + """ + if sparse.issparse(graph): + # Before Scipy 1.11.3, `connected_components` only supports 32-bit indices. + # PR: https://github.com/scipy/scipy/pull/18913 + # First integration in 1.11.3: https://github.com/scipy/scipy/pull/19279 + # TODO(jjerphan): Once SciPy 1.11.3 is the minimum supported version, use + # `accept_large_sparse=True`. + accept_large_sparse = sp_version >= parse_version("1.11.3") + graph = check_array( + graph, accept_sparse=True, accept_large_sparse=accept_large_sparse + ) + # sparse graph, find all the connected components + n_connected_components, _ = connected_components(graph) + return n_connected_components == 1 + else: + # dense graph, find all connected components start from node 0 + return _graph_connected_component(graph, 0).sum() == graph.shape[0] + + +def _set_diag(laplacian, value, norm_laplacian): + """Set the diagonal of the laplacian matrix and convert it to a + sparse format well suited for eigenvalue decomposition. + + Parameters + ---------- + laplacian : {ndarray, sparse matrix} + The graph laplacian. + + value : float + The value of the diagonal. + + norm_laplacian : bool + Whether the value of the diagonal should be changed or not. + + Returns + ------- + laplacian : {array, sparse matrix} + An array of matrix in a form that is well suited to fast + eigenvalue decomposition, depending on the band width of the + matrix. + """ + n_nodes = laplacian.shape[0] + # We need all entries in the diagonal to values + if not sparse.issparse(laplacian): + if norm_laplacian: + laplacian.flat[:: n_nodes + 1] = value + else: + laplacian = laplacian.tocoo() + if norm_laplacian: + diag_idx = laplacian.row == laplacian.col + laplacian.data[diag_idx] = value + # If the matrix has a small number of diagonals (as in the + # case of structured matrices coming from images), the + # dia format might be best suited for matvec products: + n_diags = np.unique(laplacian.row - laplacian.col).size + if n_diags <= 7: + # 3 or less outer diagonals on each side + laplacian = laplacian.todia() + else: + # csr has the fastest matvec and is thus best suited to + # arpack + laplacian = laplacian.tocsr() + return laplacian + + +def spectral_embedding( + adjacency, + *, + n_components=8, + eigen_solver=None, + random_state=None, + eigen_tol="auto", + norm_laplacian=True, + drop_first=True, +): + """Project the sample on the first eigenvectors of the graph Laplacian. + + The adjacency matrix is used to compute a normalized graph Laplacian + whose spectrum (especially the eigenvectors associated to the + smallest eigenvalues) has an interpretation in terms of minimal + number of cuts necessary to split the graph into comparably sized + components. + + This embedding can also 'work' even if the ``adjacency`` variable is + not strictly the adjacency matrix of a graph but more generally + an affinity or similarity matrix between samples (for instance the + heat kernel of a euclidean distance matrix or a k-NN matrix). + + However care must taken to always make the affinity matrix symmetric + so that the eigenvector decomposition works as expected. + + Note : Laplacian Eigenmaps is the actual algorithm implemented here. + + Read more in the :ref:`User Guide `. 
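+
+    As a rough sketch (assuming a dense symmetric adjacency ``A`` with no
+    isolated nodes), the matrix whose smallest eigenvectors are extracted here
+    is the symmetric normalized Laplacian ``I - D^{-1/2} A D^{-1/2}``:
+
+    >>> import numpy as np
+    >>> A = np.array([[0., 1., 0.], [1., 0., 1.], [0., 1., 0.]])
+    >>> d = A.sum(axis=0)
+    >>> L_sym = np.eye(3) - A / np.sqrt(np.outer(d, d))
+
+    which is what :func:`scipy.sparse.csgraph.laplacian` with ``normed=True``
+    computes for such graphs; the embedding is then read off the eigenvectors
+    of ``L_sym`` associated with the smallest eigenvalues.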
+ + Parameters + ---------- + adjacency : {array-like, sparse graph} of shape (n_samples, n_samples) + The adjacency matrix of the graph to embed. + + n_components : int, default=8 + The dimension of the projection subspace. + + eigen_solver : {'arpack', 'lobpcg', 'amg'}, default=None + The eigenvalue decomposition strategy to use. AMG requires pyamg + to be installed. It can be faster on very large, sparse problems, + but may also lead to instabilities. If None, then ``'arpack'`` is + used. + + random_state : int, RandomState instance or None, default=None + A pseudo random number generator used for the initialization + of the lobpcg eigen vectors decomposition when `eigen_solver == + 'amg'`, and for the K-Means initialization. Use an int to make + the results deterministic across calls (See + :term:`Glossary `). + + .. note:: + When using `eigen_solver == 'amg'`, + it is necessary to also fix the global numpy seed with + `np.random.seed(int)` to get deterministic results. See + https://github.com/pyamg/pyamg/issues/139 for further + information. + + eigen_tol : float, default="auto" + Stopping criterion for eigendecomposition of the Laplacian matrix. + If `eigen_tol="auto"` then the passed tolerance will depend on the + `eigen_solver`: + + - If `eigen_solver="arpack"`, then `eigen_tol=0.0`; + - If `eigen_solver="lobpcg"` or `eigen_solver="amg"`, then + `eigen_tol=None` which configures the underlying `lobpcg` solver to + automatically resolve the value according to their heuristics. See, + :func:`scipy.sparse.linalg.lobpcg` for details. + + Note that when using `eigen_solver="amg"` values of `tol<1e-5` may lead + to convergence issues and should be avoided. + + .. versionadded:: 1.2 + Added 'auto' option. + + norm_laplacian : bool, default=True + If True, then compute symmetric normalized Laplacian. + + drop_first : bool, default=True + Whether to drop the first eigenvector. For spectral embedding, this + should be True as the first eigenvector should be constant vector for + connected graph, but for spectral clustering, this should be kept as + False to retain the first eigenvector. + + Returns + ------- + embedding : ndarray of shape (n_samples, n_components) + The reduced samples. + + Notes + ----- + Spectral Embedding (Laplacian Eigenmaps) is most useful when the graph + has one connected component. If there graph has many components, the first + few eigenvectors will simply uncover the connected components of the graph. + + References + ---------- + * https://en.wikipedia.org/wiki/LOBPCG + + * :doi:`"Toward the Optimal Preconditioned Eigensolver: Locally Optimal + Block Preconditioned Conjugate Gradient Method", + Andrew V. Knyazev + <10.1137/S1064827500366124>` + + Examples + -------- + >>> from sklearn.datasets import load_digits + >>> from sklearn.neighbors import kneighbors_graph + >>> from sklearn.manifold import spectral_embedding + >>> X, _ = load_digits(return_X_y=True) + >>> X = X[:100] + >>> affinity_matrix = kneighbors_graph( + ... X, n_neighbors=int(X.shape[0] / 10), include_self=True + ... ) + >>> # make the matrix symmetric + >>> affinity_matrix = 0.5 * (affinity_matrix + affinity_matrix.T) + >>> embedding = spectral_embedding(affinity_matrix, n_components=2, random_state=42) + >>> embedding.shape + (100, 2) + """ + adjacency = check_symmetric(adjacency) + + if eigen_solver == "amg": + try: + from pyamg import smoothed_aggregation_solver + except ImportError as e: + raise ValueError( + "The eigen_solver was set to 'amg', but pyamg is not available." 
+ ) from e + + if eigen_solver is None: + eigen_solver = "arpack" + elif eigen_solver not in ("arpack", "lobpcg", "amg"): + raise ValueError( + "Unknown value for eigen_solver: '%s'." + "Should be 'amg', 'arpack', or 'lobpcg'" % eigen_solver + ) + + random_state = check_random_state(random_state) + + n_nodes = adjacency.shape[0] + # Whether to drop the first eigenvector + if drop_first: + n_components = n_components + 1 + + if not _graph_is_connected(adjacency): + warnings.warn( + "Graph is not fully connected, spectral embedding may not work as expected." + ) + + laplacian, dd = csgraph_laplacian( + adjacency, normed=norm_laplacian, return_diag=True + ) + if ( + eigen_solver == "arpack" + or eigen_solver != "lobpcg" + and (not sparse.issparse(laplacian) or n_nodes < 5 * n_components) + ): + # lobpcg used with eigen_solver='amg' has bugs for low number of nodes + # for details see the source code in scipy: + # https://github.com/scipy/scipy/blob/v0.11.0/scipy/sparse/linalg/eigen + # /lobpcg/lobpcg.py#L237 + # or matlab: + # https://www.mathworks.com/matlabcentral/fileexchange/48-lobpcg-m + laplacian = _set_diag(laplacian, 1, norm_laplacian) + + # Here we'll use shift-invert mode for fast eigenvalues + # (see https://docs.scipy.org/doc/scipy/reference/tutorial/arpack.html + # for a short explanation of what this means) + # Because the normalized Laplacian has eigenvalues between 0 and 2, + # I - L has eigenvalues between -1 and 1. ARPACK is most efficient + # when finding eigenvalues of largest magnitude (keyword which='LM') + # and when these eigenvalues are very large compared to the rest. + # For very large, very sparse graphs, I - L can have many, many + # eigenvalues very near 1.0. This leads to slow convergence. So + # instead, we'll use ARPACK's shift-invert mode, asking for the + # eigenvalues near 1.0. This effectively spreads-out the spectrum + # near 1.0 and leads to much faster convergence: potentially an + # orders-of-magnitude speedup over simply using keyword which='LA' + # in standard mode. + try: + # We are computing the opposite of the laplacian inplace so as + # to spare a memory allocation of a possibly very large array + tol = 0 if eigen_tol == "auto" else eigen_tol + laplacian *= -1 + v0 = _init_arpack_v0(laplacian.shape[0], random_state) + laplacian = check_array( + laplacian, accept_sparse="csr", accept_large_sparse=False + ) + _, diffusion_map = eigsh( + laplacian, k=n_components, sigma=1.0, which="LM", tol=tol, v0=v0 + ) + embedding = diffusion_map.T[n_components::-1] + if norm_laplacian: + # recover u = D^-1/2 x from the eigenvector output x + embedding = embedding / dd + except RuntimeError: + # When submatrices are exactly singular, an LU decomposition + # in arpack fails. We fallback to lobpcg + eigen_solver = "lobpcg" + # Revert the laplacian to its opposite to have lobpcg work + laplacian *= -1 + + elif eigen_solver == "amg": + # Use AMG to get a preconditioner and speed up the eigenvalue + # problem. + if not sparse.issparse(laplacian): + warnings.warn("AMG works better for sparse matrices") + laplacian = check_array( + laplacian, dtype=[np.float64, np.float32], accept_sparse=True + ) + laplacian = _set_diag(laplacian, 1, norm_laplacian) + + # The Laplacian matrix is always singular, having at least one zero + # eigenvalue, corresponding to the trivial eigenvector, which is a + # constant. 
Using a singular matrix for preconditioning may result in + # random failures in LOBPCG and is not supported by the existing + # theory: + # see https://doi.org/10.1007/s10208-015-9297-1 + # Shift the Laplacian so its diagononal is not all ones. The shift + # does change the eigenpairs however, so we'll feed the shifted + # matrix to the solver and afterward set it back to the original. + diag_shift = 1e-5 * sparse.eye(laplacian.shape[0]) + laplacian += diag_shift + if hasattr(sparse, "csr_array") and isinstance(laplacian, sparse.csr_array): + # `pyamg` does not work with `csr_array` and we need to convert it to a + # `csr_matrix` object. + laplacian = sparse.csr_matrix(laplacian) + ml = smoothed_aggregation_solver(check_array(laplacian, accept_sparse="csr")) + laplacian -= diag_shift + + M = ml.aspreconditioner() + # Create initial approximation X to eigenvectors + X = random_state.standard_normal(size=(laplacian.shape[0], n_components + 1)) + X[:, 0] = dd.ravel() + X = X.astype(laplacian.dtype) + + tol = None if eigen_tol == "auto" else eigen_tol + _, diffusion_map = lobpcg(laplacian, X, M=M, tol=tol, largest=False) + embedding = diffusion_map.T + if norm_laplacian: + # recover u = D^-1/2 x from the eigenvector output x + embedding = embedding / dd + if embedding.shape[0] == 1: + raise ValueError + + if eigen_solver == "lobpcg": + laplacian = check_array( + laplacian, dtype=[np.float64, np.float32], accept_sparse=True + ) + if n_nodes < 5 * n_components + 1: + # see note above under arpack why lobpcg has problems with small + # number of nodes + # lobpcg will fallback to eigh, so we short circuit it + if sparse.issparse(laplacian): + laplacian = laplacian.toarray() + _, diffusion_map = eigh(laplacian, check_finite=False) + embedding = diffusion_map.T[:n_components] + if norm_laplacian: + # recover u = D^-1/2 x from the eigenvector output x + embedding = embedding / dd + else: + laplacian = _set_diag(laplacian, 1, norm_laplacian) + # We increase the number of eigenvectors requested, as lobpcg + # doesn't behave well in low dimension and create initial + # approximation X to eigenvectors + X = random_state.standard_normal( + size=(laplacian.shape[0], n_components + 1) + ) + X[:, 0] = dd.ravel() + X = X.astype(laplacian.dtype) + tol = None if eigen_tol == "auto" else eigen_tol + _, diffusion_map = lobpcg( + laplacian, X, tol=tol, largest=False, maxiter=2000 + ) + embedding = diffusion_map.T[:n_components] + if norm_laplacian: + # recover u = D^-1/2 x from the eigenvector output x + embedding = embedding / dd + if embedding.shape[0] == 1: + raise ValueError + + embedding = _deterministic_vector_sign_flip(embedding) + if drop_first: + return embedding[1:n_components].T + else: + return embedding[:n_components].T + + +class SpectralEmbedding(BaseEstimator): + """Spectral embedding for non-linear dimensionality reduction. + + Forms an affinity matrix given by the specified function and + applies spectral decomposition to the corresponding graph laplacian. + The resulting transformation is given by the value of the + eigenvectors for each data point. + + Note : Laplacian Eigenmaps is the actual algorithm implemented here. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_components : int, default=2 + The dimension of the projected subspace. + + affinity : {'nearest_neighbors', 'rbf', 'precomputed', \ + 'precomputed_nearest_neighbors'} or callable, \ + default='nearest_neighbors' + How to construct the affinity matrix. 
+ - 'nearest_neighbors' : construct the affinity matrix by computing a + graph of nearest neighbors. + - 'rbf' : construct the affinity matrix by computing a radial basis + function (RBF) kernel. + - 'precomputed' : interpret ``X`` as a precomputed affinity matrix. + - 'precomputed_nearest_neighbors' : interpret ``X`` as a sparse graph + of precomputed nearest neighbors, and constructs the affinity matrix + by selecting the ``n_neighbors`` nearest neighbors. + - callable : use passed in function as affinity + the function takes in data matrix (n_samples, n_features) + and return affinity matrix (n_samples, n_samples). + + gamma : float, default=None + Kernel coefficient for rbf kernel. If None, gamma will be set to + 1/n_features. + + random_state : int, RandomState instance or None, default=None + A pseudo random number generator used for the initialization + of the lobpcg eigen vectors decomposition when `eigen_solver == + 'amg'`, and for the K-Means initialization. Use an int to make + the results deterministic across calls (See + :term:`Glossary `). + + .. note:: + When using `eigen_solver == 'amg'`, + it is necessary to also fix the global numpy seed with + `np.random.seed(int)` to get deterministic results. See + https://github.com/pyamg/pyamg/issues/139 for further + information. + + eigen_solver : {'arpack', 'lobpcg', 'amg'}, default=None + The eigenvalue decomposition strategy to use. AMG requires pyamg + to be installed. It can be faster on very large, sparse problems. + If None, then ``'arpack'`` is used. + + eigen_tol : float, default="auto" + Stopping criterion for eigendecomposition of the Laplacian matrix. + If `eigen_tol="auto"` then the passed tolerance will depend on the + `eigen_solver`: + + - If `eigen_solver="arpack"`, then `eigen_tol=0.0`; + - If `eigen_solver="lobpcg"` or `eigen_solver="amg"`, then + `eigen_tol=None` which configures the underlying `lobpcg` solver to + automatically resolve the value according to their heuristics. See, + :func:`scipy.sparse.linalg.lobpcg` for details. + + Note that when using `eigen_solver="lobpcg"` or `eigen_solver="amg"` + values of `tol<1e-5` may lead to convergence issues and should be + avoided. + + .. versionadded:: 1.2 + + n_neighbors : int, default=None + Number of nearest neighbors for nearest_neighbors graph building. + If None, n_neighbors will be set to max(n_samples/10, 1). + + n_jobs : int, default=None + The number of parallel jobs to run. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + Attributes + ---------- + embedding_ : ndarray of shape (n_samples, n_components) + Spectral embedding of the training matrix. + + affinity_matrix_ : ndarray of shape (n_samples, n_samples) + Affinity_matrix constructed from samples or precomputed. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_neighbors_ : int + Number of nearest neighbors effectively used. + + See Also + -------- + Isomap : Non-linear dimensionality reduction through Isometric Mapping. + + References + ---------- + + - :doi:`A Tutorial on Spectral Clustering, 2007 + Ulrike von Luxburg + <10.1007/s11222-007-9033-z>` + + - `On Spectral Clustering: Analysis and an algorithm, 2001 + Andrew Y. Ng, Michael I. 
Jordan, Yair Weiss + `_ + + - :doi:`Normalized cuts and image segmentation, 2000 + Jianbo Shi, Jitendra Malik + <10.1109/34.868688>` + + Examples + -------- + >>> from sklearn.datasets import load_digits + >>> from sklearn.manifold import SpectralEmbedding + >>> X, _ = load_digits(return_X_y=True) + >>> X.shape + (1797, 64) + >>> embedding = SpectralEmbedding(n_components=2) + >>> X_transformed = embedding.fit_transform(X[:100]) + >>> X_transformed.shape + (100, 2) + """ + + _parameter_constraints: dict = { + "n_components": [Interval(Integral, 1, None, closed="left")], + "affinity": [ + StrOptions( + { + "nearest_neighbors", + "rbf", + "precomputed", + "precomputed_nearest_neighbors", + }, + ), + callable, + ], + "gamma": [Interval(Real, 0, None, closed="left"), None], + "random_state": ["random_state"], + "eigen_solver": [StrOptions({"arpack", "lobpcg", "amg"}), None], + "eigen_tol": [Interval(Real, 0, None, closed="left"), StrOptions({"auto"})], + "n_neighbors": [Interval(Integral, 1, None, closed="left"), None], + "n_jobs": [None, Integral], + } + + def __init__( + self, + n_components=2, + *, + affinity="nearest_neighbors", + gamma=None, + random_state=None, + eigen_solver=None, + eigen_tol="auto", + n_neighbors=None, + n_jobs=None, + ): + self.n_components = n_components + self.affinity = affinity + self.gamma = gamma + self.random_state = random_state + self.eigen_solver = eigen_solver + self.eigen_tol = eigen_tol + self.n_neighbors = n_neighbors + self.n_jobs = n_jobs + + def _more_tags(self): + return { + "pairwise": self.affinity in [ + "precomputed", + "precomputed_nearest_neighbors", + ] + } + + def _get_affinity_matrix(self, X, Y=None): + """Calculate the affinity matrix from data + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples + and `n_features` is the number of features. + + If affinity is "precomputed" + X : array-like of shape (n_samples, n_samples), + Interpret X as precomputed adjacency graph computed from + samples. 
+ + Y: Ignored + + Returns + ------- + affinity_matrix of shape (n_samples, n_samples) + """ + if self.affinity == "precomputed": + self.affinity_matrix_ = X + return self.affinity_matrix_ + if self.affinity == "precomputed_nearest_neighbors": + estimator = NearestNeighbors( + n_neighbors=self.n_neighbors, n_jobs=self.n_jobs, metric="precomputed" + ).fit(X) + connectivity = estimator.kneighbors_graph(X=X, mode="connectivity") + self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T) + return self.affinity_matrix_ + if self.affinity == "nearest_neighbors": + if sparse.issparse(X): + warnings.warn( + "Nearest neighbors affinity currently does " + "not support sparse input, falling back to " + "rbf affinity" + ) + self.affinity = "rbf" + else: + self.n_neighbors_ = ( + self.n_neighbors + if self.n_neighbors is not None + else max(int(X.shape[0] / 10), 1) + ) + self.affinity_matrix_ = kneighbors_graph( + X, self.n_neighbors_, include_self=True, n_jobs=self.n_jobs + ) + # currently only symmetric affinity_matrix supported + self.affinity_matrix_ = 0.5 * ( + self.affinity_matrix_ + self.affinity_matrix_.T + ) + return self.affinity_matrix_ + if self.affinity == "rbf": + self.gamma_ = self.gamma if self.gamma is not None else 1.0 / X.shape[1] + self.affinity_matrix_ = rbf_kernel(X, gamma=self.gamma_) + return self.affinity_matrix_ + self.affinity_matrix_ = self.affinity(X) + return self.affinity_matrix_ + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Fit the model from data in X. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples + and `n_features` is the number of features. + + If affinity is "precomputed" + X : {array-like, sparse matrix}, shape (n_samples, n_samples), + Interpret X as precomputed adjacency graph computed from + samples. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : object + Returns the instance itself. + """ + X = self._validate_data(X, accept_sparse="csr", ensure_min_samples=2) + + random_state = check_random_state(self.random_state) + + affinity_matrix = self._get_affinity_matrix(X) + self.embedding_ = spectral_embedding( + affinity_matrix, + n_components=self.n_components, + eigen_solver=self.eigen_solver, + eigen_tol=self.eigen_tol, + random_state=random_state, + ) + return self + + def fit_transform(self, X, y=None): + """Fit the model from data in X and transform X. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples + and `n_features` is the number of features. + + If affinity is "precomputed" + X : {array-like, sparse matrix} of shape (n_samples, n_samples), + Interpret X as precomputed adjacency graph computed from + samples. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + X_new : array-like of shape (n_samples, n_components) + Spectral embedding of the training matrix. 
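+
+        Examples
+        --------
+        A minimal sketch on toy data (equivalent to calling ``fit`` and then
+        reading ``embedding_``; the random input is only illustrative):
+
+        >>> import numpy as np
+        >>> from sklearn.manifold import SpectralEmbedding
+        >>> X = np.random.RandomState(0).uniform(size=(20, 5))
+        >>> se = SpectralEmbedding(n_components=2, n_neighbors=5, random_state=0)
+        >>> se.fit_transform(X).shape
+        (20, 2)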
+ """ + self.fit(X) + return self.embedding_ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/_t_sne.py b/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/_t_sne.py new file mode 100644 index 0000000000000000000000000000000000000000..2233bea3a768197684f820f9b92841e0670a1338 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/_t_sne.py @@ -0,0 +1,1174 @@ +# Author: Alexander Fabisch -- +# Author: Christopher Moody +# Author: Nick Travers +# License: BSD 3 clause (C) 2014 + +# This is the exact and Barnes-Hut t-SNE implementation. There are other +# modifications of the algorithm: +# * Fast Optimization for t-SNE: +# https://cseweb.ucsd.edu/~lvdmaaten/workshops/nips2010/papers/vandermaaten.pdf + +from numbers import Integral, Real +from time import time + +import numpy as np +from scipy import linalg +from scipy.sparse import csr_matrix, issparse +from scipy.spatial.distance import pdist, squareform + +from ..base import ( + BaseEstimator, + ClassNamePrefixFeaturesOutMixin, + TransformerMixin, + _fit_context, +) +from ..decomposition import PCA +from ..metrics.pairwise import _VALID_METRICS, pairwise_distances +from ..neighbors import NearestNeighbors +from ..utils import check_random_state +from ..utils._openmp_helpers import _openmp_effective_n_threads +from ..utils._param_validation import Interval, StrOptions, validate_params +from ..utils.validation import _num_samples, check_non_negative + +# mypy error: Module 'sklearn.manifold' has no attribute '_utils' +# mypy error: Module 'sklearn.manifold' has no attribute '_barnes_hut_tsne' +from . import _barnes_hut_tsne, _utils # type: ignore + +MACHINE_EPSILON = np.finfo(np.double).eps + + +def _joint_probabilities(distances, desired_perplexity, verbose): + """Compute joint probabilities p_ij from distances. + + Parameters + ---------- + distances : ndarray of shape (n_samples * (n_samples-1) / 2,) + Distances of samples are stored as condensed matrices, i.e. + we omit the diagonal and duplicate entries and store everything + in a one-dimensional array. + + desired_perplexity : float + Desired perplexity of the joint probability distributions. + + verbose : int + Verbosity level. + + Returns + ------- + P : ndarray of shape (n_samples * (n_samples-1) / 2,) + Condensed joint probability matrix. + """ + # Compute conditional probabilities such that they approximately match + # the desired perplexity + distances = distances.astype(np.float32, copy=False) + conditional_P = _utils._binary_search_perplexity( + distances, desired_perplexity, verbose + ) + P = conditional_P + conditional_P.T + sum_P = np.maximum(np.sum(P), MACHINE_EPSILON) + P = np.maximum(squareform(P) / sum_P, MACHINE_EPSILON) + return P + + +def _joint_probabilities_nn(distances, desired_perplexity, verbose): + """Compute joint probabilities p_ij from distances using just nearest + neighbors. + + This method is approximately equal to _joint_probabilities. The latter + is O(N), but limiting the joint probability to nearest neighbors improves + this substantially to O(uN). + + Parameters + ---------- + distances : sparse matrix of shape (n_samples, n_samples) + Distances of samples to its n_neighbors nearest neighbors. All other + distances are left to zero (and are not materialized in memory). + Matrix should be of CSR format. + + desired_perplexity : float + Desired perplexity of the joint probability distributions. + + verbose : int + Verbosity level. 
+ + Returns + ------- + P : sparse matrix of shape (n_samples, n_samples) + Condensed joint probability matrix with only nearest neighbors. Matrix + will be of CSR format. + """ + t0 = time() + # Compute conditional probabilities such that they approximately match + # the desired perplexity + distances.sort_indices() + n_samples = distances.shape[0] + distances_data = distances.data.reshape(n_samples, -1) + distances_data = distances_data.astype(np.float32, copy=False) + conditional_P = _utils._binary_search_perplexity( + distances_data, desired_perplexity, verbose + ) + assert np.all(np.isfinite(conditional_P)), "All probabilities should be finite" + + # Symmetrize the joint probability distribution using sparse operations + P = csr_matrix( + (conditional_P.ravel(), distances.indices, distances.indptr), + shape=(n_samples, n_samples), + ) + P = P + P.T + + # Normalize the joint probability distribution + sum_P = np.maximum(P.sum(), MACHINE_EPSILON) + P /= sum_P + + assert np.all(np.abs(P.data) <= 1.0) + if verbose >= 2: + duration = time() - t0 + print("[t-SNE] Computed conditional probabilities in {:.3f}s".format(duration)) + return P + + +def _kl_divergence( + params, + P, + degrees_of_freedom, + n_samples, + n_components, + skip_num_points=0, + compute_error=True, +): + """t-SNE objective function: gradient of the KL divergence + of p_ijs and q_ijs and the absolute error. + + Parameters + ---------- + params : ndarray of shape (n_params,) + Unraveled embedding. + + P : ndarray of shape (n_samples * (n_samples-1) / 2,) + Condensed joint probability matrix. + + degrees_of_freedom : int + Degrees of freedom of the Student's-t distribution. + + n_samples : int + Number of samples. + + n_components : int + Dimension of the embedded space. + + skip_num_points : int, default=0 + This does not compute the gradient for points with indices below + `skip_num_points`. This is useful when computing transforms of new + data where you'd like to keep the old data fixed. + + compute_error: bool, default=True + If False, the kl_divergence is not computed and returns NaN. + + Returns + ------- + kl_divergence : float + Kullback-Leibler divergence of p_ij and q_ij. + + grad : ndarray of shape (n_params,) + Unraveled gradient of the Kullback-Leibler divergence with respect to + the embedding. + """ + X_embedded = params.reshape(n_samples, n_components) + + # Q is a heavy-tailed distribution: Student's t-distribution + dist = pdist(X_embedded, "sqeuclidean") + dist /= degrees_of_freedom + dist += 1.0 + dist **= (degrees_of_freedom + 1.0) / -2.0 + Q = np.maximum(dist / (2.0 * np.sum(dist)), MACHINE_EPSILON) + + # Optimization trick below: np.dot(x, y) is faster than + # np.sum(x * y) because it calls BLAS + + # Objective: C (Kullback-Leibler divergence of P and Q) + if compute_error: + kl_divergence = 2.0 * np.dot(P, np.log(np.maximum(P, MACHINE_EPSILON) / Q)) + else: + kl_divergence = np.nan + + # Gradient: dC/dY + # pdist always returns double precision distances. 
Thus we need to take + grad = np.ndarray((n_samples, n_components), dtype=params.dtype) + PQd = squareform((P - Q) * dist) + for i in range(skip_num_points, n_samples): + grad[i] = np.dot(np.ravel(PQd[i], order="K"), X_embedded[i] - X_embedded) + grad = grad.ravel() + c = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom + grad *= c + + return kl_divergence, grad + + +def _kl_divergence_bh( + params, + P, + degrees_of_freedom, + n_samples, + n_components, + angle=0.5, + skip_num_points=0, + verbose=False, + compute_error=True, + num_threads=1, +): + """t-SNE objective function: KL divergence of p_ijs and q_ijs. + + Uses Barnes-Hut tree methods to calculate the gradient that + runs in O(NlogN) instead of O(N^2). + + Parameters + ---------- + params : ndarray of shape (n_params,) + Unraveled embedding. + + P : sparse matrix of shape (n_samples, n_sample) + Sparse approximate joint probability matrix, computed only for the + k nearest-neighbors and symmetrized. Matrix should be of CSR format. + + degrees_of_freedom : int + Degrees of freedom of the Student's-t distribution. + + n_samples : int + Number of samples. + + n_components : int + Dimension of the embedded space. + + angle : float, default=0.5 + This is the trade-off between speed and accuracy for Barnes-Hut T-SNE. + 'angle' is the angular size (referred to as theta in [3]) of a distant + node as measured from a point. If this size is below 'angle' then it is + used as a summary node of all points contained within it. + This method is not very sensitive to changes in this parameter + in the range of 0.2 - 0.8. Angle less than 0.2 has quickly increasing + computation time and angle greater 0.8 has quickly increasing error. + + skip_num_points : int, default=0 + This does not compute the gradient for points with indices below + `skip_num_points`. This is useful when computing transforms of new + data where you'd like to keep the old data fixed. + + verbose : int, default=False + Verbosity level. + + compute_error: bool, default=True + If False, the kl_divergence is not computed and returns NaN. + + num_threads : int, default=1 + Number of threads used to compute the gradient. This is set here to + avoid calling _openmp_effective_n_threads for each gradient step. + + Returns + ------- + kl_divergence : float + Kullback-Leibler divergence of p_ij and q_ij. + + grad : ndarray of shape (n_params,) + Unraveled gradient of the Kullback-Leibler divergence with respect to + the embedding. + """ + params = params.astype(np.float32, copy=False) + X_embedded = params.reshape(n_samples, n_components) + + val_P = P.data.astype(np.float32, copy=False) + neighbors = P.indices.astype(np.int64, copy=False) + indptr = P.indptr.astype(np.int64, copy=False) + + grad = np.zeros(X_embedded.shape, dtype=np.float32) + error = _barnes_hut_tsne.gradient( + val_P, + X_embedded, + neighbors, + indptr, + grad, + angle, + n_components, + verbose, + dof=degrees_of_freedom, + compute_error=compute_error, + num_threads=num_threads, + ) + c = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom + grad = grad.ravel() + grad *= c + + return error, grad + + +def _gradient_descent( + objective, + p0, + it, + n_iter, + n_iter_check=1, + n_iter_without_progress=300, + momentum=0.8, + learning_rate=200.0, + min_gain=0.01, + min_grad_norm=1e-7, + verbose=0, + args=None, + kwargs=None, +): + """Batch gradient descent with momentum and individual gains. 
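+
+    A sketch of the update performed at each iteration, written with the same
+    names as the code below (``grad`` is the gradient returned by ``objective``):
+
+        gains[update * grad < 0.0] += 0.2   # previous step and gradient disagree
+        gains[update * grad >= 0.0] *= 0.8  # they agree: damp the gain
+        np.clip(gains, min_gain, np.inf, out=gains)
+        update = momentum * update - learning_rate * (gains * grad)
+        p += update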
+ + Parameters + ---------- + objective : callable + Should return a tuple of cost and gradient for a given parameter + vector. When expensive to compute, the cost can optionally + be None and can be computed every n_iter_check steps using + the objective_error function. + + p0 : array-like of shape (n_params,) + Initial parameter vector. + + it : int + Current number of iterations (this function will be called more than + once during the optimization). + + n_iter : int + Maximum number of gradient descent iterations. + + n_iter_check : int, default=1 + Number of iterations before evaluating the global error. If the error + is sufficiently low, we abort the optimization. + + n_iter_without_progress : int, default=300 + Maximum number of iterations without progress before we abort the + optimization. + + momentum : float within (0.0, 1.0), default=0.8 + The momentum generates a weight for previous gradients that decays + exponentially. + + learning_rate : float, default=200.0 + The learning rate for t-SNE is usually in the range [10.0, 1000.0]. If + the learning rate is too high, the data may look like a 'ball' with any + point approximately equidistant from its nearest neighbours. If the + learning rate is too low, most points may look compressed in a dense + cloud with few outliers. + + min_gain : float, default=0.01 + Minimum individual gain for each parameter. + + min_grad_norm : float, default=1e-7 + If the gradient norm is below this threshold, the optimization will + be aborted. + + verbose : int, default=0 + Verbosity level. + + args : sequence, default=None + Arguments to pass to objective function. + + kwargs : dict, default=None + Keyword arguments to pass to objective function. + + Returns + ------- + p : ndarray of shape (n_params,) + Optimum parameters. + + error : float + Optimum. + + i : int + Last iteration. + """ + if args is None: + args = [] + if kwargs is None: + kwargs = {} + + p = p0.copy().ravel() + update = np.zeros_like(p) + gains = np.ones_like(p) + error = np.finfo(float).max + best_error = np.finfo(float).max + best_iter = i = it + + tic = time() + for i in range(it, n_iter): + check_convergence = (i + 1) % n_iter_check == 0 + # only compute the error when needed + kwargs["compute_error"] = check_convergence or i == n_iter - 1 + + error, grad = objective(p, *args, **kwargs) + + inc = update * grad < 0.0 + dec = np.invert(inc) + gains[inc] += 0.2 + gains[dec] *= 0.8 + np.clip(gains, min_gain, np.inf, out=gains) + grad *= gains + update = momentum * update - learning_rate * grad + p += update + + if check_convergence: + toc = time() + duration = toc - tic + tic = toc + grad_norm = linalg.norm(grad) + + if verbose >= 2: + print( + "[t-SNE] Iteration %d: error = %.7f," + " gradient norm = %.7f" + " (%s iterations in %0.3fs)" + % (i + 1, error, grad_norm, n_iter_check, duration) + ) + + if error < best_error: + best_error = error + best_iter = i + elif i - best_iter > n_iter_without_progress: + if verbose >= 2: + print( + "[t-SNE] Iteration %d: did not make any progress " + "during the last %d episodes. Finished." + % (i + 1, n_iter_without_progress) + ) + break + if grad_norm <= min_grad_norm: + if verbose >= 2: + print( + "[t-SNE] Iteration %d: gradient norm %f. Finished." 
+ % (i + 1, grad_norm) + ) + break + + return p, error, i + + +@validate_params( + { + "X": ["array-like", "sparse matrix"], + "X_embedded": ["array-like", "sparse matrix"], + "n_neighbors": [Interval(Integral, 1, None, closed="left")], + "metric": [StrOptions(set(_VALID_METRICS) | {"precomputed"}), callable], + }, + prefer_skip_nested_validation=True, +) +def trustworthiness(X, X_embedded, *, n_neighbors=5, metric="euclidean"): + r"""Indicate to what extent the local structure is retained. + + The trustworthiness is within [0, 1]. It is defined as + + .. math:: + + T(k) = 1 - \frac{2}{nk (2n - 3k - 1)} \sum^n_{i=1} + \sum_{j \in \mathcal{N}_{i}^{k}} \max(0, (r(i, j) - k)) + + where for each sample i, :math:`\mathcal{N}_{i}^{k}` are its k nearest + neighbors in the output space, and every sample j is its :math:`r(i, j)`-th + nearest neighbor in the input space. In other words, any unexpected nearest + neighbors in the output space are penalised in proportion to their rank in + the input space. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) or \ + (n_samples, n_samples) + If the metric is 'precomputed' X must be a square distance + matrix. Otherwise it contains a sample per row. + + X_embedded : {array-like, sparse matrix} of shape (n_samples, n_components) + Embedding of the training data in low-dimensional space. + + n_neighbors : int, default=5 + The number of neighbors that will be considered. Should be fewer than + `n_samples / 2` to ensure the trustworthiness to lies within [0, 1], as + mentioned in [1]_. An error will be raised otherwise. + + metric : str or callable, default='euclidean' + Which metric to use for computing pairwise distances between samples + from the original input space. If metric is 'precomputed', X must be a + matrix of pairwise distances or squared distances. Otherwise, for a list + of available metrics, see the documentation of argument metric in + `sklearn.pairwise.pairwise_distances` and metrics listed in + `sklearn.metrics.pairwise.PAIRWISE_DISTANCE_FUNCTIONS`. Note that the + "cosine" metric uses :func:`~sklearn.metrics.pairwise.cosine_distances`. + + .. versionadded:: 0.20 + + Returns + ------- + trustworthiness : float + Trustworthiness of the low-dimensional embedding. + + References + ---------- + .. [1] Jarkko Venna and Samuel Kaski. 2001. Neighborhood + Preservation in Nonlinear Projection Methods: An Experimental Study. + In Proceedings of the International Conference on Artificial Neural Networks + (ICANN '01). Springer-Verlag, Berlin, Heidelberg, 485-491. + + .. [2] Laurens van der Maaten. Learning a Parametric Embedding by Preserving + Local Structure. Proceedings of the Twelfth International Conference on + Artificial Intelligence and Statistics, PMLR 5:384-391, 2009. 
+ + Examples + -------- + >>> from sklearn.datasets import make_blobs + >>> from sklearn.decomposition import PCA + >>> from sklearn.manifold import trustworthiness + >>> X, _ = make_blobs(n_samples=100, n_features=10, centers=3, random_state=42) + >>> X_embedded = PCA(n_components=2).fit_transform(X) + >>> print(f"{trustworthiness(X, X_embedded, n_neighbors=5):.2f}") + 0.92 + """ + n_samples = _num_samples(X) + if n_neighbors >= n_samples / 2: + raise ValueError( + f"n_neighbors ({n_neighbors}) should be less than n_samples / 2" + f" ({n_samples / 2})" + ) + dist_X = pairwise_distances(X, metric=metric) + if metric == "precomputed": + dist_X = dist_X.copy() + # we set the diagonal to np.inf to exclude the points themselves from + # their own neighborhood + np.fill_diagonal(dist_X, np.inf) + ind_X = np.argsort(dist_X, axis=1) + # `ind_X[i]` is the index of sorted distances between i and other samples + ind_X_embedded = ( + NearestNeighbors(n_neighbors=n_neighbors) + .fit(X_embedded) + .kneighbors(return_distance=False) + ) + + # We build an inverted index of neighbors in the input space: For sample i, + # we define `inverted_index[i]` as the inverted index of sorted distances: + # inverted_index[i][ind_X[i]] = np.arange(1, n_sample + 1) + inverted_index = np.zeros((n_samples, n_samples), dtype=int) + ordered_indices = np.arange(n_samples + 1) + inverted_index[ordered_indices[:-1, np.newaxis], ind_X] = ordered_indices[1:] + ranks = ( + inverted_index[ordered_indices[:-1, np.newaxis], ind_X_embedded] - n_neighbors + ) + t = np.sum(ranks[ranks > 0]) + t = 1.0 - t * ( + 2.0 / (n_samples * n_neighbors * (2.0 * n_samples - 3.0 * n_neighbors - 1.0)) + ) + return t + + +class TSNE(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator): + """T-distributed Stochastic Neighbor Embedding. + + t-SNE [1] is a tool to visualize high-dimensional data. It converts + similarities between data points to joint probabilities and tries + to minimize the Kullback-Leibler divergence between the joint + probabilities of the low-dimensional embedding and the + high-dimensional data. t-SNE has a cost function that is not convex, + i.e. with different initializations we can get different results. + + It is highly recommended to use another dimensionality reduction + method (e.g. PCA for dense data or TruncatedSVD for sparse data) + to reduce the number of dimensions to a reasonable amount (e.g. 50) + if the number of features is very high. This will suppress some + noise and speed up the computation of pairwise distances between + samples. For more tips see Laurens van der Maaten's FAQ [2]. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_components : int, default=2 + Dimension of the embedded space. + + perplexity : float, default=30.0 + The perplexity is related to the number of nearest neighbors that + is used in other manifold learning algorithms. Larger datasets + usually require a larger perplexity. Consider selecting a value + between 5 and 50. Different values can result in significantly + different results. The perplexity must be less than the number + of samples. + + early_exaggeration : float, default=12.0 + Controls how tight natural clusters in the original space are in + the embedded space and how much space will be between them. For + larger values, the space between natural clusters will be larger + in the embedded space. Again, the choice of this parameter is not + very critical. 
If the cost function increases during initial + optimization, the early exaggeration factor or the learning rate + might be too high. + + learning_rate : float or "auto", default="auto" + The learning rate for t-SNE is usually in the range [10.0, 1000.0]. If + the learning rate is too high, the data may look like a 'ball' with any + point approximately equidistant from its nearest neighbours. If the + learning rate is too low, most points may look compressed in a dense + cloud with few outliers. If the cost function gets stuck in a bad local + minimum increasing the learning rate may help. + Note that many other t-SNE implementations (bhtsne, FIt-SNE, openTSNE, + etc.) use a definition of learning_rate that is 4 times smaller than + ours. So our learning_rate=200 corresponds to learning_rate=800 in + those other implementations. The 'auto' option sets the learning_rate + to `max(N / early_exaggeration / 4, 50)` where N is the sample size, + following [4] and [5]. + + .. versionchanged:: 1.2 + The default value changed to `"auto"`. + + n_iter : int, default=1000 + Maximum number of iterations for the optimization. Should be at + least 250. + + n_iter_without_progress : int, default=300 + Maximum number of iterations without progress before we abort the + optimization, used after 250 initial iterations with early + exaggeration. Note that progress is only checked every 50 iterations so + this value is rounded to the next multiple of 50. + + .. versionadded:: 0.17 + parameter *n_iter_without_progress* to control stopping criteria. + + min_grad_norm : float, default=1e-7 + If the gradient norm is below this threshold, the optimization will + be stopped. + + metric : str or callable, default='euclidean' + The metric to use when calculating distance between instances in a + feature array. If metric is a string, it must be one of the options + allowed by scipy.spatial.distance.pdist for its metric parameter, or + a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS. + If metric is "precomputed", X is assumed to be a distance matrix. + Alternatively, if metric is a callable function, it is called on each + pair of instances (rows) and the resulting value recorded. The callable + should take two arrays from X as input and return a value indicating + the distance between them. The default is "euclidean" which is + interpreted as squared euclidean distance. + + metric_params : dict, default=None + Additional keyword arguments for the metric function. + + .. versionadded:: 1.1 + + init : {"random", "pca"} or ndarray of shape (n_samples, n_components), \ + default="pca" + Initialization of embedding. + PCA initialization cannot be used with precomputed distances and is + usually more globally stable than random initialization. + + .. versionchanged:: 1.2 + The default value changed to `"pca"`. + + verbose : int, default=0 + Verbosity level. + + random_state : int, RandomState instance or None, default=None + Determines the random number generator. Pass an int for reproducible + results across multiple function calls. Note that different + initializations might result in different local minima of the cost + function. See :term:`Glossary `. + + method : {'barnes_hut', 'exact'}, default='barnes_hut' + By default the gradient calculation algorithm uses Barnes-Hut + approximation running in O(NlogN) time. method='exact' + will run on the slower, but exact, algorithm in O(N^2) time. The + exact algorithm should be used when nearest-neighbor errors need + to be better than 3%. 
However, the exact method cannot scale to + millions of examples. + + .. versionadded:: 0.17 + Approximate optimization *method* via the Barnes-Hut. + + angle : float, default=0.5 + Only used if method='barnes_hut' + This is the trade-off between speed and accuracy for Barnes-Hut T-SNE. + 'angle' is the angular size (referred to as theta in [3]) of a distant + node as measured from a point. If this size is below 'angle' then it is + used as a summary node of all points contained within it. + This method is not very sensitive to changes in this parameter + in the range of 0.2 - 0.8. Angle less than 0.2 has quickly increasing + computation time and angle greater 0.8 has quickly increasing error. + + n_jobs : int, default=None + The number of parallel jobs to run for neighbors search. This parameter + has no impact when ``metric="precomputed"`` or + (``metric="euclidean"`` and ``method="exact"``). + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + .. versionadded:: 0.22 + + Attributes + ---------- + embedding_ : array-like of shape (n_samples, n_components) + Stores the embedding vectors. + + kl_divergence_ : float + Kullback-Leibler divergence after optimization. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + learning_rate_ : float + Effective learning rate. + + .. versionadded:: 1.2 + + n_iter_ : int + Number of iterations run. + + See Also + -------- + sklearn.decomposition.PCA : Principal component analysis that is a linear + dimensionality reduction method. + sklearn.decomposition.KernelPCA : Non-linear dimensionality reduction using + kernels and PCA. + MDS : Manifold learning using multidimensional scaling. + Isomap : Manifold learning based on Isometric Mapping. + LocallyLinearEmbedding : Manifold learning using Locally Linear Embedding. + SpectralEmbedding : Spectral embedding for non-linear dimensionality. + + Notes + ----- + For an example of using :class:`~sklearn.manifold.TSNE` in combination with + :class:`~sklearn.neighbors.KNeighborsTransformer` see + :ref:`sphx_glr_auto_examples_neighbors_approximate_nearest_neighbors.py`. + + References + ---------- + + [1] van der Maaten, L.J.P.; Hinton, G.E. Visualizing High-Dimensional Data + Using t-SNE. Journal of Machine Learning Research 9:2579-2605, 2008. + + [2] van der Maaten, L.J.P. t-Distributed Stochastic Neighbor Embedding + https://lvdmaaten.github.io/tsne/ + + [3] L.J.P. van der Maaten. Accelerating t-SNE using Tree-Based Algorithms. + Journal of Machine Learning Research 15(Oct):3221-3245, 2014. + https://lvdmaaten.github.io/publications/papers/JMLR_2014.pdf + + [4] Belkina, A. C., Ciccolella, C. O., Anno, R., Halpert, R., Spidlen, J., + & Snyder-Cappione, J. E. (2019). Automated optimized parameters for + T-distributed stochastic neighbor embedding improve visualization + and analysis of large datasets. Nature Communications, 10(1), 1-12. + + [5] Kobak, D., & Berens, P. (2019). The art of using t-SNE for single-cell + transcriptomics. Nature Communications, 10(1), 1-14. 
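+
+    The pre-reduction advice above can be sketched as follows. The synthetic
+    data, the 50-component PCA and the perplexity value are illustrative
+    assumptions, not recommended settings for any particular dataset::
+
+        import numpy as np
+        from sklearn.decomposition import PCA
+        from sklearn.manifold import TSNE
+
+        rng = np.random.RandomState(0)
+        X = rng.randn(500, 1000)  # wide, dense data with many features
+        X_reduced = PCA(n_components=50, random_state=0).fit_transform(X)
+        X_embedded = TSNE(n_components=2, perplexity=30).fit_transform(X_reduced)
+        print(X_embedded.shape)  # (500, 2)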
+ + Examples + -------- + >>> import numpy as np + >>> from sklearn.manifold import TSNE + >>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]]) + >>> X_embedded = TSNE(n_components=2, learning_rate='auto', + ... init='random', perplexity=3).fit_transform(X) + >>> X_embedded.shape + (4, 2) + """ + + _parameter_constraints: dict = { + "n_components": [Interval(Integral, 1, None, closed="left")], + "perplexity": [Interval(Real, 0, None, closed="neither")], + "early_exaggeration": [Interval(Real, 1, None, closed="left")], + "learning_rate": [ + StrOptions({"auto"}), + Interval(Real, 0, None, closed="neither"), + ], + "n_iter": [Interval(Integral, 250, None, closed="left")], + "n_iter_without_progress": [Interval(Integral, -1, None, closed="left")], + "min_grad_norm": [Interval(Real, 0, None, closed="left")], + "metric": [StrOptions(set(_VALID_METRICS) | {"precomputed"}), callable], + "metric_params": [dict, None], + "init": [ + StrOptions({"pca", "random"}), + np.ndarray, + ], + "verbose": ["verbose"], + "random_state": ["random_state"], + "method": [StrOptions({"barnes_hut", "exact"})], + "angle": [Interval(Real, 0, 1, closed="both")], + "n_jobs": [None, Integral], + } + + # Control the number of exploration iterations with early_exaggeration on + _EXPLORATION_N_ITER = 250 + + # Control the number of iterations between progress checks + _N_ITER_CHECK = 50 + + def __init__( + self, + n_components=2, + *, + perplexity=30.0, + early_exaggeration=12.0, + learning_rate="auto", + n_iter=1000, + n_iter_without_progress=300, + min_grad_norm=1e-7, + metric="euclidean", + metric_params=None, + init="pca", + verbose=0, + random_state=None, + method="barnes_hut", + angle=0.5, + n_jobs=None, + ): + self.n_components = n_components + self.perplexity = perplexity + self.early_exaggeration = early_exaggeration + self.learning_rate = learning_rate + self.n_iter = n_iter + self.n_iter_without_progress = n_iter_without_progress + self.min_grad_norm = min_grad_norm + self.metric = metric + self.metric_params = metric_params + self.init = init + self.verbose = verbose + self.random_state = random_state + self.method = method + self.angle = angle + self.n_jobs = n_jobs + + def _check_params_vs_input(self, X): + if self.perplexity >= X.shape[0]: + raise ValueError("perplexity must be less than n_samples") + + def _fit(self, X, skip_num_points=0): + """Private function to fit the model using X as training data.""" + + if isinstance(self.init, str) and self.init == "pca" and issparse(X): + raise TypeError( + "PCA initialization is currently not supported " + "with the sparse input matrix. Use " + 'init="random" instead.' + ) + + if self.learning_rate == "auto": + # See issue #18018 + self.learning_rate_ = X.shape[0] / self.early_exaggeration / 4 + self.learning_rate_ = np.maximum(self.learning_rate_, 50) + else: + self.learning_rate_ = self.learning_rate + + if self.method == "barnes_hut": + X = self._validate_data( + X, + accept_sparse=["csr"], + ensure_min_samples=2, + dtype=[np.float32, np.float64], + ) + else: + X = self._validate_data( + X, accept_sparse=["csr", "csc", "coo"], dtype=[np.float32, np.float64] + ) + if self.metric == "precomputed": + if isinstance(self.init, str) and self.init == "pca": + raise ValueError( + 'The parameter init="pca" cannot be used with metric="precomputed".' + ) + if X.shape[0] != X.shape[1]: + raise ValueError("X should be a square distance matrix") + + check_non_negative( + X, + ( + "TSNE.fit(). With metric='precomputed', X " + "should contain positive distances." 
+ ), + ) + + if self.method == "exact" and issparse(X): + raise TypeError( + 'TSNE with method="exact" does not accept sparse ' + 'precomputed distance matrix. Use method="barnes_hut" ' + "or provide the dense distance matrix." + ) + + if self.method == "barnes_hut" and self.n_components > 3: + raise ValueError( + "'n_components' should be inferior to 4 for the " + "barnes_hut algorithm as it relies on " + "quad-tree or oct-tree." + ) + random_state = check_random_state(self.random_state) + + n_samples = X.shape[0] + + neighbors_nn = None + if self.method == "exact": + # Retrieve the distance matrix, either using the precomputed one or + # computing it. + if self.metric == "precomputed": + distances = X + else: + if self.verbose: + print("[t-SNE] Computing pairwise distances...") + + if self.metric == "euclidean": + # Euclidean is squared here, rather than using **= 2, + # because euclidean_distances already calculates + # squared distances, and returns np.sqrt(dist) for + # squared=False. + # Also, Euclidean is slower for n_jobs>1, so don't set here + distances = pairwise_distances(X, metric=self.metric, squared=True) + else: + metric_params_ = self.metric_params or {} + distances = pairwise_distances( + X, metric=self.metric, n_jobs=self.n_jobs, **metric_params_ + ) + + if np.any(distances < 0): + raise ValueError( + "All distances should be positive, the metric given is not correct" + ) + + if self.metric != "euclidean": + distances **= 2 + + # compute the joint probability distribution for the input space + P = _joint_probabilities(distances, self.perplexity, self.verbose) + assert np.all(np.isfinite(P)), "All probabilities should be finite" + assert np.all(P >= 0), "All probabilities should be non-negative" + assert np.all( + P <= 1 + ), "All probabilities should be less or then equal to one" + + else: + # Compute the number of nearest neighbors to find. + # LvdM uses 3 * perplexity as the number of neighbors. + # In the event that we have very small # of points + # set the neighbors to n - 1. + n_neighbors = min(n_samples - 1, int(3.0 * self.perplexity + 1)) + + if self.verbose: + print("[t-SNE] Computing {} nearest neighbors...".format(n_neighbors)) + + # Find the nearest neighbors for every point + knn = NearestNeighbors( + algorithm="auto", + n_jobs=self.n_jobs, + n_neighbors=n_neighbors, + metric=self.metric, + metric_params=self.metric_params, + ) + t0 = time() + knn.fit(X) + duration = time() - t0 + if self.verbose: + print( + "[t-SNE] Indexed {} samples in {:.3f}s...".format( + n_samples, duration + ) + ) + + t0 = time() + distances_nn = knn.kneighbors_graph(mode="distance") + duration = time() - t0 + if self.verbose: + print( + "[t-SNE] Computed neighbors for {} samples in {:.3f}s...".format( + n_samples, duration + ) + ) + + # Free the memory used by the ball_tree + del knn + + # knn return the euclidean distance but we need it squared + # to be consistent with the 'exact' method. Note that the + # the method was derived using the euclidean method as in the + # input space. Not sure of the implication of using a different + # metric. 
+ distances_nn.data **= 2 + + # compute the joint probability distribution for the input space + P = _joint_probabilities_nn(distances_nn, self.perplexity, self.verbose) + + if isinstance(self.init, np.ndarray): + X_embedded = self.init + elif self.init == "pca": + pca = PCA( + n_components=self.n_components, + svd_solver="randomized", + random_state=random_state, + ) + # Always output a numpy array, no matter what is configured globally + pca.set_output(transform="default") + X_embedded = pca.fit_transform(X).astype(np.float32, copy=False) + # PCA is rescaled so that PC1 has standard deviation 1e-4 which is + # the default value for random initialization. See issue #18018. + X_embedded = X_embedded / np.std(X_embedded[:, 0]) * 1e-4 + elif self.init == "random": + # The embedding is initialized with iid samples from Gaussians with + # standard deviation 1e-4. + X_embedded = 1e-4 * random_state.standard_normal( + size=(n_samples, self.n_components) + ).astype(np.float32) + + # Degrees of freedom of the Student's t-distribution. The suggestion + # degrees_of_freedom = n_components - 1 comes from + # "Learning a Parametric Embedding by Preserving Local Structure" + # Laurens van der Maaten, 2009. + degrees_of_freedom = max(self.n_components - 1, 1) + + return self._tsne( + P, + degrees_of_freedom, + n_samples, + X_embedded=X_embedded, + neighbors=neighbors_nn, + skip_num_points=skip_num_points, + ) + + def _tsne( + self, + P, + degrees_of_freedom, + n_samples, + X_embedded, + neighbors=None, + skip_num_points=0, + ): + """Runs t-SNE.""" + # t-SNE minimizes the Kullback-Leiber divergence of the Gaussians P + # and the Student's t-distributions Q. The optimization algorithm that + # we use is batch gradient descent with two stages: + # * initial optimization with early exaggeration and momentum at 0.5 + # * final optimization with momentum at 0.8 + params = X_embedded.ravel() + + opt_args = { + "it": 0, + "n_iter_check": self._N_ITER_CHECK, + "min_grad_norm": self.min_grad_norm, + "learning_rate": self.learning_rate_, + "verbose": self.verbose, + "kwargs": dict(skip_num_points=skip_num_points), + "args": [P, degrees_of_freedom, n_samples, self.n_components], + "n_iter_without_progress": self._EXPLORATION_N_ITER, + "n_iter": self._EXPLORATION_N_ITER, + "momentum": 0.5, + } + if self.method == "barnes_hut": + obj_func = _kl_divergence_bh + opt_args["kwargs"]["angle"] = self.angle + # Repeat verbose argument for _kl_divergence_bh + opt_args["kwargs"]["verbose"] = self.verbose + # Get the number of threads for gradient computation here to + # avoid recomputing it at each iteration. 
+ opt_args["kwargs"]["num_threads"] = _openmp_effective_n_threads() + else: + obj_func = _kl_divergence + + # Learning schedule (part 1): do 250 iteration with lower momentum but + # higher learning rate controlled via the early exaggeration parameter + P *= self.early_exaggeration + params, kl_divergence, it = _gradient_descent(obj_func, params, **opt_args) + if self.verbose: + print( + "[t-SNE] KL divergence after %d iterations with early exaggeration: %f" + % (it + 1, kl_divergence) + ) + + # Learning schedule (part 2): disable early exaggeration and finish + # optimization with a higher momentum at 0.8 + P /= self.early_exaggeration + remaining = self.n_iter - self._EXPLORATION_N_ITER + if it < self._EXPLORATION_N_ITER or remaining > 0: + opt_args["n_iter"] = self.n_iter + opt_args["it"] = it + 1 + opt_args["momentum"] = 0.8 + opt_args["n_iter_without_progress"] = self.n_iter_without_progress + params, kl_divergence, it = _gradient_descent(obj_func, params, **opt_args) + + # Save the final number of iterations + self.n_iter_ = it + + if self.verbose: + print( + "[t-SNE] KL divergence after %d iterations: %f" + % (it + 1, kl_divergence) + ) + + X_embedded = params.reshape(n_samples, self.n_components) + self.kl_divergence_ = kl_divergence + + return X_embedded + + @_fit_context( + # TSNE.metric is not validated yet + prefer_skip_nested_validation=False + ) + def fit_transform(self, X, y=None): + """Fit X into an embedded space and return that transformed output. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) or \ + (n_samples, n_samples) + If the metric is 'precomputed' X must be a square distance + matrix. Otherwise it contains a sample per row. If the method + is 'exact', X may be a sparse matrix of type 'csr', 'csc' + or 'coo'. If the method is 'barnes_hut' and the metric is + 'precomputed', X may be a precomputed sparse graph. + + y : None + Ignored. + + Returns + ------- + X_new : ndarray of shape (n_samples, n_components) + Embedding of the training data in low-dimensional space. + """ + self._check_params_vs_input(X) + embedding = self._fit(X) + self.embedding_ = embedding + return self.embedding_ + + @_fit_context( + # TSNE.metric is not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y=None): + """Fit X into an embedded space. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) or \ + (n_samples, n_samples) + If the metric is 'precomputed' X must be a square distance + matrix. Otherwise it contains a sample per row. If the method + is 'exact', X may be a sparse matrix of type 'csr', 'csc' + or 'coo'. If the method is 'barnes_hut' and the metric is + 'precomputed', X may be a precomputed sparse graph. + + y : None + Ignored. + + Returns + ------- + self : object + Fitted estimator. 
+ """ + self.fit_transform(X) + return self + + @property + def _n_features_out(self): + """Number of transformed output features.""" + return self.embedding_.shape[1] + + def _more_tags(self): + return {"pairwise": self.metric == "precomputed"} diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/_utils.cpython-310-x86_64-linux-gnu.so b/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/_utils.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..2018207539b6ff2113cf154ee3a070d84e5d7d9c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/_utils.cpython-310-x86_64-linux-gnu.so differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/tests/__init__.py b/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/tests/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1556b5bc79a5858bbe80890458c26ffe5c7a9dc3 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/tests/__pycache__/test_isomap.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/tests/__pycache__/test_isomap.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..301d790eb5c3021138923cbf85e7f239945fea39 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/tests/__pycache__/test_isomap.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/tests/__pycache__/test_locally_linear.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/tests/__pycache__/test_locally_linear.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..32fc70070ba2da44b6863430a20b303827ddef1a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/tests/__pycache__/test_locally_linear.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/tests/__pycache__/test_mds.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/tests/__pycache__/test_mds.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..911ae498cada64ebfd32e4d075ec7524b0a23e2d Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/tests/__pycache__/test_mds.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/tests/__pycache__/test_spectral_embedding.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/tests/__pycache__/test_spectral_embedding.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..749040ec42c51baadc6c8142a0c498a26c4eed81 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/tests/__pycache__/test_spectral_embedding.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/tests/__pycache__/test_t_sne.cpython-310.pyc 
b/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/tests/__pycache__/test_t_sne.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5b91f2e3ed4929e9395c17a2dad2339598ef5e59 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/tests/__pycache__/test_t_sne.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/tests/test_isomap.py b/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/tests/test_isomap.py new file mode 100644 index 0000000000000000000000000000000000000000..e38b92442e58d9881726bdee85073ad38a7c95e1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/tests/test_isomap.py @@ -0,0 +1,348 @@ +import math +from itertools import product + +import numpy as np +import pytest +from scipy.sparse import rand as sparse_rand + +from sklearn import clone, datasets, manifold, neighbors, pipeline, preprocessing +from sklearn.datasets import make_blobs +from sklearn.metrics.pairwise import pairwise_distances +from sklearn.utils._testing import ( + assert_allclose, + assert_allclose_dense_sparse, + assert_array_equal, +) +from sklearn.utils.fixes import CSR_CONTAINERS + +eigen_solvers = ["auto", "dense", "arpack"] +path_methods = ["auto", "FW", "D"] + + +def create_sample_data(dtype, n_pts=25, add_noise=False): + # grid of equidistant points in 2D, n_components = n_dim + n_per_side = int(math.sqrt(n_pts)) + X = np.array(list(product(range(n_per_side), repeat=2))).astype(dtype, copy=False) + if add_noise: + # add noise in a third dimension + rng = np.random.RandomState(0) + noise = 0.1 * rng.randn(n_pts, 1).astype(dtype, copy=False) + X = np.concatenate((X, noise), 1) + return X + + +@pytest.mark.parametrize("n_neighbors, radius", [(24, None), (None, np.inf)]) +@pytest.mark.parametrize("eigen_solver", eigen_solvers) +@pytest.mark.parametrize("path_method", path_methods) +def test_isomap_simple_grid( + global_dtype, n_neighbors, radius, eigen_solver, path_method +): + # Isomap should preserve distances when all neighbors are used + n_pts = 25 + X = create_sample_data(global_dtype, n_pts=n_pts, add_noise=False) + + # distances from each point to all others + if n_neighbors is not None: + G = neighbors.kneighbors_graph(X, n_neighbors, mode="distance") + else: + G = neighbors.radius_neighbors_graph(X, radius, mode="distance") + + clf = manifold.Isomap( + n_neighbors=n_neighbors, + radius=radius, + n_components=2, + eigen_solver=eigen_solver, + path_method=path_method, + ) + clf.fit(X) + + if n_neighbors is not None: + G_iso = neighbors.kneighbors_graph(clf.embedding_, n_neighbors, mode="distance") + else: + G_iso = neighbors.radius_neighbors_graph( + clf.embedding_, radius, mode="distance" + ) + atol = 1e-5 if global_dtype == np.float32 else 0 + assert_allclose_dense_sparse(G, G_iso, atol=atol) + + +@pytest.mark.parametrize("n_neighbors, radius", [(24, None), (None, np.inf)]) +@pytest.mark.parametrize("eigen_solver", eigen_solvers) +@pytest.mark.parametrize("path_method", path_methods) +def test_isomap_reconstruction_error( + global_dtype, n_neighbors, radius, eigen_solver, path_method +): + if global_dtype is np.float32: + pytest.skip( + "Skipping test due to numerical instabilities on float32 data" + "from KernelCenterer used in the reconstruction_error method" + ) + + # Same setup as in test_isomap_simple_grid, with an added dimension + n_pts = 25 + X = create_sample_data(global_dtype, n_pts=n_pts, add_noise=True) + + # compute input kernel + if 
n_neighbors is not None: + G = neighbors.kneighbors_graph(X, n_neighbors, mode="distance").toarray() + else: + G = neighbors.radius_neighbors_graph(X, radius, mode="distance").toarray() + centerer = preprocessing.KernelCenterer() + K = centerer.fit_transform(-0.5 * G**2) + + clf = manifold.Isomap( + n_neighbors=n_neighbors, + radius=radius, + n_components=2, + eigen_solver=eigen_solver, + path_method=path_method, + ) + clf.fit(X) + + # compute output kernel + if n_neighbors is not None: + G_iso = neighbors.kneighbors_graph(clf.embedding_, n_neighbors, mode="distance") + else: + G_iso = neighbors.radius_neighbors_graph( + clf.embedding_, radius, mode="distance" + ) + G_iso = G_iso.toarray() + K_iso = centerer.fit_transform(-0.5 * G_iso**2) + + # make sure error agrees + reconstruction_error = np.linalg.norm(K - K_iso) / n_pts + atol = 1e-5 if global_dtype == np.float32 else 0 + assert_allclose(reconstruction_error, clf.reconstruction_error(), atol=atol) + + +@pytest.mark.parametrize("n_neighbors, radius", [(2, None), (None, 0.5)]) +def test_transform(global_dtype, n_neighbors, radius): + n_samples = 200 + n_components = 10 + noise_scale = 0.01 + + # Create S-curve dataset + X, y = datasets.make_s_curve(n_samples, random_state=0) + + X = X.astype(global_dtype, copy=False) + + # Compute isomap embedding + iso = manifold.Isomap( + n_components=n_components, n_neighbors=n_neighbors, radius=radius + ) + X_iso = iso.fit_transform(X) + + # Re-embed a noisy version of the points + rng = np.random.RandomState(0) + noise = noise_scale * rng.randn(*X.shape) + X_iso2 = iso.transform(X + noise) + + # Make sure the rms error on re-embedding is comparable to noise_scale + assert np.sqrt(np.mean((X_iso - X_iso2) ** 2)) < 2 * noise_scale + + +@pytest.mark.parametrize("n_neighbors, radius", [(2, None), (None, 10.0)]) +def test_pipeline(n_neighbors, radius, global_dtype): + # check that Isomap works fine as a transformer in a Pipeline + # only checks that no error is raised. 
+ # TODO check that it actually does something useful + X, y = datasets.make_blobs(random_state=0) + X = X.astype(global_dtype, copy=False) + clf = pipeline.Pipeline( + [ + ("isomap", manifold.Isomap(n_neighbors=n_neighbors, radius=radius)), + ("clf", neighbors.KNeighborsClassifier()), + ] + ) + clf.fit(X, y) + assert 0.9 < clf.score(X, y) + + +def test_pipeline_with_nearest_neighbors_transformer(global_dtype): + # Test chaining NearestNeighborsTransformer and Isomap with + # neighbors_algorithm='precomputed' + algorithm = "auto" + n_neighbors = 10 + + X, _ = datasets.make_blobs(random_state=0) + X2, _ = datasets.make_blobs(random_state=1) + + X = X.astype(global_dtype, copy=False) + X2 = X2.astype(global_dtype, copy=False) + + # compare the chained version and the compact version + est_chain = pipeline.make_pipeline( + neighbors.KNeighborsTransformer( + n_neighbors=n_neighbors, algorithm=algorithm, mode="distance" + ), + manifold.Isomap(n_neighbors=n_neighbors, metric="precomputed"), + ) + est_compact = manifold.Isomap( + n_neighbors=n_neighbors, neighbors_algorithm=algorithm + ) + + Xt_chain = est_chain.fit_transform(X) + Xt_compact = est_compact.fit_transform(X) + assert_allclose(Xt_chain, Xt_compact) + + Xt_chain = est_chain.transform(X2) + Xt_compact = est_compact.transform(X2) + assert_allclose(Xt_chain, Xt_compact) + + +@pytest.mark.parametrize( + "metric, p, is_euclidean", + [ + ("euclidean", 2, True), + ("manhattan", 1, False), + ("minkowski", 1, False), + ("minkowski", 2, True), + (lambda x1, x2: np.sqrt(np.sum(x1**2 + x2**2)), 2, False), + ], +) +def test_different_metric(global_dtype, metric, p, is_euclidean): + # Isomap must work on various metric parameters work correctly + # and must default to euclidean. + X, _ = datasets.make_blobs(random_state=0) + X = X.astype(global_dtype, copy=False) + + reference = manifold.Isomap().fit_transform(X) + embedding = manifold.Isomap(metric=metric, p=p).fit_transform(X) + + if is_euclidean: + assert_allclose(embedding, reference) + else: + with pytest.raises(AssertionError, match="Not equal to tolerance"): + assert_allclose(embedding, reference) + + +def test_isomap_clone_bug(): + # regression test for bug reported in #6062 + model = manifold.Isomap() + for n_neighbors in [10, 15, 20]: + model.set_params(n_neighbors=n_neighbors) + model.fit(np.random.rand(50, 2)) + assert model.nbrs_.n_neighbors == n_neighbors + + +@pytest.mark.parametrize("eigen_solver", eigen_solvers) +@pytest.mark.parametrize("path_method", path_methods) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_sparse_input( + global_dtype, eigen_solver, path_method, global_random_seed, csr_container +): + # TODO: compare results on dense and sparse data as proposed in: + # https://github.com/scikit-learn/scikit-learn/pull/23585#discussion_r968388186 + X = csr_container( + sparse_rand( + 100, + 3, + density=0.1, + format="csr", + dtype=global_dtype, + random_state=global_random_seed, + ) + ) + + iso_dense = manifold.Isomap( + n_components=2, + eigen_solver=eigen_solver, + path_method=path_method, + n_neighbors=8, + ) + iso_sparse = clone(iso_dense) + + X_trans_dense = iso_dense.fit_transform(X.toarray()) + X_trans_sparse = iso_sparse.fit_transform(X) + + assert_allclose(X_trans_sparse, X_trans_dense, rtol=1e-4, atol=1e-4) + + +def test_isomap_fit_precomputed_radius_graph(global_dtype): + # Isomap.fit_transform must yield similar result when using + # a precomputed distance matrix. 
+ + X, y = datasets.make_s_curve(200, random_state=0) + X = X.astype(global_dtype, copy=False) + radius = 10 + + g = neighbors.radius_neighbors_graph(X, radius=radius, mode="distance") + isomap = manifold.Isomap(n_neighbors=None, radius=radius, metric="precomputed") + isomap.fit(g) + precomputed_result = isomap.embedding_ + + isomap = manifold.Isomap(n_neighbors=None, radius=radius, metric="minkowski") + result = isomap.fit_transform(X) + atol = 1e-5 if global_dtype == np.float32 else 0 + assert_allclose(precomputed_result, result, atol=atol) + + +def test_isomap_fitted_attributes_dtype(global_dtype): + """Check that the fitted attributes are stored accordingly to the + data type of X.""" + iso = manifold.Isomap(n_neighbors=2) + + X = np.array([[1, 2], [3, 4], [5, 6]], dtype=global_dtype) + + iso.fit(X) + + assert iso.dist_matrix_.dtype == global_dtype + assert iso.embedding_.dtype == global_dtype + + +def test_isomap_dtype_equivalence(): + """Check the equivalence of the results with 32 and 64 bits input.""" + iso_32 = manifold.Isomap(n_neighbors=2) + X_32 = np.array([[1, 2], [3, 4], [5, 6]], dtype=np.float32) + iso_32.fit(X_32) + + iso_64 = manifold.Isomap(n_neighbors=2) + X_64 = np.array([[1, 2], [3, 4], [5, 6]], dtype=np.float64) + iso_64.fit(X_64) + + assert_allclose(iso_32.dist_matrix_, iso_64.dist_matrix_) + + +def test_isomap_raise_error_when_neighbor_and_radius_both_set(): + # Isomap.fit_transform must raise a ValueError if + # radius and n_neighbors are provided. + + X, _ = datasets.load_digits(return_X_y=True) + isomap = manifold.Isomap(n_neighbors=3, radius=5.5) + msg = "Both n_neighbors and radius are provided" + with pytest.raises(ValueError, match=msg): + isomap.fit_transform(X) + + +def test_multiple_connected_components(): + # Test that a warning is raised when the graph has multiple components + X = np.array([0, 1, 2, 5, 6, 7])[:, None] + with pytest.warns(UserWarning, match="number of connected components"): + manifold.Isomap(n_neighbors=2).fit(X) + + +def test_multiple_connected_components_metric_precomputed(global_dtype): + # Test that an error is raised when the graph has multiple components + # and when X is a precomputed neighbors graph. 
+ X = np.array([0, 1, 2, 5, 6, 7])[:, None].astype(global_dtype, copy=False) + + # works with a precomputed distance matrix (dense) + X_distances = pairwise_distances(X) + with pytest.warns(UserWarning, match="number of connected components"): + manifold.Isomap(n_neighbors=1, metric="precomputed").fit(X_distances) + + # does not work with a precomputed neighbors graph (sparse) + X_graph = neighbors.kneighbors_graph(X, n_neighbors=2, mode="distance") + with pytest.raises(RuntimeError, match="number of connected components"): + manifold.Isomap(n_neighbors=1, metric="precomputed").fit(X_graph) + + +def test_get_feature_names_out(): + """Check get_feature_names_out for Isomap.""" + X, y = make_blobs(random_state=0, n_features=4) + n_components = 2 + + iso = manifold.Isomap(n_components=n_components) + iso.fit_transform(X) + names = iso.get_feature_names_out() + assert_array_equal([f"isomap{i}" for i in range(n_components)], names) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/tests/test_locally_linear.py b/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/tests/test_locally_linear.py new file mode 100644 index 0000000000000000000000000000000000000000..835aa20fd1d32ace684eea9afd451bcdcf695f79 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/tests/test_locally_linear.py @@ -0,0 +1,171 @@ +from itertools import product + +import numpy as np +import pytest +from scipy import linalg + +from sklearn import manifold, neighbors +from sklearn.datasets import make_blobs +from sklearn.manifold._locally_linear import barycenter_kneighbors_graph +from sklearn.utils._testing import ( + assert_allclose, + assert_array_equal, + ignore_warnings, +) + +eigen_solvers = ["dense", "arpack"] + + +# ---------------------------------------------------------------------- +# Test utility routines +def test_barycenter_kneighbors_graph(global_dtype): + X = np.array([[0, 1], [1.01, 1.0], [2, 0]], dtype=global_dtype) + + graph = barycenter_kneighbors_graph(X, 1) + expected_graph = np.array( + [[0.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]], dtype=global_dtype + ) + + assert graph.dtype == global_dtype + + assert_allclose(graph.toarray(), expected_graph) + + graph = barycenter_kneighbors_graph(X, 2) + # check that columns sum to one + assert_allclose(np.sum(graph.toarray(), axis=1), np.ones(3)) + pred = np.dot(graph.toarray(), X) + assert linalg.norm(pred - X) / X.shape[0] < 1 + + +# ---------------------------------------------------------------------- +# Test LLE by computing the reconstruction error on some manifolds. + + +def test_lle_simple_grid(global_dtype): + # note: ARPACK is numerically unstable, so this test will fail for + # some random seeds. We choose 42 because the tests pass. + # for arm64 platforms 2 makes the test fail. + # TODO: rewrite this test to make less sensitive to the random seed, + # irrespective of the platform. 
+ rng = np.random.RandomState(42) + + # grid of equidistant points in 2D, n_components = n_dim + X = np.array(list(product(range(5), repeat=2))) + X = X + 1e-10 * rng.uniform(size=X.shape) + X = X.astype(global_dtype, copy=False) + + n_components = 2 + clf = manifold.LocallyLinearEmbedding( + n_neighbors=5, n_components=n_components, random_state=rng + ) + tol = 0.1 + + N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray() + reconstruction_error = linalg.norm(np.dot(N, X) - X, "fro") + assert reconstruction_error < tol + + for solver in eigen_solvers: + clf.set_params(eigen_solver=solver) + clf.fit(X) + assert clf.embedding_.shape[1] == n_components + reconstruction_error = ( + linalg.norm(np.dot(N, clf.embedding_) - clf.embedding_, "fro") ** 2 + ) + + assert reconstruction_error < tol + assert_allclose(clf.reconstruction_error_, reconstruction_error, atol=1e-1) + + # re-embed a noisy version of X using the transform method + noise = rng.randn(*X.shape).astype(global_dtype, copy=False) / 100 + X_reembedded = clf.transform(X + noise) + assert linalg.norm(X_reembedded - clf.embedding_) < tol + + +@pytest.mark.parametrize("method", ["standard", "hessian", "modified", "ltsa"]) +@pytest.mark.parametrize("solver", eigen_solvers) +def test_lle_manifold(global_dtype, method, solver): + rng = np.random.RandomState(0) + # similar test on a slightly more complex manifold + X = np.array(list(product(np.arange(18), repeat=2))) + X = np.c_[X, X[:, 0] ** 2 / 18] + X = X + 1e-10 * rng.uniform(size=X.shape) + X = X.astype(global_dtype, copy=False) + n_components = 2 + + clf = manifold.LocallyLinearEmbedding( + n_neighbors=6, n_components=n_components, method=method, random_state=0 + ) + tol = 1.5 if method == "standard" else 3 + + N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray() + reconstruction_error = linalg.norm(np.dot(N, X) - X) + assert reconstruction_error < tol + + clf.set_params(eigen_solver=solver) + clf.fit(X) + assert clf.embedding_.shape[1] == n_components + reconstruction_error = ( + linalg.norm(np.dot(N, clf.embedding_) - clf.embedding_, "fro") ** 2 + ) + details = "solver: %s, method: %s" % (solver, method) + assert reconstruction_error < tol, details + assert ( + np.abs(clf.reconstruction_error_ - reconstruction_error) + < tol * reconstruction_error + ), details + + +def test_pipeline(): + # check that LocallyLinearEmbedding works fine as a Pipeline + # only checks that no error is raised. 
+ # TODO check that it actually does something useful + from sklearn import datasets, pipeline + + X, y = datasets.make_blobs(random_state=0) + clf = pipeline.Pipeline( + [ + ("filter", manifold.LocallyLinearEmbedding(random_state=0)), + ("clf", neighbors.KNeighborsClassifier()), + ] + ) + clf.fit(X, y) + assert 0.9 < clf.score(X, y) + + +# Test the error raised when the weight matrix is singular +def test_singular_matrix(): + M = np.ones((200, 3)) + f = ignore_warnings + with pytest.raises(ValueError, match="Error in determining null-space with ARPACK"): + f( + manifold.locally_linear_embedding( + M, + n_neighbors=2, + n_components=1, + method="standard", + eigen_solver="arpack", + ) + ) + + +# regression test for #6033 +def test_integer_input(): + rand = np.random.RandomState(0) + X = rand.randint(0, 100, size=(20, 3)) + + for method in ["standard", "hessian", "modified", "ltsa"]: + clf = manifold.LocallyLinearEmbedding(method=method, n_neighbors=10) + clf.fit(X) # this previously raised a TypeError + + +def test_get_feature_names_out(): + """Check get_feature_names_out for LocallyLinearEmbedding.""" + X, y = make_blobs(random_state=0, n_features=4) + n_components = 2 + + iso = manifold.LocallyLinearEmbedding(n_components=n_components) + iso.fit(X) + names = iso.get_feature_names_out() + assert_array_equal( + [f"locallylinearembedding{i}" for i in range(n_components)], names + ) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/tests/test_mds.py b/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/tests/test_mds.py new file mode 100644 index 0000000000000000000000000000000000000000..2d286ef0942bfe65802dad803da5c2eee8c0e89e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/tests/test_mds.py @@ -0,0 +1,87 @@ +from unittest.mock import Mock + +import numpy as np +import pytest +from numpy.testing import assert_allclose, assert_array_almost_equal + +from sklearn.manifold import _mds as mds +from sklearn.metrics import euclidean_distances + + +def test_smacof(): + # test metric smacof using the data of "Modern Multidimensional Scaling", + # Borg & Groenen, p 154 + sim = np.array([[0, 5, 3, 4], [5, 0, 2, 2], [3, 2, 0, 1], [4, 2, 1, 0]]) + Z = np.array([[-0.266, -0.539], [0.451, 0.252], [0.016, -0.238], [-0.200, 0.524]]) + X, _ = mds.smacof(sim, init=Z, n_components=2, max_iter=1, n_init=1) + X_true = np.array( + [[-1.415, -2.471], [1.633, 1.107], [0.249, -0.067], [-0.468, 1.431]] + ) + assert_array_almost_equal(X, X_true, decimal=3) + + +def test_smacof_error(): + # Not symmetric similarity matrix: + sim = np.array([[0, 5, 9, 4], [5, 0, 2, 2], [3, 2, 0, 1], [4, 2, 1, 0]]) + + with pytest.raises(ValueError): + mds.smacof(sim) + + # Not squared similarity matrix: + sim = np.array([[0, 5, 9, 4], [5, 0, 2, 2], [4, 2, 1, 0]]) + + with pytest.raises(ValueError): + mds.smacof(sim) + + # init not None and not correct format: + sim = np.array([[0, 5, 3, 4], [5, 0, 2, 2], [3, 2, 0, 1], [4, 2, 1, 0]]) + + Z = np.array([[-0.266, -0.539], [0.016, -0.238], [-0.200, 0.524]]) + with pytest.raises(ValueError): + mds.smacof(sim, init=Z, n_init=1) + + +def test_MDS(): + sim = np.array([[0, 5, 3, 4], [5, 0, 2, 2], [3, 2, 0, 1], [4, 2, 1, 0]]) + mds_clf = mds.MDS(metric=False, n_jobs=3, dissimilarity="precomputed") + mds_clf.fit(sim) + + +@pytest.mark.parametrize("k", [0.5, 1.5, 2]) +def test_normed_stress(k): + """Test that non-metric MDS normalized stress is scale-invariant.""" + sim = np.array([[0, 5, 3, 4], [5, 0, 2, 2], [3, 2, 0, 1], [4, 2, 1, 0]]) + + 
X1, stress1 = mds.smacof(sim, metric=False, max_iter=5, random_state=0) + X2, stress2 = mds.smacof(k * sim, metric=False, max_iter=5, random_state=0) + + assert_allclose(stress1, stress2, rtol=1e-5) + assert_allclose(X1, X2, rtol=1e-5) + + +def test_normalize_metric_warning(): + """ + Test that a UserWarning is emitted when using normalized stress with + metric-MDS. + """ + msg = "Normalized stress is not supported" + sim = np.array([[0, 5, 3, 4], [5, 0, 2, 2], [3, 2, 0, 1], [4, 2, 1, 0]]) + with pytest.raises(ValueError, match=msg): + mds.smacof(sim, metric=True, normalized_stress=True) + + +@pytest.mark.parametrize("metric", [True, False]) +def test_normalized_stress_auto(metric, monkeypatch): + rng = np.random.RandomState(0) + X = rng.randn(4, 3) + dist = euclidean_distances(X) + + mock = Mock(side_effect=mds._smacof_single) + monkeypatch.setattr("sklearn.manifold._mds._smacof_single", mock) + + est = mds.MDS(metric=metric, normalized_stress="auto", random_state=rng) + est.fit_transform(X) + assert mock.call_args[1]["normalized_stress"] != metric + + mds.smacof(dist, metric=metric, normalized_stress="auto", random_state=rng) + assert mock.call_args[1]["normalized_stress"] != metric diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/tests/test_spectral_embedding.py b/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/tests/test_spectral_embedding.py new file mode 100644 index 0000000000000000000000000000000000000000..14bb13c0800992e6520dc00f05d7795021887849 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/tests/test_spectral_embedding.py @@ -0,0 +1,541 @@ +from unittest.mock import Mock + +import numpy as np +import pytest +from scipy import sparse +from scipy.linalg import eigh +from scipy.sparse.linalg import eigsh, lobpcg + +from sklearn.cluster import KMeans +from sklearn.datasets import make_blobs +from sklearn.manifold import SpectralEmbedding, _spectral_embedding, spectral_embedding +from sklearn.manifold._spectral_embedding import ( + _graph_connected_component, + _graph_is_connected, +) +from sklearn.metrics import normalized_mutual_info_score, pairwise_distances +from sklearn.metrics.pairwise import rbf_kernel +from sklearn.neighbors import NearestNeighbors +from sklearn.utils._testing import assert_array_almost_equal, assert_array_equal +from sklearn.utils.extmath import _deterministic_vector_sign_flip +from sklearn.utils.fixes import ( + COO_CONTAINERS, + CSC_CONTAINERS, + CSR_CONTAINERS, + parse_version, + sp_version, +) +from sklearn.utils.fixes import laplacian as csgraph_laplacian + +try: + from pyamg import smoothed_aggregation_solver # noqa + + pyamg_available = True +except ImportError: + pyamg_available = False +skip_if_no_pyamg = pytest.mark.skipif( + not pyamg_available, reason="PyAMG is required for the tests in this function." 
+) + +# non centered, sparse centers to check the +centers = np.array( + [ + [0.0, 5.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 4.0, 0.0, 0.0], + [1.0, 0.0, 0.0, 5.0, 1.0], + ] +) +n_samples = 1000 +n_clusters, n_features = centers.shape +S, true_labels = make_blobs( + n_samples=n_samples, centers=centers, cluster_std=1.0, random_state=42 +) + + +def _assert_equal_with_sign_flipping(A, B, tol=0.0): + """Check array A and B are equal with possible sign flipping on + each columns""" + tol_squared = tol**2 + for A_col, B_col in zip(A.T, B.T): + assert ( + np.max((A_col - B_col) ** 2) <= tol_squared + or np.max((A_col + B_col) ** 2) <= tol_squared + ) + + +@pytest.mark.parametrize("coo_container", COO_CONTAINERS) +def test_sparse_graph_connected_component(coo_container): + rng = np.random.RandomState(42) + n_samples = 300 + boundaries = [0, 42, 121, 200, n_samples] + p = rng.permutation(n_samples) + connections = [] + + for start, stop in zip(boundaries[:-1], boundaries[1:]): + group = p[start:stop] + # Connect all elements within the group at least once via an + # arbitrary path that spans the group. + for i in range(len(group) - 1): + connections.append((group[i], group[i + 1])) + + # Add some more random connections within the group + min_idx, max_idx = 0, len(group) - 1 + n_random_connections = 1000 + source = rng.randint(min_idx, max_idx, size=n_random_connections) + target = rng.randint(min_idx, max_idx, size=n_random_connections) + connections.extend(zip(group[source], group[target])) + + # Build a symmetric affinity matrix + row_idx, column_idx = tuple(np.array(connections).T) + data = rng.uniform(0.1, 42, size=len(connections)) + affinity = coo_container((data, (row_idx, column_idx))) + affinity = 0.5 * (affinity + affinity.T) + + for start, stop in zip(boundaries[:-1], boundaries[1:]): + component_1 = _graph_connected_component(affinity, p[start]) + component_size = stop - start + assert component_1.sum() == component_size + + # We should retrieve the same component mask by starting by both ends + # of the group + component_2 = _graph_connected_component(affinity, p[stop - 1]) + assert component_2.sum() == component_size + assert_array_equal(component_1, component_2) + + +# TODO: investigate why this test is seed-sensitive on 32-bit Python +# runtimes. Is this revealing a numerical stability problem ? Or is it +# expected from the test numerical design ? In the latter case the test +# should be made less seed-sensitive instead. 
+@pytest.mark.parametrize( + "eigen_solver", + [ + "arpack", + "lobpcg", + pytest.param("amg", marks=skip_if_no_pyamg), + ], +) +@pytest.mark.parametrize("dtype", [np.float32, np.float64]) +def test_spectral_embedding_two_components(eigen_solver, dtype, seed=0): + # Test spectral embedding with two components + random_state = np.random.RandomState(seed) + n_sample = 100 + affinity = np.zeros(shape=[n_sample * 2, n_sample * 2]) + # first component + affinity[0:n_sample, 0:n_sample] = ( + np.abs(random_state.randn(n_sample, n_sample)) + 2 + ) + # second component + affinity[n_sample::, n_sample::] = ( + np.abs(random_state.randn(n_sample, n_sample)) + 2 + ) + + # Test of internal _graph_connected_component before connection + component = _graph_connected_component(affinity, 0) + assert component[:n_sample].all() + assert not component[n_sample:].any() + component = _graph_connected_component(affinity, -1) + assert not component[:n_sample].any() + assert component[n_sample:].all() + + # connection + affinity[0, n_sample + 1] = 1 + affinity[n_sample + 1, 0] = 1 + affinity.flat[:: 2 * n_sample + 1] = 0 + affinity = 0.5 * (affinity + affinity.T) + + true_label = np.zeros(shape=2 * n_sample) + true_label[0:n_sample] = 1 + + se_precomp = SpectralEmbedding( + n_components=1, + affinity="precomputed", + random_state=np.random.RandomState(seed), + eigen_solver=eigen_solver, + ) + + embedded_coordinate = se_precomp.fit_transform(affinity.astype(dtype)) + # thresholding on the first components using 0. + label_ = np.array(embedded_coordinate.ravel() < 0, dtype=np.int64) + assert normalized_mutual_info_score(true_label, label_) == pytest.approx(1.0) + + +@pytest.mark.parametrize("sparse_container", [None, *CSR_CONTAINERS]) +@pytest.mark.parametrize( + "eigen_solver", + [ + "arpack", + "lobpcg", + pytest.param("amg", marks=skip_if_no_pyamg), + ], +) +@pytest.mark.parametrize("dtype", (np.float32, np.float64)) +def test_spectral_embedding_precomputed_affinity( + sparse_container, eigen_solver, dtype, seed=36 +): + # Test spectral embedding with precomputed kernel + gamma = 1.0 + X = S if sparse_container is None else sparse_container(S) + + se_precomp = SpectralEmbedding( + n_components=2, + affinity="precomputed", + random_state=np.random.RandomState(seed), + eigen_solver=eigen_solver, + ) + se_rbf = SpectralEmbedding( + n_components=2, + affinity="rbf", + gamma=gamma, + random_state=np.random.RandomState(seed), + eigen_solver=eigen_solver, + ) + embed_precomp = se_precomp.fit_transform(rbf_kernel(X.astype(dtype), gamma=gamma)) + embed_rbf = se_rbf.fit_transform(X.astype(dtype)) + assert_array_almost_equal(se_precomp.affinity_matrix_, se_rbf.affinity_matrix_) + _assert_equal_with_sign_flipping(embed_precomp, embed_rbf, 0.05) + + +def test_precomputed_nearest_neighbors_filtering(): + # Test precomputed graph filtering when containing too many neighbors + n_neighbors = 2 + results = [] + for additional_neighbors in [0, 10]: + nn = NearestNeighbors(n_neighbors=n_neighbors + additional_neighbors).fit(S) + graph = nn.kneighbors_graph(S, mode="connectivity") + embedding = ( + SpectralEmbedding( + random_state=0, + n_components=2, + affinity="precomputed_nearest_neighbors", + n_neighbors=n_neighbors, + ) + .fit(graph) + .embedding_ + ) + results.append(embedding) + + assert_array_equal(results[0], results[1]) + + +@pytest.mark.parametrize("sparse_container", [None, *CSR_CONTAINERS]) +def test_spectral_embedding_callable_affinity(sparse_container, seed=36): + # Test spectral embedding with callable affinity + 
gamma = 0.9 + kern = rbf_kernel(S, gamma=gamma) + X = S if sparse_container is None else sparse_container(S) + + se_callable = SpectralEmbedding( + n_components=2, + affinity=(lambda x: rbf_kernel(x, gamma=gamma)), + gamma=gamma, + random_state=np.random.RandomState(seed), + ) + se_rbf = SpectralEmbedding( + n_components=2, + affinity="rbf", + gamma=gamma, + random_state=np.random.RandomState(seed), + ) + embed_rbf = se_rbf.fit_transform(X) + embed_callable = se_callable.fit_transform(X) + assert_array_almost_equal(se_callable.affinity_matrix_, se_rbf.affinity_matrix_) + assert_array_almost_equal(kern, se_rbf.affinity_matrix_) + _assert_equal_with_sign_flipping(embed_rbf, embed_callable, 0.05) + + +# TODO: Remove when pyamg does replaces sp.rand call with np.random.rand +# https://github.com/scikit-learn/scikit-learn/issues/15913 +@pytest.mark.filterwarnings( + "ignore:scipy.rand is deprecated:DeprecationWarning:pyamg.*" +) +# TODO: Remove when pyamg removes the use of np.float +@pytest.mark.filterwarnings( + "ignore:`np.float` is a deprecated alias:DeprecationWarning:pyamg.*" +) +# TODO: Remove when pyamg removes the use of pinv2 +@pytest.mark.filterwarnings( + "ignore:scipy.linalg.pinv2 is deprecated:DeprecationWarning:pyamg.*" +) +@pytest.mark.filterwarnings( + "ignore:np.find_common_type is deprecated:DeprecationWarning:pyamg.*" +) +@pytest.mark.skipif( + not pyamg_available, reason="PyAMG is required for the tests in this function." +) +@pytest.mark.parametrize("dtype", (np.float32, np.float64)) +@pytest.mark.parametrize("coo_container", COO_CONTAINERS) +def test_spectral_embedding_amg_solver(dtype, coo_container, seed=36): + se_amg = SpectralEmbedding( + n_components=2, + affinity="nearest_neighbors", + eigen_solver="amg", + n_neighbors=5, + random_state=np.random.RandomState(seed), + ) + se_arpack = SpectralEmbedding( + n_components=2, + affinity="nearest_neighbors", + eigen_solver="arpack", + n_neighbors=5, + random_state=np.random.RandomState(seed), + ) + embed_amg = se_amg.fit_transform(S.astype(dtype)) + embed_arpack = se_arpack.fit_transform(S.astype(dtype)) + _assert_equal_with_sign_flipping(embed_amg, embed_arpack, 1e-5) + + # same with special case in which amg is not actually used + # regression test for #10715 + # affinity between nodes + row = np.array([0, 0, 1, 2, 3, 3, 4], dtype=np.int32) + col = np.array([1, 2, 2, 3, 4, 5, 5], dtype=np.int32) + val = np.array([100, 100, 100, 1, 100, 100, 100], dtype=np.int64) + + affinity = coo_container( + (np.hstack([val, val]), (np.hstack([row, col]), np.hstack([col, row]))), + shape=(6, 6), + ) + se_amg.affinity = "precomputed" + se_arpack.affinity = "precomputed" + embed_amg = se_amg.fit_transform(affinity.astype(dtype)) + embed_arpack = se_arpack.fit_transform(affinity.astype(dtype)) + _assert_equal_with_sign_flipping(embed_amg, embed_arpack, 1e-5) + + # Check that passing a sparse matrix with `np.int64` indices dtype raises an error + # or is successful based on the version of SciPy which is installed. 
+ # Use a CSR matrix to avoid any conversion during the validation + affinity = affinity.tocsr() + affinity.indptr = affinity.indptr.astype(np.int64) + affinity.indices = affinity.indices.astype(np.int64) + + # PR: https://github.com/scipy/scipy/pull/18913 + # First integration in 1.11.3: https://github.com/scipy/scipy/pull/19279 + scipy_graph_traversal_supports_int64_index = sp_version >= parse_version("1.11.3") + if scipy_graph_traversal_supports_int64_index: + se_amg.fit_transform(affinity) + else: + err_msg = "Only sparse matrices with 32-bit integer indices are accepted" + with pytest.raises(ValueError, match=err_msg): + se_amg.fit_transform(affinity) + + +# TODO: Remove filterwarnings when pyamg does replaces sp.rand call with +# np.random.rand: +# https://github.com/scikit-learn/scikit-learn/issues/15913 +@pytest.mark.filterwarnings( + "ignore:scipy.rand is deprecated:DeprecationWarning:pyamg.*" +) +# TODO: Remove when pyamg removes the use of np.float +@pytest.mark.filterwarnings( + "ignore:`np.float` is a deprecated alias:DeprecationWarning:pyamg.*" +) +# TODO: Remove when pyamg removes the use of pinv2 +@pytest.mark.filterwarnings( + "ignore:scipy.linalg.pinv2 is deprecated:DeprecationWarning:pyamg.*" +) +@pytest.mark.skipif( + not pyamg_available, reason="PyAMG is required for the tests in this function." +) +# TODO: Remove when pyamg removes the use of np.find_common_type +@pytest.mark.filterwarnings( + "ignore:np.find_common_type is deprecated:DeprecationWarning:pyamg.*" +) +@pytest.mark.parametrize("dtype", (np.float32, np.float64)) +def test_spectral_embedding_amg_solver_failure(dtype, seed=36): + # Non-regression test for amg solver failure (issue #13393 on github) + num_nodes = 100 + X = sparse.rand(num_nodes, num_nodes, density=0.1, random_state=seed) + X = X.astype(dtype) + upper = sparse.triu(X) - sparse.diags(X.diagonal()) + sym_matrix = upper + upper.T + embedding = spectral_embedding( + sym_matrix, n_components=10, eigen_solver="amg", random_state=0 + ) + + # Check that the learned embedding is stable w.r.t. 
random solver init: + for i in range(3): + new_embedding = spectral_embedding( + sym_matrix, n_components=10, eigen_solver="amg", random_state=i + 1 + ) + _assert_equal_with_sign_flipping(embedding, new_embedding, tol=0.05) + + +@pytest.mark.filterwarnings("ignore:the behavior of nmi will change in version 0.22") +def test_pipeline_spectral_clustering(seed=36): + # Test using pipeline to do spectral clustering + random_state = np.random.RandomState(seed) + se_rbf = SpectralEmbedding( + n_components=n_clusters, affinity="rbf", random_state=random_state + ) + se_knn = SpectralEmbedding( + n_components=n_clusters, + affinity="nearest_neighbors", + n_neighbors=5, + random_state=random_state, + ) + for se in [se_rbf, se_knn]: + km = KMeans(n_clusters=n_clusters, random_state=random_state, n_init=10) + km.fit(se.fit_transform(S)) + assert_array_almost_equal( + normalized_mutual_info_score(km.labels_, true_labels), 1.0, 2 + ) + + +def test_connectivity(seed=36): + # Test that graph connectivity test works as expected + graph = np.array( + [ + [1, 0, 0, 0, 0], + [0, 1, 1, 0, 0], + [0, 1, 1, 1, 0], + [0, 0, 1, 1, 1], + [0, 0, 0, 1, 1], + ] + ) + assert not _graph_is_connected(graph) + for csr_container in CSR_CONTAINERS: + assert not _graph_is_connected(csr_container(graph)) + for csc_container in CSC_CONTAINERS: + assert not _graph_is_connected(csc_container(graph)) + + graph = np.array( + [ + [1, 1, 0, 0, 0], + [1, 1, 1, 0, 0], + [0, 1, 1, 1, 0], + [0, 0, 1, 1, 1], + [0, 0, 0, 1, 1], + ] + ) + assert _graph_is_connected(graph) + for csr_container in CSR_CONTAINERS: + assert _graph_is_connected(csr_container(graph)) + for csc_container in CSC_CONTAINERS: + assert _graph_is_connected(csc_container(graph)) + + +def test_spectral_embedding_deterministic(): + # Test that Spectral Embedding is deterministic + random_state = np.random.RandomState(36) + data = random_state.randn(10, 30) + sims = rbf_kernel(data) + embedding_1 = spectral_embedding(sims) + embedding_2 = spectral_embedding(sims) + assert_array_almost_equal(embedding_1, embedding_2) + + +def test_spectral_embedding_unnormalized(): + # Test that spectral_embedding is also processing unnormalized laplacian + # correctly + random_state = np.random.RandomState(36) + data = random_state.randn(10, 30) + sims = rbf_kernel(data) + n_components = 8 + embedding_1 = spectral_embedding( + sims, norm_laplacian=False, n_components=n_components, drop_first=False + ) + + # Verify using manual computation with dense eigh + laplacian, dd = csgraph_laplacian(sims, normed=False, return_diag=True) + _, diffusion_map = eigh(laplacian) + embedding_2 = diffusion_map.T[:n_components] + embedding_2 = _deterministic_vector_sign_flip(embedding_2).T + + assert_array_almost_equal(embedding_1, embedding_2) + + +def test_spectral_embedding_first_eigen_vector(): + # Test that the first eigenvector of spectral_embedding + # is constant and that the second is not (for a connected graph) + random_state = np.random.RandomState(36) + data = random_state.randn(10, 30) + sims = rbf_kernel(data) + n_components = 2 + + for seed in range(10): + embedding = spectral_embedding( + sims, + norm_laplacian=False, + n_components=n_components, + drop_first=False, + random_state=seed, + ) + + assert np.std(embedding[:, 0]) == pytest.approx(0) + assert np.std(embedding[:, 1]) > 1e-3 + + +@pytest.mark.parametrize( + "eigen_solver", + [ + "arpack", + "lobpcg", + pytest.param("amg", marks=skip_if_no_pyamg), + ], +) +@pytest.mark.parametrize("dtype", [np.float32, np.float64]) +def 
test_spectral_embedding_preserves_dtype(eigen_solver, dtype): + """Check that `SpectralEmbedding is preserving the dtype of the fitted + attribute and transformed data. + + Ideally, this test should be covered by the common test + `check_transformer_preserve_dtypes`. However, this test only run + with transformers implementing `transform` while `SpectralEmbedding` + implements only `fit_transform`. + """ + X = S.astype(dtype) + se = SpectralEmbedding( + n_components=2, affinity="rbf", eigen_solver=eigen_solver, random_state=0 + ) + X_trans = se.fit_transform(X) + + assert X_trans.dtype == dtype + assert se.embedding_.dtype == dtype + assert se.affinity_matrix_.dtype == dtype + + +@pytest.mark.skipif( + pyamg_available, + reason="PyAMG is installed and we should not test for an error.", +) +def test_error_pyamg_not_available(): + se_precomp = SpectralEmbedding( + n_components=2, + affinity="rbf", + eigen_solver="amg", + ) + err_msg = "The eigen_solver was set to 'amg', but pyamg is not available." + with pytest.raises(ValueError, match=err_msg): + se_precomp.fit_transform(S) + + +# TODO: Remove when pyamg removes the use of np.find_common_type +@pytest.mark.filterwarnings( + "ignore:np.find_common_type is deprecated:DeprecationWarning:pyamg.*" +) +@pytest.mark.parametrize("solver", ["arpack", "amg", "lobpcg"]) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_spectral_eigen_tol_auto(monkeypatch, solver, csr_container): + """Test that `eigen_tol="auto"` is resolved correctly""" + if solver == "amg" and not pyamg_available: + pytest.skip("PyAMG is not available.") + X, _ = make_blobs( + n_samples=200, random_state=0, centers=[[1, 1], [-1, -1]], cluster_std=0.01 + ) + D = pairwise_distances(X) # Distance matrix + S = np.max(D) - D # Similarity matrix + + solver_func = eigsh if solver == "arpack" else lobpcg + default_value = 0 if solver == "arpack" else None + if solver == "amg": + S = csr_container(S) + + mocked_solver = Mock(side_effect=solver_func) + + monkeypatch.setattr(_spectral_embedding, solver_func.__qualname__, mocked_solver) + + spectral_embedding(S, random_state=42, eigen_solver=solver, eigen_tol="auto") + mocked_solver.assert_called() + + _, kwargs = mocked_solver.call_args + assert kwargs["tol"] == default_value diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/tests/test_t_sne.py b/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/tests/test_t_sne.py new file mode 100644 index 0000000000000000000000000000000000000000..ea037fa5f83910988275e3b0ddf8ec5ef36fcd58 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/manifold/tests/test_t_sne.py @@ -0,0 +1,1181 @@ +import sys +from io import StringIO + +import numpy as np +import pytest +import scipy.sparse as sp +from numpy.testing import assert_allclose +from scipy.optimize import check_grad +from scipy.spatial.distance import pdist, squareform + +from sklearn import config_context +from sklearn.datasets import make_blobs +from sklearn.exceptions import EfficiencyWarning + +# mypy error: Module 'sklearn.manifold' has no attribute '_barnes_hut_tsne' +from sklearn.manifold import ( # type: ignore + TSNE, + _barnes_hut_tsne, +) +from sklearn.manifold._t_sne import ( + _gradient_descent, + _joint_probabilities, + _joint_probabilities_nn, + _kl_divergence, + _kl_divergence_bh, + trustworthiness, +) +from sklearn.manifold._utils import _binary_search_perplexity +from sklearn.metrics.pairwise import ( + cosine_distances, + manhattan_distances, + pairwise_distances, +) +from 
sklearn.neighbors import NearestNeighbors, kneighbors_graph +from sklearn.utils import check_random_state +from sklearn.utils._testing import ( + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + ignore_warnings, + skip_if_32bit, +) +from sklearn.utils.fixes import CSR_CONTAINERS, LIL_CONTAINERS + +x = np.linspace(0, 1, 10) +xx, yy = np.meshgrid(x, x) +X_2d_grid = np.hstack( + [ + xx.ravel().reshape(-1, 1), + yy.ravel().reshape(-1, 1), + ] +) + + +def test_gradient_descent_stops(): + # Test stopping conditions of gradient descent. + class ObjectiveSmallGradient: + def __init__(self): + self.it = -1 + + def __call__(self, _, compute_error=True): + self.it += 1 + return (10 - self.it) / 10.0, np.array([1e-5]) + + def flat_function(_, compute_error=True): + return 0.0, np.ones(1) + + # Gradient norm + old_stdout = sys.stdout + sys.stdout = StringIO() + try: + _, error, it = _gradient_descent( + ObjectiveSmallGradient(), + np.zeros(1), + 0, + n_iter=100, + n_iter_without_progress=100, + momentum=0.0, + learning_rate=0.0, + min_gain=0.0, + min_grad_norm=1e-5, + verbose=2, + ) + finally: + out = sys.stdout.getvalue() + sys.stdout.close() + sys.stdout = old_stdout + assert error == 1.0 + assert it == 0 + assert "gradient norm" in out + + # Maximum number of iterations without improvement + old_stdout = sys.stdout + sys.stdout = StringIO() + try: + _, error, it = _gradient_descent( + flat_function, + np.zeros(1), + 0, + n_iter=100, + n_iter_without_progress=10, + momentum=0.0, + learning_rate=0.0, + min_gain=0.0, + min_grad_norm=0.0, + verbose=2, + ) + finally: + out = sys.stdout.getvalue() + sys.stdout.close() + sys.stdout = old_stdout + assert error == 0.0 + assert it == 11 + assert "did not make any progress" in out + + # Maximum number of iterations + old_stdout = sys.stdout + sys.stdout = StringIO() + try: + _, error, it = _gradient_descent( + ObjectiveSmallGradient(), + np.zeros(1), + 0, + n_iter=11, + n_iter_without_progress=100, + momentum=0.0, + learning_rate=0.0, + min_gain=0.0, + min_grad_norm=0.0, + verbose=2, + ) + finally: + out = sys.stdout.getvalue() + sys.stdout.close() + sys.stdout = old_stdout + assert error == 0.0 + assert it == 10 + assert "Iteration 10" in out + + +def test_binary_search(): + # Test if the binary search finds Gaussians with desired perplexity. + random_state = check_random_state(0) + data = random_state.randn(50, 5) + distances = pairwise_distances(data).astype(np.float32) + desired_perplexity = 25.0 + P = _binary_search_perplexity(distances, desired_perplexity, verbose=0) + P = np.maximum(P, np.finfo(np.double).eps) + mean_perplexity = np.mean( + [np.exp(-np.sum(P[i] * np.log(P[i]))) for i in range(P.shape[0])] + ) + assert_almost_equal(mean_perplexity, desired_perplexity, decimal=3) + + +def test_binary_search_underflow(): + # Test if the binary search finds Gaussians with desired perplexity. + # A more challenging case than the one above, producing numeric + # underflow in float precision (see issue #19471 and PR #19472). + random_state = check_random_state(42) + data = random_state.randn(1, 90).astype(np.float32) + 100 + desired_perplexity = 30.0 + P = _binary_search_perplexity(data, desired_perplexity, verbose=0) + perplexity = 2 ** -np.nansum(P[0, 1:] * np.log2(P[0, 1:])) + assert_almost_equal(perplexity, desired_perplexity, decimal=3) + + +def test_binary_search_neighbors(): + # Binary perplexity search approximation. + # Should be approximately equal to the slow method when we use + # all points as neighbors. 
+ n_samples = 200 + desired_perplexity = 25.0 + random_state = check_random_state(0) + data = random_state.randn(n_samples, 2).astype(np.float32, copy=False) + distances = pairwise_distances(data) + P1 = _binary_search_perplexity(distances, desired_perplexity, verbose=0) + + # Test that when we use all the neighbors the results are identical + n_neighbors = n_samples - 1 + nn = NearestNeighbors().fit(data) + distance_graph = nn.kneighbors_graph(n_neighbors=n_neighbors, mode="distance") + distances_nn = distance_graph.data.astype(np.float32, copy=False) + distances_nn = distances_nn.reshape(n_samples, n_neighbors) + P2 = _binary_search_perplexity(distances_nn, desired_perplexity, verbose=0) + + indptr = distance_graph.indptr + P1_nn = np.array( + [ + P1[k, distance_graph.indices[indptr[k] : indptr[k + 1]]] + for k in range(n_samples) + ] + ) + assert_array_almost_equal(P1_nn, P2, decimal=4) + + # Test that the highest P_ij are the same when fewer neighbors are used + for k in np.linspace(150, n_samples - 1, 5): + k = int(k) + topn = k * 10 # check the top 10 * k entries out of k * k entries + distance_graph = nn.kneighbors_graph(n_neighbors=k, mode="distance") + distances_nn = distance_graph.data.astype(np.float32, copy=False) + distances_nn = distances_nn.reshape(n_samples, k) + P2k = _binary_search_perplexity(distances_nn, desired_perplexity, verbose=0) + assert_array_almost_equal(P1_nn, P2, decimal=2) + idx = np.argsort(P1.ravel())[::-1] + P1top = P1.ravel()[idx][:topn] + idx = np.argsort(P2k.ravel())[::-1] + P2top = P2k.ravel()[idx][:topn] + assert_array_almost_equal(P1top, P2top, decimal=2) + + +def test_binary_perplexity_stability(): + # Binary perplexity search should be stable. + # The binary_search_perplexity had a bug wherein the P array + # was uninitialized, leading to sporadically failing tests. + n_neighbors = 10 + n_samples = 100 + random_state = check_random_state(0) + data = random_state.randn(n_samples, 5) + nn = NearestNeighbors().fit(data) + distance_graph = nn.kneighbors_graph(n_neighbors=n_neighbors, mode="distance") + distances = distance_graph.data.astype(np.float32, copy=False) + distances = distances.reshape(n_samples, n_neighbors) + last_P = None + desired_perplexity = 3 + for _ in range(100): + P = _binary_search_perplexity(distances.copy(), desired_perplexity, verbose=0) + P1 = _joint_probabilities_nn(distance_graph, desired_perplexity, verbose=0) + # Convert the sparse matrix to a dense one for testing + P1 = P1.toarray() + if last_P is None: + last_P = P + last_P1 = P1 + else: + assert_array_almost_equal(P, last_P, decimal=4) + assert_array_almost_equal(P1, last_P1, decimal=4) + + +def test_gradient(): + # Test gradient of Kullback-Leibler divergence. + random_state = check_random_state(0) + + n_samples = 50 + n_features = 2 + n_components = 2 + alpha = 1.0 + + distances = random_state.randn(n_samples, n_features).astype(np.float32) + distances = np.abs(distances.dot(distances.T)) + np.fill_diagonal(distances, 0.0) + X_embedded = random_state.randn(n_samples, n_components).astype(np.float32) + + P = _joint_probabilities(distances, desired_perplexity=25.0, verbose=0) + + def fun(params): + return _kl_divergence(params, P, alpha, n_samples, n_components)[0] + + def grad(params): + return _kl_divergence(params, P, alpha, n_samples, n_components)[1] + + assert_almost_equal(check_grad(fun, grad, X_embedded.ravel()), 0.0, decimal=5) + + +def test_trustworthiness(): + # Test trustworthiness score. 
+ random_state = check_random_state(0) + + # Affine transformation + X = random_state.randn(100, 2) + assert trustworthiness(X, 5.0 + X / 10.0) == 1.0 + + # Randomly shuffled + X = np.arange(100).reshape(-1, 1) + X_embedded = X.copy() + random_state.shuffle(X_embedded) + assert trustworthiness(X, X_embedded) < 0.6 + + # Completely different + X = np.arange(5).reshape(-1, 1) + X_embedded = np.array([[0], [2], [4], [1], [3]]) + assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 0.2) + + +def test_trustworthiness_n_neighbors_error(): + """Raise an error when n_neighbors >= n_samples / 2. + + Non-regression test for #18567. + """ + regex = "n_neighbors .+ should be less than .+" + rng = np.random.RandomState(42) + X = rng.rand(7, 4) + X_embedded = rng.rand(7, 2) + with pytest.raises(ValueError, match=regex): + trustworthiness(X, X_embedded, n_neighbors=5) + + trust = trustworthiness(X, X_embedded, n_neighbors=3) + assert 0 <= trust <= 1 + + +@pytest.mark.parametrize("method", ["exact", "barnes_hut"]) +@pytest.mark.parametrize("init", ("random", "pca")) +def test_preserve_trustworthiness_approximately(method, init): + # Nearest neighbors should be preserved approximately. + random_state = check_random_state(0) + n_components = 2 + X = random_state.randn(50, n_components).astype(np.float32) + tsne = TSNE( + n_components=n_components, + init=init, + random_state=0, + method=method, + n_iter=700, + learning_rate="auto", + ) + X_embedded = tsne.fit_transform(X) + t = trustworthiness(X, X_embedded, n_neighbors=1) + assert t > 0.85 + + +def test_optimization_minimizes_kl_divergence(): + """t-SNE should give a lower KL divergence with more iterations.""" + random_state = check_random_state(0) + X, _ = make_blobs(n_features=3, random_state=random_state) + kl_divergences = [] + for n_iter in [250, 300, 350]: + tsne = TSNE( + n_components=2, + init="random", + perplexity=10, + learning_rate=100.0, + n_iter=n_iter, + random_state=0, + ) + tsne.fit_transform(X) + kl_divergences.append(tsne.kl_divergence_) + assert kl_divergences[1] <= kl_divergences[0] + assert kl_divergences[2] <= kl_divergences[1] + + +@pytest.mark.parametrize("method", ["exact", "barnes_hut"]) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_fit_transform_csr_matrix(method, csr_container): + # TODO: compare results on dense and sparse data as proposed in: + # https://github.com/scikit-learn/scikit-learn/pull/23585#discussion_r968388186 + # X can be a sparse matrix. + rng = check_random_state(0) + X = rng.randn(50, 2) + X[(rng.randint(0, 50, 25), rng.randint(0, 2, 25))] = 0.0 + X_csr = csr_container(X) + tsne = TSNE( + n_components=2, + init="random", + perplexity=10, + learning_rate=100.0, + random_state=0, + method=method, + n_iter=750, + ) + X_embedded = tsne.fit_transform(X_csr) + assert_allclose(trustworthiness(X_csr, X_embedded, n_neighbors=1), 1.0, rtol=1.1e-1) + + +def test_preserve_trustworthiness_approximately_with_precomputed_distances(): + # Nearest neighbors should be preserved approximately. 
+ random_state = check_random_state(0) + for i in range(3): + X = random_state.randn(80, 2) + D = squareform(pdist(X), "sqeuclidean") + tsne = TSNE( + n_components=2, + perplexity=2, + learning_rate=100.0, + early_exaggeration=2.0, + metric="precomputed", + random_state=i, + verbose=0, + n_iter=500, + init="random", + ) + X_embedded = tsne.fit_transform(D) + t = trustworthiness(D, X_embedded, n_neighbors=1, metric="precomputed") + assert t > 0.95 + + +def test_trustworthiness_not_euclidean_metric(): + # Test trustworthiness with a metric different from 'euclidean' and + # 'precomputed' + random_state = check_random_state(0) + X = random_state.randn(100, 2) + assert trustworthiness(X, X, metric="cosine") == trustworthiness( + pairwise_distances(X, metric="cosine"), X, metric="precomputed" + ) + + +@pytest.mark.parametrize( + "method, retype", + [ + ("exact", np.asarray), + ("barnes_hut", np.asarray), + *[("barnes_hut", csr_container) for csr_container in CSR_CONTAINERS], + ], +) +@pytest.mark.parametrize( + "D, message_regex", + [ + ([[0.0], [1.0]], ".* square distance matrix"), + ([[0.0, -1.0], [1.0, 0.0]], ".* positive.*"), + ], +) +def test_bad_precomputed_distances(method, D, retype, message_regex): + tsne = TSNE( + metric="precomputed", + method=method, + init="random", + random_state=42, + perplexity=1, + ) + with pytest.raises(ValueError, match=message_regex): + tsne.fit_transform(retype(D)) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_exact_no_precomputed_sparse(csr_container): + tsne = TSNE( + metric="precomputed", + method="exact", + init="random", + random_state=42, + perplexity=1, + ) + with pytest.raises(TypeError, match="sparse"): + tsne.fit_transform(csr_container([[0, 5], [5, 0]])) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_high_perplexity_precomputed_sparse_distances(csr_container): + # Perplexity should be less than 50 + dist = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [1.0, 0.0, 0.0]]) + bad_dist = csr_container(dist) + tsne = TSNE(metric="precomputed", init="random", random_state=42, perplexity=1) + msg = "3 neighbors per samples are required, but some samples have only 1" + with pytest.raises(ValueError, match=msg): + tsne.fit_transform(bad_dist) + + +@ignore_warnings(category=EfficiencyWarning) +@pytest.mark.parametrize("sparse_container", CSR_CONTAINERS + LIL_CONTAINERS) +def test_sparse_precomputed_distance(sparse_container): + """Make sure that TSNE works identically for sparse and dense matrix""" + random_state = check_random_state(0) + X = random_state.randn(100, 2) + + D_sparse = kneighbors_graph(X, n_neighbors=100, mode="distance", include_self=True) + D = pairwise_distances(X) + assert sp.issparse(D_sparse) + assert_almost_equal(D_sparse.toarray(), D) + + tsne = TSNE( + metric="precomputed", random_state=0, init="random", learning_rate="auto" + ) + Xt_dense = tsne.fit_transform(D) + + Xt_sparse = tsne.fit_transform(sparse_container(D_sparse)) + assert_almost_equal(Xt_dense, Xt_sparse) + + +def test_non_positive_computed_distances(): + # Computed distance matrices must be positive. 
+ def metric(x, y): + return -1 + + # Negative computed distances should be caught even if result is squared + tsne = TSNE(metric=metric, method="exact", perplexity=1) + X = np.array([[0.0, 0.0], [1.0, 1.0]]) + with pytest.raises(ValueError, match="All distances .*metric given.*"): + tsne.fit_transform(X) + + +def test_init_ndarray(): + # Initialize TSNE with ndarray and test fit + tsne = TSNE(init=np.zeros((100, 2)), learning_rate="auto") + X_embedded = tsne.fit_transform(np.ones((100, 5))) + assert_array_equal(np.zeros((100, 2)), X_embedded) + + +def test_init_ndarray_precomputed(): + # Initialize TSNE with ndarray and metric 'precomputed' + # Make sure no FutureWarning is thrown from _fit + tsne = TSNE( + init=np.zeros((100, 2)), + metric="precomputed", + learning_rate=50.0, + ) + tsne.fit(np.zeros((100, 100))) + + +def test_pca_initialization_not_compatible_with_precomputed_kernel(): + # Precomputed distance matrices cannot use PCA initialization. + tsne = TSNE(metric="precomputed", init="pca", perplexity=1) + with pytest.raises( + ValueError, + match='The parameter init="pca" cannot be used with metric="precomputed".', + ): + tsne.fit_transform(np.array([[0.0], [1.0]])) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_pca_initialization_not_compatible_with_sparse_input(csr_container): + # Sparse input matrices cannot use PCA initialization. + tsne = TSNE(init="pca", learning_rate=100.0, perplexity=1) + with pytest.raises(TypeError, match="PCA initialization.*"): + tsne.fit_transform(csr_container([[0, 5], [5, 0]])) + + +def test_n_components_range(): + # barnes_hut method should only be used with n_components <= 3 + tsne = TSNE(n_components=4, method="barnes_hut", perplexity=1) + with pytest.raises(ValueError, match="'n_components' should be .*"): + tsne.fit_transform(np.array([[0.0], [1.0]])) + + +def test_early_exaggeration_used(): + # check that the ``early_exaggeration`` parameter has an effect + random_state = check_random_state(0) + n_components = 2 + methods = ["exact", "barnes_hut"] + X = random_state.randn(25, n_components).astype(np.float32) + for method in methods: + tsne = TSNE( + n_components=n_components, + perplexity=1, + learning_rate=100.0, + init="pca", + random_state=0, + method=method, + early_exaggeration=1.0, + n_iter=250, + ) + X_embedded1 = tsne.fit_transform(X) + tsne = TSNE( + n_components=n_components, + perplexity=1, + learning_rate=100.0, + init="pca", + random_state=0, + method=method, + early_exaggeration=10.0, + n_iter=250, + ) + X_embedded2 = tsne.fit_transform(X) + + assert not np.allclose(X_embedded1, X_embedded2) + + +def test_n_iter_used(): + # check that the ``n_iter`` parameter has an effect + random_state = check_random_state(0) + n_components = 2 + methods = ["exact", "barnes_hut"] + X = random_state.randn(25, n_components).astype(np.float32) + for method in methods: + for n_iter in [251, 500]: + tsne = TSNE( + n_components=n_components, + perplexity=1, + learning_rate=0.5, + init="random", + random_state=0, + method=method, + early_exaggeration=1.0, + n_iter=n_iter, + ) + tsne.fit_transform(X) + + assert tsne.n_iter_ == n_iter - 1 + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_answer_gradient_two_points(csr_container): + # Test the tree with only a single set of children. + # + # These tests & answers have been checked against the reference + # implementation by LvdM. 
+ pos_input = np.array([[1.0, 0.0], [0.0, 1.0]]) + pos_output = np.array( + [[-4.961291e-05, -1.072243e-04], [9.259460e-05, 2.702024e-04]] + ) + neighbors = np.array([[1], [0]]) + grad_output = np.array( + [[-2.37012478e-05, -6.29044398e-05], [2.37012478e-05, 6.29044398e-05]] + ) + _run_answer_test(pos_input, pos_output, neighbors, grad_output, csr_container) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_answer_gradient_four_points(csr_container): + # Four points tests the tree with multiple levels of children. + # + # These tests & answers have been checked against the reference + # implementation by LvdM. + pos_input = np.array([[1.0, 0.0], [0.0, 1.0], [5.0, 2.0], [7.3, 2.2]]) + pos_output = np.array( + [ + [6.080564e-05, -7.120823e-05], + [-1.718945e-04, -4.000536e-05], + [-2.271720e-04, 8.663310e-05], + [-1.032577e-04, -3.582033e-05], + ] + ) + neighbors = np.array([[1, 2, 3], [0, 2, 3], [1, 0, 3], [1, 2, 0]]) + grad_output = np.array( + [ + [5.81128448e-05, -7.78033454e-06], + [-5.81526851e-05, 7.80976444e-06], + [4.24275173e-08, -3.69569698e-08], + [-2.58720939e-09, 7.52706374e-09], + ] + ) + _run_answer_test(pos_input, pos_output, neighbors, grad_output, csr_container) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_skip_num_points_gradient(csr_container): + # Test the kwargs option skip_num_points. + # + # Skip num points should make it such that the Barnes_hut gradient + # is not calculated for indices below skip_num_point. + # Aside from skip_num_points=2 and the first two gradient rows + # being set to zero, these data points are the same as in + # test_answer_gradient_four_points() + pos_input = np.array([[1.0, 0.0], [0.0, 1.0], [5.0, 2.0], [7.3, 2.2]]) + pos_output = np.array( + [ + [6.080564e-05, -7.120823e-05], + [-1.718945e-04, -4.000536e-05], + [-2.271720e-04, 8.663310e-05], + [-1.032577e-04, -3.582033e-05], + ] + ) + neighbors = np.array([[1, 2, 3], [0, 2, 3], [1, 0, 3], [1, 2, 0]]) + grad_output = np.array( + [ + [0.0, 0.0], + [0.0, 0.0], + [4.24275173e-08, -3.69569698e-08], + [-2.58720939e-09, 7.52706374e-09], + ] + ) + _run_answer_test( + pos_input, pos_output, neighbors, grad_output, csr_container, False, 0.1, 2 + ) + + +def _run_answer_test( + pos_input, + pos_output, + neighbors, + grad_output, + csr_container, + verbose=False, + perplexity=0.1, + skip_num_points=0, +): + distances = pairwise_distances(pos_input).astype(np.float32) + args = distances, perplexity, verbose + pos_output = pos_output.astype(np.float32) + neighbors = neighbors.astype(np.int64, copy=False) + pij_input = _joint_probabilities(*args) + pij_input = squareform(pij_input).astype(np.float32) + grad_bh = np.zeros(pos_output.shape, dtype=np.float32) + + P = csr_container(pij_input) + + neighbors = P.indices.astype(np.int64) + indptr = P.indptr.astype(np.int64) + + _barnes_hut_tsne.gradient( + P.data, pos_output, neighbors, indptr, grad_bh, 0.5, 2, 1, skip_num_points=0 + ) + assert_array_almost_equal(grad_bh, grad_output, decimal=4) + + +def test_verbose(): + # Verbose options write to stdout. + random_state = check_random_state(0) + tsne = TSNE(verbose=2, perplexity=4) + X = random_state.randn(5, 2) + + old_stdout = sys.stdout + sys.stdout = StringIO() + try: + tsne.fit_transform(X) + finally: + out = sys.stdout.getvalue() + sys.stdout.close() + sys.stdout = old_stdout + + assert "[t-SNE]" in out + assert "nearest neighbors..." 
in out + assert "Computed conditional probabilities" in out + assert "Mean sigma" in out + assert "early exaggeration" in out + + +def test_chebyshev_metric(): + # t-SNE should allow metrics that cannot be squared (issue #3526). + random_state = check_random_state(0) + tsne = TSNE(metric="chebyshev", perplexity=4) + X = random_state.randn(5, 2) + tsne.fit_transform(X) + + +def test_reduction_to_one_component(): + # t-SNE should allow reduction to one component (issue #4154). + random_state = check_random_state(0) + tsne = TSNE(n_components=1, perplexity=4) + X = random_state.randn(5, 2) + X_embedded = tsne.fit(X).embedding_ + assert np.all(np.isfinite(X_embedded)) + + +@pytest.mark.parametrize("method", ["barnes_hut", "exact"]) +@pytest.mark.parametrize("dt", [np.float32, np.float64]) +def test_64bit(method, dt): + # Ensure 64bit arrays are handled correctly. + random_state = check_random_state(0) + + X = random_state.randn(10, 2).astype(dt, copy=False) + tsne = TSNE( + n_components=2, + perplexity=2, + learning_rate=100.0, + random_state=0, + method=method, + verbose=0, + n_iter=300, + init="random", + ) + X_embedded = tsne.fit_transform(X) + effective_type = X_embedded.dtype + + # tsne cython code is only single precision, so the output will + # always be single precision, irrespectively of the input dtype + assert effective_type == np.float32 + + +@pytest.mark.parametrize("method", ["barnes_hut", "exact"]) +def test_kl_divergence_not_nan(method): + # Ensure kl_divergence_ is computed at last iteration + # even though n_iter % n_iter_check != 0, i.e. 1003 % 50 != 0 + random_state = check_random_state(0) + + X = random_state.randn(50, 2) + tsne = TSNE( + n_components=2, + perplexity=2, + learning_rate=100.0, + random_state=0, + method=method, + verbose=0, + n_iter=503, + init="random", + ) + tsne.fit_transform(X) + + assert not np.isnan(tsne.kl_divergence_) + + +def test_barnes_hut_angle(): + # When Barnes-Hut's angle=0 this corresponds to the exact method. 
+ angle = 0.0 + perplexity = 10 + n_samples = 100 + for n_components in [2, 3]: + n_features = 5 + degrees_of_freedom = float(n_components - 1.0) + + random_state = check_random_state(0) + data = random_state.randn(n_samples, n_features) + distances = pairwise_distances(data) + params = random_state.randn(n_samples, n_components) + P = _joint_probabilities(distances, perplexity, verbose=0) + kl_exact, grad_exact = _kl_divergence( + params, P, degrees_of_freedom, n_samples, n_components + ) + + n_neighbors = n_samples - 1 + distances_csr = ( + NearestNeighbors() + .fit(data) + .kneighbors_graph(n_neighbors=n_neighbors, mode="distance") + ) + P_bh = _joint_probabilities_nn(distances_csr, perplexity, verbose=0) + kl_bh, grad_bh = _kl_divergence_bh( + params, + P_bh, + degrees_of_freedom, + n_samples, + n_components, + angle=angle, + skip_num_points=0, + verbose=0, + ) + + P = squareform(P) + P_bh = P_bh.toarray() + assert_array_almost_equal(P_bh, P, decimal=5) + assert_almost_equal(kl_exact, kl_bh, decimal=3) + + +@skip_if_32bit +def test_n_iter_without_progress(): + # Use a dummy negative n_iter_without_progress and check output on stdout + random_state = check_random_state(0) + X = random_state.randn(100, 10) + for method in ["barnes_hut", "exact"]: + tsne = TSNE( + n_iter_without_progress=-1, + verbose=2, + learning_rate=1e8, + random_state=0, + method=method, + n_iter=351, + init="random", + ) + tsne._N_ITER_CHECK = 1 + tsne._EXPLORATION_N_ITER = 0 + + old_stdout = sys.stdout + sys.stdout = StringIO() + try: + tsne.fit_transform(X) + finally: + out = sys.stdout.getvalue() + sys.stdout.close() + sys.stdout = old_stdout + + # The output needs to contain the value of n_iter_without_progress + assert "did not make any progress during the last -1 episodes. Finished." 
in out + + +def test_min_grad_norm(): + # Make sure that the parameter min_grad_norm is used correctly + random_state = check_random_state(0) + X = random_state.randn(100, 2) + min_grad_norm = 0.002 + tsne = TSNE(min_grad_norm=min_grad_norm, verbose=2, random_state=0, method="exact") + + old_stdout = sys.stdout + sys.stdout = StringIO() + try: + tsne.fit_transform(X) + finally: + out = sys.stdout.getvalue() + sys.stdout.close() + sys.stdout = old_stdout + + lines_out = out.split("\n") + + # extract the gradient norm from the verbose output + gradient_norm_values = [] + for line in lines_out: + # When the computation is Finished just an old gradient norm value + # is repeated that we do not need to store + if "Finished" in line: + break + + start_grad_norm = line.find("gradient norm") + if start_grad_norm >= 0: + line = line[start_grad_norm:] + line = line.replace("gradient norm = ", "").split(" ")[0] + gradient_norm_values.append(float(line)) + + # Compute how often the gradient norm is smaller than min_grad_norm + gradient_norm_values = np.array(gradient_norm_values) + n_smaller_gradient_norms = len( + gradient_norm_values[gradient_norm_values <= min_grad_norm] + ) + + # The gradient norm can be smaller than min_grad_norm at most once, + # because in the moment it becomes smaller the optimization stops + assert n_smaller_gradient_norms <= 1 + + +def test_accessible_kl_divergence(): + # Ensures that the accessible kl_divergence matches the computed value + random_state = check_random_state(0) + X = random_state.randn(50, 2) + tsne = TSNE( + n_iter_without_progress=2, verbose=2, random_state=0, method="exact", n_iter=500 + ) + + old_stdout = sys.stdout + sys.stdout = StringIO() + try: + tsne.fit_transform(X) + finally: + out = sys.stdout.getvalue() + sys.stdout.close() + sys.stdout = old_stdout + + # The output needs to contain the accessible kl_divergence as the error at + # the last iteration + for line in out.split("\n")[::-1]: + if "Iteration" in line: + _, _, error = line.partition("error = ") + if error: + error, _, _ = error.partition(",") + break + assert_almost_equal(tsne.kl_divergence_, float(error), decimal=5) + + +@pytest.mark.parametrize("method", ["barnes_hut", "exact"]) +def test_uniform_grid(method): + """Make sure that TSNE can approximately recover a uniform 2D grid + + Due to ties in distances between point in X_2d_grid, this test is platform + dependent for ``method='barnes_hut'`` due to numerical imprecision. + + Also, t-SNE is not assured to converge to the right solution because bad + initialization can lead to convergence to bad local minimum (the + optimization problem is non-convex). To avoid breaking the test too often, + we re-run t-SNE from the final point when the convergence is not good + enough. + """ + seeds = range(3) + n_iter = 500 + for seed in seeds: + tsne = TSNE( + n_components=2, + init="random", + random_state=seed, + perplexity=50, + n_iter=n_iter, + method=method, + learning_rate="auto", + ) + Y = tsne.fit_transform(X_2d_grid) + + try_name = "{}_{}".format(method, seed) + try: + assert_uniform_grid(Y, try_name) + except AssertionError: + # If the test fails a first time, re-run with init=Y to see if + # this was caused by a bad initialization. Note that this will + # also run an early_exaggeration step. 
+ try_name += ":rerun"
+ tsne.init = Y
+ Y = tsne.fit_transform(X_2d_grid)
+ assert_uniform_grid(Y, try_name)
+
+
+def assert_uniform_grid(Y, try_name=None):
+ # Ensure that the resulting embedding leads to approximately
+ # uniformly spaced points: the distance to the closest neighbors
+ # should be non-zero and approximately constant.
+ nn = NearestNeighbors(n_neighbors=1).fit(Y)
+ dist_to_nn = nn.kneighbors(return_distance=True)[0].ravel()
+ assert dist_to_nn.min() > 0.1
+
+ smallest_to_mean = dist_to_nn.min() / np.mean(dist_to_nn)
+ largest_to_mean = dist_to_nn.max() / np.mean(dist_to_nn)
+
+ assert smallest_to_mean > 0.5, try_name
+ assert largest_to_mean < 2, try_name
+
+
+def test_bh_match_exact():
+ # check that the ``barnes_hut`` method matches the exact one when
+ # ``angle = 0`` and ``perplexity > n_samples / 3``
+ random_state = check_random_state(0)
+ n_features = 10
+ X = random_state.randn(30, n_features).astype(np.float32)
+ X_embeddeds = {}
+ n_iter = {}
+ for method in ["exact", "barnes_hut"]:
+ tsne = TSNE(
+ n_components=2,
+ method=method,
+ learning_rate=1.0,
+ init="random",
+ random_state=0,
+ n_iter=251,
+ perplexity=29.5,
+ angle=0,
+ )
+ # Kill the early_exaggeration
+ tsne._EXPLORATION_N_ITER = 0
+ X_embeddeds[method] = tsne.fit_transform(X)
+ n_iter[method] = tsne.n_iter_
+
+ assert n_iter["exact"] == n_iter["barnes_hut"]
+ assert_allclose(X_embeddeds["exact"], X_embeddeds["barnes_hut"], rtol=1e-4)
+
+
+def test_gradient_bh_multithread_match_sequential():
+ # check that the bh gradient with different num_threads gives the same
+ # results
+
+ n_features = 10
+ n_samples = 30
+ n_components = 2
+ degrees_of_freedom = 1
+
+ angle = 3
+ perplexity = 5
+
+ random_state = check_random_state(0)
+ data = random_state.randn(n_samples, n_features).astype(np.float32)
+ params = random_state.randn(n_samples, n_components)
+
+ n_neighbors = n_samples - 1
+ distances_csr = (
+ NearestNeighbors()
+ .fit(data)
+ .kneighbors_graph(n_neighbors=n_neighbors, mode="distance")
+ )
+ P_bh = _joint_probabilities_nn(distances_csr, perplexity, verbose=0)
+ kl_sequential, grad_sequential = _kl_divergence_bh(
+ params,
+ P_bh,
+ degrees_of_freedom,
+ n_samples,
+ n_components,
+ angle=angle,
+ skip_num_points=0,
+ verbose=0,
+ num_threads=1,
+ )
+ for num_threads in [2, 4]:
+ kl_multithread, grad_multithread = _kl_divergence_bh(
+ params,
+ P_bh,
+ degrees_of_freedom,
+ n_samples,
+ n_components,
+ angle=angle,
+ skip_num_points=0,
+ verbose=0,
+ num_threads=num_threads,
+ )
+
+ assert_allclose(kl_multithread, kl_sequential, rtol=1e-6)
+ assert_allclose(grad_multithread, grad_sequential, rtol=1e-6)
+
+
+@pytest.mark.parametrize(
+ "metric, dist_func",
+ [("manhattan", manhattan_distances), ("cosine", cosine_distances)],
+)
+@pytest.mark.parametrize("method", ["barnes_hut", "exact"])
+def test_tsne_with_different_distance_metrics(metric, dist_func, method):
+ """Make sure that TSNE works for different distance metrics"""
+
+ if method == "barnes_hut" and metric == "manhattan":
+ # The distances computed by `manhattan_distances` differ slightly from those
+ # computed internally by NearestNeighbors via the PairwiseDistancesReduction
+ # Cython code. This in turn causes T-SNE to converge to a different
+ # solution, but this should not impact the qualitative results of either
+ # method.
+ # NOTE: it's probably not valid from a mathematical point of view to use the
+ # Manhattan distance for T-SNE...
+ # TODO: re-enable this test if/when `manhattan_distances` is refactored to + # reuse the same underlying Cython code NearestNeighbors. + # For reference, see: + # https://github.com/scikit-learn/scikit-learn/pull/23865/files#r925721573 + pytest.xfail( + "Distance computations are different for method == 'barnes_hut' and metric" + " == 'manhattan', but this is expected." + ) + + random_state = check_random_state(0) + n_components_original = 3 + n_components_embedding = 2 + X = random_state.randn(50, n_components_original).astype(np.float32) + X_transformed_tsne = TSNE( + metric=metric, + method=method, + n_components=n_components_embedding, + random_state=0, + n_iter=300, + init="random", + learning_rate="auto", + ).fit_transform(X) + X_transformed_tsne_precomputed = TSNE( + metric="precomputed", + method=method, + n_components=n_components_embedding, + random_state=0, + n_iter=300, + init="random", + learning_rate="auto", + ).fit_transform(dist_func(X)) + assert_array_equal(X_transformed_tsne, X_transformed_tsne_precomputed) + + +@pytest.mark.parametrize("method", ["exact", "barnes_hut"]) +def test_tsne_n_jobs(method): + """Make sure that the n_jobs parameter doesn't impact the output""" + random_state = check_random_state(0) + n_features = 10 + X = random_state.randn(30, n_features) + X_tr_ref = TSNE( + n_components=2, + method=method, + perplexity=25.0, + angle=0, + n_jobs=1, + random_state=0, + init="random", + learning_rate="auto", + ).fit_transform(X) + X_tr = TSNE( + n_components=2, + method=method, + perplexity=25.0, + angle=0, + n_jobs=2, + random_state=0, + init="random", + learning_rate="auto", + ).fit_transform(X) + + assert_allclose(X_tr_ref, X_tr) + + +def test_tsne_with_mahalanobis_distance(): + """Make sure that method_parameters works with mahalanobis distance.""" + random_state = check_random_state(0) + n_samples, n_features = 300, 10 + X = random_state.randn(n_samples, n_features) + default_params = { + "perplexity": 40, + "n_iter": 250, + "learning_rate": "auto", + "init": "random", + "n_components": 3, + "random_state": 0, + } + + tsne = TSNE(metric="mahalanobis", **default_params) + msg = "Must provide either V or VI for Mahalanobis distance" + with pytest.raises(ValueError, match=msg): + tsne.fit_transform(X) + + precomputed_X = squareform(pdist(X, metric="mahalanobis"), checks=True) + X_trans_expected = TSNE(metric="precomputed", **default_params).fit_transform( + precomputed_X + ) + + X_trans = TSNE( + metric="mahalanobis", metric_params={"V": np.cov(X.T)}, **default_params + ).fit_transform(X) + assert_allclose(X_trans, X_trans_expected) + + +@pytest.mark.parametrize("perplexity", (20, 30)) +def test_tsne_perplexity_validation(perplexity): + """Make sure that perplexity > n_samples results in a ValueError""" + + random_state = check_random_state(0) + X = random_state.randn(20, 2) + est = TSNE( + learning_rate="auto", + init="pca", + perplexity=perplexity, + random_state=random_state, + ) + msg = "perplexity must be less than n_samples" + with pytest.raises(ValueError, match=msg): + est.fit_transform(X) + + +def test_tsne_works_with_pandas_output(): + """Make sure that TSNE works when the output is set to "pandas". + + Non-regression test for gh-25365. 
+ """ + pytest.importorskip("pandas") + with config_context(transform_output="pandas"): + arr = np.arange(35 * 4).reshape(35, 4) + TSNE(n_components=2).fit_transform(arr) diff --git a/llmeval-env/lib/python3.10/site-packages/sqlitedict-2.1.0.egg-info/PKG-INFO b/llmeval-env/lib/python3.10/site-packages/sqlitedict-2.1.0.egg-info/PKG-INFO new file mode 100644 index 0000000000000000000000000000000000000000..5a5226e0a866ab60221b295653899239b8bb8092 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sqlitedict-2.1.0.egg-info/PKG-INFO @@ -0,0 +1,298 @@ +Metadata-Version: 2.1 +Name: sqlitedict +Version: 2.1.0 +Summary: Persistent dict in Python, backed up by sqlite3 and pickle, multithread-safe. +Home-page: https://github.com/piskvorky/sqlitedict +Author: Radim Rehurek, Victor R. Escobar, Andrey Usov, Prasanna Swaminathan, Jeff Quast +Author-email: me@radimrehurek.com +Maintainer: Radim Rehurek +Maintainer-email: me@radimrehurek.com +License: Apache 2.0 +Download-URL: http://pypi.python.org/pypi/sqlitedict +Keywords: sqlite,persistent dict,multithreaded +Platform: any +Classifier: Development Status :: 5 - Production/Stable +Classifier: Environment :: Console +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: Apache Software License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Topic :: Database :: Front-Ends +License-File: LICENSE.md + +=================================================== +sqlitedict -- persistent ``dict``, backed by SQLite +=================================================== + +|GithubActions|_ +|License|_ + +.. |GithubActions| image:: https://github.com/RaRe-Technologies/sqlitedict/actions/workflows/python-package.yml/badge.svg +.. |Downloads| image:: https://img.shields.io/pypi/dm/sqlitedict.svg +.. |License| image:: https://img.shields.io/pypi/l/sqlitedict.svg +.. _GithubActions: https://github.com/RaRe-Technologies/sqlitedict/actions/workflows/python-package.yml +.. _Downloads: https://pypi.python.org/pypi/sqlitedict +.. _License: https://pypi.python.org/pypi/sqlitedict + +A lightweight wrapper around Python's sqlite3 database with a simple, Pythonic +dict-like interface and support for multi-thread access: + +Usage +===== + +Write +----- + +.. code-block:: python + + >>> from sqlitedict import SqliteDict + >>> db = SqliteDict("example.sqlite") + >>> + >>> db["1"] = {"name": "first item"} + >>> db["2"] = {"name": "second item"} + >>> db["3"] = {"name": "yet another item"} + >>> + >>> # Commit to save the objects. + >>> db.commit() + >>> + >>> db["4"] = {"name": "yet another item"} + >>> # Oops, forgot to commit here, that object will never be saved. + >>> # Always remember to commit, or enable autocommit with SqliteDict("example.sqlite", autocommit=True) + >>> # Autocommit is off by default for performance. + >>> + >>> db.close() + +Read +---- + +.. code-block:: python + + >>> from sqlitedict import SqliteDict + >>> db = SqliteDict("example.sqlite") + >>> + >>> print("There are %d items in the database" % len(db)) + There are 3 items in the database + >>> + >>> # Standard dict interface. items() values() keys() etc... + >>> for key, item in db.items(): + ... 
print("%s=%s" % (key, item)) + 1={'name': 'first item'} + 2={'name': 'second item'} + 3={'name': 'yet another item'} + >>> + >>> db.close() + +Efficiency +---------- + +By default, sqlitedict's exception handling favors verbosity over efficiency. +It extracts and outputs the outer exception stack to the error logs. +If you favor efficiency, then initialize the DB with outer_stack=False. + +.. code-block:: python + + >>> from sqlitedict import SqliteDict + >>> db = SqliteDict("example.sqlite", outer_stack=False) # True is the default + >>> db[1] + {'name': 'first item'} + +Context Manager +--------------- + +.. code-block:: python + + >>> from sqlitedict import SqliteDict + >>> + >>> # The database is automatically closed when leaving the with section. + >>> # Uncommitted objects are not saved on close. REMEMBER TO COMMIT! + >>> + >>> with SqliteDict("example.sqlite") as db: + ... print("There are %d items in the database" % len(db)) + There are 3 items in the database + +Tables +------ + +A database file can store multiple tables. +A default table is used when no table name is specified. + +Note: Writes are serialized, having multiple tables does not improve performance. + +.. code-block:: python + + >>> from sqlitedict import SqliteDict + >>> + >>> products = SqliteDict("example.sqlite", tablename="product", autocommit=True) + >>> manufacturers = SqliteDict("example.sqlite", tablename="manufacturer", autocommit=True) + >>> + >>> products["1"] = {"name": "first item", "manufacturer_id": "1"} + >>> products["2"] = {"name": "second item", "manufacturer_id": "1"} + >>> + >>> manufacturers["1"] = {"manufacturer_name": "afactory", "location": "US"} + >>> manufacturers["2"] = {"manufacturer_name": "anotherfactory", "location": "UK"} + >>> + >>> tables = products.get_tablenames('example.sqlite') + >>> print(tables) + ['unnamed', 'product', 'manufacturer'] + >>> + >>> products.close() + >>> manufacturers.close() + +In case you're wondering, the unnamed table comes from the previous examples, +where we did not specify a table name. + +Serialization +------------- + +Keys are strings. Values are any serializeable object. + +By default Pickle is used internally to (de)serialize the values. + +It's possible to use a custom (de)serializer, notably for JSON and for compression. + +.. code-block:: python + + >>> # Use JSON instead of pickle + >>> import json + >>> with SqliteDict("example.sqlite", encode=json.dumps, decode=json.loads) as mydict: + ... pass + >>> + >>> # Apply zlib compression after pickling + >>> import zlib, pickle, sqlite3 + >>> + >>> def my_encode(obj): + ... return sqlite3.Binary(zlib.compress(pickle.dumps(obj, pickle.HIGHEST_PROTOCOL))) + >>> + >>> def my_decode(obj): + ... return pickle.loads(zlib.decompress(bytes(obj))) + >>> + >>> with SqliteDict("example.sqlite", encode=my_encode, decode=my_decode) as mydict: + ... pass + +It's also possible to use a custom (de)serializer for keys to allow non-string keys. + +.. code-block:: python + + >>> # Use key encoding instead of default string keys only + >>> from sqlitedict import encode_key, decode_key + >>> with SqliteDict("example.sqlite", encode_key=encode_key, decode_key=decode_key) as mydict: + ... pass + +More +---- + +Functions are well documented, see docstrings directly in ``sqlitedict.py`` or call ``help(sqlitedict)``. + +**Beware**: because of Python semantics, ``sqlitedict`` cannot know when a mutable +SqliteDict-backed entry was modified in RAM. You'll need to +explicitly assign the mutated object back to SqliteDict: + +.. 
code-block:: python + + >>> from sqlitedict import SqliteDict + >>> db = SqliteDict("example.sqlite") + >>> db["colors"] = {"red": (255, 0, 0)} + >>> db.commit() + >>> + >>> colors = db["colors"] + >>> colors["blue"] = (0, 0, 255) # sqlite DB not updated here! + >>> db["colors"] = colors # now updated + >>> + >>> db.commit() # remember to commit (or set autocommit) + >>> db.close() + +Features +======== + +* Values can be **any picklable objects** (uses ``pickle`` with the highest protocol). +* Support for **multiple tables** (=dicts) living in the same database file. +* Support for **access from multiple threads** to the same connection (needed by e.g. Pyro). + Vanilla sqlite3 gives you ``ProgrammingError: SQLite objects created in a thread can + only be used in that same thread.`` + + Concurrent requests are still serialized internally, so this "multithreaded support" + **doesn't** give you any performance benefits. It is a work-around for sqlite limitations in Python. + +* Support for **custom serialization or compression**: + +.. code-block:: python + + # use JSON instead of pickle + >>> import json + >>> mydict = SqliteDict('./my_db.sqlite', encode=json.dumps, decode=json.loads) + + # apply zlib compression after pickling + >>> import zlib, pickle, sqlite3 + >>> def my_encode(obj): + ... return sqlite3.Binary(zlib.compress(pickle.dumps(obj, pickle.HIGHEST_PROTOCOL))) + >>> def my_decode(obj): + ... return pickle.loads(zlib.decompress(bytes(obj))) + >>> mydict = SqliteDict('./my_db.sqlite', encode=my_encode, decode=my_decode) + +* sqlite is efficient and can work effectively with large databases (multi gigabytes), not limited by memory. +* sqlitedict is mostly a thin wrapper around sqlite. +* ``items()`` ``keys()`` ``values()`` are iterating one by one, the rows are loaded in a worker thread and queued in memory. +* ``len()`` is calling sqlite to count rows, that is scanning the whole table. +* For better performance, write objects in batch and ``commit()`` once. + +Installation +============ + +The module has no dependencies beyond Python itself. +The minimum supported Python version is 3.7, continuously tested on Python 3.7, 3.8, 3.9, and 3.10 `on Travis `_. + +Install or upgrade with:: + + pip install -U sqlitedict + +or from the `source tar.gz `_:: + + python setup.py install + +Contributions +============= + +Testing +------- + +Install:: + + $ pip install pytest coverage pytest-coverage + +To perform all tests:: + + $ mkdir -p tests/db + $ pytest tests + $ python -m doctest README.rst + +To perform all tests with coverage:: + + $ pytest tests --cov=sqlitedict + +Comments, bug reports +--------------------- + +``sqlitedict`` resides on `github `_. You can file +issues or pull requests there. + +License +======= + +``sqlitedict`` is open source software released under the `Apache 2.0 license `_. +Copyright (c) 2011-now `Radim Řehůřek `_ and contributors. + +Housekeeping +============ + +Clean up the test database to keep each doctest run idempotent: + +.. code-block:: python + + >>> import os + >>> if __name__ == '__main__': + ... 
os.unlink('example.sqlite') + + diff --git a/llmeval-env/lib/python3.10/site-packages/sqlitedict-2.1.0.egg-info/SOURCES.txt b/llmeval-env/lib/python3.10/site-packages/sqlitedict-2.1.0.egg-info/SOURCES.txt new file mode 100644 index 0000000000000000000000000000000000000000..5031dd45e8b86e4833887cd9608413603e86906b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sqlitedict-2.1.0.egg-info/SOURCES.txt @@ -0,0 +1,18 @@ +LICENSE.md +MANIFEST.in +Makefile +README.rst +setup.cfg +setup.py +sqlitedict.py +sqlitedict.egg-info/PKG-INFO +sqlitedict.egg-info/SOURCES.txt +sqlitedict.egg-info/dependency_links.txt +sqlitedict.egg-info/top_level.txt +tests/accessories.py +tests/autocommit.py +tests/test_autocommit.py +tests/test_core.py +tests/test_named_db.py +tests/test_onimport.py +tests/test_temp_db.py \ No newline at end of file diff --git a/llmeval-env/lib/python3.10/site-packages/sqlitedict-2.1.0.egg-info/dependency_links.txt b/llmeval-env/lib/python3.10/site-packages/sqlitedict-2.1.0.egg-info/dependency_links.txt new file mode 100644 index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sqlitedict-2.1.0.egg-info/dependency_links.txt @@ -0,0 +1 @@ + diff --git a/llmeval-env/lib/python3.10/site-packages/sqlitedict-2.1.0.egg-info/top_level.txt b/llmeval-env/lib/python3.10/site-packages/sqlitedict-2.1.0.egg-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..6bb8973f26eb653aaa4a60f1c20541229ca028e0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sqlitedict-2.1.0.egg-info/top_level.txt @@ -0,0 +1 @@ +sqlitedict