applied-ai-018 committed
Commit 639e462 · verified · 1 parent: 276c052

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full set.
Files changed (50)
  1. llmeval-env/lib/python3.10/site-packages/sklearn/_build_utils/__init__.py +115 -0
  2. llmeval-env/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/__init__.cpython-310.pyc +0 -0
  3. llmeval-env/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/openmp_helpers.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/pre_build_helpers.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/tempita.cpython-310.pyc +0 -0
  6. llmeval-env/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/version.cpython-310.pyc +0 -0
  7. llmeval-env/lib/python3.10/site-packages/sklearn/_build_utils/openmp_helpers.py +123 -0
  8. llmeval-env/lib/python3.10/site-packages/sklearn/_build_utils/pre_build_helpers.py +73 -0
  9. llmeval-env/lib/python3.10/site-packages/sklearn/_build_utils/tempita.py +57 -0
  10. llmeval-env/lib/python3.10/site-packages/sklearn/_build_utils/version.py +14 -0
  11. llmeval-env/lib/python3.10/site-packages/sklearn/cluster/__pycache__/__init__.cpython-310.pyc +0 -0
  12. llmeval-env/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_affinity_propagation.cpython-310.pyc +0 -0
  13. llmeval-env/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_agglomerative.cpython-310.pyc +0 -0
  14. llmeval-env/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_bicluster.cpython-310.pyc +0 -0
  15. llmeval-env/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_birch.cpython-310.pyc +0 -0
  16. llmeval-env/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_bisect_k_means.cpython-310.pyc +0 -0
  17. llmeval-env/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_dbscan.cpython-310.pyc +0 -0
  18. llmeval-env/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_feature_agglomeration.cpython-310.pyc +0 -0
  19. llmeval-env/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_kmeans.cpython-310.pyc +0 -0
  20. llmeval-env/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_mean_shift.cpython-310.pyc +0 -0
  21. llmeval-env/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_optics.cpython-310.pyc +0 -0
  22. llmeval-env/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_spectral.cpython-310.pyc +0 -0
  23. llmeval-env/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  24. llmeval-env/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/common.cpython-310.pyc +0 -0
  25. llmeval-env/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_affinity_propagation.cpython-310.pyc +0 -0
  26. llmeval-env/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_bicluster.cpython-310.pyc +0 -0
  27. llmeval-env/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_birch.cpython-310.pyc +0 -0
  28. llmeval-env/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_bisect_k_means.cpython-310.pyc +0 -0
  29. llmeval-env/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_feature_agglomeration.cpython-310.pyc +0 -0
  30. llmeval-env/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_hierarchical.cpython-310.pyc +0 -0
  31. llmeval-env/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_k_means.cpython-310.pyc +0 -0
  32. llmeval-env/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_mean_shift.cpython-310.pyc +0 -0
  33. llmeval-env/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_optics.cpython-310.pyc +0 -0
  34. llmeval-env/lib/python3.10/site-packages/sklearn/compose/__init__.py +20 -0
  35. llmeval-env/lib/python3.10/site-packages/sklearn/compose/__pycache__/__init__.cpython-310.pyc +0 -0
  36. llmeval-env/lib/python3.10/site-packages/sklearn/compose/__pycache__/_column_transformer.cpython-310.pyc +0 -0
  37. llmeval-env/lib/python3.10/site-packages/sklearn/compose/__pycache__/_target.cpython-310.pyc +0 -0
  38. llmeval-env/lib/python3.10/site-packages/sklearn/compose/_column_transformer.py +1463 -0
  39. llmeval-env/lib/python3.10/site-packages/sklearn/compose/_target.py +342 -0
  40. llmeval-env/lib/python3.10/site-packages/sklearn/compose/tests/__init__.py +0 -0
  41. llmeval-env/lib/python3.10/site-packages/sklearn/compose/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  42. llmeval-env/lib/python3.10/site-packages/sklearn/compose/tests/__pycache__/test_column_transformer.cpython-310.pyc +0 -0
  43. llmeval-env/lib/python3.10/site-packages/sklearn/compose/tests/__pycache__/test_target.cpython-310.pyc +0 -0
  44. llmeval-env/lib/python3.10/site-packages/sklearn/compose/tests/test_column_transformer.py +2582 -0
  45. llmeval-env/lib/python3.10/site-packages/sklearn/compose/tests/test_target.py +387 -0
  46. llmeval-env/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/api-v1-jdf-292.json.gz +3 -0
  47. llmeval-env/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/api-v1-jdf-40981.json.gz +3 -0
  48. llmeval-env/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/api-v1-jdl-dn-australian-l-2-dv-1-s-dact.json.gz +3 -0
  49. llmeval-env/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/data-v1-dl-49822.arff.gz +3 -0
  50. llmeval-env/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/api-v1-jd-561.json.gz +3 -0
llmeval-env/lib/python3.10/site-packages/sklearn/_build_utils/__init__.py ADDED
@@ -0,0 +1,115 @@
+ """
+ Utilities useful during the build.
+ """
+ # author: Andy Mueller, Gael Varoquaux
+ # license: BSD
+
+
+ import contextlib
+ import os
+
+ import sklearn
+
+ from .._min_dependencies import CYTHON_MIN_VERSION
+ from ..externals._packaging.version import parse
+ from .openmp_helpers import check_openmp_support
+ from .pre_build_helpers import basic_check_build
+
+ DEFAULT_ROOT = "sklearn"
+
+
+ def _check_cython_version():
+     message = (
+         "Please install Cython with a version >= {0} in order "
+         "to build a scikit-learn from source."
+     ).format(CYTHON_MIN_VERSION)
+     try:
+         import Cython
+     except ModuleNotFoundError as e:
+         # Re-raise with more informative error message instead:
+         raise ModuleNotFoundError(message) from e
+
+     if parse(Cython.__version__) < parse(CYTHON_MIN_VERSION):
+         message += " The current version of Cython is {} installed in {}.".format(
+             Cython.__version__, Cython.__path__
+         )
+         raise ValueError(message)
+
+
+ def cythonize_extensions(extension):
+     """Check that a recent Cython is available and cythonize extensions"""
+     _check_cython_version()
+     from Cython.Build import cythonize
+
+     # Fast fail before cythonization if compiler fails compiling basic test
+     # code even without OpenMP
+     basic_check_build()
+
+     # check simple compilation with OpenMP. If it fails scikit-learn will be
+     # built without OpenMP and the test test_openmp_supported in the test suite
+     # will fail.
+     # `check_openmp_support` compiles a small test program to see if the
+     # compilers are properly configured to build with OpenMP. This is expensive
+     # and we only want to call this function once.
+     # The result of this check is cached as a private attribute on the sklearn
+     # module (only at build-time) to be used in the build_ext subclass defined
+     # in the top-level setup.py file to actually build the compiled extensions
+     # with OpenMP flags if needed.
+     sklearn._OPENMP_SUPPORTED = check_openmp_support()
+
+     n_jobs = 1
+     with contextlib.suppress(ImportError):
+         import joblib
+
+         n_jobs = joblib.cpu_count()
+
+     # Additional checks for Cython
+     cython_enable_debug_directives = (
+         os.environ.get("SKLEARN_ENABLE_DEBUG_CYTHON_DIRECTIVES", "0") != "0"
+     )
+
+     compiler_directives = {
+         "language_level": 3,
+         "boundscheck": cython_enable_debug_directives,
+         "wraparound": False,
+         "initializedcheck": False,
+         "nonecheck": False,
+         "cdivision": True,
+         "profile": False,
+     }
+
+     return cythonize(
+         extension,
+         nthreads=n_jobs,
+         compiler_directives=compiler_directives,
+         annotate=False,
+     )
+
+
+ def gen_from_templates(templates):
+     """Generate cython files from a list of templates"""
+     # Lazy import because cython is not a runtime dependency.
+     from Cython import Tempita
+
+     for template in templates:
+         outfile = template.replace(".tp", "")
+
+         # if the template is not updated, no need to output the cython file
+         if not (
+             os.path.exists(outfile)
+             and os.stat(template).st_mtime < os.stat(outfile).st_mtime
+         ):
+             with open(template, "r") as f:
+                 tmpl = f.read()
+
+             tmpl_ = Tempita.sub(tmpl)
+
+             warn_msg = (
+                 "# WARNING: Do not edit this file directly.\n"
+                 f"# It is automatically generated from {template!r}.\n"
+                 "# Changes must be made there.\n\n"
+             )
+
+             with open(outfile, "w") as f:
+                 f.write(warn_msg)
+                 f.write(tmpl_)
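
For context, a minimal sketch of how a build script might drive these helpers; the template path and extension name below are hypothetical, not taken from this commit:

# Hypothetical build-script sketch (not part of this commit).
# gen_from_templates() regenerates a .pyx file from its Tempita .tp template
# only when the template's mtime is newer than the generated file's, and
# cythonize_extensions() then runs Cython over the extension modules.
from setuptools import Extension
from sklearn._build_utils import cythonize_extensions, gen_from_templates

gen_from_templates(["sklearn/utils/_seq_dataset.pyx.tp"])  # assumed path
ext = Extension(
    "sklearn.utils._seq_dataset",
    sources=["sklearn/utils/_seq_dataset.pyx"],
)
ext_modules = cythonize_extensions([ext])  # list of cythonized extensions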
llmeval-env/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.51 kB)

llmeval-env/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/openmp_helpers.cpython-310.pyc ADDED
Binary file (2.81 kB)

llmeval-env/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/pre_build_helpers.cpython-310.pyc ADDED
Binary file (1.77 kB)

llmeval-env/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/tempita.cpython-310.pyc ADDED
Binary file (1.63 kB)

llmeval-env/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/version.cpython-310.pyc ADDED
Binary file (675 Bytes)
llmeval-env/lib/python3.10/site-packages/sklearn/_build_utils/openmp_helpers.py ADDED
@@ -0,0 +1,123 @@
+ """Helpers for OpenMP support during the build."""
+
+ # This code is adapted for a large part from the astropy openmp helpers, which
+ # can be found at: https://github.com/astropy/extension-helpers/blob/master/extension_helpers/_openmp_helpers.py  # noqa
+
+
+ import os
+ import sys
+ import textwrap
+ import warnings
+
+ from .pre_build_helpers import compile_test_program
+
+
+ def get_openmp_flag():
+     if sys.platform == "win32":
+         return ["/openmp"]
+     elif sys.platform == "darwin" and "openmp" in os.getenv("CPPFLAGS", ""):
+         # -fopenmp can't be passed as compile flag when using Apple-clang.
+         # OpenMP support has to be enabled during preprocessing.
+         #
+         # For example, our macOS wheel build jobs use the following environment
+         # variables to build with Apple-clang and the brew installed "libomp":
+         #
+         # export CPPFLAGS="$CPPFLAGS -Xpreprocessor -fopenmp"
+         # export CFLAGS="$CFLAGS -I/usr/local/opt/libomp/include"
+         # export CXXFLAGS="$CXXFLAGS -I/usr/local/opt/libomp/include"
+         # export LDFLAGS="$LDFLAGS -Wl,-rpath,/usr/local/opt/libomp/lib
+         #                          -L/usr/local/opt/libomp/lib -lomp"
+         return []
+     # Default flag for GCC and clang:
+     return ["-fopenmp"]
+
+
+ def check_openmp_support():
+     """Check whether OpenMP test code can be compiled and run"""
+     if "PYODIDE_PACKAGE_ABI" in os.environ:
+         # Pyodide doesn't support OpenMP
+         return False
+
+     code = textwrap.dedent("""\
+         #include <omp.h>
+         #include <stdio.h>
+         int main(void) {
+         #pragma omp parallel
+         printf("nthreads=%d\\n", omp_get_num_threads());
+         return 0;
+         }
+         """)
+
+     extra_preargs = os.getenv("LDFLAGS", None)
+     if extra_preargs is not None:
+         extra_preargs = extra_preargs.strip().split(" ")
+         # FIXME: temporary fix to link against system libraries on linux
+         # "-Wl,--sysroot=/" should be removed
+         extra_preargs = [
+             flag
+             for flag in extra_preargs
+             if flag.startswith(("-L", "-Wl,-rpath", "-l", "-Wl,--sysroot=/"))
+         ]
+
+     extra_postargs = get_openmp_flag()
+
+     openmp_exception = None
+     try:
+         output = compile_test_program(
+             code, extra_preargs=extra_preargs, extra_postargs=extra_postargs
+         )
+
+         if output and "nthreads=" in output[0]:
+             nthreads = int(output[0].strip().split("=")[1])
+             openmp_supported = len(output) == nthreads
+         elif "PYTHON_CROSSENV" in os.environ:
+             # Since we can't run the test program when cross-compiling
+             # assume that openmp is supported if the program can be
+             # compiled.
+             openmp_supported = True
+         else:
+             openmp_supported = False
+
+     except Exception as exception:
+         # We could be more specific and only catch: CompileError, LinkError,
+         # and subprocess.CalledProcessError.
+         # setuptools introduced CompileError and LinkError, but that requires
+         # version 61.1. Even the latest version of Ubuntu (22.04LTS) only
+         # ships with 59.6. So for now we catch all exceptions and reraise a
+         # generic exception with the original error message instead:
+         openmp_supported = False
+         openmp_exception = exception
+
+     if not openmp_supported:
+         if os.getenv("SKLEARN_FAIL_NO_OPENMP"):
+             raise Exception(
+                 "Failed to build scikit-learn with OpenMP support"
+             ) from openmp_exception
+         else:
+             message = textwrap.dedent("""
+
+                 ***********
+                 * WARNING *
+                 ***********
+
+                 It seems that scikit-learn cannot be built with OpenMP.
+
+                 - Make sure you have followed the installation instructions:
+
+                     https://scikit-learn.org/dev/developers/advanced_installation.html
+
+                 - If your compiler supports OpenMP but you still see this
+                   message, please submit a bug report at:
+
+                     https://github.com/scikit-learn/scikit-learn/issues
+
+                 - The build will continue with OpenMP-based parallelism
+                   disabled. Note however that some estimators will run in
+                   sequential mode instead of leveraging thread-based
+                   parallelism.
+
+                 ***
+                 """)
+             warnings.warn(message)
+
+     return openmp_supported
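
The support check above counts output lines: the test program prints one "nthreads=N" line per OpenMP thread, so with OpenMP active the number of lines equals the reported thread count. A minimal sketch of that logic on hypothetical captured output:

# Sketch of the detection logic above, on hypothetical captured output.
output = ["nthreads=4"] * 4  # one line per thread when OpenMP works
nthreads = int(output[0].strip().split("=")[1])  # -> 4
openmp_supported = len(output) == nthreads  # True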
llmeval-env/lib/python3.10/site-packages/sklearn/_build_utils/pre_build_helpers.py ADDED
@@ -0,0 +1,73 @@
+ """Helpers to check build environment before actual build of scikit-learn"""
+
+ import glob
+ import os
+ import subprocess
+ import sys
+ import tempfile
+ import textwrap
+
+ from setuptools.command.build_ext import customize_compiler, new_compiler
+
+
+ def compile_test_program(code, extra_preargs=None, extra_postargs=None):
+     """Check that some C code can be compiled and run"""
+     ccompiler = new_compiler()
+     customize_compiler(ccompiler)
+
+     start_dir = os.path.abspath(".")
+
+     with tempfile.TemporaryDirectory() as tmp_dir:
+         try:
+             os.chdir(tmp_dir)
+
+             # Write test program
+             with open("test_program.c", "w") as f:
+                 f.write(code)
+
+             os.mkdir("objects")
+
+             # Compile, test program
+             ccompiler.compile(
+                 ["test_program.c"], output_dir="objects", extra_postargs=extra_postargs
+             )
+
+             # Link test program
+             objects = glob.glob(os.path.join("objects", "*" + ccompiler.obj_extension))
+             ccompiler.link_executable(
+                 objects,
+                 "test_program",
+                 extra_preargs=extra_preargs,
+                 extra_postargs=extra_postargs,
+             )
+
+             if "PYTHON_CROSSENV" not in os.environ:
+                 # Run test program if not cross compiling
+                 # will raise a CalledProcessError if return code was non-zero
+                 output = subprocess.check_output("./test_program")
+                 output = output.decode(sys.stdout.encoding or "utf-8").splitlines()
+             else:
+                 # Return an empty output if we are cross compiling
+                 # as we cannot run the test_program
+                 output = []
+         except Exception:
+             raise
+         finally:
+             os.chdir(start_dir)
+
+     return output
+
+
+ def basic_check_build():
+     """Check basic compilation and linking of C code"""
+     if "PYODIDE_PACKAGE_ABI" in os.environ:
+         # The following check won't work in pyodide
+         return
+
+     code = textwrap.dedent("""\
+         #include <stdio.h>
+         int main(void) {
+         return 0;
+         }
+         """)
+     compile_test_program(code)
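
A minimal usage sketch of compile_test_program, assuming a working local C toolchain (the C snippet itself is illustrative):

import textwrap
from sklearn._build_utils.pre_build_helpers import compile_test_program

code = textwrap.dedent('''\
    #include <stdio.h>
    int main(void) {
    printf("hello\\n");
    return 0;
    }
    ''')
output = compile_test_program(code)  # ["hello"], or [] when cross-compiling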
llmeval-env/lib/python3.10/site-packages/sklearn/_build_utils/tempita.py ADDED
@@ -0,0 +1,57 @@
+ import argparse
+ import os
+
+ from Cython import Tempita as tempita
+
+ # XXX: If this import ever fails (does it really?), vendor either
+ # cython.tempita or numpy/npy_tempita.
+
+
+ def process_tempita(fromfile, outfile=None):
+     """Process tempita templated file and write out the result.
+
+     The template file is expected to end in `.c.tp` or `.pyx.tp`:
+     E.g. processing `template.c.tp` generates `template.c`.
+
+     """
+     with open(fromfile, "r", encoding="utf-8") as f:
+         template_content = f.read()
+
+     template = tempita.Template(template_content)
+     content = template.substitute()
+
+     with open(outfile, "w", encoding="utf-8") as f:
+         f.write(content)
+
+
+ def main():
+     parser = argparse.ArgumentParser()
+     parser.add_argument("infile", type=str, help="Path to the input file")
+     parser.add_argument("-o", "--outdir", type=str, help="Path to the output directory")
+     parser.add_argument(
+         "-i",
+         "--ignore",
+         type=str,
+         help=(
+             "An ignored input - may be useful to add a "
+             "dependency between custom targets"
+         ),
+     )
+     args = parser.parse_args()
+
+     if not args.infile.endswith(".tp"):
+         raise ValueError(f"Unexpected extension: {args.infile}")
+
+     if not args.outdir:
+         raise ValueError("Missing `--outdir` argument to tempita.py")
+
+     outdir_abs = os.path.join(os.getcwd(), args.outdir)
+     outfile = os.path.join(
+         outdir_abs, os.path.splitext(os.path.split(args.infile)[1])[0]
+     )
+
+     process_tempita(args.infile, outfile)
+
+
+ if __name__ == "__main__":
+     main()
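
A usage sketch with an illustrative template path; the command-line form mirrors how a build backend would invoke the script:

# Hypothetical invocation (paths are illustrative):
#   python tempita.py sklearn/utils/_seq_dataset.pyx.tp -o build/generated
# Equivalent direct call:
from sklearn._build_utils.tempita import process_tempita

process_tempita("_seq_dataset.pyx.tp", outfile="_seq_dataset.pyx")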
llmeval-env/lib/python3.10/site-packages/sklearn/_build_utils/version.py ADDED
@@ -0,0 +1,14 @@
+ #!/usr/bin/env python
+ """ Extract version number from __init__.py
+ """
+
+ import os
+
+ sklearn_init = os.path.join(os.path.dirname(__file__), "../__init__.py")
+
+ data = open(sklearn_init).readlines()
+ version_line = next(line for line in data if line.startswith("__version__"))
+
+ version = version_line.strip().split(" = ")[1].replace('"', "").replace("'", "")
+
+ print(version)
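
A sketch of the parsing step on a sample version line (the version value is assumed, not taken from this commit):

version_line = '__version__ = "1.4.2"\n'  # assumed contents of __init__.py
version = version_line.strip().split(" = ")[1].replace('"', "").replace("'", "")
print(version)  # -> 1.4.2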
llmeval-env/lib/python3.10/site-packages/sklearn/cluster/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.4 kB)

llmeval-env/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_affinity_propagation.cpython-310.pyc ADDED
Binary file (17.2 kB)

llmeval-env/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_agglomerative.cpython-310.pyc ADDED
Binary file (37.4 kB)

llmeval-env/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_bicluster.cpython-310.pyc ADDED
Binary file (19.9 kB)

llmeval-env/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_birch.cpython-310.pyc ADDED
Binary file (19.4 kB)

llmeval-env/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_bisect_k_means.cpython-310.pyc ADDED
Binary file (16 kB)

llmeval-env/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_dbscan.cpython-310.pyc ADDED
Binary file (17.2 kB)

llmeval-env/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_feature_agglomeration.cpython-310.pyc ADDED
Binary file (3.34 kB)

llmeval-env/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_kmeans.cpython-310.pyc ADDED
Binary file (62 kB)

llmeval-env/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_mean_shift.cpython-310.pyc ADDED
Binary file (17.9 kB)

llmeval-env/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_optics.cpython-310.pyc ADDED
Binary file (35.4 kB)

llmeval-env/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_spectral.cpython-310.pyc ADDED
Binary file (26.3 kB)

llmeval-env/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (194 Bytes)

llmeval-env/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/common.cpython-310.pyc ADDED
Binary file (831 Bytes)

llmeval-env/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_affinity_propagation.cpython-310.pyc ADDED
Binary file (9.65 kB)

llmeval-env/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_bicluster.cpython-310.pyc ADDED
Binary file (7.74 kB)

llmeval-env/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_birch.cpython-310.pyc ADDED
Binary file (7.21 kB)

llmeval-env/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_bisect_k_means.cpython-310.pyc ADDED
Binary file (4.51 kB)

llmeval-env/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_feature_agglomeration.cpython-310.pyc ADDED
Binary file (2.77 kB)

llmeval-env/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_hierarchical.cpython-310.pyc ADDED
Binary file (22.2 kB)

llmeval-env/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_k_means.cpython-310.pyc ADDED
Binary file (31.3 kB)

llmeval-env/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_mean_shift.cpython-310.pyc ADDED
Binary file (5.79 kB)

llmeval-env/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_optics.cpython-310.pyc ADDED
Binary file (16 kB)
llmeval-env/lib/python3.10/site-packages/sklearn/compose/__init__.py ADDED
@@ -0,0 +1,20 @@
+ """Meta-estimators for building composite models with transformers
+
+ In addition to its current contents, this module will eventually be home to
+ refurbished versions of Pipeline and FeatureUnion.
+
+ """
+
+ from ._column_transformer import (
+     ColumnTransformer,
+     make_column_selector,
+     make_column_transformer,
+ )
+ from ._target import TransformedTargetRegressor
+
+ __all__ = [
+     "ColumnTransformer",
+     "make_column_transformer",
+     "TransformedTargetRegressor",
+     "make_column_selector",
+ ]
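
A minimal usage sketch of the public API exported above; the column names and data are illustrative:

import pandas as pd
from sklearn.compose import make_column_transformer
from sklearn.preprocessing import OneHotEncoder, StandardScaler

X = pd.DataFrame({"city": ["Paris", "London"], "temp": [21.0, 15.0]})
ct = make_column_transformer(
    (StandardScaler(), ["temp"]),
    (OneHotEncoder(), ["city"]),
)
Xt = ct.fit_transform(X)  # scaled temp column + one-hot city columns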
llmeval-env/lib/python3.10/site-packages/sklearn/compose/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (623 Bytes)

llmeval-env/lib/python3.10/site-packages/sklearn/compose/__pycache__/_column_transformer.cpython-310.pyc ADDED
Binary file (49.5 kB)

llmeval-env/lib/python3.10/site-packages/sklearn/compose/__pycache__/_target.cpython-310.pyc ADDED
Binary file (9.69 kB)
llmeval-env/lib/python3.10/site-packages/sklearn/compose/_column_transformer.py ADDED
@@ -0,0 +1,1463 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ The :mod:`sklearn.compose._column_transformer` module implements utilities
3
+ to work with heterogeneous data and to apply different transformers to
4
+ different columns.
5
+ """
6
+
7
+ # Author: Andreas Mueller
8
+ # Joris Van den Bossche
9
+ # License: BSD
10
+ import warnings
11
+ from collections import Counter
12
+ from itertools import chain
13
+ from numbers import Integral, Real
14
+
15
+ import numpy as np
16
+ from scipy import sparse
17
+
18
+ from ..base import TransformerMixin, _fit_context, clone
19
+ from ..pipeline import _fit_transform_one, _name_estimators, _transform_one
20
+ from ..preprocessing import FunctionTransformer
21
+ from ..utils import Bunch, _get_column_indices, _safe_indexing
22
+ from ..utils._estimator_html_repr import _VisualBlock
23
+ from ..utils._metadata_requests import METHODS
24
+ from ..utils._param_validation import HasMethods, Hidden, Interval, StrOptions
25
+ from ..utils._set_output import (
26
+ _get_container_adapter,
27
+ _get_output_config,
28
+ _safe_set_output,
29
+ )
30
+ from ..utils.metadata_routing import (
31
+ MetadataRouter,
32
+ MethodMapping,
33
+ _raise_for_params,
34
+ _routing_enabled,
35
+ process_routing,
36
+ )
37
+ from ..utils.metaestimators import _BaseComposition
38
+ from ..utils.parallel import Parallel, delayed
39
+ from ..utils.validation import (
40
+ _check_feature_names_in,
41
+ _get_feature_names,
42
+ _is_pandas_df,
43
+ _num_samples,
44
+ check_array,
45
+ check_is_fitted,
46
+ )
47
+
48
+ __all__ = ["ColumnTransformer", "make_column_transformer", "make_column_selector"]
49
+
50
+
51
+ _ERR_MSG_1DCOLUMN = (
52
+ "1D data passed to a transformer that expects 2D data. "
53
+ "Try to specify the column selection as a list of one "
54
+ "item instead of a scalar."
55
+ )
56
+
57
+
58
+ class ColumnTransformer(TransformerMixin, _BaseComposition):
59
+ """Applies transformers to columns of an array or pandas DataFrame.
60
+
61
+ This estimator allows different columns or column subsets of the input
62
+ to be transformed separately and the features generated by each transformer
63
+ will be concatenated to form a single feature space.
64
+ This is useful for heterogeneous or columnar data, to combine several
65
+ feature extraction mechanisms or transformations into a single transformer.
66
+
67
+ Read more in the :ref:`User Guide <column_transformer>`.
68
+
69
+ .. versionadded:: 0.20
70
+
71
+ Parameters
72
+ ----------
73
+ transformers : list of tuples
74
+ List of (name, transformer, columns) tuples specifying the
75
+ transformer objects to be applied to subsets of the data.
76
+
77
+ name : str
78
+ Like in Pipeline and FeatureUnion, this allows the transformer and
79
+ its parameters to be set using ``set_params`` and searched in grid
80
+ search.
81
+ transformer : {'drop', 'passthrough'} or estimator
82
+ Estimator must support :term:`fit` and :term:`transform`.
83
+ Special-cased strings 'drop' and 'passthrough' are accepted as
84
+ well, to indicate to drop the columns or to pass them through
85
+ untransformed, respectively.
86
+ columns : str, array-like of str, int, array-like of int, \
87
+ array-like of bool, slice or callable
88
+ Indexes the data on its second axis. Integers are interpreted as
89
+ positional columns, while strings can reference DataFrame columns
90
+ by name. A scalar string or int should be used where
91
+ ``transformer`` expects X to be a 1d array-like (vector),
92
+ otherwise a 2d array will be passed to the transformer.
93
+ A callable is passed the input data `X` and can return any of the
94
+ above. To select multiple columns by name or dtype, you can use
95
+ :obj:`make_column_selector`.
96
+
97
+ remainder : {'drop', 'passthrough'} or estimator, default='drop'
98
+ By default, only the specified columns in `transformers` are
99
+ transformed and combined in the output, and the non-specified
100
+ columns are dropped. (default of ``'drop'``).
101
+ By specifying ``remainder='passthrough'``, all remaining columns that
102
+ were not specified in `transformers`, but present in the data passed
103
+ to `fit` will be automatically passed through. This subset of columns
104
+ is concatenated with the output of the transformers. For dataframes,
105
+ extra columns not seen during `fit` will be excluded from the output
106
+ of `transform`.
107
+ By setting ``remainder`` to be an estimator, the remaining
108
+ non-specified columns will use the ``remainder`` estimator. The
109
+ estimator must support :term:`fit` and :term:`transform`.
110
+ Note that using this feature requires that the DataFrame columns
111
+ input at :term:`fit` and :term:`transform` have identical order.
112
+
113
+ sparse_threshold : float, default=0.3
114
+ If the output of the different transformers contains sparse matrices,
115
+ these will be stacked as a sparse matrix if the overall density is
116
+ lower than this value. Use ``sparse_threshold=0`` to always return
117
+ dense. When the transformed output consists of all dense data, the
118
+ stacked result will be dense, and this keyword will be ignored.
119
+
120
+ n_jobs : int, default=None
121
+ Number of jobs to run in parallel.
122
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
123
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
124
+ for more details.
125
+
126
+ transformer_weights : dict, default=None
127
+ Multiplicative weights for features per transformer. The output of the
128
+ transformer is multiplied by these weights. Keys are transformer names,
129
+ values the weights.
130
+
131
+ verbose : bool, default=False
132
+ If True, the time elapsed while fitting each transformer will be
133
+ printed as it is completed.
134
+
135
+ verbose_feature_names_out : bool, default=True
136
+ If True, :meth:`ColumnTransformer.get_feature_names_out` will prefix
137
+ all feature names with the name of the transformer that generated that
138
+ feature.
139
+ If False, :meth:`ColumnTransformer.get_feature_names_out` will not
140
+ prefix any feature names and will error if feature names are not
141
+ unique.
142
+
143
+ .. versionadded:: 1.0
144
+
145
+ Attributes
146
+ ----------
147
+ transformers_ : list
148
+ The collection of fitted transformers as tuples of (name,
149
+ fitted_transformer, column). `fitted_transformer` can be an estimator,
150
+ or `'drop'`; `'passthrough'` is replaced with an equivalent
151
+ :class:`~sklearn.preprocessing.FunctionTransformer`. In case there were
152
+ no columns selected, this will be the unfitted transformer. If there
153
+ are remaining columns, the final element is a tuple of the form:
154
+ ('remainder', transformer, remaining_columns) corresponding to the
155
+ ``remainder`` parameter. If there are remaining columns, then
156
+ ``len(transformers_)==len(transformers)+1``, otherwise
157
+ ``len(transformers_)==len(transformers)``.
158
+
159
+ named_transformers_ : :class:`~sklearn.utils.Bunch`
160
+ Read-only attribute to access any transformer by given name.
161
+ Keys are transformer names and values are the fitted transformer
162
+ objects.
163
+
164
+ sparse_output_ : bool
165
+ Boolean flag indicating whether the output of ``transform`` is a
166
+ sparse matrix or a dense numpy array, which depends on the output
167
+ of the individual transformers and the `sparse_threshold` keyword.
168
+
169
+ output_indices_ : dict
170
+ A dictionary from each transformer name to a slice, where the slice
171
+ corresponds to indices in the transformed output. This is useful to
172
+ inspect which transformer is responsible for which transformed
173
+ feature(s).
174
+
175
+ .. versionadded:: 1.0
176
+
177
+ n_features_in_ : int
178
+ Number of features seen during :term:`fit`. Only defined if the
179
+ underlying transformers expose such an attribute when fit.
180
+
181
+ .. versionadded:: 0.24
182
+
183
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
184
+ Names of features seen during :term:`fit`. Defined only when `X`
185
+ has feature names that are all strings.
186
+
187
+ .. versionadded:: 1.0
188
+
189
+ See Also
190
+ --------
191
+ make_column_transformer : Convenience function for
192
+ combining the outputs of multiple transformer objects applied to
193
+ column subsets of the original feature space.
194
+ make_column_selector : Convenience function for selecting
195
+ columns based on datatype or the columns name with a regex pattern.
196
+
197
+ Notes
198
+ -----
199
+ The order of the columns in the transformed feature matrix follows the
200
+ order of how the columns are specified in the `transformers` list.
201
+ Columns of the original feature matrix that are not specified are
202
+ dropped from the resulting transformed feature matrix, unless specified
203
+ in the `passthrough` keyword. Those columns specified with `passthrough`
204
+ are added at the right to the output of the transformers.
205
+
206
+ Examples
207
+ --------
208
+ >>> import numpy as np
209
+ >>> from sklearn.compose import ColumnTransformer
210
+ >>> from sklearn.preprocessing import Normalizer
211
+ >>> ct = ColumnTransformer(
212
+ ... [("norm1", Normalizer(norm='l1'), [0, 1]),
213
+ ... ("norm2", Normalizer(norm='l1'), slice(2, 4))])
214
+ >>> X = np.array([[0., 1., 2., 2.],
215
+ ... [1., 1., 0., 1.]])
216
+ >>> # Normalizer scales each row of X to unit norm. A separate scaling
217
+ >>> # is applied for the two first and two last elements of each
218
+ >>> # row independently.
219
+ >>> ct.fit_transform(X)
220
+ array([[0. , 1. , 0.5, 0.5],
221
+ [0.5, 0.5, 0. , 1. ]])
222
+
223
+ :class:`ColumnTransformer` can be configured with a transformer that requires
224
+ a 1d array by setting the column to a string:
225
+
226
+ >>> from sklearn.feature_extraction.text import CountVectorizer
227
+ >>> from sklearn.preprocessing import MinMaxScaler
228
+ >>> import pandas as pd # doctest: +SKIP
229
+ >>> X = pd.DataFrame({
230
+ ... "documents": ["First item", "second one here", "Is this the last?"],
231
+ ... "width": [3, 4, 5],
232
+ ... }) # doctest: +SKIP
233
+ >>> # "documents" is a string which configures ColumnTransformer to
234
+ >>> # pass the documents column as a 1d array to the CountVectorizer
235
+ >>> ct = ColumnTransformer(
236
+ ... [("text_preprocess", CountVectorizer(), "documents"),
237
+ ... ("num_preprocess", MinMaxScaler(), ["width"])])
238
+ >>> X_trans = ct.fit_transform(X) # doctest: +SKIP
239
+
240
+ For a more detailed example of usage, see
241
+ :ref:`sphx_glr_auto_examples_compose_plot_column_transformer_mixed_types.py`.
242
+ """
243
+
244
+ _required_parameters = ["transformers"]
245
+
246
+ _parameter_constraints: dict = {
247
+ "transformers": [list, Hidden(tuple)],
248
+ "remainder": [
249
+ StrOptions({"drop", "passthrough"}),
250
+ HasMethods(["fit", "transform"]),
251
+ HasMethods(["fit_transform", "transform"]),
252
+ ],
253
+ "sparse_threshold": [Interval(Real, 0, 1, closed="both")],
254
+ "n_jobs": [Integral, None],
255
+ "transformer_weights": [dict, None],
256
+ "verbose": ["verbose"],
257
+ "verbose_feature_names_out": ["boolean"],
258
+ }
259
+
260
+ def __init__(
261
+ self,
262
+ transformers,
263
+ *,
264
+ remainder="drop",
265
+ sparse_threshold=0.3,
266
+ n_jobs=None,
267
+ transformer_weights=None,
268
+ verbose=False,
269
+ verbose_feature_names_out=True,
270
+ ):
271
+ self.transformers = transformers
272
+ self.remainder = remainder
273
+ self.sparse_threshold = sparse_threshold
274
+ self.n_jobs = n_jobs
275
+ self.transformer_weights = transformer_weights
276
+ self.verbose = verbose
277
+ self.verbose_feature_names_out = verbose_feature_names_out
278
+
279
+ @property
280
+ def _transformers(self):
281
+ """
282
+ Internal list of transformer only containing the name and
283
+ transformers, dropping the columns.
284
+
285
+ DO NOT USE: This is for the implementation of get_params via
286
+ BaseComposition._get_params which expects lists of tuples of len 2.
287
+
288
+ To iterate through the transformers, use ``self._iter`` instead.
289
+ """
290
+ try:
291
+ return [(name, trans) for name, trans, _ in self.transformers]
292
+ except (TypeError, ValueError):
293
+ return self.transformers
294
+
295
+ @_transformers.setter
296
+ def _transformers(self, value):
297
+ """DO NOT USE: This is for the implementation of set_params via
298
+ BaseComposition._get_params which gives lists of tuples of len 2.
299
+ """
300
+ try:
301
+ self.transformers = [
302
+ (name, trans, col)
303
+ for ((name, trans), (_, _, col)) in zip(value, self.transformers)
304
+ ]
305
+ except (TypeError, ValueError):
306
+ self.transformers = value
307
+
308
+ def set_output(self, *, transform=None):
309
+ """Set the output container when `"transform"` and `"fit_transform"` are called.
310
+
311
+ Calling `set_output` will set the output of all estimators in `transformers`
312
+ and `transformers_`.
313
+
314
+ Parameters
315
+ ----------
316
+ transform : {"default", "pandas"}, default=None
317
+ Configure output of `transform` and `fit_transform`.
318
+
319
+ - `"default"`: Default output format of a transformer
320
+ - `"pandas"`: DataFrame output
321
+ - `"polars"`: Polars output
322
+ - `None`: Transform configuration is unchanged
323
+
324
+ .. versionadded:: 1.4
325
+ `"polars"` option was added.
326
+
327
+ Returns
328
+ -------
329
+ self : estimator instance
330
+ Estimator instance.
331
+ """
332
+ super().set_output(transform=transform)
333
+
334
+ transformers = (
335
+ trans
336
+ for _, trans, _ in chain(
337
+ self.transformers, getattr(self, "transformers_", [])
338
+ )
339
+ if trans not in {"passthrough", "drop"}
340
+ )
341
+ for trans in transformers:
342
+ _safe_set_output(trans, transform=transform)
343
+
344
+ if self.remainder not in {"passthrough", "drop"}:
345
+ _safe_set_output(self.remainder, transform=transform)
346
+
347
+ return self
348
+
349
+ def get_params(self, deep=True):
350
+ """Get parameters for this estimator.
351
+
352
+ Returns the parameters given in the constructor as well as the
353
+ estimators contained within the `transformers` of the
354
+ `ColumnTransformer`.
355
+
356
+ Parameters
357
+ ----------
358
+ deep : bool, default=True
359
+ If True, will return the parameters for this estimator and
360
+ contained subobjects that are estimators.
361
+
362
+ Returns
363
+ -------
364
+ params : dict
365
+ Parameter names mapped to their values.
366
+ """
367
+ return self._get_params("_transformers", deep=deep)
368
+
369
+ def set_params(self, **kwargs):
370
+ """Set the parameters of this estimator.
371
+
372
+ Valid parameter keys can be listed with ``get_params()``. Note that you
373
+ can directly set the parameters of the estimators contained in
374
+ `transformers` of `ColumnTransformer`.
375
+
376
+ Parameters
377
+ ----------
378
+ **kwargs : dict
379
+ Estimator parameters.
380
+
381
+ Returns
382
+ -------
383
+ self : ColumnTransformer
384
+ This estimator.
385
+ """
386
+ self._set_params("_transformers", **kwargs)
387
+ return self
388
+
389
+ def _iter(self, fitted, column_as_labels, skip_drop, skip_empty_columns):
390
+ """
391
+ Generate (name, trans, column, weight) tuples.
392
+
393
+
394
+ Parameters
395
+ ----------
396
+ fitted : bool
397
+ If True, use the fitted transformers (``self.transformers_``) to
398
+ iterate through transformers, else use the transformers passed by
399
+ the user (``self.transformers``).
400
+
401
+ column_as_labels : bool
402
+ If True, columns are returned as string labels. If False, columns
403
+ are returned as they were given by the user. This can only be True
404
+ if the ``ColumnTransformer`` is already fitted.
405
+
406
+ skip_drop : bool
407
+ If True, 'drop' transformers are filtered out.
408
+
409
+ skip_empty_columns : bool
410
+ If True, transformers with empty selected columns are filtered out.
411
+
412
+ Yields
413
+ ------
414
+ A generator of tuples containing:
415
+ - name : the name of the transformer
416
+ - transformer : the transformer object
417
+ - columns : the columns for that transformer
418
+ - weight : the weight of the transformer
419
+ """
420
+ if fitted:
421
+ transformers = self.transformers_
422
+ else:
423
+ # interleave the validated column specifiers
424
+ transformers = [
425
+ (name, trans, column)
426
+ for (name, trans, _), column in zip(self.transformers, self._columns)
427
+ ]
428
+ # add transformer tuple for remainder
429
+ if self._remainder[2]:
430
+ transformers = chain(transformers, [self._remainder])
431
+ get_weight = (self.transformer_weights or {}).get
432
+
433
+ for name, trans, columns in transformers:
434
+ if skip_drop and trans == "drop":
435
+ continue
436
+ if skip_empty_columns and _is_empty_column_selection(columns):
437
+ continue
438
+
439
+ if column_as_labels:
440
+ # Convert all columns to using their string labels
441
+ columns_is_scalar = np.isscalar(columns)
442
+
443
+ indices = self._transformer_to_input_indices[name]
444
+ columns = self.feature_names_in_[indices]
445
+
446
+ if columns_is_scalar:
447
+ # selection is done with one dimension
448
+ columns = columns[0]
449
+
450
+ yield (name, trans, columns, get_weight(name))
451
+
452
+ def _validate_transformers(self):
453
+ """Validate names of transformers and the transformers themselves.
454
+
455
+ This checks whether given transformers have the required methods, i.e.
456
+ `fit` or `fit_transform` and `transform` implemented.
457
+ """
458
+ if not self.transformers:
459
+ return
460
+
461
+ names, transformers, _ = zip(*self.transformers)
462
+
463
+ # validate names
464
+ self._validate_names(names)
465
+
466
+ # validate estimators
467
+ for t in transformers:
468
+ if t in ("drop", "passthrough"):
469
+ continue
470
+ if not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not hasattr(
471
+ t, "transform"
472
+ ):
473
+ # Used to validate the transformers in the `transformers` list
474
+ raise TypeError(
475
+ "All estimators should implement fit and "
476
+ "transform, or can be 'drop' or 'passthrough' "
477
+ "specifiers. '%s' (type %s) doesn't." % (t, type(t))
478
+ )
479
+
480
+ def _validate_column_callables(self, X):
481
+ """
482
+ Converts callable column specifications.
483
+
484
+ This stores a dictionary of the form `{step_name: column_indices}` and
485
+ calls the `columns` on `X` if `columns` is a callable for a given
486
+ transformer.
487
+
488
+ The results are then stored in `self._transformer_to_input_indices`.
489
+ """
490
+ all_columns = []
491
+ transformer_to_input_indices = {}
492
+ for name, _, columns in self.transformers:
493
+ if callable(columns):
494
+ columns = columns(X)
495
+ all_columns.append(columns)
496
+ transformer_to_input_indices[name] = _get_column_indices(X, columns)
497
+
498
+ self._columns = all_columns
499
+ self._transformer_to_input_indices = transformer_to_input_indices
500
+
501
+ def _validate_remainder(self, X):
502
+ """
503
+ Validates ``remainder`` and defines ``_remainder`` targeting
504
+ the remaining columns.
505
+ """
506
+ cols = set(chain(*self._transformer_to_input_indices.values()))
507
+ remaining = sorted(set(range(self.n_features_in_)) - cols)
508
+ self._remainder = ("remainder", self.remainder, remaining)
509
+ self._transformer_to_input_indices["remainder"] = remaining
510
+
511
+ @property
512
+ def named_transformers_(self):
513
+ """Access the fitted transformer by name.
514
+
515
+ Read-only attribute to access any transformer by given name.
516
+ Keys are transformer names and values are the fitted transformer
517
+ objects.
518
+ """
519
+ # Use Bunch object to improve autocomplete
520
+ return Bunch(**{name: trans for name, trans, _ in self.transformers_})
521
+
522
+ def _get_feature_name_out_for_transformer(self, name, trans, feature_names_in):
523
+ """Gets feature names of transformer.
524
+
525
+ Used in conjunction with self._iter(fitted=True) in get_feature_names_out.
526
+ """
527
+ column_indices = self._transformer_to_input_indices[name]
528
+ names = feature_names_in[column_indices]
529
+ # An actual transformer
530
+ if not hasattr(trans, "get_feature_names_out"):
531
+ raise AttributeError(
532
+ f"Transformer {name} (type {type(trans).__name__}) does "
533
+ "not provide get_feature_names_out."
534
+ )
535
+ return trans.get_feature_names_out(names)
536
+
537
+ def get_feature_names_out(self, input_features=None):
538
+ """Get output feature names for transformation.
539
+
540
+ Parameters
541
+ ----------
542
+ input_features : array-like of str or None, default=None
543
+ Input features.
544
+
545
+ - If `input_features` is `None`, then `feature_names_in_` is
546
+ used as feature names in. If `feature_names_in_` is not defined,
547
+ then the following input feature names are generated:
548
+ `["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
549
+ - If `input_features` is an array-like, then `input_features` must
550
+ match `feature_names_in_` if `feature_names_in_` is defined.
551
+
552
+ Returns
553
+ -------
554
+ feature_names_out : ndarray of str objects
555
+ Transformed feature names.
556
+ """
557
+ check_is_fitted(self)
558
+ input_features = _check_feature_names_in(self, input_features)
559
+
560
+ # List of tuples (name, feature_names_out)
561
+ transformer_with_feature_names_out = []
562
+ for name, trans, *_ in self._iter(
563
+ fitted=True,
564
+ column_as_labels=False,
565
+ skip_empty_columns=True,
566
+ skip_drop=True,
567
+ ):
568
+ feature_names_out = self._get_feature_name_out_for_transformer(
569
+ name, trans, input_features
570
+ )
571
+ if feature_names_out is None:
572
+ continue
573
+ transformer_with_feature_names_out.append((name, feature_names_out))
574
+
575
+ if not transformer_with_feature_names_out:
576
+ # No feature names
577
+ return np.array([], dtype=object)
578
+
579
+ return self._add_prefix_for_feature_names_out(
580
+ transformer_with_feature_names_out
581
+ )
582
+
583
+ def _add_prefix_for_feature_names_out(self, transformer_with_feature_names_out):
584
+ """Add prefix for feature names out that includes the transformer names.
585
+
586
+ Parameters
587
+ ----------
588
+ transformer_with_feature_names_out : list of tuples of (str, array-like of str)
589
+ The tuple consistent of the transformer's name and its feature names out.
590
+
591
+ Returns
592
+ -------
593
+ feature_names_out : ndarray of shape (n_features,), dtype=str
594
+ Transformed feature names.
595
+ """
596
+ if self.verbose_feature_names_out:
597
+ # Prefix the feature names out with the transformers name
598
+ names = list(
599
+ chain.from_iterable(
600
+ (f"{name}__{i}" for i in feature_names_out)
601
+ for name, feature_names_out in transformer_with_feature_names_out
602
+ )
603
+ )
604
+ return np.asarray(names, dtype=object)
605
+
606
+ # verbose_feature_names_out is False
607
+ # Check that names are all unique without a prefix
608
+ feature_names_count = Counter(
609
+ chain.from_iterable(s for _, s in transformer_with_feature_names_out)
610
+ )
611
+ top_6_overlap = [
612
+ name for name, count in feature_names_count.most_common(6) if count > 1
613
+ ]
614
+ top_6_overlap.sort()
615
+ if top_6_overlap:
616
+ if len(top_6_overlap) == 6:
617
+ # There are more than 5 overlapping names, we only show the 5
618
+ # of the feature names
619
+ names_repr = str(top_6_overlap[:5])[:-1] + ", ...]"
620
+ else:
621
+ names_repr = str(top_6_overlap)
622
+ raise ValueError(
623
+ f"Output feature names: {names_repr} are not unique. Please set "
624
+ "verbose_feature_names_out=True to add prefixes to feature names"
625
+ )
626
+
627
+ return np.concatenate(
628
+ [name for _, name in transformer_with_feature_names_out],
629
+ )
630
+
631
+ def _update_fitted_transformers(self, transformers):
632
+ """Set self.transformers_ from given transformers.
633
+
634
+ Parameters
635
+ ----------
636
+ transformers : list of estimators
637
+ The fitted estimators as the output of
638
+ `self._call_func_on_transformers(func=_fit_transform_one, ...)`.
639
+ That function doesn't include 'drop' or transformers for which no
640
+ column is selected. 'drop' is kept as is, and for the no-column
641
+ transformers the unfitted transformer is put in
642
+ `self.transformers_`.
643
+ """
644
+ # transformers are fitted; excludes 'drop' cases
645
+ fitted_transformers = iter(transformers)
646
+ transformers_ = []
647
+
648
+ for name, old, column, _ in self._iter(
649
+ fitted=False,
650
+ column_as_labels=False,
651
+ skip_drop=False,
652
+ skip_empty_columns=False,
653
+ ):
654
+ if old == "drop":
655
+ trans = "drop"
656
+ elif _is_empty_column_selection(column):
657
+ trans = old
658
+ else:
659
+ trans = next(fitted_transformers)
660
+ transformers_.append((name, trans, column))
661
+
662
+ # sanity check that transformers is exhausted
663
+ assert not list(fitted_transformers)
664
+ self.transformers_ = transformers_
665
+
666
+ def _validate_output(self, result):
667
+ """
668
+ Ensure that the output of each transformer is 2D. Otherwise
669
+ hstack can raise an error or produce incorrect results.
670
+ """
671
+ names = [
672
+ name
673
+ for name, _, _, _ in self._iter(
674
+ fitted=True,
675
+ column_as_labels=False,
676
+ skip_drop=True,
677
+ skip_empty_columns=True,
678
+ )
679
+ ]
680
+ for Xs, name in zip(result, names):
681
+ if not getattr(Xs, "ndim", 0) == 2 and not hasattr(Xs, "__dataframe__"):
682
+ raise ValueError(
683
+ "The output of the '{0}' transformer should be 2D (numpy array, "
684
+ "scipy sparse array, dataframe).".format(name)
685
+ )
686
+ if _get_output_config("transform", self)["dense"] == "pandas":
687
+ return
688
+ try:
689
+ import pandas as pd
690
+ except ImportError:
691
+ return
692
+ for Xs, name in zip(result, names):
693
+ if not _is_pandas_df(Xs):
694
+ continue
695
+ for col_name, dtype in Xs.dtypes.to_dict().items():
696
+ if getattr(dtype, "na_value", None) is not pd.NA:
697
+ continue
698
+ if pd.NA not in Xs[col_name].values:
699
+ continue
700
+ class_name = self.__class__.__name__
701
+ # TODO(1.6): replace warning with ValueError
702
+ warnings.warn(
703
+ (
704
+ f"The output of the '{name}' transformer for column"
705
+ f" '{col_name}' has dtype {dtype} and uses pandas.NA to"
706
+ " represent null values. Storing this output in a numpy array"
707
+ " can cause errors in downstream scikit-learn estimators, and"
708
+ " inefficiencies. Starting with scikit-learn version 1.6, this"
709
+ " will raise a ValueError. To avoid this problem you can (i)"
710
+ " store the output in a pandas DataFrame by using"
711
+ f" {class_name}.set_output(transform='pandas') or (ii) modify"
712
+ f" the input data or the '{name}' transformer to avoid the"
713
+ " presence of pandas.NA (for example by using"
714
+ " pandas.DataFrame.astype)."
715
+ ),
716
+ FutureWarning,
717
+ )
718
+
719
+ def _record_output_indices(self, Xs):
720
+ """
721
+ Record which transformer produced which column.
722
+ """
723
+ idx = 0
724
+ self.output_indices_ = {}
725
+
726
+ for transformer_idx, (name, _, _, _) in enumerate(
727
+ self._iter(
728
+ fitted=True,
729
+ column_as_labels=False,
730
+ skip_drop=True,
731
+ skip_empty_columns=True,
732
+ )
733
+ ):
734
+ n_columns = Xs[transformer_idx].shape[1]
735
+ self.output_indices_[name] = slice(idx, idx + n_columns)
736
+ idx += n_columns
737
+
738
+ # `_iter` only generates transformers that have a non empty
739
+ # selection. Here we set empty slices for transformers that
740
+ # generate no output, which are safe for indexing
741
+ all_names = [t[0] for t in self.transformers] + ["remainder"]
742
+ for name in all_names:
743
+ if name not in self.output_indices_:
744
+ self.output_indices_[name] = slice(0, 0)
745
+
746
+ def _log_message(self, name, idx, total):
747
+ if not self.verbose:
748
+ return None
749
+ return "(%d of %d) Processing %s" % (idx, total, name)
750
+
751
+ def _call_func_on_transformers(self, X, y, func, column_as_labels, routed_params):
752
+ """
753
+ Private function to fit and/or transform on demand.
754
+
755
+ Parameters
756
+ ----------
757
+ X : {array-like, dataframe} of shape (n_samples, n_features)
758
+ The data to be used in fit and/or transform.
759
+
760
+ y : array-like of shape (n_samples,)
761
+ Targets.
762
+
763
+ func : callable
764
+ Function to call, which can be _fit_transform_one or
765
+ _transform_one.
766
+
767
+ column_as_labels : bool
768
+ Used to iterate through transformers. If True, columns are returned
769
+ as strings. If False, columns are returned as they were given by
770
+ the user. Can be True only if the ``ColumnTransformer`` is already
771
+ fitted.
772
+
773
+ routed_params : dict
774
+ The routed parameters as the output from ``process_routing``.
775
+
776
+ Returns
777
+ -------
778
+ Return value (transformers and/or transformed X data) depends
779
+ on the passed function.
780
+ """
781
+ if func is _fit_transform_one:
782
+ fitted = False
783
+ else: # func is _transform_one
784
+ fitted = True
785
+
786
+ transformers = list(
787
+ self._iter(
788
+ fitted=fitted,
789
+ column_as_labels=column_as_labels,
790
+ skip_drop=True,
791
+ skip_empty_columns=True,
792
+ )
793
+ )
794
+ try:
795
+ jobs = []
796
+ for idx, (name, trans, column, weight) in enumerate(transformers, start=1):
797
+ if func is _fit_transform_one:
798
+ if trans == "passthrough":
799
+ output_config = _get_output_config("transform", self)
800
+ trans = FunctionTransformer(
801
+ accept_sparse=True,
802
+ check_inverse=False,
803
+ feature_names_out="one-to-one",
804
+ ).set_output(transform=output_config["dense"])
805
+
806
+ extra_args = dict(
807
+ message_clsname="ColumnTransformer",
808
+ message=self._log_message(name, idx, len(transformers)),
809
+ )
810
+ else: # func is _transform_one
811
+ extra_args = {}
812
+ jobs.append(
813
+ delayed(func)(
814
+ transformer=clone(trans) if not fitted else trans,
815
+ X=_safe_indexing(X, column, axis=1),
816
+ y=y,
817
+ weight=weight,
818
+ **extra_args,
819
+ params=routed_params[name],
820
+ )
821
+ )
822
+
823
+ return Parallel(n_jobs=self.n_jobs)(jobs)
824
+
825
+ except ValueError as e:
826
+ if "Expected 2D array, got 1D array instead" in str(e):
827
+ raise ValueError(_ERR_MSG_1DCOLUMN) from e
828
+ else:
829
+ raise
830
+
831
+ def fit(self, X, y=None, **params):
832
+ """Fit all transformers using X.
833
+
834
+ Parameters
835
+ ----------
836
+ X : {array-like, dataframe} of shape (n_samples, n_features)
837
+ Input data, of which specified subsets are used to fit the
838
+ transformers.
839
+
840
+ y : array-like of shape (n_samples,...), default=None
841
+ Targets for supervised learning.
842
+
843
+ **params : dict, default=None
844
+ Parameters to be passed to the underlying transformers' ``fit`` and
845
+ ``transform`` methods.
846
+
847
+ You can only pass this if metadata routing is enabled, which you
848
+ can enable using ``sklearn.set_config(enable_metadata_routing=True)``.
849
+
850
+ .. versionadded:: 1.4
851
+
852
+ Returns
853
+ -------
854
+ self : ColumnTransformer
855
+ This estimator.
856
+ """
857
+ _raise_for_params(params, self, "fit")
858
+ # we use fit_transform to make sure sparse_output_ is set (for which we
859
+ # need the transformed data), so the output type is consistent in predict
860
+ self.fit_transform(X, y=y, **params)
861
+ return self
862
+
863
+ @_fit_context(
864
+ # estimators in ColumnTransformer.transformers are not validated yet
865
+ prefer_skip_nested_validation=False
866
+ )
867
+ def fit_transform(self, X, y=None, **params):
868
+ """Fit all transformers, transform the data and concatenate results.
869
+
870
+ Parameters
871
+ ----------
872
+ X : {array-like, dataframe} of shape (n_samples, n_features)
873
+ Input data, of which specified subsets are used to fit the
874
+ transformers.
875
+
876
+ y : array-like of shape (n_samples,), default=None
877
+ Targets for supervised learning.
878
+
879
+ **params : dict, default=None
880
+ Parameters to be passed to the underlying transformers' ``fit`` and
881
+ ``transform`` methods.
882
+
883
+ You can only pass this if metadata routing is enabled, which you
884
+ can enable using ``sklearn.set_config(enable_metadata_routing=True)``.
885
+
886
+ .. versionadded:: 1.4
887
+
888
+ Returns
889
+ -------
890
+ X_t : {array-like, sparse matrix} of \
891
+ shape (n_samples, sum_n_components)
892
+ Horizontally stacked results of transformers. sum_n_components is the
893
+ sum of n_components (output dimension) over transformers. If
894
+ any result is a sparse matrix, everything will be converted to
895
+ sparse matrices.
896
+ """
897
+ _raise_for_params(params, self, "fit_transform")
898
+ self._check_feature_names(X, reset=True)
899
+
900
+ X = _check_X(X)
901
+ # set n_features_in_ attribute
902
+ self._check_n_features(X, reset=True)
903
+ self._validate_transformers()
904
+ n_samples = _num_samples(X)
905
+
906
+ self._validate_column_callables(X)
907
+ self._validate_remainder(X)
908
+
909
+ if _routing_enabled():
910
+ routed_params = process_routing(self, "fit_transform", **params)
911
+ else:
912
+ routed_params = self._get_empty_routing()
913
+
914
+ result = self._call_func_on_transformers(
915
+ X,
916
+ y,
917
+ _fit_transform_one,
918
+ column_as_labels=False,
919
+ routed_params=routed_params,
920
+ )
921
+
922
+ if not result:
923
+ self._update_fitted_transformers([])
924
+ # All transformers are None
925
+ return np.zeros((n_samples, 0))
926
+
927
+ Xs, transformers = zip(*result)
928
+
929
+ # determine if concatenated output will be sparse or not
930
+ if any(sparse.issparse(X) for X in Xs):
931
+ nnz = sum(X.nnz if sparse.issparse(X) else X.size for X in Xs)
932
+ total = sum(
933
+ X.shape[0] * X.shape[1] if sparse.issparse(X) else X.size for X in Xs
934
+ )
935
+ density = nnz / total
936
+ self.sparse_output_ = density < self.sparse_threshold
937
+ else:
938
+ self.sparse_output_ = False
939
+
940
+ self._update_fitted_transformers(transformers)
941
+ self._validate_output(Xs)
942
+ self._record_output_indices(Xs)
943
+
944
+ return self._hstack(list(Xs), n_samples=n_samples)
945
+
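+ # Editor's note: a minimal sketch (not part of the library source) of the
+ # density rule above; the toy data is assumed. A mixed sparse/dense result
+ # is stacked as a sparse matrix only while nnz / total < sparse_threshold.
+ import numpy as np
+ from scipy import sparse
+ from sklearn.compose import ColumnTransformer
+ from sklearn.preprocessing import OneHotEncoder
+
+ X = np.array([["a", 1.0], ["b", 2.0], ["a", 3.0]], dtype=object)
+ ct = ColumnTransformer(
+     [("onehot", OneHotEncoder(), [0])],
+     remainder="passthrough",
+     sparse_threshold=0.8,
+ )
+ Xt = ct.fit_transform(X)
+ print(ct.sparse_output_, sparse.issparse(Xt))  # True True (density ~ 0.67)
+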
946
+ def transform(self, X, **params):
947
+ """Transform X separately by each transformer, concatenate results.
948
+
949
+ Parameters
950
+ ----------
951
+ X : {array-like, dataframe} of shape (n_samples, n_features)
952
+ The data to be transformed by subset.
953
+
954
+ **params : dict, default=None
955
+ Parameters to be passed to the underlying transformers' ``transform``
956
+ method.
957
+
958
+ You can only pass this if metadata routing is enabled, which you
959
+ can enable using ``sklearn.set_config(enable_metadata_routing=True)``.
960
+
961
+ .. versionadded:: 1.4
962
+
963
+ Returns
964
+ -------
965
+ X_t : {array-like, sparse matrix} of \
966
+ shape (n_samples, sum_n_components)
967
+ Horizontally stacked results of transformers. sum_n_components is the
968
+ sum of n_components (output dimension) over transformers. If
969
+ any result is a sparse matrix, everything will be converted to
970
+ sparse matrices.
971
+ """
972
+ _raise_for_params(params, self, "transform")
973
+ check_is_fitted(self)
974
+ X = _check_X(X)
975
+
976
+ # If ColumnTransformer is fit using a dataframe, and now a dataframe is
977
+ # passed to be transformed, we select columns by name instead. This
978
+ # enables the user to pass X at transform time with extra columns which
979
+ # were not present at fit time, and the order of the columns doesn't
980
+ # matter.
981
+ fit_dataframe_and_transform_dataframe = hasattr(self, "feature_names_in_") and (
982
+ _is_pandas_df(X) or hasattr(X, "__dataframe__")
983
+ )
984
+
985
+ n_samples = _num_samples(X)
986
+ column_names = _get_feature_names(X)
987
+
988
+ if fit_dataframe_and_transform_dataframe:
989
+ named_transformers = self.named_transformers_
990
+ # check that all names seen in fit are in transform, unless
991
+ # they were dropped
992
+ non_dropped_indices = [
993
+ ind
994
+ for name, ind in self._transformer_to_input_indices.items()
995
+ if name in named_transformers and named_transformers[name] != "drop"
996
+ ]
997
+
998
+ all_indices = set(chain(*non_dropped_indices))
999
+ all_names = set(self.feature_names_in_[ind] for ind in all_indices)
1000
+
1001
+ diff = all_names - set(column_names)
1002
+ if diff:
1003
+ raise ValueError(f"columns are missing: {diff}")
1004
+ else:
1005
+ # ndarray was used for fitting or transforming, thus we only
1006
+ # check that n_features_in_ is consistent
1007
+ self._check_n_features(X, reset=False)
1008
+
1009
+ if _routing_enabled():
1010
+ routed_params = process_routing(self, "transform", **params)
1011
+ else:
1012
+ routed_params = self._get_empty_routing()
1013
+
1014
+ Xs = self._call_func_on_transformers(
1015
+ X,
1016
+ None,
1017
+ _transform_one,
1018
+ column_as_labels=fit_dataframe_and_transform_dataframe,
1019
+ routed_params=routed_params,
1020
+ )
1021
+ self._validate_output(Xs)
1022
+
1023
+ if not Xs:
1024
+ # All transformers are None
1025
+ return np.zeros((n_samples, 0))
1026
+
1027
+ return self._hstack(list(Xs), n_samples=n_samples)
1028
+
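+ # Editor's note: a minimal sketch (not part of the library source) of the
+ # name-based column matching described above; the toy frames are assumed.
+ import pandas as pd
+ from sklearn.compose import ColumnTransformer
+
+ ct = ColumnTransformer([("keep", "passthrough", ["a"])])
+ ct.fit(pd.DataFrame({"a": [1, 2], "b": [3, 4]}))
+ # extra columns and a different column order are accepted at transform time
+ print(ct.transform(pd.DataFrame({"extra": [0, 0], "a": [5, 6]})))  # [[5], [6]]
+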
1029
+ def _hstack(self, Xs, *, n_samples):
1030
+ """Stacks Xs horizontally.
1031
+
1032
+ This allows subclasses to control the stacking behavior, while reusing
1033
+ everything else from ColumnTransformer.
1034
+
1035
+ Parameters
1036
+ ----------
1037
+ Xs : list of {array-like, sparse matrix, dataframe}
1038
+ The container to concatenate.
1039
+ n_samples : int
1040
+ The number of samples in the input data, used to check the
1041
+ transformation consistency.
1042
+ """
1043
+ if self.sparse_output_:
1044
+ try:
1045
+ # since all columns should be numeric before stacking them
1046
+ # in a sparse matrix, `check_array` is used for the
1047
+ # dtype conversion if necessary.
1048
+ converted_Xs = [
1049
+ check_array(X, accept_sparse=True, force_all_finite=False)
1050
+ for X in Xs
1051
+ ]
1052
+ except ValueError as e:
1053
+ raise ValueError(
1054
+ "For a sparse output, all columns should "
1055
+ "be a numeric or convertible to a numeric."
1056
+ ) from e
1057
+
1058
+ return sparse.hstack(converted_Xs).tocsr()
1059
+ else:
1060
+ Xs = [f.toarray() if sparse.issparse(f) else f for f in Xs]
1061
+ adapter = _get_container_adapter("transform", self)
1062
+ if adapter and all(adapter.is_supported_container(X) for X in Xs):
1063
+ # rename before stacking, as this avoids errors on temporarily
1064
+ # duplicated columns
1065
+ transformer_names = [
1066
+ t[0]
1067
+ for t in self._iter(
1068
+ fitted=True,
1069
+ column_as_labels=False,
1070
+ skip_drop=True,
1071
+ skip_empty_columns=True,
1072
+ )
1073
+ ]
1074
+ feature_names_outs = [X.columns for X in Xs if X.shape[1] != 0]
1075
+ if self.verbose_feature_names_out:
1076
+ # `_add_prefix_for_feature_names_out` takes care of raising
1077
+ # an error if there are duplicated columns.
1078
+ feature_names_outs = self._add_prefix_for_feature_names_out(
1079
+ list(zip(transformer_names, feature_names_outs))
1080
+ )
1081
+ else:
1082
+ # check for duplicated columns and raise if any
1083
+ feature_names_outs = list(chain.from_iterable(feature_names_outs))
1084
+ feature_names_count = Counter(feature_names_outs)
1085
+ if any(count > 1 for count in feature_names_count.values()):
1086
+ duplicated_feature_names = sorted(
1087
+ name
1088
+ for name, count in feature_names_count.items()
1089
+ if count > 1
1090
+ )
1091
+ err_msg = (
1092
+ "Duplicated feature names found before concatenating the"
1093
+ " outputs of the transformers:"
1094
+ f" {duplicated_feature_names}.\n"
1095
+ )
1096
+ for transformer_name, X in zip(transformer_names, Xs):
1097
+ if X.shape[1] == 0:
1098
+ continue
1099
+ dup_cols_in_transformer = sorted(
1100
+ set(X.columns).intersection(duplicated_feature_names)
1101
+ )
1102
+ if len(dup_cols_in_transformer):
1103
+ err_msg += (
1104
+ f"Transformer {transformer_name} has conflicting "
1105
+ f"columns names: {dup_cols_in_transformer}.\n"
1106
+ )
1107
+ raise ValueError(
1108
+ err_msg
1109
+ + "Either make sure that the transformers named above "
1110
+ "do not generate columns with conflicting names or set "
1111
+ "verbose_feature_names_out=True to automatically "
1112
+ "prefix to the output feature names with the name "
1113
+ "of the transformer to prevent any conflicting "
1114
+ "names."
1115
+ )
1116
+
1117
+ names_idx = 0
1118
+ for X in Xs:
1119
+ if X.shape[1] == 0:
1120
+ continue
1121
+ names_out = feature_names_outs[names_idx : names_idx + X.shape[1]]
1122
+ adapter.rename_columns(X, names_out)
1123
+ names_idx += X.shape[1]
1124
+
1125
+ output = adapter.hstack(Xs)
1126
+ output_samples = output.shape[0]
1127
+ if output_samples != n_samples:
1128
+ raise ValueError(
1129
+ "Concatenating DataFrames from the transformer's output lead to"
1130
+ " an inconsistent number of samples. The output may have Pandas"
1131
+ " Indexes that do not match, or that transformers are returning"
1132
+ " number of samples which are not the same as the number input"
1133
+ " samples."
1134
+ )
1135
+
1136
+ return output
1137
+
1138
+ return np.hstack(Xs)
1139
+
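+ # Editor's note: a minimal sketch (not part of the library source) of how
+ # verbose_feature_names_out avoids the duplicate-name error above by
+ # prefixing output columns; the toy frame is assumed.
+ import pandas as pd
+ from sklearn.compose import ColumnTransformer
+
+ X = pd.DataFrame({"a": [1.0, 2.0], "b": [3.0, 4.0]})
+ ct = ColumnTransformer(
+     [("first", "passthrough", ["a"]), ("second", "passthrough", ["b"])],
+     verbose_feature_names_out=True,
+ ).set_output(transform="pandas")
+ print(ct.fit_transform(X).columns.tolist())  # ['first__a', 'second__b']
+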
1140
+ def _sk_visual_block_(self):
1141
+ if isinstance(self.remainder, str) and self.remainder == "drop":
1142
+ transformers = self.transformers
1143
+ elif hasattr(self, "_remainder"):
1144
+ remainder_columns = self._remainder[2]
1145
+ if (
1146
+ hasattr(self, "feature_names_in_")
1147
+ and remainder_columns
1148
+ and not all(isinstance(col, str) for col in remainder_columns)
1149
+ ):
1150
+ remainder_columns = self.feature_names_in_[remainder_columns].tolist()
1151
+ transformers = chain(
1152
+ self.transformers, [("remainder", self.remainder, remainder_columns)]
1153
+ )
1154
+ else:
1155
+ transformers = chain(self.transformers, [("remainder", self.remainder, "")])
1156
+
1157
+ names, transformers, name_details = zip(*transformers)
1158
+ return _VisualBlock(
1159
+ "parallel", transformers, names=names, name_details=name_details
1160
+ )
1161
+
1162
+ def _get_empty_routing(self):
1163
+ """Return empty routing.
1164
+
1165
+ Used when metadata routing is disabled.
1166
+
1167
+ TODO: Remove when ``set_config(enable_metadata_routing=False)`` is no
1168
+ longer an option.
1169
+ """
1170
+ return Bunch(
1171
+ **{
1172
+ name: Bunch(**{method: {} for method in METHODS})
1173
+ for name, step, _, _ in self._iter(
1174
+ fitted=False,
1175
+ column_as_labels=False,
1176
+ skip_drop=True,
1177
+ skip_empty_columns=True,
1178
+ )
1179
+ }
1180
+ )
1181
+
1182
+ def get_metadata_routing(self):
1183
+ """Get metadata routing of this object.
1184
+
1185
+ Please check :ref:`User Guide <metadata_routing>` on how the routing
1186
+ mechanism works.
1187
+
1188
+ .. versionadded:: 1.4
1189
+
1190
+ Returns
1191
+ -------
1192
+ routing : MetadataRouter
1193
+ A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
1194
+ routing information.
1195
+ """
1196
+ router = MetadataRouter(owner=self.__class__.__name__)
1197
+ # Here we don't care about which columns are used for which
1198
+ # transformers, and whether or not a transformer is used at all, which
1199
+ # might happen if no columns are selected for that transformer. We
1200
+ # request all metadata requested by all transformers.
1201
+ transformers = chain(self.transformers, [("remainder", self.remainder, None)])
1202
+ for name, step, _ in transformers:
1203
+ method_mapping = MethodMapping()
1204
+ if hasattr(step, "fit_transform"):
1205
+ (
1206
+ method_mapping.add(caller="fit", callee="fit_transform").add(
1207
+ caller="fit_transform", callee="fit_transform"
1208
+ )
1209
+ )
1210
+ else:
1211
+ (
1212
+ method_mapping.add(caller="fit", callee="fit")
1213
+ .add(caller="fit", callee="transform")
1214
+ .add(caller="fit_transform", callee="fit")
1215
+ .add(caller="fit_transform", callee="transform")
1216
+ )
1217
+ method_mapping.add(caller="transform", callee="transform")
1218
+ router.add(method_mapping=method_mapping, **{name: step})
1219
+
1220
+ return router
1221
+
1222
+
1223
+ def _check_X(X):
1224
+ """Use check_array only when necessary, e.g. on lists and other non-array-likes."""
1225
+ if hasattr(X, "__array__") or hasattr(X, "__dataframe__") or sparse.issparse(X):
1226
+ return X
1227
+ return check_array(X, force_all_finite="allow-nan", dtype=object)
1228
+
1229
+
1230
+ def _is_empty_column_selection(column):
1231
+ """
1232
+ Return True if the column selection is empty (empty list or all-False
1233
+ boolean array).
1234
+
1235
+ """
1236
+ if hasattr(column, "dtype") and np.issubdtype(column.dtype, np.bool_):
1237
+ return not column.any()
1238
+ elif hasattr(column, "__len__"):
1239
+ return (
1240
+ len(column) == 0
1241
+ or all(isinstance(col, bool) for col in column)
1242
+ and not any(column)
1243
+ )
1244
+ else:
1245
+ return False
1246
+
1247
+
1248
+ def _get_transformer_list(estimators):
1249
+ """
1250
+ Construct (name, trans, column) tuples from a list.
1251
+
1252
+ """
1253
+ transformers, columns = zip(*estimators)
1254
+ names, _ = zip(*_name_estimators(transformers))
1255
+
1256
+ transformer_list = list(zip(names, transformers, columns))
1257
+ return transformer_list
1258
+
1259
+
1260
+ # This function is not validated using validate_params because
1261
+ # it's just a factory for ColumnTransformer.
1262
+ def make_column_transformer(
1263
+ *transformers,
1264
+ remainder="drop",
1265
+ sparse_threshold=0.3,
1266
+ n_jobs=None,
1267
+ verbose=False,
1268
+ verbose_feature_names_out=True,
1269
+ ):
1270
+ """Construct a ColumnTransformer from the given transformers.
1271
+
1272
+ This is a shorthand for the ColumnTransformer constructor; it does not
1273
+ require, and does not permit, naming the transformers. Instead, they will
1274
+ be given names automatically based on their types. It also does not allow
1275
+ weighting with ``transformer_weights``.
1276
+
1277
+ Read more in the :ref:`User Guide <make_column_transformer>`.
1278
+
1279
+ Parameters
1280
+ ----------
1281
+ *transformers : tuples
1282
+ Tuples of the form (transformer, columns) specifying the
1283
+ transformer objects to be applied to subsets of the data.
1284
+
1285
+ transformer : {'drop', 'passthrough'} or estimator
1286
+ Estimator must support :term:`fit` and :term:`transform`.
1287
+ Special-cased strings 'drop' and 'passthrough' are accepted as
1288
+ well, to indicate to drop the columns or to pass them through
1289
+ untransformed, respectively.
1290
+ columns : str, array-like of str, int, array-like of int, slice, \
1291
+ array-like of bool or callable
1292
+ Indexes the data on its second axis. Integers are interpreted as
1293
+ positional columns, while strings can reference DataFrame columns
1294
+ by name. A scalar string or int should be used where
1295
+ ``transformer`` expects X to be a 1d array-like (vector),
1296
+ otherwise a 2d array will be passed to the transformer.
1297
+ A callable is passed the input data `X` and can return any of the
1298
+ above. To select multiple columns by name or dtype, you can use
1299
+ :obj:`make_column_selector`.
1300
+
1301
+ remainder : {'drop', 'passthrough'} or estimator, default='drop'
1302
+ By default, only the specified columns in `transformers` are
1303
+ transformed and combined in the output, and the non-specified
1304
+ columns are dropped. (default of ``'drop'``).
1305
+ By specifying ``remainder='passthrough'``, all remaining columns that
1306
+ were not specified in `transformers` will be automatically passed
1307
+ through. This subset of columns is concatenated with the output of
1308
+ the transformers.
1309
+ By setting ``remainder`` to be an estimator, the remaining
1310
+ non-specified columns will use the ``remainder`` estimator. The
1311
+ estimator must support :term:`fit` and :term:`transform`.
1312
+
1313
+ sparse_threshold : float, default=0.3
1314
+ If the transformed output consists of a mix of sparse and dense data,
1315
+ it will be stacked as a sparse matrix if the density is lower than this
1316
+ value. Use ``sparse_threshold=0`` to always return dense.
1317
+ When the transformed output consists of all sparse or all dense data,
1318
+ the stacked result will be sparse or dense, respectively, and this
1319
+ keyword will be ignored.
1320
+
1321
+ n_jobs : int, default=None
1322
+ Number of jobs to run in parallel.
1323
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
1324
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
1325
+ for more details.
1326
+
1327
+ verbose : bool, default=False
1328
+ If True, the time elapsed while fitting each transformer will be
1329
+ printed as it is completed.
1330
+
1331
+ verbose_feature_names_out : bool, default=True
1332
+ If True, :meth:`ColumnTransformer.get_feature_names_out` will prefix
1333
+ all feature names with the name of the transformer that generated that
1334
+ feature.
1335
+ If False, :meth:`ColumnTransformer.get_feature_names_out` will not
1336
+ prefix any feature names and will error if feature names are not
1337
+ unique.
1338
+
1339
+ .. versionadded:: 1.0
1340
+
1341
+ Returns
1342
+ -------
1343
+ ct : ColumnTransformer
1344
+ Returns a :class:`ColumnTransformer` object.
1345
+
1346
+ See Also
1347
+ --------
1348
+ ColumnTransformer : Class that allows combining the
1349
+ outputs of multiple transformer objects used on column subsets
1350
+ of the data into a single feature space.
1351
+
1352
+ Examples
1353
+ --------
1354
+ >>> from sklearn.preprocessing import StandardScaler, OneHotEncoder
1355
+ >>> from sklearn.compose import make_column_transformer
1356
+ >>> make_column_transformer(
1357
+ ... (StandardScaler(), ['numerical_column']),
1358
+ ... (OneHotEncoder(), ['categorical_column']))
1359
+ ColumnTransformer(transformers=[('standardscaler', StandardScaler(...),
1360
+ ['numerical_column']),
1361
+ ('onehotencoder', OneHotEncoder(...),
1362
+ ['categorical_column'])])
1363
+ """
1364
+ # transformer_weights keyword is not passed through because the user
1365
+ # would need to know the automatically generated names of the transformers
1366
+ transformer_list = _get_transformer_list(transformers)
1367
+ return ColumnTransformer(
1368
+ transformer_list,
1369
+ n_jobs=n_jobs,
1370
+ remainder=remainder,
1371
+ sparse_threshold=sparse_threshold,
1372
+ verbose=verbose,
1373
+ verbose_feature_names_out=verbose_feature_names_out,
1374
+ )
1375
+
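+ # Editor's note: a minimal sketch (not part of the library source) of the
+ # remainder option with the auto-generated transformer names; the toy
+ # frame is assumed.
+ import pandas as pd
+ from sklearn.compose import make_column_transformer
+ from sklearn.preprocessing import StandardScaler
+
+ X = pd.DataFrame({"num": [0.0, 2.0], "other": [5, 6]})
+ ct = make_column_transformer(
+     (StandardScaler(), ["num"]),
+     remainder="passthrough",  # keep "other" instead of dropping it
+ )
+ print(ct.fit_transform(X))  # scaled "num" stacked with untouched "other"
+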
1376
+
1377
+ class make_column_selector:
1378
+ """Create a callable to select columns to be used with
1379
+ :class:`ColumnTransformer`.
1380
+
1381
+ :func:`make_column_selector` can select columns based on datatype or on
1382
+ column names with a regex. When using multiple selection criteria, **all**
1383
+ criteria must match for a column to be selected.
1384
+
1385
+ For an example of how to use :func:`make_column_selector` within a
1386
+ :class:`ColumnTransformer` to select columns based on data type (i.e.
1387
+ `dtype`), refer to
1388
+ :ref:`sphx_glr_auto_examples_compose_plot_column_transformer_mixed_types.py`.
1389
+
1390
+ Parameters
1391
+ ----------
1392
+ pattern : str, default=None
1393
+ Names of columns containing this regex pattern will be included. If
1394
+ None, columns will not be selected based on a pattern.
1395
+
1396
+ dtype_include : column dtype or list of column dtypes, default=None
1397
+ A selection of dtypes to include. For more details, see
1398
+ :meth:`pandas.DataFrame.select_dtypes`.
1399
+
1400
+ dtype_exclude : column dtype or list of column dtypes, default=None
1401
+ A selection of dtypes to exclude. For more details, see
1402
+ :meth:`pandas.DataFrame.select_dtypes`.
1403
+
1404
+ Returns
1405
+ -------
1406
+ selector : callable
1407
+ Callable for column selection to be used by a
1408
+ :class:`ColumnTransformer`.
1409
+
1410
+ See Also
1411
+ --------
1412
+ ColumnTransformer : Class that allows combining the
1413
+ outputs of multiple transformer objects used on column subsets
1414
+ of the data into a single feature space.
1415
+
1416
+ Examples
1417
+ --------
1418
+ >>> from sklearn.preprocessing import StandardScaler, OneHotEncoder
1419
+ >>> from sklearn.compose import make_column_transformer
1420
+ >>> from sklearn.compose import make_column_selector
1421
+ >>> import numpy as np
1422
+ >>> import pandas as pd # doctest: +SKIP
1423
+ >>> X = pd.DataFrame({'city': ['London', 'London', 'Paris', 'Sallisaw'],
1424
+ ... 'rating': [5, 3, 4, 5]}) # doctest: +SKIP
1425
+ >>> ct = make_column_transformer(
1426
+ ... (StandardScaler(),
1427
+ ... make_column_selector(dtype_include=np.number)), # rating
1428
+ ... (OneHotEncoder(),
1429
+ ... make_column_selector(dtype_include=object))) # city
1430
+ >>> ct.fit_transform(X) # doctest: +SKIP
1431
+ array([[ 0.90453403, 1. , 0. , 0. ],
1432
+ [-1.50755672, 1. , 0. , 0. ],
1433
+ [-0.30151134, 0. , 1. , 0. ],
1434
+ [ 0.90453403, 0. , 0. , 1. ]])
1435
+ """
1436
+
1437
+ def __init__(self, pattern=None, *, dtype_include=None, dtype_exclude=None):
1438
+ self.pattern = pattern
1439
+ self.dtype_include = dtype_include
1440
+ self.dtype_exclude = dtype_exclude
1441
+
1442
+ def __call__(self, df):
1443
+ """Callable for column selection to be used by a
1444
+ :class:`ColumnTransformer`.
1445
+
1446
+ Parameters
1447
+ ----------
1448
+ df : dataframe of shape (n_samples, n_features)
1449
+ DataFrame to select columns from.
1450
+ """
1451
+ if not hasattr(df, "iloc"):
1452
+ raise ValueError(
1453
+ "make_column_selector can only be applied to pandas dataframes"
1454
+ )
1455
+ df_row = df.iloc[:1]
1456
+ if self.dtype_include is not None or self.dtype_exclude is not None:
1457
+ df_row = df_row.select_dtypes(
1458
+ include=self.dtype_include, exclude=self.dtype_exclude
1459
+ )
1460
+ cols = df_row.columns
1461
+ if self.pattern is not None:
1462
+ cols = cols[cols.str.contains(self.pattern, regex=True)]
1463
+ return cols.tolist()
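+
+ # Editor's note: a minimal sketch (not part of the library source) showing
+ # that, as documented above, **all** criteria must match; the toy frame is
+ # assumed.
+ import numpy as np
+ import pandas as pd
+ from sklearn.compose import make_column_selector
+
+ X = pd.DataFrame({"city": ["London", "Paris"], "city_rank": [1, 2]})
+ selector = make_column_selector(pattern="^city", dtype_include=np.number)
+ print(selector(X))  # ['city_rank']: "city" matches the pattern, not the dtype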
llmeval-env/lib/python3.10/site-packages/sklearn/compose/_target.py ADDED
@@ -0,0 +1,342 @@
1
+ # Authors: Andreas Mueller <[email protected]>
2
+ # Guillaume Lemaitre <[email protected]>
3
+ # License: BSD 3 clause
4
+
5
+ import warnings
6
+
7
+ import numpy as np
8
+
9
+ from ..base import BaseEstimator, RegressorMixin, _fit_context, clone
10
+ from ..exceptions import NotFittedError
11
+ from ..preprocessing import FunctionTransformer
12
+ from ..utils import _safe_indexing, check_array
13
+ from ..utils._param_validation import HasMethods
14
+ from ..utils._tags import _safe_tags
15
+ from ..utils.metadata_routing import (
16
+ _raise_for_unsupported_routing,
17
+ _RoutingNotSupportedMixin,
18
+ )
19
+ from ..utils.validation import check_is_fitted
20
+
21
+ __all__ = ["TransformedTargetRegressor"]
22
+
23
+
24
+ class TransformedTargetRegressor(
25
+ _RoutingNotSupportedMixin, RegressorMixin, BaseEstimator
26
+ ):
27
+ """Meta-estimator to regress on a transformed target.
28
+
29
+ Useful for applying a non-linear transformation to the target `y` in
30
+ regression problems. This transformation can be given as a Transformer
31
+ such as the :class:`~sklearn.preprocessing.QuantileTransformer` or as a
32
+ function and its inverse such as `np.log` and `np.exp`.
33
+
34
+ The computation during :meth:`fit` is::
35
+
36
+ regressor.fit(X, func(y))
37
+
38
+ or::
39
+
40
+ regressor.fit(X, transformer.transform(y))
41
+
42
+ The computation during :meth:`predict` is::
43
+
44
+ inverse_func(regressor.predict(X))
45
+
46
+ or::
47
+
48
+ transformer.inverse_transform(regressor.predict(X))
49
+
50
+ Read more in the :ref:`User Guide <transformed_target_regressor>`.
51
+
52
+ .. versionadded:: 0.20
53
+
54
+ Parameters
55
+ ----------
56
+ regressor : object, default=None
57
+ Regressor object such as derived from
58
+ :class:`~sklearn.base.RegressorMixin`. This regressor will
59
+ automatically be cloned each time prior to fitting. If `regressor is
60
+ None`, :class:`~sklearn.linear_model.LinearRegression` is created and used.
61
+
62
+ transformer : object, default=None
63
+ Estimator object such as derived from
64
+ :class:`~sklearn.base.TransformerMixin`. Cannot be set at the same time
65
+ as `func` and `inverse_func`. If `transformer is None` as well as
66
+ `func` and `inverse_func`, the transformer will be an identity
67
+ transformer. Note that the transformer will be cloned during fitting.
68
+ Also, the transformer restricts `y` to be a numpy array.
69
+
70
+ func : function, default=None
71
+ Function to apply to `y` before passing to :meth:`fit`. Cannot be set
72
+ at the same time as `transformer`. The function needs to return a
73
+ 2-dimensional array. If `func is None`, the function used will be the
74
+ identity function.
75
+
76
+ inverse_func : function, default=None
77
+ Function to apply to the prediction of the regressor. Cannot be set at
78
+ the same time as `transformer`. The function needs to return a
79
+ 2-dimensional array. The inverse function is used to return
80
+ predictions to the same space as the original training labels.
81
+
82
+ check_inverse : bool, default=True
83
+ Whether to check that `transform` followed by `inverse_transform`
84
+ or `func` followed by `inverse_func` leads to the original targets.
85
+
86
+ Attributes
87
+ ----------
88
+ regressor_ : object
89
+ Fitted regressor.
90
+
91
+ transformer_ : object
92
+ Transformer used in :meth:`fit` and :meth:`predict`.
93
+
94
+ n_features_in_ : int
95
+ Number of features seen during :term:`fit`. Only defined if the
96
+ underlying regressor exposes such an attribute when fit.
97
+
98
+ .. versionadded:: 0.24
99
+
100
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
101
+ Names of features seen during :term:`fit`. Defined only when `X`
102
+ has feature names that are all strings.
103
+
104
+ .. versionadded:: 1.0
105
+
106
+ See Also
107
+ --------
108
+ sklearn.preprocessing.FunctionTransformer : Construct a transformer from an
109
+ arbitrary callable.
110
+
111
+ Notes
112
+ -----
113
+ Internally, the target `y` is always converted into a 2-dimensional array
114
+ to be used by scikit-learn transformers. At the time of prediction, the
115
+ output will be reshaped to have the same number of dimensions as `y`.
116
+
117
+ Examples
118
+ --------
119
+ >>> import numpy as np
120
+ >>> from sklearn.linear_model import LinearRegression
121
+ >>> from sklearn.compose import TransformedTargetRegressor
122
+ >>> tt = TransformedTargetRegressor(regressor=LinearRegression(),
123
+ ... func=np.log, inverse_func=np.exp)
124
+ >>> X = np.arange(4).reshape(-1, 1)
125
+ >>> y = np.exp(2 * X).ravel()
126
+ >>> tt.fit(X, y)
127
+ TransformedTargetRegressor(...)
128
+ >>> tt.score(X, y)
129
+ 1.0
130
+ >>> tt.regressor_.coef_
131
+ array([2.])
132
+
133
+ For a more detailed example use case refer to
134
+ :ref:`sphx_glr_auto_examples_compose_plot_transformed_target.py`.
135
+ """
136
+
137
+ _parameter_constraints: dict = {
138
+ "regressor": [HasMethods(["fit", "predict"]), None],
139
+ "transformer": [HasMethods("transform"), None],
140
+ "func": [callable, None],
141
+ "inverse_func": [callable, None],
142
+ "check_inverse": ["boolean"],
143
+ }
144
+
145
+ def __init__(
146
+ self,
147
+ regressor=None,
148
+ *,
149
+ transformer=None,
150
+ func=None,
151
+ inverse_func=None,
152
+ check_inverse=True,
153
+ ):
154
+ self.regressor = regressor
155
+ self.transformer = transformer
156
+ self.func = func
157
+ self.inverse_func = inverse_func
158
+ self.check_inverse = check_inverse
159
+
160
+ def _fit_transformer(self, y):
161
+ """Check transformer and fit transformer.
162
+
163
+ Create the default transformer, fit it, and optionally run an
164
+ inverse check on a subset of the data.
165
+
166
+ """
167
+ if self.transformer is not None and (
168
+ self.func is not None or self.inverse_func is not None
169
+ ):
170
+ raise ValueError(
171
+ "'transformer' and functions 'func'/'inverse_func' cannot both be set."
172
+ )
173
+ elif self.transformer is not None:
174
+ self.transformer_ = clone(self.transformer)
175
+ else:
176
+ if self.func is not None and self.inverse_func is None:
177
+ raise ValueError(
178
+ "When 'func' is provided, 'inverse_func' must also be provided"
179
+ )
180
+ self.transformer_ = FunctionTransformer(
181
+ func=self.func,
182
+ inverse_func=self.inverse_func,
183
+ validate=True,
184
+ check_inverse=self.check_inverse,
185
+ )
186
+ # XXX: sample_weight is not currently passed to the
187
+ # transformer. However, if the transformer starts using sample_weight,
188
+ # the code should be modified accordingly. This is also a relevant use
189
+ # case to consider for the sample_prop feature.
190
+ self.transformer_.fit(y)
191
+ if self.check_inverse:
192
+ idx_selected = slice(None, None, max(1, y.shape[0] // 10))
193
+ y_sel = _safe_indexing(y, idx_selected)
194
+ y_sel_t = self.transformer_.transform(y_sel)
195
+ if not np.allclose(y_sel, self.transformer_.inverse_transform(y_sel_t)):
196
+ warnings.warn(
197
+ (
198
+ "The provided functions or transformer are"
199
+ " not strictly inverse of each other. If"
200
+ " you are sure you want to proceed regardless"
201
+ ", set 'check_inverse=False'"
202
+ ),
203
+ UserWarning,
204
+ )
205
+
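+ # Editor's note: a minimal sketch (not part of the library source) that
+ # triggers the inverse check above: np.exp is not the exact inverse of
+ # np.log1p (that would be np.expm1). The toy data is assumed.
+ import numpy as np
+ from sklearn.compose import TransformedTargetRegressor
+
+ X = np.arange(4, dtype=float).reshape(-1, 1)
+ y = np.array([1.0, 2.0, 3.0, 4.0])
+ tt = TransformedTargetRegressor(func=np.log1p, inverse_func=np.exp)
+ tt.fit(X, y)  # warns that the functions are not strictly inverses
+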
206
+ @_fit_context(
207
+ # TransformedTargetRegressor.regressor/transformer are not validated yet.
208
+ prefer_skip_nested_validation=False
209
+ )
210
+ def fit(self, X, y, **fit_params):
211
+ """Fit the model according to the given training data.
212
+
213
+ Parameters
214
+ ----------
215
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
216
+ Training vector, where `n_samples` is the number of samples and
217
+ `n_features` is the number of features.
218
+
219
+ y : array-like of shape (n_samples,)
220
+ Target values.
221
+
222
+ **fit_params : dict
223
+ Parameters passed to the `fit` method of the underlying
224
+ regressor.
225
+
226
+ Returns
227
+ -------
228
+ self : object
229
+ Fitted estimator.
230
+ """
231
+ _raise_for_unsupported_routing(self, "fit", **fit_params)
232
+ if y is None:
233
+ raise ValueError(
234
+ f"This {self.__class__.__name__} estimator "
235
+ "requires y to be passed, but the target y is None."
236
+ )
237
+ y = check_array(
238
+ y,
239
+ input_name="y",
240
+ accept_sparse=False,
241
+ force_all_finite=True,
242
+ ensure_2d=False,
243
+ dtype="numeric",
244
+ allow_nd=True,
245
+ )
246
+
247
+ # store the number of dimensions of the target so that an array of
248
+ # similar shape can be predicted at predict time
249
+ self._training_dim = y.ndim
250
+
251
+ # transformers are designed to modify X, which is 2-dimensional, so we
252
+ # need to modify y accordingly.
253
+ if y.ndim == 1:
254
+ y_2d = y.reshape(-1, 1)
255
+ else:
256
+ y_2d = y
257
+ self._fit_transformer(y_2d)
258
+
259
+ # transform y and convert back to 1d array if needed
260
+ y_trans = self.transformer_.transform(y_2d)
261
+ # FIXME: a FunctionTransformer can return a 1D array even when validate
262
+ # is set to True. Therefore, we need to check the number of dimensions
263
+ # first.
264
+ if y_trans.ndim == 2 and y_trans.shape[1] == 1:
265
+ y_trans = y_trans.squeeze(axis=1)
266
+
267
+ if self.regressor is None:
268
+ from ..linear_model import LinearRegression
269
+
270
+ self.regressor_ = LinearRegression()
271
+ else:
272
+ self.regressor_ = clone(self.regressor)
273
+
274
+ self.regressor_.fit(X, y_trans, **fit_params)
275
+
276
+ if hasattr(self.regressor_, "feature_names_in_"):
277
+ self.feature_names_in_ = self.regressor_.feature_names_in_
278
+
279
+ return self
280
+
281
+ def predict(self, X, **predict_params):
282
+ """Predict using the base regressor, applying inverse.
283
+
284
+ The regressor is used to predict and the `inverse_func` or
285
+ `inverse_transform` is applied before returning the prediction.
286
+
287
+ Parameters
288
+ ----------
289
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
290
+ Samples.
291
+
292
+ **predict_params : dict of str -> object
293
+ Parameters passed to the `predict` method of the underlying
294
+ regressor.
295
+
296
+ Returns
297
+ -------
298
+ y_hat : ndarray of shape (n_samples,)
299
+ Predicted values.
300
+ """
301
+ check_is_fitted(self)
302
+ pred = self.regressor_.predict(X, **predict_params)
303
+ if pred.ndim == 1:
304
+ pred_trans = self.transformer_.inverse_transform(pred.reshape(-1, 1))
305
+ else:
306
+ pred_trans = self.transformer_.inverse_transform(pred)
307
+ if (
308
+ self._training_dim == 1
309
+ and pred_trans.ndim == 2
310
+ and pred_trans.shape[1] == 1
311
+ ):
312
+ pred_trans = pred_trans.squeeze(axis=1)
313
+
314
+ return pred_trans
315
+
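+ # Editor's note: a minimal sketch (not part of the library source) of the
+ # predict round trip with a transformer object; the toy data is assumed.
+ import numpy as np
+ from sklearn.compose import TransformedTargetRegressor
+ from sklearn.preprocessing import StandardScaler
+
+ X = np.arange(8, dtype=float).reshape(-1, 1)
+ y = 3.0 * X.ravel() + 1.0
+ tt = TransformedTargetRegressor(transformer=StandardScaler()).fit(X, y)
+ print(tt.predict(X[:2]))  # mapped back to the original units: ~[1., 4.]
+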
316
+ def _more_tags(self):
317
+ regressor = self.regressor
318
+ if regressor is None:
319
+ from ..linear_model import LinearRegression
320
+
321
+ regressor = LinearRegression()
322
+
323
+ return {
324
+ "poor_score": True,
325
+ "multioutput": _safe_tags(regressor, key="multioutput"),
326
+ }
327
+
328
+ @property
329
+ def n_features_in_(self):
330
+ """Number of features seen during :term:`fit`."""
331
+ # For consistency with other estimators we raise an AttributeError so
332
+ # that hasattr() returns False when the estimator isn't fitted.
333
+ try:
334
+ check_is_fitted(self)
335
+ except NotFittedError as nfe:
336
+ raise AttributeError(
337
+ "{} object has no n_features_in_ attribute.".format(
338
+ self.__class__.__name__
339
+ )
340
+ ) from nfe
341
+
342
+ return self.regressor_.n_features_in_
llmeval-env/lib/python3.10/site-packages/sklearn/compose/tests/__init__.py ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/sklearn/compose/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (194 Bytes).
 
llmeval-env/lib/python3.10/site-packages/sklearn/compose/tests/__pycache__/test_column_transformer.cpython-310.pyc ADDED
Binary file (60.6 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/compose/tests/__pycache__/test_target.cpython-310.pyc ADDED
Binary file (11.9 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/compose/tests/test_column_transformer.py ADDED
@@ -0,0 +1,2582 @@
1
+ """
2
+ Test the ColumnTransformer.
3
+ """
4
+
5
+ import pickle
6
+ import re
7
+ import warnings
8
+
9
+ import numpy as np
10
+ import pytest
11
+ from numpy.testing import assert_allclose
12
+ from scipy import sparse
13
+
14
+ from sklearn.base import BaseEstimator, TransformerMixin
15
+ from sklearn.compose import (
16
+ ColumnTransformer,
17
+ make_column_selector,
18
+ make_column_transformer,
19
+ )
20
+ from sklearn.exceptions import NotFittedError
21
+ from sklearn.feature_selection import VarianceThreshold
22
+ from sklearn.preprocessing import (
23
+ FunctionTransformer,
24
+ Normalizer,
25
+ OneHotEncoder,
26
+ StandardScaler,
27
+ )
28
+ from sklearn.tests.metadata_routing_common import (
29
+ ConsumingTransformer,
30
+ _Registry,
31
+ check_recorded_metadata,
32
+ )
33
+ from sklearn.utils._testing import (
34
+ _convert_container,
35
+ assert_allclose_dense_sparse,
36
+ assert_almost_equal,
37
+ assert_array_equal,
38
+ )
39
+ from sklearn.utils.fixes import CSR_CONTAINERS
40
+
41
+
42
+ class Trans(TransformerMixin, BaseEstimator):
43
+ def fit(self, X, y=None):
44
+ return self
45
+
46
+ def transform(self, X, y=None):
47
+ # 1D Series -> 2D DataFrame
48
+ if hasattr(X, "to_frame"):
49
+ return X.to_frame()
50
+ # 1D array -> 2D array
51
+ if getattr(X, "ndim", 2) == 1:
52
+ return np.atleast_2d(X).T
53
+ return X
54
+
55
+
56
+ class DoubleTrans(BaseEstimator):
57
+ def fit(self, X, y=None):
58
+ return self
59
+
60
+ def transform(self, X):
61
+ return 2 * X
62
+
63
+
64
+ class SparseMatrixTrans(BaseEstimator):
65
+ def __init__(self, csr_container):
66
+ self.csr_container = csr_container
67
+
68
+ def fit(self, X, y=None):
69
+ return self
70
+
71
+ def transform(self, X, y=None):
72
+ n_samples = len(X)
73
+ return self.csr_container(sparse.eye(n_samples, n_samples))
74
+
75
+
76
+ class TransNo2D(BaseEstimator):
77
+ def fit(self, X, y=None):
78
+ return self
79
+
80
+ def transform(self, X, y=None):
81
+ return X
82
+
83
+
84
+ class TransRaise(BaseEstimator):
85
+ def fit(self, X, y=None):
86
+ raise ValueError("specific message")
87
+
88
+ def transform(self, X, y=None):
89
+ raise ValueError("specific message")
90
+
91
+
92
+ def test_column_transformer():
93
+ X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
94
+
95
+ X_res_first1D = np.array([0, 1, 2])
96
+ X_res_second1D = np.array([2, 4, 6])
97
+ X_res_first = X_res_first1D.reshape(-1, 1)
98
+ X_res_both = X_array
99
+
100
+ cases = [
101
+ # single column 1D / 2D
102
+ (0, X_res_first),
103
+ ([0], X_res_first),
104
+ # list-like
105
+ ([0, 1], X_res_both),
106
+ (np.array([0, 1]), X_res_both),
107
+ # slice
108
+ (slice(0, 1), X_res_first),
109
+ (slice(0, 2), X_res_both),
110
+ # boolean mask
111
+ (np.array([True, False]), X_res_first),
112
+ ([True, False], X_res_first),
113
+ (np.array([True, True]), X_res_both),
114
+ ([True, True], X_res_both),
115
+ ]
116
+
117
+ for selection, res in cases:
118
+ ct = ColumnTransformer([("trans", Trans(), selection)], remainder="drop")
119
+ assert_array_equal(ct.fit_transform(X_array), res)
120
+ assert_array_equal(ct.fit(X_array).transform(X_array), res)
121
+
122
+ # callable that returns any of the allowed specifiers
123
+ ct = ColumnTransformer(
124
+ [("trans", Trans(), lambda x: selection)], remainder="drop"
125
+ )
126
+ assert_array_equal(ct.fit_transform(X_array), res)
127
+ assert_array_equal(ct.fit(X_array).transform(X_array), res)
128
+
129
+ ct = ColumnTransformer([("trans1", Trans(), [0]), ("trans2", Trans(), [1])])
130
+ assert_array_equal(ct.fit_transform(X_array), X_res_both)
131
+ assert_array_equal(ct.fit(X_array).transform(X_array), X_res_both)
132
+ assert len(ct.transformers_) == 2
133
+
134
+ # test with transformer_weights
135
+ transformer_weights = {"trans1": 0.1, "trans2": 10}
136
+ both = ColumnTransformer(
137
+ [("trans1", Trans(), [0]), ("trans2", Trans(), [1])],
138
+ transformer_weights=transformer_weights,
139
+ )
140
+ res = np.vstack(
141
+ [
142
+ transformer_weights["trans1"] * X_res_first1D,
143
+ transformer_weights["trans2"] * X_res_second1D,
144
+ ]
145
+ ).T
146
+ assert_array_equal(both.fit_transform(X_array), res)
147
+ assert_array_equal(both.fit(X_array).transform(X_array), res)
148
+ assert len(both.transformers_) == 2
149
+
150
+ both = ColumnTransformer(
151
+ [("trans", Trans(), [0, 1])], transformer_weights={"trans": 0.1}
152
+ )
153
+ assert_array_equal(both.fit_transform(X_array), 0.1 * X_res_both)
154
+ assert_array_equal(both.fit(X_array).transform(X_array), 0.1 * X_res_both)
155
+ assert len(both.transformers_) == 1
156
+
157
+
158
+ def test_column_transformer_tuple_transformers_parameter():
159
+ X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
160
+
161
+ transformers = [("trans1", Trans(), [0]), ("trans2", Trans(), [1])]
162
+
163
+ ct_with_list = ColumnTransformer(transformers)
164
+ ct_with_tuple = ColumnTransformer(tuple(transformers))
165
+
166
+ assert_array_equal(
167
+ ct_with_list.fit_transform(X_array), ct_with_tuple.fit_transform(X_array)
168
+ )
169
+ assert_array_equal(
170
+ ct_with_list.fit(X_array).transform(X_array),
171
+ ct_with_tuple.fit(X_array).transform(X_array),
172
+ )
173
+
174
+
175
+ @pytest.mark.parametrize("constructor_name", ["dataframe", "polars"])
176
+ def test_column_transformer_dataframe(constructor_name):
177
+ if constructor_name == "dataframe":
178
+ dataframe_lib = pytest.importorskip("pandas")
179
+ else:
180
+ dataframe_lib = pytest.importorskip(constructor_name)
181
+
182
+ X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
183
+ X_df = _convert_container(
184
+ X_array, constructor_name, columns_name=["first", "second"]
185
+ )
186
+
187
+ X_res_first = np.array([0, 1, 2]).reshape(-1, 1)
188
+ X_res_both = X_array
189
+
190
+ cases = [
191
+ # String keys: label based
192
+ # list
193
+ (["first"], X_res_first),
194
+ (["first", "second"], X_res_both),
195
+ # slice
196
+ (slice("first", "second"), X_res_both),
197
+ # int keys: positional
198
+ # list
199
+ ([0], X_res_first),
200
+ ([0, 1], X_res_both),
201
+ (np.array([0, 1]), X_res_both),
202
+ # slice
203
+ (slice(0, 1), X_res_first),
204
+ (slice(0, 2), X_res_both),
205
+ # boolean mask
206
+ (np.array([True, False]), X_res_first),
207
+ ([True, False], X_res_first),
208
+ ]
209
+ if constructor_name == "dataframe":
210
+ # Scalars are only supported for pandas dataframes.
211
+ cases.extend(
212
+ [
213
+ # scalar
214
+ (0, X_res_first),
215
+ ("first", X_res_first),
216
+ (
217
+ dataframe_lib.Series([True, False], index=["first", "second"]),
218
+ X_res_first,
219
+ ),
220
+ ]
221
+ )
222
+
223
+ for selection, res in cases:
224
+ ct = ColumnTransformer([("trans", Trans(), selection)], remainder="drop")
225
+ assert_array_equal(ct.fit_transform(X_df), res)
226
+ assert_array_equal(ct.fit(X_df).transform(X_df), res)
227
+
228
+ # callable that returns any of the allowed specifiers
229
+ ct = ColumnTransformer(
230
+ [("trans", Trans(), lambda X: selection)], remainder="drop"
231
+ )
232
+ assert_array_equal(ct.fit_transform(X_df), res)
233
+ assert_array_equal(ct.fit(X_df).transform(X_df), res)
234
+
235
+ ct = ColumnTransformer(
236
+ [("trans1", Trans(), ["first"]), ("trans2", Trans(), ["second"])]
237
+ )
238
+ assert_array_equal(ct.fit_transform(X_df), X_res_both)
239
+ assert_array_equal(ct.fit(X_df).transform(X_df), X_res_both)
240
+ assert len(ct.transformers_) == 2
241
+ assert ct.transformers_[-1][0] != "remainder"
242
+
243
+ ct = ColumnTransformer([("trans1", Trans(), [0]), ("trans2", Trans(), [1])])
244
+ assert_array_equal(ct.fit_transform(X_df), X_res_both)
245
+ assert_array_equal(ct.fit(X_df).transform(X_df), X_res_both)
246
+ assert len(ct.transformers_) == 2
247
+ assert ct.transformers_[-1][0] != "remainder"
248
+
249
+ # test with transformer_weights
250
+ transformer_weights = {"trans1": 0.1, "trans2": 10}
251
+ both = ColumnTransformer(
252
+ [("trans1", Trans(), ["first"]), ("trans2", Trans(), ["second"])],
253
+ transformer_weights=transformer_weights,
254
+ )
255
+ res = np.vstack(
256
+ [
257
+ transformer_weights["trans1"] * X_df["first"],
258
+ transformer_weights["trans2"] * X_df["second"],
259
+ ]
260
+ ).T
261
+ assert_array_equal(both.fit_transform(X_df), res)
262
+ assert_array_equal(both.fit(X_df).transform(X_df), res)
263
+ assert len(both.transformers_) == 2
264
+ assert both.transformers_[-1][0] != "remainder"
265
+
266
+ # test multiple columns
267
+ both = ColumnTransformer(
268
+ [("trans", Trans(), ["first", "second"])], transformer_weights={"trans": 0.1}
269
+ )
270
+ assert_array_equal(both.fit_transform(X_df), 0.1 * X_res_both)
271
+ assert_array_equal(both.fit(X_df).transform(X_df), 0.1 * X_res_both)
272
+ assert len(both.transformers_) == 1
273
+ assert both.transformers_[-1][0] != "remainder"
274
+
275
+ both = ColumnTransformer(
276
+ [("trans", Trans(), [0, 1])], transformer_weights={"trans": 0.1}
277
+ )
278
+ assert_array_equal(both.fit_transform(X_df), 0.1 * X_res_both)
279
+ assert_array_equal(both.fit(X_df).transform(X_df), 0.1 * X_res_both)
280
+ assert len(both.transformers_) == 1
281
+ assert both.transformers_[-1][0] != "remainder"
282
+
283
+ # ensure pandas object is passed through
284
+
285
+ class TransAssert(BaseEstimator):
286
+ def __init__(self, expected_type_transform):
287
+ self.expected_type_transform = expected_type_transform
288
+
289
+ def fit(self, X, y=None):
290
+ return self
291
+
292
+ def transform(self, X, y=None):
293
+ assert isinstance(X, self.expected_type_transform)
294
+ if isinstance(X, dataframe_lib.Series):
295
+ X = X.to_frame()
296
+ return X
297
+
298
+ ct = ColumnTransformer(
299
+ [
300
+ (
301
+ "trans",
302
+ TransAssert(expected_type_transform=dataframe_lib.DataFrame),
303
+ ["first", "second"],
304
+ )
305
+ ]
306
+ )
307
+ ct.fit_transform(X_df)
308
+
309
+ if constructor_name == "dataframe":
310
+ # DataFrame protocol does not have 1d columns, so we only test on Pandas
311
+ # dataframes.
312
+ ct = ColumnTransformer(
313
+ [
314
+ (
315
+ "trans",
316
+ TransAssert(expected_type_transform=dataframe_lib.Series),
317
+ "first",
318
+ )
319
+ ],
320
+ remainder="drop",
321
+ )
322
+ ct.fit_transform(X_df)
323
+
324
+ # Only test on pandas because the dataframe protocol requires string column
325
+ # names
326
+ # integer column spec + integer column names -> still use positional
327
+ X_df2 = X_df.copy()
328
+ X_df2.columns = [1, 0]
329
+ ct = ColumnTransformer([("trans", Trans(), 0)], remainder="drop")
330
+ assert_array_equal(ct.fit_transform(X_df2), X_res_first)
331
+ assert_array_equal(ct.fit(X_df2).transform(X_df2), X_res_first)
332
+
333
+ assert len(ct.transformers_) == 2
334
+ assert ct.transformers_[-1][0] == "remainder"
335
+ assert ct.transformers_[-1][1] == "drop"
336
+ assert_array_equal(ct.transformers_[-1][2], [1])
337
+
338
+
339
+ @pytest.mark.parametrize("pandas", [True, False], ids=["pandas", "numpy"])
340
+ @pytest.mark.parametrize(
341
+ "column_selection",
342
+ [[], np.array([False, False]), [False, False]],
343
+ ids=["list", "bool", "bool_int"],
344
+ )
345
+ @pytest.mark.parametrize("callable_column", [False, True])
346
+ def test_column_transformer_empty_columns(pandas, column_selection, callable_column):
347
+ # test case that ensures that the column transformer also works when
348
+ # a given transformer doesn't have any columns to work on
349
+ X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
350
+ X_res_both = X_array
351
+
352
+ if pandas:
353
+ pd = pytest.importorskip("pandas")
354
+ X = pd.DataFrame(X_array, columns=["first", "second"])
355
+ else:
356
+ X = X_array
357
+
358
+ if callable_column:
359
+ column = lambda X: column_selection # noqa
360
+ else:
361
+ column = column_selection
362
+
363
+ ct = ColumnTransformer(
364
+ [("trans1", Trans(), [0, 1]), ("trans2", TransRaise(), column)]
365
+ )
366
+ assert_array_equal(ct.fit_transform(X), X_res_both)
367
+ assert_array_equal(ct.fit(X).transform(X), X_res_both)
368
+ assert len(ct.transformers_) == 2
369
+ assert isinstance(ct.transformers_[1][1], TransRaise)
370
+
371
+ ct = ColumnTransformer(
372
+ [("trans1", TransRaise(), column), ("trans2", Trans(), [0, 1])]
373
+ )
374
+ assert_array_equal(ct.fit_transform(X), X_res_both)
375
+ assert_array_equal(ct.fit(X).transform(X), X_res_both)
376
+ assert len(ct.transformers_) == 2
377
+ assert isinstance(ct.transformers_[0][1], TransRaise)
378
+
379
+ ct = ColumnTransformer([("trans", TransRaise(), column)], remainder="passthrough")
380
+ assert_array_equal(ct.fit_transform(X), X_res_both)
381
+ assert_array_equal(ct.fit(X).transform(X), X_res_both)
382
+ assert len(ct.transformers_) == 2 # including remainder
383
+ assert isinstance(ct.transformers_[0][1], TransRaise)
384
+
385
+ fixture = np.array([[], [], []])
386
+ ct = ColumnTransformer([("trans", TransRaise(), column)], remainder="drop")
387
+ assert_array_equal(ct.fit_transform(X), fixture)
388
+ assert_array_equal(ct.fit(X).transform(X), fixture)
389
+ assert len(ct.transformers_) == 2 # including remainder
390
+ assert isinstance(ct.transformers_[0][1], TransRaise)
391
+
392
+
393
+ def test_column_transformer_output_indices():
394
+ # Checks for the output_indices_ attribute
395
+ X_array = np.arange(6).reshape(3, 2)
396
+
397
+ ct = ColumnTransformer([("trans1", Trans(), [0]), ("trans2", Trans(), [1])])
398
+ X_trans = ct.fit_transform(X_array)
399
+ assert ct.output_indices_ == {
400
+ "trans1": slice(0, 1),
401
+ "trans2": slice(1, 2),
402
+ "remainder": slice(0, 0),
403
+ }
404
+ assert_array_equal(X_trans[:, [0]], X_trans[:, ct.output_indices_["trans1"]])
405
+ assert_array_equal(X_trans[:, [1]], X_trans[:, ct.output_indices_["trans2"]])
406
+
407
+ # test with transformer_weights and multiple columns
408
+ ct = ColumnTransformer(
409
+ [("trans", Trans(), [0, 1])], transformer_weights={"trans": 0.1}
410
+ )
411
+ X_trans = ct.fit_transform(X_array)
412
+ assert ct.output_indices_ == {"trans": slice(0, 2), "remainder": slice(0, 0)}
413
+ assert_array_equal(X_trans[:, [0, 1]], X_trans[:, ct.output_indices_["trans"]])
414
+ assert_array_equal(X_trans[:, []], X_trans[:, ct.output_indices_["remainder"]])
415
+
416
+ # test case that ensures that the attribute also works when
417
+ # a given transformer doesn't have any columns to work on
418
+ ct = ColumnTransformer([("trans1", Trans(), [0, 1]), ("trans2", TransRaise(), [])])
419
+ X_trans = ct.fit_transform(X_array)
420
+ assert ct.output_indices_ == {
421
+ "trans1": slice(0, 2),
422
+ "trans2": slice(0, 0),
423
+ "remainder": slice(0, 0),
424
+ }
425
+ assert_array_equal(X_trans[:, [0, 1]], X_trans[:, ct.output_indices_["trans1"]])
426
+ assert_array_equal(X_trans[:, []], X_trans[:, ct.output_indices_["trans2"]])
427
+ assert_array_equal(X_trans[:, []], X_trans[:, ct.output_indices_["remainder"]])
428
+
429
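+     # a transformer fitted on no columns gets an empty slice; the remainder spans all columns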
+ ct = ColumnTransformer([("trans", TransRaise(), [])], remainder="passthrough")
430
+ X_trans = ct.fit_transform(X_array)
431
+ assert ct.output_indices_ == {"trans": slice(0, 0), "remainder": slice(0, 2)}
432
+ assert_array_equal(X_trans[:, []], X_trans[:, ct.output_indices_["trans"]])
433
+ assert_array_equal(X_trans[:, [0, 1]], X_trans[:, ct.output_indices_["remainder"]])
434
+
435
+
436
+ def test_column_transformer_output_indices_df():
+     # Checks for the output_indices_ attribute with data frames
+     pd = pytest.importorskip("pandas")
+
+     X_df = pd.DataFrame(np.arange(6).reshape(3, 2), columns=["first", "second"])
+
+     ct = ColumnTransformer(
+         [("trans1", Trans(), ["first"]), ("trans2", Trans(), ["second"])]
+     )
+     X_trans = ct.fit_transform(X_df)
+     assert ct.output_indices_ == {
+         "trans1": slice(0, 1),
+         "trans2": slice(1, 2),
+         "remainder": slice(0, 0),
+     }
+     assert_array_equal(X_trans[:, [0]], X_trans[:, ct.output_indices_["trans1"]])
+     assert_array_equal(X_trans[:, [1]], X_trans[:, ct.output_indices_["trans2"]])
+     assert_array_equal(X_trans[:, []], X_trans[:, ct.output_indices_["remainder"]])
+
+     ct = ColumnTransformer([("trans1", Trans(), [0]), ("trans2", Trans(), [1])])
+     X_trans = ct.fit_transform(X_df)
+     assert ct.output_indices_ == {
+         "trans1": slice(0, 1),
+         "trans2": slice(1, 2),
+         "remainder": slice(0, 0),
+     }
+     assert_array_equal(X_trans[:, [0]], X_trans[:, ct.output_indices_["trans1"]])
+     assert_array_equal(X_trans[:, [1]], X_trans[:, ct.output_indices_["trans2"]])
+     assert_array_equal(X_trans[:, []], X_trans[:, ct.output_indices_["remainder"]])
+
+
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
468
+ def test_column_transformer_sparse_array(csr_container):
469
+ X_sparse = csr_container(sparse.eye(3, 2))
470
+
471
+ # no distinction between 1D and 2D
472
+ X_res_first = X_sparse[:, [0]]
473
+ X_res_both = X_sparse
474
+
475
+ for col in [(0,), [0], slice(0, 1)]:
476
+ for remainder, res in [("drop", X_res_first), ("passthrough", X_res_both)]:
477
+ ct = ColumnTransformer(
478
+ [("trans", Trans(), col)], remainder=remainder, sparse_threshold=0.8
479
+ )
480
+ assert sparse.issparse(ct.fit_transform(X_sparse))
481
+ assert_allclose_dense_sparse(ct.fit_transform(X_sparse), res)
482
+ assert_allclose_dense_sparse(ct.fit(X_sparse).transform(X_sparse), res)
483
+
484
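+     # selecting both columns should return the full sparse matrix unchanged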
+     for col in [[0, 1], slice(0, 2)]:
+         ct = ColumnTransformer([("trans", Trans(), col)], sparse_threshold=0.8)
+         assert sparse.issparse(ct.fit_transform(X_sparse))
+         assert_allclose_dense_sparse(ct.fit_transform(X_sparse), X_res_both)
+         assert_allclose_dense_sparse(ct.fit(X_sparse).transform(X_sparse), X_res_both)
+
+
+ def test_column_transformer_list():
+     X_list = [[1, float("nan"), "a"], [0, 0, "b"]]
+     expected_result = np.array(
+         [
+             [1, float("nan"), 1, 0],
+             [-1, 0, 0, 1],
+         ]
+     )
+
+     ct = ColumnTransformer(
+         [
+             ("numerical", StandardScaler(), [0, 1]),
+             ("categorical", OneHotEncoder(), [2]),
+         ]
+     )
+
+     assert_array_equal(ct.fit_transform(X_list), expected_result)
+     assert_array_equal(ct.fit(X_list).transform(X_list), expected_result)
+
+
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
512
+ def test_column_transformer_sparse_stacking(csr_container):
513
+ X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
514
+ col_trans = ColumnTransformer(
515
+ [("trans1", Trans(), [0]), ("trans2", SparseMatrixTrans(csr_container), 1)],
516
+ sparse_threshold=0.8,
517
+ )
518
+ col_trans.fit(X_array)
519
+ X_trans = col_trans.transform(X_array)
520
+ assert sparse.issparse(X_trans)
521
+ assert X_trans.shape == (X_trans.shape[0], X_trans.shape[0] + 1)
522
+ assert_array_equal(X_trans.toarray()[:, 1:], np.eye(X_trans.shape[0]))
523
+ assert len(col_trans.transformers_) == 2
524
+ assert col_trans.transformers_[-1][0] != "remainder"
525
+
526
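+     # with a low sparse_threshold the stacked result is returned dense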
+     col_trans = ColumnTransformer(
+         [("trans1", Trans(), [0]), ("trans2", SparseMatrixTrans(csr_container), 1)],
+         sparse_threshold=0.1,
+     )
+     col_trans.fit(X_array)
+     X_trans = col_trans.transform(X_array)
+     assert not sparse.issparse(X_trans)
+     assert X_trans.shape == (X_trans.shape[0], X_trans.shape[0] + 1)
+     assert_array_equal(X_trans[:, 1:], np.eye(X_trans.shape[0]))
+
+
+ def test_column_transformer_mixed_cols_sparse():
+     df = np.array([["a", 1, True], ["b", 2, False]], dtype="O")
+
+     ct = make_column_transformer(
+         (OneHotEncoder(), [0]), ("passthrough", [1, 2]), sparse_threshold=1.0
+     )
+
+     # this shouldn't fail, since booleans can be coerced to numeric values
+     # See: https://github.com/scikit-learn/scikit-learn/issues/11912
+     X_trans = ct.fit_transform(df)
+     assert X_trans.getformat() == "csr"
+     assert_array_equal(X_trans.toarray(), np.array([[1, 0, 1, 1], [0, 1, 2, 0]]))
+
+     ct = make_column_transformer(
+         (OneHotEncoder(), [0]), ("passthrough", [0]), sparse_threshold=1.0
+     )
+     with pytest.raises(ValueError, match="For a sparse output, all columns should"):
+         # this fails since the strings `a` and `b` cannot be
+         # coerced to numeric values.
+         ct.fit_transform(df)
+
+
+ def test_column_transformer_sparse_threshold():
+     X_array = np.array([["a", "b"], ["A", "B"]], dtype=object).T
+     # above data has sparsity of 4 / 8 = 0.5
+
+     # apply threshold even if all sparse
+     col_trans = ColumnTransformer(
+         [("trans1", OneHotEncoder(), [0]), ("trans2", OneHotEncoder(), [1])],
+         sparse_threshold=0.2,
+     )
+     res = col_trans.fit_transform(X_array)
+     assert not sparse.issparse(res)
+     assert not col_trans.sparse_output_
+
+     # mixed -> sparsity of (4 + 2) / 8 = 0.75
+     for thres in [0.75001, 1]:
+         col_trans = ColumnTransformer(
+             [
+                 ("trans1", OneHotEncoder(sparse_output=True), [0]),
+                 ("trans2", OneHotEncoder(sparse_output=False), [1]),
+             ],
+             sparse_threshold=thres,
+         )
+         res = col_trans.fit_transform(X_array)
+         assert sparse.issparse(res)
+         assert col_trans.sparse_output_
+
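+     # a threshold not strictly above the 0.75 sparsity produces dense output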
+     for thres in [0.75, 0]:
+         col_trans = ColumnTransformer(
+             [
+                 ("trans1", OneHotEncoder(sparse_output=True), [0]),
+                 ("trans2", OneHotEncoder(sparse_output=False), [1]),
+             ],
+             sparse_threshold=thres,
+         )
+         res = col_trans.fit_transform(X_array)
+         assert not sparse.issparse(res)
+         assert not col_trans.sparse_output_
+
+     # if nothing is sparse -> no sparse
+     for thres in [0.33, 0, 1]:
+         col_trans = ColumnTransformer(
+             [
+                 ("trans1", OneHotEncoder(sparse_output=False), [0]),
+                 ("trans2", OneHotEncoder(sparse_output=False), [1]),
+             ],
+             sparse_threshold=thres,
+         )
+         res = col_trans.fit_transform(X_array)
+         assert not sparse.issparse(res)
+         assert not col_trans.sparse_output_
+
+
+ def test_column_transformer_error_msg_1D():
+     X_array = np.array([[0.0, 1.0, 2.0], [2.0, 4.0, 6.0]]).T
+
+     col_trans = ColumnTransformer([("trans", StandardScaler(), 0)])
+     msg = "1D data passed to a transformer"
+     with pytest.raises(ValueError, match=msg):
+         col_trans.fit(X_array)
+
+     with pytest.raises(ValueError, match=msg):
+         col_trans.fit_transform(X_array)
+
+     col_trans = ColumnTransformer([("trans", TransRaise(), 0)])
+     for func in [col_trans.fit, col_trans.fit_transform]:
+         with pytest.raises(ValueError, match="specific message"):
+             func(X_array)
+
+
+ def test_2D_transformer_output():
+     X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
+
+     # if one transformer is dropped, test that name is still correct
+     ct = ColumnTransformer([("trans1", "drop", 0), ("trans2", TransNo2D(), 1)])
+
+     msg = "the 'trans2' transformer should be 2D"
+     with pytest.raises(ValueError, match=msg):
+         ct.fit_transform(X_array)
+     # because fit also applies the transform, this already raises on fit
+     with pytest.raises(ValueError, match=msg):
+         ct.fit(X_array)
+
+
+ def test_2D_transformer_output_pandas():
+     pd = pytest.importorskip("pandas")
+
+     X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
+     X_df = pd.DataFrame(X_array, columns=["col1", "col2"])
+
+     # if one transformer is dropped, test that name is still correct
+     ct = ColumnTransformer([("trans1", TransNo2D(), "col1")])
+     msg = "the 'trans1' transformer should be 2D"
+     with pytest.raises(ValueError, match=msg):
+         ct.fit_transform(X_df)
+     # because fit also applies the transform, this already raises on fit
+     with pytest.raises(ValueError, match=msg):
+         ct.fit(X_df)
+
+
+ @pytest.mark.parametrize("remainder", ["drop", "passthrough"])
659
+ def test_column_transformer_invalid_columns(remainder):
660
+ X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
661
+
662
+ # general invalid
663
+ for col in [1.5, ["string", 1], slice(1, "s"), np.array([1.0])]:
664
+ ct = ColumnTransformer([("trans", Trans(), col)], remainder=remainder)
665
+ with pytest.raises(ValueError, match="No valid specification"):
666
+ ct.fit(X_array)
667
+
668
+ # invalid for arrays
669
+ for col in ["string", ["string", "other"], slice("a", "b")]:
670
+ ct = ColumnTransformer([("trans", Trans(), col)], remainder=remainder)
671
+ with pytest.raises(ValueError, match="Specifying the columns"):
672
+ ct.fit(X_array)
673
+
674
+ # transformed n_features does not match fitted n_features
675
+ col = [0, 1]
676
+ ct = ColumnTransformer([("trans", Trans(), col)], remainder=remainder)
677
+ ct.fit(X_array)
678
+ X_array_more = np.array([[0, 1, 2], [2, 4, 6], [3, 6, 9]]).T
679
+ msg = "X has 3 features, but ColumnTransformer is expecting 2 features as input."
680
+ with pytest.raises(ValueError, match=msg):
681
+ ct.transform(X_array_more)
682
+ X_array_fewer = np.array(
683
+ [
684
+ [0, 1, 2],
685
+ ]
686
+ ).T
687
+ err_msg = (
688
+ "X has 1 features, but ColumnTransformer is expecting 2 features as input."
689
+ )
690
+ with pytest.raises(ValueError, match=err_msg):
691
+ ct.transform(X_array_fewer)
692
+
693
+
694
+ def test_column_transformer_invalid_transformer():
+     class NoTrans(BaseEstimator):
+         def fit(self, X, y=None):
+             return self
+
+         def predict(self, X):
+             return X
+
+     X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
+     ct = ColumnTransformer([("trans", NoTrans(), [0])])
+     msg = "All estimators should implement fit and transform"
+     with pytest.raises(TypeError, match=msg):
+         ct.fit(X_array)
+
+
+ def test_make_column_transformer():
+     scaler = StandardScaler()
+     norm = Normalizer()
+     ct = make_column_transformer((scaler, "first"), (norm, ["second"]))
+     names, transformers, columns = zip(*ct.transformers)
+     assert names == ("standardscaler", "normalizer")
+     assert transformers == (scaler, norm)
+     assert columns == ("first", ["second"])
+
+
+ def test_make_column_transformer_pandas():
+     pd = pytest.importorskip("pandas")
+     X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
+     X_df = pd.DataFrame(X_array, columns=["first", "second"])
+     norm = Normalizer()
+     ct1 = ColumnTransformer([("norm", Normalizer(), X_df.columns)])
+     ct2 = make_column_transformer((norm, X_df.columns))
+     assert_almost_equal(ct1.fit_transform(X_df), ct2.fit_transform(X_df))
+
+
+ def test_make_column_transformer_kwargs():
+     scaler = StandardScaler()
+     norm = Normalizer()
+     ct = make_column_transformer(
+         (scaler, "first"),
+         (norm, ["second"]),
+         n_jobs=3,
+         remainder="drop",
+         sparse_threshold=0.5,
+     )
+     assert (
+         ct.transformers
+         == make_column_transformer((scaler, "first"), (norm, ["second"])).transformers
+     )
+     assert ct.n_jobs == 3
+     assert ct.remainder == "drop"
+     assert ct.sparse_threshold == 0.5
+     # invalid keyword parameters should raise an error message
+     msg = re.escape(
+         "make_column_transformer() got an unexpected "
+         "keyword argument 'transformer_weights'"
+     )
+     with pytest.raises(TypeError, match=msg):
+         make_column_transformer(
+             (scaler, "first"),
+             (norm, ["second"]),
+             transformer_weights={"pca": 10, "Transf": 1},
+         )
+
+
+ def test_make_column_transformer_remainder_transformer():
+     scaler = StandardScaler()
+     norm = Normalizer()
+     remainder = StandardScaler()
+     ct = make_column_transformer(
+         (scaler, "first"), (norm, ["second"]), remainder=remainder
+     )
+     assert ct.remainder == remainder
+
+
+ def test_column_transformer_get_set_params():
+     ct = ColumnTransformer(
+         [("trans1", StandardScaler(), [0]), ("trans2", StandardScaler(), [1])]
+     )
+
+     exp = {
+         "n_jobs": None,
+         "remainder": "drop",
+         "sparse_threshold": 0.3,
+         "trans1": ct.transformers[0][1],
+         "trans1__copy": True,
+         "trans1__with_mean": True,
+         "trans1__with_std": True,
+         "trans2": ct.transformers[1][1],
+         "trans2__copy": True,
+         "trans2__with_mean": True,
+         "trans2__with_std": True,
+         "transformers": ct.transformers,
+         "transformer_weights": None,
+         "verbose_feature_names_out": True,
+         "verbose": False,
+     }
+
+     assert ct.get_params() == exp
+
+     ct.set_params(trans1__with_mean=False)
+     assert not ct.get_params()["trans1__with_mean"]
+
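+     # replacing a transformer with "passthrough" removes its nested parameters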
+ ct.set_params(trans1="passthrough")
798
+ exp = {
799
+ "n_jobs": None,
800
+ "remainder": "drop",
801
+ "sparse_threshold": 0.3,
802
+ "trans1": "passthrough",
803
+ "trans2": ct.transformers[1][1],
804
+ "trans2__copy": True,
805
+ "trans2__with_mean": True,
806
+ "trans2__with_std": True,
807
+ "transformers": ct.transformers,
808
+ "transformer_weights": None,
809
+ "verbose_feature_names_out": True,
810
+ "verbose": False,
811
+ }
812
+
813
+ assert ct.get_params() == exp
814
+
815
+
816
+ def test_column_transformer_named_estimators():
+     X_array = np.array([[0.0, 1.0, 2.0], [2.0, 4.0, 6.0]]).T
+     ct = ColumnTransformer(
+         [
+             ("trans1", StandardScaler(), [0]),
+             ("trans2", StandardScaler(with_std=False), [1]),
+         ]
+     )
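+     # fitted attributes such as transformers_ only appear after fit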
+ assert not hasattr(ct, "transformers_")
825
+ ct.fit(X_array)
826
+ assert hasattr(ct, "transformers_")
827
+ assert isinstance(ct.named_transformers_["trans1"], StandardScaler)
828
+ assert isinstance(ct.named_transformers_.trans1, StandardScaler)
829
+ assert isinstance(ct.named_transformers_["trans2"], StandardScaler)
830
+ assert isinstance(ct.named_transformers_.trans2, StandardScaler)
831
+ assert not ct.named_transformers_.trans2.with_std
832
+ # check it are fitted transformers
833
+ assert ct.named_transformers_.trans1.mean_ == 1.0
834
+
835
+
836
+ def test_column_transformer_cloning():
837
+ X_array = np.array([[0.0, 1.0, 2.0], [2.0, 4.0, 6.0]]).T
838
+
839
+ ct = ColumnTransformer([("trans", StandardScaler(), [0])])
840
+ ct.fit(X_array)
841
+ assert not hasattr(ct.transformers[0][1], "mean_")
842
+ assert hasattr(ct.transformers_[0][1], "mean_")
843
+
844
+ ct = ColumnTransformer([("trans", StandardScaler(), [0])])
845
+ ct.fit_transform(X_array)
846
+ assert not hasattr(ct.transformers[0][1], "mean_")
847
+ assert hasattr(ct.transformers_[0][1], "mean_")
848
+
849
+
850
+ def test_column_transformer_get_feature_names():
+     X_array = np.array([[0.0, 1.0, 2.0], [2.0, 4.0, 6.0]]).T
+     ct = ColumnTransformer([("trans", Trans(), [0, 1])])
+     # raise correct error when not fitted
+     with pytest.raises(NotFittedError):
+         ct.get_feature_names_out()
+     # raise correct error when no feature names are available
+     ct.fit(X_array)
+     msg = re.escape(
+         "Transformer trans (type Trans) does not provide get_feature_names_out"
+     )
+     with pytest.raises(AttributeError, match=msg):
+         ct.get_feature_names_out()
+
+
+ def test_column_transformer_special_strings():
+     # one 'drop' -> ignore
+     X_array = np.array([[0.0, 1.0, 2.0], [2.0, 4.0, 6.0]]).T
+     ct = ColumnTransformer([("trans1", Trans(), [0]), ("trans2", "drop", [1])])
+     exp = np.array([[0.0], [1.0], [2.0]])
+     assert_array_equal(ct.fit_transform(X_array), exp)
+     assert_array_equal(ct.fit(X_array).transform(X_array), exp)
+     assert len(ct.transformers_) == 2
+     assert ct.transformers_[-1][0] != "remainder"
+
+     # all 'drop' -> return shape 0 array
+     ct = ColumnTransformer([("trans1", "drop", [0]), ("trans2", "drop", [1])])
+     assert_array_equal(ct.fit(X_array).transform(X_array).shape, (3, 0))
+     assert_array_equal(ct.fit_transform(X_array).shape, (3, 0))
+     assert len(ct.transformers_) == 2
+     assert ct.transformers_[-1][0] != "remainder"
+
+     # 'passthrough'
+     X_array = np.array([[0.0, 1.0, 2.0], [2.0, 4.0, 6.0]]).T
+     ct = ColumnTransformer([("trans1", Trans(), [0]), ("trans2", "passthrough", [1])])
+     exp = X_array
+     assert_array_equal(ct.fit_transform(X_array), exp)
+     assert_array_equal(ct.fit(X_array).transform(X_array), exp)
+     assert len(ct.transformers_) == 2
+     assert ct.transformers_[-1][0] != "remainder"
+
+
+ def test_column_transformer_remainder():
+     X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
+
+     X_res_first = np.array([0, 1, 2]).reshape(-1, 1)
+     X_res_second = np.array([2, 4, 6]).reshape(-1, 1)
+     X_res_both = X_array
+
+     # default drop
+     ct = ColumnTransformer([("trans1", Trans(), [0])])
+     assert_array_equal(ct.fit_transform(X_array), X_res_first)
+     assert_array_equal(ct.fit(X_array).transform(X_array), X_res_first)
+     assert len(ct.transformers_) == 2
+     assert ct.transformers_[-1][0] == "remainder"
+     assert ct.transformers_[-1][1] == "drop"
+     assert_array_equal(ct.transformers_[-1][2], [1])
+
+     # specify passthrough
+     ct = ColumnTransformer([("trans", Trans(), [0])], remainder="passthrough")
+     assert_array_equal(ct.fit_transform(X_array), X_res_both)
+     assert_array_equal(ct.fit(X_array).transform(X_array), X_res_both)
+     assert len(ct.transformers_) == 2
+     assert ct.transformers_[-1][0] == "remainder"
+     assert isinstance(ct.transformers_[-1][1], FunctionTransformer)
+     assert_array_equal(ct.transformers_[-1][2], [1])
+
+     # column order is not preserved (passed-through columns are appended at the end)
+     ct = ColumnTransformer([("trans1", Trans(), [1])], remainder="passthrough")
+     assert_array_equal(ct.fit_transform(X_array), X_res_both[:, ::-1])
+     assert_array_equal(ct.fit(X_array).transform(X_array), X_res_both[:, ::-1])
+     assert len(ct.transformers_) == 2
+     assert ct.transformers_[-1][0] == "remainder"
+     assert isinstance(ct.transformers_[-1][1], FunctionTransformer)
+     assert_array_equal(ct.transformers_[-1][2], [0])
+
+     # passthrough when all actual transformers are skipped
+     ct = ColumnTransformer([("trans1", "drop", [0])], remainder="passthrough")
+     assert_array_equal(ct.fit_transform(X_array), X_res_second)
+     assert_array_equal(ct.fit(X_array).transform(X_array), X_res_second)
+     assert len(ct.transformers_) == 2
+     assert ct.transformers_[-1][0] == "remainder"
+     assert isinstance(ct.transformers_[-1][1], FunctionTransformer)
+     assert_array_equal(ct.transformers_[-1][2], [1])
+
+     # check default for make_column_transformer
+     ct = make_column_transformer((Trans(), [0]))
+     assert ct.remainder == "drop"
+
+
+ @pytest.mark.parametrize(
+     "key", [[0], np.array([0]), slice(0, 1), np.array([True, False])]
+ )
+ def test_column_transformer_remainder_numpy(key):
+     # test different ways that columns are specified with passthrough
+     X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
+     X_res_both = X_array
+
+     ct = ColumnTransformer([("trans1", Trans(), key)], remainder="passthrough")
+     assert_array_equal(ct.fit_transform(X_array), X_res_both)
+     assert_array_equal(ct.fit(X_array).transform(X_array), X_res_both)
+     assert len(ct.transformers_) == 2
+     assert ct.transformers_[-1][0] == "remainder"
+     assert isinstance(ct.transformers_[-1][1], FunctionTransformer)
+     assert_array_equal(ct.transformers_[-1][2], [1])
+
+
+ @pytest.mark.parametrize(
+     "key",
+     [
+         [0],
+         slice(0, 1),
+         np.array([True, False]),
+         ["first"],
+         "pd-index",
+         np.array(["first"]),
+         np.array(["first"], dtype=object),
+         slice(None, "first"),
+         slice("first", "first"),
+     ],
+ )
+ def test_column_transformer_remainder_pandas(key):
+     # test different ways that columns are specified with passthrough
+     pd = pytest.importorskip("pandas")
+     if isinstance(key, str) and key == "pd-index":
+         key = pd.Index(["first"])
+
+     X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
+     X_df = pd.DataFrame(X_array, columns=["first", "second"])
+     X_res_both = X_array
+
+     ct = ColumnTransformer([("trans1", Trans(), key)], remainder="passthrough")
+     assert_array_equal(ct.fit_transform(X_df), X_res_both)
+     assert_array_equal(ct.fit(X_df).transform(X_df), X_res_both)
+     assert len(ct.transformers_) == 2
+     assert ct.transformers_[-1][0] == "remainder"
+     assert isinstance(ct.transformers_[-1][1], FunctionTransformer)
+     assert_array_equal(ct.transformers_[-1][2], [1])
+
+
+ @pytest.mark.parametrize(
+     "key", [[0], np.array([0]), slice(0, 1), np.array([True, False, False])]
+ )
+ def test_column_transformer_remainder_transformer(key):
+     X_array = np.array([[0, 1, 2], [2, 4, 6], [8, 6, 4]]).T
+     X_res_both = X_array.copy()
+
+     # second and third columns are doubled when remainder = DoubleTrans
+     X_res_both[:, 1:3] *= 2
+
+     ct = ColumnTransformer([("trans1", Trans(), key)], remainder=DoubleTrans())
+
+     assert_array_equal(ct.fit_transform(X_array), X_res_both)
+     assert_array_equal(ct.fit(X_array).transform(X_array), X_res_both)
+     assert len(ct.transformers_) == 2
+     assert ct.transformers_[-1][0] == "remainder"
+     assert isinstance(ct.transformers_[-1][1], DoubleTrans)
+     assert_array_equal(ct.transformers_[-1][2], [1, 2])
+
+
+ def test_column_transformer_no_remaining_remainder_transformer():
+     X_array = np.array([[0, 1, 2], [2, 4, 6], [8, 6, 4]]).T
+
+     ct = ColumnTransformer([("trans1", Trans(), [0, 1, 2])], remainder=DoubleTrans())
+
+     assert_array_equal(ct.fit_transform(X_array), X_array)
+     assert_array_equal(ct.fit(X_array).transform(X_array), X_array)
+     assert len(ct.transformers_) == 1
+     assert ct.transformers_[-1][0] != "remainder"
+
+
+ def test_column_transformer_drops_all_remainder_transformer():
+     X_array = np.array([[0, 1, 2], [2, 4, 6], [8, 6, 4]]).T
+
+     # columns are doubled when remainder = DoubleTrans
+     X_res_both = 2 * X_array.copy()[:, 1:3]
+
+     ct = ColumnTransformer([("trans1", "drop", [0])], remainder=DoubleTrans())
+
+     assert_array_equal(ct.fit_transform(X_array), X_res_both)
+     assert_array_equal(ct.fit(X_array).transform(X_array), X_res_both)
+     assert len(ct.transformers_) == 2
+     assert ct.transformers_[-1][0] == "remainder"
+     assert isinstance(ct.transformers_[-1][1], DoubleTrans)
+     assert_array_equal(ct.transformers_[-1][2], [1, 2])
+
+
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
1038
+ def test_column_transformer_sparse_remainder_transformer(csr_container):
1039
+ X_array = np.array([[0, 1, 2], [2, 4, 6], [8, 6, 4]]).T
1040
+
1041
+ ct = ColumnTransformer(
1042
+ [("trans1", Trans(), [0])],
1043
+ remainder=SparseMatrixTrans(csr_container),
1044
+ sparse_threshold=0.8,
1045
+ )
1046
+
1047
+ X_trans = ct.fit_transform(X_array)
1048
+ assert sparse.issparse(X_trans)
1049
+ # SparseMatrixTrans creates 3 features for each column. There is
1050
+ # one column in ``transformers``, thus:
1051
+ assert X_trans.shape == (3, 3 + 1)
1052
+
1053
+ exp_array = np.hstack((X_array[:, 0].reshape(-1, 1), np.eye(3)))
1054
+ assert_array_equal(X_trans.toarray(), exp_array)
1055
+ assert len(ct.transformers_) == 2
1056
+ assert ct.transformers_[-1][0] == "remainder"
1057
+ assert isinstance(ct.transformers_[-1][1], SparseMatrixTrans)
1058
+ assert_array_equal(ct.transformers_[-1][2], [1, 2])
1059
+
1060
+
1061
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
1062
+ def test_column_transformer_drop_all_sparse_remainder_transformer(csr_container):
1063
+ X_array = np.array([[0, 1, 2], [2, 4, 6], [8, 6, 4]]).T
1064
+ ct = ColumnTransformer(
1065
+ [("trans1", "drop", [0])],
1066
+ remainder=SparseMatrixTrans(csr_container),
1067
+ sparse_threshold=0.8,
1068
+ )
1069
+
1070
+ X_trans = ct.fit_transform(X_array)
1071
+ assert sparse.issparse(X_trans)
1072
+
1073
+ # SparseMatrixTrans creates 3 features for each column, thus:
1074
+ assert X_trans.shape == (3, 3)
1075
+ assert_array_equal(X_trans.toarray(), np.eye(3))
1076
+ assert len(ct.transformers_) == 2
1077
+ assert ct.transformers_[-1][0] == "remainder"
1078
+ assert isinstance(ct.transformers_[-1][1], SparseMatrixTrans)
1079
+ assert_array_equal(ct.transformers_[-1][2], [1, 2])
1080
+
1081
+
1082
+ def test_column_transformer_get_set_params_with_remainder():
+     ct = ColumnTransformer(
+         [("trans1", StandardScaler(), [0])], remainder=StandardScaler()
+     )
+
+     exp = {
+         "n_jobs": None,
+         "remainder": ct.remainder,
+         "remainder__copy": True,
+         "remainder__with_mean": True,
+         "remainder__with_std": True,
+         "sparse_threshold": 0.3,
+         "trans1": ct.transformers[0][1],
+         "trans1__copy": True,
+         "trans1__with_mean": True,
+         "trans1__with_std": True,
+         "transformers": ct.transformers,
+         "transformer_weights": None,
+         "verbose_feature_names_out": True,
+         "verbose": False,
+     }
+
+     assert ct.get_params() == exp
+
+     ct.set_params(remainder__with_std=False)
+     assert not ct.get_params()["remainder__with_std"]
+
+     ct.set_params(trans1="passthrough")
+     exp = {
+         "n_jobs": None,
+         "remainder": ct.remainder,
+         "remainder__copy": True,
+         "remainder__with_mean": True,
+         "remainder__with_std": False,
+         "sparse_threshold": 0.3,
+         "trans1": "passthrough",
+         "transformers": ct.transformers,
+         "transformer_weights": None,
+         "verbose_feature_names_out": True,
+         "verbose": False,
+     }
+     assert ct.get_params() == exp
+
+
+ def test_column_transformer_no_estimators():
+     X_array = np.array([[0, 1, 2], [2, 4, 6], [8, 6, 4]]).astype("float").T
+     ct = ColumnTransformer([], remainder=StandardScaler())
+
+     params = ct.get_params()
+     assert params["remainder__with_mean"]
+
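+     # with no transformers, the remainder processes every column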
+     X_trans = ct.fit_transform(X_array)
+     assert X_trans.shape == X_array.shape
+     assert len(ct.transformers_) == 1
+     assert ct.transformers_[-1][0] == "remainder"
+     assert ct.transformers_[-1][2] == [0, 1, 2]
+
+
+ @pytest.mark.parametrize(
+     ["est", "pattern"],
+     [
+         (
+             ColumnTransformer(
+                 [("trans1", Trans(), [0]), ("trans2", Trans(), [1])],
+                 remainder=DoubleTrans(),
+             ),
+             (
+                 r"\[ColumnTransformer\].*\(1 of 3\) Processing trans1.* total=.*\n"
+                 r"\[ColumnTransformer\].*\(2 of 3\) Processing trans2.* total=.*\n"
+                 r"\[ColumnTransformer\].*\(3 of 3\) Processing remainder.* total=.*\n$"
+             ),
+         ),
+         (
+             ColumnTransformer(
+                 [("trans1", Trans(), [0]), ("trans2", Trans(), [1])],
+                 remainder="passthrough",
+             ),
+             (
+                 r"\[ColumnTransformer\].*\(1 of 3\) Processing trans1.* total=.*\n"
+                 r"\[ColumnTransformer\].*\(2 of 3\) Processing trans2.* total=.*\n"
+                 r"\[ColumnTransformer\].*\(3 of 3\) Processing remainder.* total=.*\n$"
+             ),
+         ),
+         (
+             ColumnTransformer(
+                 [("trans1", Trans(), [0]), ("trans2", "drop", [1])],
+                 remainder="passthrough",
+             ),
+             (
+                 r"\[ColumnTransformer\].*\(1 of 2\) Processing trans1.* total=.*\n"
+                 r"\[ColumnTransformer\].*\(2 of 2\) Processing remainder.* total=.*\n$"
+             ),
+         ),
+         (
+             ColumnTransformer(
+                 [("trans1", Trans(), [0]), ("trans2", "passthrough", [1])],
+                 remainder="passthrough",
+             ),
+             (
+                 r"\[ColumnTransformer\].*\(1 of 3\) Processing trans1.* total=.*\n"
+                 r"\[ColumnTransformer\].*\(2 of 3\) Processing trans2.* total=.*\n"
+                 r"\[ColumnTransformer\].*\(3 of 3\) Processing remainder.* total=.*\n$"
+             ),
+         ),
+         (
+             ColumnTransformer([("trans1", Trans(), [0])], remainder="passthrough"),
+             (
+                 r"\[ColumnTransformer\].*\(1 of 2\) Processing trans1.* total=.*\n"
+                 r"\[ColumnTransformer\].*\(2 of 2\) Processing remainder.* total=.*\n$"
+             ),
+         ),
+         (
+             ColumnTransformer(
+                 [("trans1", Trans(), [0]), ("trans2", Trans(), [1])], remainder="drop"
+             ),
+             (
+                 r"\[ColumnTransformer\].*\(1 of 2\) Processing trans1.* total=.*\n"
+                 r"\[ColumnTransformer\].*\(2 of 2\) Processing trans2.* total=.*\n$"
+             ),
+         ),
+         (
+             ColumnTransformer([("trans1", Trans(), [0])], remainder="drop"),
+             r"\[ColumnTransformer\].*\(1 of 1\) Processing trans1.* total=.*\n$",
+         ),
+     ],
+ )
+ @pytest.mark.parametrize("method", ["fit", "fit_transform"])
+ def test_column_transformer_verbose(est, pattern, method, capsys):
+     X_array = np.array([[0, 1, 2], [2, 4, 6], [8, 6, 4]]).T
+
+     func = getattr(est, method)
+     est.set_params(verbose=False)
+     func(X_array)
+     assert not capsys.readouterr().out, "Got output for verbose=False"
+
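+     # with verbose=True, stdout must match the expected progress pattern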
+     est.set_params(verbose=True)
+     func(X_array)
+     assert re.match(pattern, capsys.readouterr()[0])
+
+
+ def test_column_transformer_no_estimators_set_params():
+     ct = ColumnTransformer([]).set_params(n_jobs=2)
+     assert ct.n_jobs == 2
+
+
+ def test_column_transformer_callable_specifier():
+     # assert that function gets the full array
+     X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
+     X_res_first = np.array([[0, 1, 2]]).T
+
+     def func(X):
+         assert_array_equal(X, X_array)
+         return [0]
+
+     ct = ColumnTransformer([("trans", Trans(), func)], remainder="drop")
+     assert_array_equal(ct.fit_transform(X_array), X_res_first)
+     assert_array_equal(ct.fit(X_array).transform(X_array), X_res_first)
+     assert callable(ct.transformers[0][2])
+     assert ct.transformers_[0][2] == [0]
+
+
+ def test_column_transformer_callable_specifier_dataframe():
+     # assert that function gets the full dataframe
+     pd = pytest.importorskip("pandas")
+     X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
+     X_res_first = np.array([[0, 1, 2]]).T
+
+     X_df = pd.DataFrame(X_array, columns=["first", "second"])
+
+     def func(X):
+         assert_array_equal(X.columns, X_df.columns)
+         assert_array_equal(X.values, X_df.values)
+         return ["first"]
+
+     ct = ColumnTransformer([("trans", Trans(), func)], remainder="drop")
+     assert_array_equal(ct.fit_transform(X_df), X_res_first)
+     assert_array_equal(ct.fit(X_df).transform(X_df), X_res_first)
+     assert callable(ct.transformers[0][2])
+     assert ct.transformers_[0][2] == ["first"]
+
+
+ def test_column_transformer_negative_column_indexes():
+     X = np.random.randn(2, 2)
+     X_categories = np.array([[1], [2]])
+     X = np.concatenate([X, X_categories], axis=1)
+
+     ohe = OneHotEncoder()
+
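+     # index -1 addresses the same (last) column as index 2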
+ tf_1 = ColumnTransformer([("ohe", ohe, [-1])], remainder="passthrough")
1271
+ tf_2 = ColumnTransformer([("ohe", ohe, [2])], remainder="passthrough")
1272
+ assert_array_equal(tf_1.fit_transform(X), tf_2.fit_transform(X))
1273
+
1274
+
1275
+ @pytest.mark.parametrize("array_type", [np.asarray, *CSR_CONTAINERS])
1276
+ def test_column_transformer_mask_indexing(array_type):
1277
+ # Regression test for #14510
1278
+ # Boolean array-like does not behave as boolean array with sparse matrices.
1279
+ X = np.transpose([[1, 2, 3], [4, 5, 6], [5, 6, 7], [8, 9, 10]])
1280
+ X = array_type(X)
1281
+ column_transformer = ColumnTransformer(
1282
+ [("identity", FunctionTransformer(), [False, True, False, True])]
1283
+ )
1284
+ X_trans = column_transformer.fit_transform(X)
1285
+ assert X_trans.shape == (3, 2)
1286
+
1287
+
1288
+ def test_n_features_in():
1289
+ # make sure n_features_in is what is passed as input to the column
1290
+ # transformer.
1291
+
1292
+ X = [[1, 2], [3, 4], [5, 6]]
1293
+ ct = ColumnTransformer([("a", DoubleTrans(), [0]), ("b", DoubleTrans(), [1])])
1294
+ assert not hasattr(ct, "n_features_in_")
1295
+ ct.fit(X)
1296
+ assert ct.n_features_in_ == 2
1297
+
1298
+
1299
+ @pytest.mark.parametrize(
+     "cols, pattern, include, exclude",
+     [
+         (["col_int", "col_float"], None, np.number, None),
+         (["col_int", "col_float"], None, None, object),
+         (["col_int", "col_float"], None, [int, float], None),
+         (["col_str"], None, [object], None),
+         (["col_str"], None, object, None),
+         (["col_float"], None, float, None),
+         (["col_float"], "at$", [np.number], None),
+         (["col_int"], None, [int], None),
+         (["col_int"], "^col_int", [np.number], None),
+         (["col_float", "col_str"], "float|str", None, None),
+         (["col_str"], "^col_s", None, [int]),
+         ([], "str$", float, None),
+         (["col_int", "col_float", "col_str"], None, [np.number, object], None),
+     ],
+ )
+ def test_make_column_selector_with_select_dtypes(cols, pattern, include, exclude):
+     pd = pytest.importorskip("pandas")
+
+     X_df = pd.DataFrame(
+         {
+             "col_int": np.array([0, 1, 2], dtype=int),
+             "col_float": np.array([0.0, 1.0, 2.0], dtype=float),
+             "col_str": ["one", "two", "three"],
+         },
+         columns=["col_int", "col_float", "col_str"],
+     )
+
+     selector = make_column_selector(
+         dtype_include=include, dtype_exclude=exclude, pattern=pattern
+     )
+
+     assert_array_equal(selector(X_df), cols)
+
+
+ def test_column_transformer_with_make_column_selector():
+     # Functional test for column transformer + column selector
+     pd = pytest.importorskip("pandas")
+     X_df = pd.DataFrame(
+         {
+             "col_int": np.array([0, 1, 2], dtype=int),
+             "col_float": np.array([0.0, 1.0, 2.0], dtype=float),
+             "col_cat": ["one", "two", "one"],
+             "col_str": ["low", "middle", "high"],
+         },
+         columns=["col_int", "col_float", "col_cat", "col_str"],
+     )
+     X_df["col_str"] = X_df["col_str"].astype("category")
+
+     cat_selector = make_column_selector(dtype_include=["category", object])
+     num_selector = make_column_selector(dtype_include=np.number)
+
+     ohe = OneHotEncoder()
+     scaler = StandardScaler()
+
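+     # selector-based and explicit column specifications must give the same result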
+     ct_selector = make_column_transformer((ohe, cat_selector), (scaler, num_selector))
+     ct_direct = make_column_transformer(
+         (ohe, ["col_cat", "col_str"]), (scaler, ["col_float", "col_int"])
+     )
+
+     X_selector = ct_selector.fit_transform(X_df)
+     X_direct = ct_direct.fit_transform(X_df)
+
+     assert_allclose(X_selector, X_direct)
+
+
+ def test_make_column_selector_error():
+     selector = make_column_selector(dtype_include=np.number)
+     X = np.array([[0.1, 0.2]])
+     msg = "make_column_selector can only be applied to pandas dataframes"
+     with pytest.raises(ValueError, match=msg):
+         selector(X)
+
+
+ def test_make_column_selector_pickle():
+     pd = pytest.importorskip("pandas")
+
+     X_df = pd.DataFrame(
+         {
+             "col_int": np.array([0, 1, 2], dtype=int),
+             "col_float": np.array([0.0, 1.0, 2.0], dtype=float),
+             "col_str": ["one", "two", "three"],
+         },
+         columns=["col_int", "col_float", "col_str"],
+     )
+
+     selector = make_column_selector(dtype_include=[object])
+     selector_picked = pickle.loads(pickle.dumps(selector))
+
+     assert_array_equal(selector(X_df), selector_picked(X_df))
+
+
+ @pytest.mark.parametrize(
+     "empty_col",
+     [[], np.array([], dtype=int), lambda x: []],
+     ids=["list", "array", "callable"],
+ )
+ def test_feature_names_empty_columns(empty_col):
+     pd = pytest.importorskip("pandas")
+
+     df = pd.DataFrame({"col1": ["a", "a", "b"], "col2": ["z", "z", "z"]})
+
+     ct = ColumnTransformer(
+         transformers=[
+             ("ohe", OneHotEncoder(), ["col1", "col2"]),
+             ("empty_features", OneHotEncoder(), empty_col),
+         ],
+     )
+
+     ct.fit(df)
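+     # the transformer fitted on an empty selection contributes no feature names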
+     assert_array_equal(
+         ct.get_feature_names_out(), ["ohe__col1_a", "ohe__col1_b", "ohe__col2_z"]
+     )
+
+
+ @pytest.mark.parametrize(
+     "selector",
+     [
+         [1],
+         lambda x: [1],
+         ["col2"],
+         lambda x: ["col2"],
+         [False, True],
+         lambda x: [False, True],
+     ],
+ )
+ def test_feature_names_out_pandas(selector):
+     """Checks name when selecting only the second column"""
+     pd = pytest.importorskip("pandas")
+     df = pd.DataFrame({"col1": ["a", "a", "b"], "col2": ["z", "z", "z"]})
+     ct = ColumnTransformer([("ohe", OneHotEncoder(), selector)])
+     ct.fit(df)
+
+     assert_array_equal(ct.get_feature_names_out(), ["ohe__col2_z"])
+
+
+ @pytest.mark.parametrize(
+     "selector", [[1], lambda x: [1], [False, True], lambda x: [False, True]]
+ )
+ def test_feature_names_out_non_pandas(selector):
+     """Checks name when selecting the second column with numpy array"""
+     X = [["a", "z"], ["a", "z"], ["b", "z"]]
+     ct = ColumnTransformer([("ohe", OneHotEncoder(), selector)])
+     ct.fit(X)
+
+     assert_array_equal(ct.get_feature_names_out(), ["ohe__x1_z"])
+
+
+ @pytest.mark.parametrize("remainder", ["passthrough", StandardScaler()])
1450
+ def test_sk_visual_block_remainder(remainder):
1451
+ # remainder='passthrough' or an estimator will be shown in repr_html
1452
+ ohe = OneHotEncoder()
1453
+ ct = ColumnTransformer(
1454
+ transformers=[("ohe", ohe, ["col1", "col2"])], remainder=remainder
1455
+ )
1456
+ visual_block = ct._sk_visual_block_()
1457
+ assert visual_block.names == ("ohe", "remainder")
1458
+ assert visual_block.name_details == (["col1", "col2"], "")
1459
+ assert visual_block.estimators == (ohe, remainder)
1460
+
1461
+
1462
+ def test_sk_visual_block_remainder_drop():
1463
+ # remainder='drop' is not shown in repr_html
1464
+ ohe = OneHotEncoder()
1465
+ ct = ColumnTransformer(transformers=[("ohe", ohe, ["col1", "col2"])])
1466
+ visual_block = ct._sk_visual_block_()
1467
+ assert visual_block.names == ("ohe",)
1468
+ assert visual_block.name_details == (["col1", "col2"],)
1469
+ assert visual_block.estimators == (ohe,)
1470
+
1471
+
1472
+ @pytest.mark.parametrize("remainder", ["passthrough", StandardScaler()])
1473
+ def test_sk_visual_block_remainder_fitted_pandas(remainder):
1474
+ # Remainder shows the columns after fitting
1475
+ pd = pytest.importorskip("pandas")
1476
+ ohe = OneHotEncoder()
1477
+ ct = ColumnTransformer(
1478
+ transformers=[("ohe", ohe, ["col1", "col2"])], remainder=remainder
1479
+ )
1480
+ df = pd.DataFrame(
1481
+ {
1482
+ "col1": ["a", "b", "c"],
1483
+ "col2": ["z", "z", "z"],
1484
+ "col3": [1, 2, 3],
1485
+ "col4": [3, 4, 5],
1486
+ }
1487
+ )
1488
+ ct.fit(df)
1489
+ visual_block = ct._sk_visual_block_()
1490
+ assert visual_block.names == ("ohe", "remainder")
1491
+ assert visual_block.name_details == (["col1", "col2"], ["col3", "col4"])
1492
+ assert visual_block.estimators == (ohe, remainder)
1493
+
1494
+
1495
+ @pytest.mark.parametrize("remainder", ["passthrough", StandardScaler()])
1496
+ def test_sk_visual_block_remainder_fitted_numpy(remainder):
1497
+ # Remainder shows the indices after fitting
1498
+ X = np.array([[1, 2, 3], [4, 5, 6]], dtype=float)
1499
+ scaler = StandardScaler()
1500
+ ct = ColumnTransformer(
1501
+ transformers=[("scale", scaler, [0, 2])], remainder=remainder
1502
+ )
1503
+ ct.fit(X)
1504
+ visual_block = ct._sk_visual_block_()
1505
+ assert visual_block.names == ("scale", "remainder")
1506
+ assert visual_block.name_details == ([0, 2], [1])
1507
+ assert visual_block.estimators == (scaler, remainder)
1508
+
1509
+
1510
+ @pytest.mark.parametrize("explicit_colname", ["first", "second", 0, 1])
1511
+ @pytest.mark.parametrize("remainder", [Trans(), "passthrough", "drop"])
1512
+ def test_column_transformer_reordered_column_names_remainder(
1513
+ explicit_colname, remainder
1514
+ ):
1515
+ """Test the interaction between remainder and column transformer"""
1516
+ pd = pytest.importorskip("pandas")
1517
+
1518
+ X_fit_array = np.array([[0, 1, 2], [2, 4, 6]]).T
1519
+ X_fit_df = pd.DataFrame(X_fit_array, columns=["first", "second"])
1520
+
1521
+ X_trans_array = np.array([[2, 4, 6], [0, 1, 2]]).T
1522
+ X_trans_df = pd.DataFrame(X_trans_array, columns=["second", "first"])
1523
+
1524
+ tf = ColumnTransformer([("bycol", Trans(), explicit_colname)], remainder=remainder)
1525
+
1526
+ tf.fit(X_fit_df)
1527
+ X_fit_trans = tf.transform(X_fit_df)
1528
+
1529
+ # Changing the order still works
1530
+ X_trans = tf.transform(X_trans_df)
1531
+ assert_allclose(X_trans, X_fit_trans)
1532
+
1533
+ # extra columns are ignored
1534
+ X_extended_df = X_fit_df.copy()
1535
+ X_extended_df["third"] = [3, 6, 9]
1536
+ X_trans = tf.transform(X_extended_df)
1537
+ assert_allclose(X_trans, X_fit_trans)
1538
+
1539
+ if isinstance(explicit_colname, str):
1540
+ # Raise error if columns are specified by names but input only allows
1541
+ # to specify by position, e.g. numpy array instead of a pandas df.
1542
+ X_array = X_fit_array.copy()
1543
+ err_msg = "Specifying the columns"
1544
+ with pytest.raises(ValueError, match=err_msg):
1545
+ tf.transform(X_array)
1546
+
1547
+
1548
+ def test_feature_name_validation_missing_columns_drop_passthough():
+     """Test the interaction between {'drop', 'passthrough'} and
+     missing column names."""
+     pd = pytest.importorskip("pandas")
+
+     X = np.ones(shape=(3, 4))
+     df = pd.DataFrame(X, columns=["a", "b", "c", "d"])
+
+     df_dropped = df.drop("c", axis=1)
+
+     # with remainder='passthrough', all columns seen during `fit` must be
+     # present
+     tf = ColumnTransformer([("bycol", Trans(), [1])], remainder="passthrough")
+     tf.fit(df)
+     msg = r"columns are missing: {'c'}"
+     with pytest.raises(ValueError, match=msg):
+         tf.transform(df_dropped)
+
+     # with remainder='drop', it is allowed to have column 'c' missing
+     tf = ColumnTransformer([("bycol", Trans(), [1])], remainder="drop")
+     tf.fit(df)
+
+     df_dropped_trans = tf.transform(df_dropped)
+     df_fit_trans = tf.transform(df)
+     assert_allclose(df_dropped_trans, df_fit_trans)
+
+     # bycol drops 'c', thus it is allowed for 'c' to be missing
+     tf = ColumnTransformer([("bycol", "drop", ["c"])], remainder="passthrough")
+     tf.fit(df)
+     df_dropped_trans = tf.transform(df_dropped)
+     df_fit_trans = tf.transform(df)
+     assert_allclose(df_dropped_trans, df_fit_trans)
+
+
+ def test_feature_names_in_():
+     """Feature names are stored in column transformer.
+
+     Column transformer deliberately does not check for column name consistency.
+     It only checks that the non-dropped names seen in `fit` are seen
+     in `transform`. This behavior is already tested in
+     `test_feature_name_validation_missing_columns_drop_passthough`"""
+
+     pd = pytest.importorskip("pandas")
+
+     feature_names = ["a", "c", "d"]
+     df = pd.DataFrame([[1, 2, 3]], columns=feature_names)
+     ct = ColumnTransformer([("bycol", Trans(), ["a", "d"])], remainder="passthrough")
+
+     ct.fit(df)
+     assert_array_equal(ct.feature_names_in_, feature_names)
+     assert isinstance(ct.feature_names_in_, np.ndarray)
+     assert ct.feature_names_in_.dtype == object
+
+
+ class TransWithNames(Trans):
+     def __init__(self, feature_names_out=None):
+         self.feature_names_out = feature_names_out
+
+     def get_feature_names_out(self, input_features=None):
+         if self.feature_names_out is not None:
+             return np.asarray(self.feature_names_out, dtype=object)
+         return input_features
+
+
+ @pytest.mark.parametrize(
+     "transformers, remainder, expected_names",
+     [
+         (
+             [
+                 ("bycol1", TransWithNames(), ["d", "c"]),
+                 ("bycol2", "passthrough", ["d"]),
+             ],
+             "passthrough",
+             ["bycol1__d", "bycol1__c", "bycol2__d", "remainder__a", "remainder__b"],
+         ),
+         (
+             [
+                 ("bycol1", TransWithNames(), ["d", "c"]),
+                 ("bycol2", "passthrough", ["d"]),
+             ],
+             "drop",
+             ["bycol1__d", "bycol1__c", "bycol2__d"],
+         ),
+         (
+             [
+                 ("bycol1", TransWithNames(), ["b"]),
+                 ("bycol2", "drop", ["d"]),
+             ],
+             "passthrough",
+             ["bycol1__b", "remainder__a", "remainder__c"],
+         ),
+         (
+             [
+                 ("bycol1", TransWithNames(["pca1", "pca2"]), ["a", "b", "d"]),
+             ],
+             "passthrough",
+             ["bycol1__pca1", "bycol1__pca2", "remainder__c"],
+         ),
+         (
+             [
+                 ("bycol1", TransWithNames(["a", "b"]), ["d"]),
+                 ("bycol2", "passthrough", ["b"]),
+             ],
+             "drop",
+             ["bycol1__a", "bycol1__b", "bycol2__b"],
+         ),
+         (
+             [
+                 ("bycol1", TransWithNames([f"pca{i}" for i in range(2)]), ["b"]),
+                 ("bycol2", TransWithNames([f"pca{i}" for i in range(2)]), ["b"]),
+             ],
+             "passthrough",
+             [
+                 "bycol1__pca0",
+                 "bycol1__pca1",
+                 "bycol2__pca0",
+                 "bycol2__pca1",
+                 "remainder__a",
+                 "remainder__c",
+                 "remainder__d",
+             ],
+         ),
+         (
+             [
+                 ("bycol1", "drop", ["d"]),
+             ],
+             "drop",
+             [],
+         ),
+         (
+             [
+                 ("bycol1", TransWithNames(), slice(1, 3)),
+             ],
+             "drop",
+             ["bycol1__b", "bycol1__c"],
+         ),
+         (
+             [
+                 ("bycol1", TransWithNames(), ["b"]),
+                 ("bycol2", "drop", slice(3, 4)),
+             ],
+             "passthrough",
+             ["bycol1__b", "remainder__a", "remainder__c"],
+         ),
+         (
+             [
+                 ("bycol1", TransWithNames(), ["d", "c"]),
+                 ("bycol2", "passthrough", slice(3, 4)),
+             ],
+             "passthrough",
+             ["bycol1__d", "bycol1__c", "bycol2__d", "remainder__a", "remainder__b"],
+         ),
+         (
+             [
+                 ("bycol1", TransWithNames(), slice("b", "c")),
+             ],
+             "drop",
+             ["bycol1__b", "bycol1__c"],
+         ),
+         (
+             [
+                 ("bycol1", TransWithNames(), ["b"]),
+                 ("bycol2", "drop", slice("c", "d")),
+             ],
+             "passthrough",
+             ["bycol1__b", "remainder__a"],
+         ),
+         (
+             [
+                 ("bycol1", TransWithNames(), ["d", "c"]),
+                 ("bycol2", "passthrough", slice("c", "d")),
+             ],
+             "passthrough",
+             [
+                 "bycol1__d",
+                 "bycol1__c",
+                 "bycol2__c",
+                 "bycol2__d",
+                 "remainder__a",
+                 "remainder__b",
+             ],
+         ),
+     ],
+ )
+ def test_verbose_feature_names_out_true(transformers, remainder, expected_names):
+     """Check feature_names_out for verbose_feature_names_out=True (default)"""
+     pd = pytest.importorskip("pandas")
+     df = pd.DataFrame([[1, 2, 3, 4]], columns=["a", "b", "c", "d"])
+     ct = ColumnTransformer(
+         transformers,
+         remainder=remainder,
+     )
+     ct.fit(df)
+
+     names = ct.get_feature_names_out()
+     assert isinstance(names, np.ndarray)
+     assert names.dtype == object
+     assert_array_equal(names, expected_names)
+
+
+ @pytest.mark.parametrize(
+     "transformers, remainder, expected_names",
+     [
+         (
+             [
+                 ("bycol1", TransWithNames(), ["d", "c"]),
+                 ("bycol2", "passthrough", ["a"]),
+             ],
+             "passthrough",
+             ["d", "c", "a", "b"],
+         ),
+         (
+             [
+                 ("bycol1", TransWithNames(["a"]), ["d", "c"]),
+                 ("bycol2", "passthrough", ["d"]),
+             ],
+             "drop",
+             ["a", "d"],
+         ),
+         (
+             [
+                 ("bycol1", TransWithNames(), ["b"]),
+                 ("bycol2", "drop", ["d"]),
+             ],
+             "passthrough",
+             ["b", "a", "c"],
+         ),
+         (
+             [
+                 ("bycol1", TransWithNames(["pca1", "pca2"]), ["a", "b", "d"]),
+             ],
+             "passthrough",
+             ["pca1", "pca2", "c"],
+         ),
+         (
+             [
+                 ("bycol1", TransWithNames(["a", "c"]), ["d"]),
+                 ("bycol2", "passthrough", ["d"]),
+             ],
+             "drop",
+             ["a", "c", "d"],
+         ),
+         (
+             [
+                 ("bycol1", TransWithNames([f"pca{i}" for i in range(2)]), ["b"]),
+                 ("bycol2", TransWithNames([f"kpca{i}" for i in range(2)]), ["b"]),
+             ],
+             "passthrough",
+             ["pca0", "pca1", "kpca0", "kpca1", "a", "c", "d"],
+         ),
+         (
+             [
+                 ("bycol1", "drop", ["d"]),
+             ],
+             "drop",
+             [],
+         ),
+         (
+             [
+                 ("bycol1", TransWithNames(), slice(1, 2)),
+                 ("bycol2", "drop", ["d"]),
+             ],
+             "passthrough",
+             ["b", "a", "c"],
+         ),
+         (
+             [
+                 ("bycol1", TransWithNames(), ["b"]),
+                 ("bycol2", "drop", slice(3, 4)),
+             ],
+             "passthrough",
+             ["b", "a", "c"],
+         ),
+         (
+             [
+                 ("bycol1", TransWithNames(), ["d", "c"]),
+                 ("bycol2", "passthrough", slice(0, 2)),
+             ],
+             "drop",
+             ["d", "c", "a", "b"],
+         ),
+         (
+             [
+                 ("bycol1", TransWithNames(), slice("a", "b")),
+                 ("bycol2", "drop", ["d"]),
+             ],
+             "passthrough",
+             ["a", "b", "c"],
+         ),
+         (
+             [
+                 ("bycol1", TransWithNames(), ["b"]),
+                 ("bycol2", "drop", slice("c", "d")),
+             ],
+             "passthrough",
+             ["b", "a"],
+         ),
+         (
+             [
+                 ("bycol1", TransWithNames(), ["d", "c"]),
+                 ("bycol2", "passthrough", slice("a", "b")),
+             ],
+             "drop",
+             ["d", "c", "a", "b"],
+         ),
+         (
+             [
+                 ("bycol1", TransWithNames(), ["d", "c"]),
+                 ("bycol2", "passthrough", slice("b", "b")),
+             ],
+             "drop",
+             ["d", "c", "b"],
+         ),
+     ],
+ )
+ def test_verbose_feature_names_out_false(transformers, remainder, expected_names):
+     """Check feature_names_out for verbose_feature_names_out=False"""
+     pd = pytest.importorskip("pandas")
+     df = pd.DataFrame([[1, 2, 3, 4]], columns=["a", "b", "c", "d"])
+     ct = ColumnTransformer(
+         transformers,
+         remainder=remainder,
+         verbose_feature_names_out=False,
+     )
+     ct.fit(df)
+
+     names = ct.get_feature_names_out()
+     assert isinstance(names, np.ndarray)
+     assert names.dtype == object
+     assert_array_equal(names, expected_names)
+
+
+ @pytest.mark.parametrize(
+     "transformers, remainder, colliding_columns",
+     [
+         (
+             [
+                 ("bycol1", TransWithNames(), ["b"]),
+                 ("bycol2", "passthrough", ["b"]),
+             ],
+             "drop",
+             "['b']",
+         ),
+         (
+             [
+                 ("bycol1", TransWithNames(["c", "d"]), ["c"]),
+                 ("bycol2", "passthrough", ["c"]),
+             ],
+             "drop",
+             "['c']",
+         ),
+         (
+             [
+                 ("bycol1", TransWithNames(["a"]), ["b"]),
+                 ("bycol2", "passthrough", ["b"]),
+             ],
+             "passthrough",
+             "['a']",
+         ),
+         (
+             [
+                 ("bycol1", TransWithNames(["a"]), ["b"]),
+                 ("bycol2", "drop", ["b"]),
+             ],
+             "passthrough",
+             "['a']",
+         ),
+         (
+             [
+                 ("bycol1", TransWithNames(["c", "b"]), ["b"]),
+                 ("bycol2", "passthrough", ["c", "b"]),
+             ],
+             "drop",
+             "['b', 'c']",
+         ),
+         (
+             [
+                 ("bycol1", TransWithNames(["a"]), ["b"]),
+                 ("bycol2", "passthrough", ["a"]),
+                 ("bycol3", TransWithNames(["a"]), ["b"]),
+             ],
+             "passthrough",
+             "['a']",
+         ),
+         (
+             [
+                 ("bycol1", TransWithNames(["a", "b"]), ["b"]),
+                 ("bycol2", "passthrough", ["a"]),
+                 ("bycol3", TransWithNames(["b"]), ["c"]),
+             ],
+             "passthrough",
+             "['a', 'b']",
+         ),
+         (
+             [
+                 ("bycol1", TransWithNames([f"pca{i}" for i in range(6)]), ["b"]),
+                 ("bycol2", TransWithNames([f"pca{i}" for i in range(6)]), ["b"]),
+             ],
+             "passthrough",
+             "['pca0', 'pca1', 'pca2', 'pca3', 'pca4', ...]",
+         ),
+         (
+             [
+                 ("bycol1", TransWithNames(["a", "b"]), slice(1, 2)),
+                 ("bycol2", "passthrough", ["a"]),
+                 ("bycol3", TransWithNames(["b"]), ["c"]),
+             ],
+             "passthrough",
+             "['a', 'b']",
+         ),
+         (
+             [
+                 ("bycol1", TransWithNames(["a", "b"]), ["b"]),
+                 ("bycol2", "passthrough", slice(0, 1)),
+                 ("bycol3", TransWithNames(["b"]), ["c"]),
+             ],
+             "passthrough",
+             "['a', 'b']",
+         ),
+         (
+             [
+                 ("bycol1", TransWithNames(["a", "b"]), slice("b", "c")),
+                 ("bycol2", "passthrough", ["a"]),
+                 ("bycol3", TransWithNames(["b"]), ["c"]),
+             ],
+             "passthrough",
+             "['a', 'b']",
+         ),
+         (
+             [
+                 ("bycol1", TransWithNames(["a", "b"]), ["b"]),
+                 ("bycol2", "passthrough", slice("a", "a")),
+                 ("bycol3", TransWithNames(["b"]), ["c"]),
+             ],
+             "passthrough",
+             "['a', 'b']",
+         ),
+     ],
+ )
+ def test_verbose_feature_names_out_false_errors(
+     transformers, remainder, colliding_columns
+ ):
+     """Check that verbose_feature_names_out=False raises on colliding feature names"""
+
+     pd = pytest.importorskip("pandas")
+     df = pd.DataFrame([[1, 2, 3, 4]], columns=["a", "b", "c", "d"])
+     ct = ColumnTransformer(
+         transformers,
+         remainder=remainder,
+         verbose_feature_names_out=False,
+     )
+     ct.fit(df)
+
+     msg = re.escape(
+         f"Output feature names: {colliding_columns} are not unique. Please set "
+         "verbose_feature_names_out=True to add prefixes to feature names"
+     )
+     with pytest.raises(ValueError, match=msg):
+         ct.get_feature_names_out()
+
+
+ @pytest.mark.parametrize("verbose_feature_names_out", [True, False])
2010
+ @pytest.mark.parametrize("remainder", ["drop", "passthrough"])
2011
+ def test_column_transformer_set_output(verbose_feature_names_out, remainder):
2012
+ """Check column transformer behavior with set_output."""
2013
+ pd = pytest.importorskip("pandas")
2014
+ df = pd.DataFrame([[1, 2, 3, 4]], columns=["a", "b", "c", "d"], index=[10])
2015
+ ct = ColumnTransformer(
2016
+ [("first", TransWithNames(), ["a", "c"]), ("second", TransWithNames(), ["d"])],
2017
+ remainder=remainder,
2018
+ verbose_feature_names_out=verbose_feature_names_out,
2019
+ )
2020
+ X_trans = ct.fit_transform(df)
2021
+ assert isinstance(X_trans, np.ndarray)
2022
+
2023
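+     # after switching the output container to pandas, transform returns a DataFrame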
+ ct.set_output(transform="pandas")
2024
+
2025
+ df_test = pd.DataFrame([[1, 2, 3, 4]], columns=df.columns, index=[20])
2026
+ X_trans = ct.transform(df_test)
2027
+ assert isinstance(X_trans, pd.DataFrame)
2028
+
2029
+ feature_names_out = ct.get_feature_names_out()
2030
+ assert_array_equal(X_trans.columns, feature_names_out)
2031
+ assert_array_equal(X_trans.index, df_test.index)
2032
+
2033
+
2034
+ @pytest.mark.parametrize("remainder", ["drop", "passthrough"])
2035
+ @pytest.mark.parametrize("fit_transform", [True, False])
2036
+ def test_column_transform_set_output_mixed(remainder, fit_transform):
2037
+ """Check ColumnTransformer outputs mixed types correctly."""
2038
+ pd = pytest.importorskip("pandas")
2039
+ df = pd.DataFrame(
2040
+ {
2041
+ "pet": pd.Series(["dog", "cat", "snake"], dtype="category"),
2042
+ "color": pd.Series(["green", "blue", "red"], dtype="object"),
2043
+ "age": [1.4, 2.1, 4.4],
2044
+ "height": [20, 40, 10],
2045
+ "distance": pd.Series([20, pd.NA, 100], dtype="Int32"),
2046
+ }
2047
+ )
2048
+ ct = ColumnTransformer(
2049
+ [
2050
+ (
2051
+ "color_encode",
2052
+ OneHotEncoder(sparse_output=False, dtype="int8"),
2053
+ ["color"],
2054
+ ),
2055
+ ("age", StandardScaler(), ["age"]),
2056
+ ],
2057
+ remainder=remainder,
2058
+ verbose_feature_names_out=False,
2059
+ ).set_output(transform="pandas")
2060
+ if fit_transform:
2061
+ X_trans = ct.fit_transform(df)
2062
+ else:
2063
+ X_trans = ct.fit(df).transform(df)
2064
+
2065
+ assert isinstance(X_trans, pd.DataFrame)
2066
+ assert_array_equal(X_trans.columns, ct.get_feature_names_out())
2067
+
2068
+ expected_dtypes = {
2069
+ "color_blue": "int8",
2070
+ "color_green": "int8",
2071
+ "color_red": "int8",
2072
+ "age": "float64",
2073
+ "pet": "category",
2074
+ "height": "int64",
2075
+ "distance": "Int32",
2076
+ }
2077
+ for col, dtype in X_trans.dtypes.items():
2078
+ assert dtype == expected_dtypes[col]
2079
+
2080
+
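+ # Why the dtypes above survive: with pandas output each fitted transformer
+ # returns its own DataFrame and ColumnTransformer concatenates them
+ # column-wise, so pandas-native dtypes such as 'category' or nullable
+ # 'Int32' pass through untouched instead of being funneled through a single
+ # numpy array. Minimal illustration (the toy column is an assumption):
+ def _sketch_dtype_preservation():
+     pd = pytest.importorskip("pandas")
+     df = pd.DataFrame({"pet": pd.Series(["dog", "cat"], dtype="category")})
+     ct = ColumnTransformer(
+         [("keep", "passthrough", ["pet"])], verbose_feature_names_out=False
+     ).set_output(transform="pandas")
+     out = ct.fit_transform(df)
+     assert out["pet"].dtype == "category"
+     return out
+
+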
+ @pytest.mark.parametrize("remainder", ["drop", "passthrough"])
+ def test_column_transform_set_output_after_fitting(remainder):
+     pd = pytest.importorskip("pandas")
+     df = pd.DataFrame(
+         {
+             "pet": pd.Series(["dog", "cat", "snake"], dtype="category"),
+             "age": [1.4, 2.1, 4.4],
+             "height": [20, 40, 10],
+         }
+     )
+     ct = ColumnTransformer(
+         [
+             (
+                 "color_encode",
+                 OneHotEncoder(sparse_output=False, dtype="int16"),
+                 ["pet"],
+             ),
+             ("age", StandardScaler(), ["age"]),
+         ],
+         remainder=remainder,
+         verbose_feature_names_out=False,
+     )
+
+     # fit without calling set_output
+     X_trans = ct.fit_transform(df)
+     assert isinstance(X_trans, np.ndarray)
+     assert X_trans.dtype == "float64"
+
+     ct.set_output(transform="pandas")
+     X_trans_df = ct.transform(df)
+     expected_dtypes = {
+         "pet_cat": "int16",
+         "pet_dog": "int16",
+         "pet_snake": "int16",
+         "height": "int64",
+         "age": "float64",
+     }
+     for col, dtype in X_trans_df.dtypes.items():
+         assert dtype == expected_dtypes[col]
+
+
+ # PandasOutTransformer that does not define get_feature_names_out and always
+ # expects the input to be a DataFrame.
+ class PandasOutTransformer(BaseEstimator):
+     def __init__(self, offset=1.0):
+         self.offset = offset
+
+     def fit(self, X, y=None):
+         pd = pytest.importorskip("pandas")
+         assert isinstance(X, pd.DataFrame)
+         return self
+
+     def transform(self, X, y=None):
+         pd = pytest.importorskip("pandas")
+         assert isinstance(X, pd.DataFrame)
+         return X - self.offset
+
+     def set_output(self, transform=None):
+         # This transformer will always output a DataFrame regardless of the
+         # configuration.
+         return self
+
+
+ @pytest.mark.parametrize(
+     "trans_1, expected_verbose_names, expected_non_verbose_names",
+     [
+         (
+             PandasOutTransformer(offset=2.0),
+             ["trans_0__feat1", "trans_1__feat0"],
+             ["feat1", "feat0"],
+         ),
+         (
+             "drop",
+             ["trans_0__feat1"],
+             ["feat1"],
+         ),
+         (
+             "passthrough",
+             ["trans_0__feat1", "trans_1__feat0"],
+             ["feat1", "feat0"],
+         ),
+     ],
+ )
+ def test_transformers_with_pandas_out_but_not_feature_names_out(
+     trans_1, expected_verbose_names, expected_non_verbose_names
+ ):
+     """Check that set_config(transform="pandas") is compatible with more transformers.
+
+     Specifically, when a transformer returns a DataFrame but does not define
+     `get_feature_names_out`.
+     """
+     pd = pytest.importorskip("pandas")
+
+     X_df = pd.DataFrame({"feat0": [1.0, 2.0, 3.0], "feat1": [2.0, 3.0, 4.0]})
+     ct = ColumnTransformer(
+         [
+             ("trans_0", PandasOutTransformer(offset=3.0), ["feat1"]),
+             ("trans_1", trans_1, ["feat0"]),
+         ]
+     )
+     X_trans_np = ct.fit_transform(X_df)
+     assert isinstance(X_trans_np, np.ndarray)
+
+     # `ct` does not have `get_feature_names_out` because `PandasOutTransformer`
+     # does not define the method.
+     with pytest.raises(AttributeError, match="not provide get_feature_names_out"):
+         ct.get_feature_names_out()
+
+     # The feature names are prefixed because verbose_feature_names_out=True is
+     # the default.
+     ct.set_output(transform="pandas")
+     X_trans_df0 = ct.fit_transform(X_df)
+     assert_array_equal(X_trans_df0.columns, expected_verbose_names)
+
+     ct.set_params(verbose_feature_names_out=False)
+     X_trans_df1 = ct.fit_transform(X_df)
+     assert_array_equal(X_trans_df1.columns, expected_non_verbose_names)
+
+
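+ # The key point above in isolation: when a transformer already returns a
+ # DataFrame, ColumnTransformer keeps that frame and only (optionally)
+ # prefixes its column names, so no get_feature_names_out is needed for
+ # pandas output. PandasOutTransformer is reused here purely to illustrate.
+ def _sketch_pandas_out_renaming():
+     pd = pytest.importorskip("pandas")
+     X_df = pd.DataFrame({"feat0": [1.0, 2.0]})
+     ct = ColumnTransformer([("trans_0", PandasOutTransformer(), ["feat0"])])
+     ct.set_output(transform="pandas")
+     out = ct.fit_transform(X_df)
+     assert list(out.columns) == ["trans_0__feat0"]
+     return out
+
+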
+ @pytest.mark.parametrize(
+     "empty_selection",
+     [[], np.array([False, False]), [False, False]],
+     ids=["list", "bool", "bool_int"],
+ )
+ def test_empty_selection_pandas_output(empty_selection):
+     """Check that pandas output works when there is an empty selection.
+
+     Non-regression test for gh-25487
+     """
+     pd = pytest.importorskip("pandas")
+
+     X = pd.DataFrame([[1.0, 2.2], [3.0, 1.0]], columns=["a", "b"])
+     ct = ColumnTransformer(
+         [
+             ("categorical", "passthrough", empty_selection),
+             ("numerical", StandardScaler(), ["a", "b"]),
+         ],
+         verbose_feature_names_out=True,
+     )
+     ct.set_output(transform="pandas")
+     X_out = ct.fit_transform(X)
+     assert_array_equal(X_out.columns, ["numerical__a", "numerical__b"])
+
+     ct.set_params(verbose_feature_names_out=False)
+     X_out = ct.fit_transform(X)
+     assert_array_equal(X_out.columns, ["a", "b"])
+
+
+ def test_raise_error_if_index_not_aligned():
+     """Check column transformer raises error if indices are not aligned.
+
+     Non-regression test for gh-26210.
+     """
+     pd = pytest.importorskip("pandas")
+
+     X = pd.DataFrame([[1.0, 2.2], [3.0, 1.0]], columns=["a", "b"], index=[8, 3])
+     reset_index_transformer = FunctionTransformer(
+         lambda x: x.reset_index(drop=True), feature_names_out="one-to-one"
+     )
+
+     ct = ColumnTransformer(
+         [
+             ("num1", "passthrough", ["a"]),
+             ("num2", reset_index_transformer, ["b"]),
+         ],
+     )
+     ct.set_output(transform="pandas")
+     msg = (
+         "Concatenating DataFrames from the transformer's output lead to"
+         " an inconsistent number of samples. The output may have Pandas"
+         " Indexes that do not match."
+     )
+     with pytest.raises(ValueError, match=msg):
+         ct.fit_transform(X)
+
+
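+ # Root cause of the error above, reduced to plain pandas: column-wise
+ # concatenation aligns on the index, so mismatched indexes silently
+ # outer-join into extra NaN-padded rows instead of failing -- hence the
+ # explicit sample-count check in ColumnTransformer.
+ def _sketch_index_misalignment():
+     pd = pytest.importorskip("pandas")
+     left = pd.DataFrame({"a": [1.0, 3.0]}, index=[8, 3])
+     right = pd.DataFrame({"b": [2.2, 1.0]})  # default index [0, 1]
+     # 4 rows instead of 2: the two indexes were unioned.
+     assert pd.concat([left, right], axis=1).shape == (4, 2)
+
+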
+ def test_remainder_set_output():
+     """Check that the output is set for the remainder.
+
+     Non-regression test for #26306.
+     """
+
+     pd = pytest.importorskip("pandas")
+     df = pd.DataFrame({"a": [True, False, True], "b": [1, 2, 3]})
+
+     ct = make_column_transformer(
+         (VarianceThreshold(), make_column_selector(dtype_include=bool)),
+         remainder=VarianceThreshold(),
+         verbose_feature_names_out=False,
+     )
+     ct.set_output(transform="pandas")
+
+     out = ct.fit_transform(df)
+     pd.testing.assert_frame_equal(out, df)
+
+     ct.set_output(transform="default")
+     out = ct.fit_transform(df)
+     assert isinstance(out, np.ndarray)
+
+
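+ # make_column_selector, used above, resolves columns by dtype only at fit
+ # time; on its own it is just a callable on the frame. Minimal illustration
+ # (the toy frame is an assumption):
+ def _sketch_column_selector():
+     pd = pytest.importorskip("pandas")
+     df = pd.DataFrame({"a": [True, False], "b": [1, 2]})
+     select_bool = make_column_selector(dtype_include=bool)
+     assert select_bool(df) == ["a"]
+
+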
+ # TODO(1.6): replace the warning by a ValueError exception
+ def test_transform_pd_na():
+     """Check behavior when a transformer's output contains pandas.NA
+
+     It should emit a warning unless the output config is set to 'pandas'.
+     """
+     pd = pytest.importorskip("pandas")
+     if not hasattr(pd, "Float64Dtype"):
+         pytest.skip(
+             "The issue with pd.NA tested here does not happen in old versions that do"
+             " not have the extension dtypes"
+         )
+     df = pd.DataFrame({"a": [1.5, None]})
+     ct = make_column_transformer(("passthrough", ["a"]))
+     # No warning with non-extension dtypes and np.nan
+     with warnings.catch_warnings():
+         warnings.simplefilter("error")
+         ct.fit_transform(df)
+     df = df.convert_dtypes()
+     # Warning (scheduled to become an error in 1.6) with extension dtype and pd.NA
+     with pytest.warns(FutureWarning, match=r"set_output\(transform='pandas'\)"):
+         ct.fit_transform(df)
+     # No warning when output is set to pandas
+     with warnings.catch_warnings():
+         warnings.simplefilter("error")
+         ct.set_output(transform="pandas")
+         ct.fit_transform(df)
+     ct.set_output(transform="default")
+     # No warning when there are no pd.NA
+     with warnings.catch_warnings():
+         warnings.simplefilter("error")
+         ct.fit_transform(df.fillna(-1.0))
+
+
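+ # What goes wrong with pd.NA on the default numpy output path, shown
+ # directly (a sketch of the presumed failure mode, not taken from the test
+ # suite): converting an extension-dtype column holding pd.NA to a numpy
+ # array falls back to dtype=object with pd.NA embedded, which downstream
+ # estimators cannot digest -- hence the FutureWarning above.
+ def _sketch_pd_na_conversion():
+     pd = pytest.importorskip("pandas")
+     s = pd.Series([1.5, None]).convert_dtypes()  # nullable Float64 dtype
+     arr = np.asarray(s)
+     assert arr.dtype == object and arr[1] is pd.NA
+
+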
+ def test_dataframe_different_dataframe_libraries():
+     """Check fitting and transforming on pandas and polars dataframes."""
+     pd = pytest.importorskip("pandas")
+     pl = pytest.importorskip("polars")
+     X_train_np = np.array([[0, 1], [2, 4], [4, 5]])
+     X_test_np = np.array([[1, 2], [1, 3], [2, 3]])
+
+     # Fit on pandas and transform on polars
+     X_train_pd = pd.DataFrame(X_train_np, columns=["a", "b"])
+     X_test_pl = pl.DataFrame(X_test_np, schema=["a", "b"])
+
+     ct = make_column_transformer((Trans(), [0, 1]))
+     ct.fit(X_train_pd)
+
+     out_pl_in = ct.transform(X_test_pl)
+     assert_array_equal(out_pl_in, X_test_np)
+
+     # Fit on polars and transform on pandas
+     X_train_pl = pl.DataFrame(X_train_np, schema=["a", "b"])
+     X_test_pd = pd.DataFrame(X_test_np, columns=["a", "b"])
+     ct.fit(X_train_pl)
+
+     out_pd_in = ct.transform(X_test_pd)
+     assert_array_equal(out_pd_in, X_test_np)
+
+
+ @pytest.mark.parametrize("transform_output", ["default", "pandas"])
+ def test_column_transformer_remainder_passthrough_naming_consistency(transform_output):
+     """Check that when `remainder="passthrough"`, inconsistent naming is handled
+     correctly by the underlying `FunctionTransformer`.
+
+     Non-regression test for:
+     https://github.com/scikit-learn/scikit-learn/issues/28232
+     """
+     pd = pytest.importorskip("pandas")
+     X = pd.DataFrame(np.random.randn(10, 4))
+
+     preprocessor = ColumnTransformer(
+         transformers=[("scaler", StandardScaler(), [0, 1])],
+         remainder="passthrough",
+     ).set_output(transform=transform_output)
+     X_trans = preprocessor.fit_transform(X)
+     assert X_trans.shape == X.shape
+
+     expected_column_names = [
+         "scaler__x0",
+         "scaler__x1",
+         "remainder__x2",
+         "remainder__x3",
+     ]
+     if hasattr(X_trans, "columns"):
+         assert X_trans.columns.tolist() == expected_column_names
+     assert preprocessor.get_feature_names_out().tolist() == expected_column_names
+
+
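+ # The naming rule checked above, in short: with the default
+ # verbose_feature_names_out=True, remainder columns get a "remainder__"
+ # prefix, and inputs without string column names fall back to generated
+ # x0, x1, ... names. Sketch on plain numpy input:
+ def _sketch_remainder_naming():
+     X = np.random.RandomState(0).randn(5, 3)
+     ct = ColumnTransformer(
+         [("scaler", StandardScaler(), [0])], remainder="passthrough"
+     ).fit(X)
+     expected = ["scaler__x0", "remainder__x1", "remainder__x2"]
+     assert list(ct.get_feature_names_out()) == expected
+
+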
+ @pytest.mark.parametrize("dataframe_lib", ["pandas", "polars"])
+ def test_column_transformer_column_renaming(dataframe_lib):
+     """Check that we properly rename columns when using `ColumnTransformer` and
+     selected columns are redundant between transformers.
+
+     Non-regression test for:
+     https://github.com/scikit-learn/scikit-learn/issues/28260
+     """
+     lib = pytest.importorskip(dataframe_lib)
+
+     df = lib.DataFrame({"x1": [1, 2, 3], "x2": [10, 20, 30], "x3": [100, 200, 300]})
+
+     transformer = ColumnTransformer(
+         transformers=[
+             ("A", "passthrough", ["x1", "x2", "x3"]),
+             ("B", FunctionTransformer(), ["x1", "x2"]),
+             ("C", StandardScaler(), ["x1", "x3"]),
+             # special case of empty transformer
+             ("D", FunctionTransformer(lambda x: x[[]]), ["x1", "x2", "x3"]),
+         ],
+         verbose_feature_names_out=True,
+     ).set_output(transform=dataframe_lib)
+     df_trans = transformer.fit_transform(df)
+     assert list(df_trans.columns) == [
+         "A__x1",
+         "A__x2",
+         "A__x3",
+         "B__x1",
+         "B__x2",
+         "C__x1",
+         "C__x3",
+     ]
+
+
+ @pytest.mark.parametrize("dataframe_lib", ["pandas", "polars"])
+ def test_column_transformer_error_with_duplicated_columns(dataframe_lib):
+     """Check that we raise an error when using `ColumnTransformer` and
+     the column names are duplicated between transformers."""
+     lib = pytest.importorskip(dataframe_lib)
+
+     df = lib.DataFrame({"x1": [1, 2, 3], "x2": [10, 20, 30], "x3": [100, 200, 300]})
+
+     transformer = ColumnTransformer(
+         transformers=[
+             ("A", "passthrough", ["x1", "x2", "x3"]),
+             ("B", FunctionTransformer(), ["x1", "x2"]),
+             ("C", StandardScaler(), ["x1", "x3"]),
+             # special case of empty transformer
+             ("D", FunctionTransformer(lambda x: x[[]]), ["x1", "x2", "x3"]),
+         ],
+         verbose_feature_names_out=False,
+     ).set_output(transform=dataframe_lib)
+     err_msg = re.escape(
+         "Duplicated feature names found before concatenating the outputs of the "
+         "transformers: ['x1', 'x2', 'x3'].\n"
+         "Transformer A has conflicting columns names: ['x1', 'x2', 'x3'].\n"
+         "Transformer B has conflicting columns names: ['x1', 'x2'].\n"
+         "Transformer C has conflicting columns names: ['x1', 'x3'].\n"
+     )
+     with pytest.raises(ValueError, match=err_msg):
+         transformer.fit_transform(df)
+
+
+ # Metadata Routing Tests
+ # ======================
+
+
+ @pytest.mark.parametrize("method", ["transform", "fit_transform", "fit"])
+ def test_routing_passed_metadata_not_supported(method):
+     """Test that the right error message is raised when metadata is passed but
+     not supported because `enable_metadata_routing=False`."""
+
+     X = np.array([[0, 1, 2], [2, 4, 6]]).T
+     y = [1, 2, 3]
+     trs = ColumnTransformer([("trans", Trans(), [0])]).fit(X, y)
+
+     with pytest.raises(
+         ValueError, match="is only supported if enable_metadata_routing=True"
+     ):
+         getattr(trs, method)([[1]], sample_weight=[1], prop="a")
+
+
+ @pytest.mark.usefixtures("enable_slep006")
+ @pytest.mark.parametrize("method", ["transform", "fit_transform", "fit"])
+ def test_metadata_routing_for_column_transformer(method):
+     """Test that metadata is routed correctly for column transformer."""
+     X = np.array([[0, 1, 2], [2, 4, 6]]).T
+     y = [1, 2, 3]
+     registry = _Registry()
+     sample_weight, metadata = [1], "a"
+     trs = ColumnTransformer(
+         [
+             (
+                 "trans",
+                 ConsumingTransformer(registry=registry)
+                 .set_fit_request(sample_weight=True, metadata=True)
+                 .set_transform_request(sample_weight=True, metadata=True),
+                 [0],
+             )
+         ]
+     )
+
+     if method == "transform":
+         trs.fit(X, y)
+         trs.transform(X, sample_weight=sample_weight, metadata=metadata)
+     else:
+         getattr(trs, method)(X, y, sample_weight=sample_weight, metadata=metadata)
+
+     assert len(registry)
+     for _trs in registry:
+         check_recorded_metadata(
+             obj=_trs, method=method, sample_weight=sample_weight, metadata=metadata
+         )
+
+
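+ # The routing contract exercised above, in miniature: a sub-estimator only
+ # receives metadata it explicitly requested via set_fit_request /
+ # set_transform_request, and ColumnTransformer merely forwards what was
+ # requested. Sketch (assumes metadata routing is enabled, e.g. via the
+ # enable_slep006 fixture):
+ def _sketch_metadata_request():
+     trans = ConsumingTransformer().set_fit_request(sample_weight=True)
+     ct = ColumnTransformer([("trans", trans, [0])])
+     router = ct.get_metadata_routing()
+     assert router.consumes("fit", ["sample_weight"]) == {"sample_weight"}
+
+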
+ @pytest.mark.usefixtures("enable_slep006")
+ def test_metadata_routing_no_fit_transform():
+     """Test metadata routing when the sub-estimator doesn't implement
+     ``fit_transform``."""
+
+     class NoFitTransform(BaseEstimator):
+         def fit(self, X, y=None, sample_weight=None, metadata=None):
+             assert sample_weight
+             assert metadata
+             return self
+
+         def transform(self, X, sample_weight=None, metadata=None):
+             assert sample_weight
+             assert metadata
+             return X
+
+     X = np.array([[0, 1, 2], [2, 4, 6]]).T
+     y = [1, 2, 3]
+     _Registry()
+     sample_weight, metadata = [1], "a"
+     trs = ColumnTransformer(
+         [
+             (
+                 "trans",
+                 NoFitTransform()
+                 .set_fit_request(sample_weight=True, metadata=True)
+                 .set_transform_request(sample_weight=True, metadata=True),
+                 [0],
+             )
+         ]
+     )
+
+     trs.fit(X, y, sample_weight=sample_weight, metadata=metadata)
+     trs.fit_transform(X, y, sample_weight=sample_weight, metadata=metadata)
+
+
+ @pytest.mark.usefixtures("enable_slep006")
+ @pytest.mark.parametrize("method", ["transform", "fit_transform", "fit"])
+ def test_metadata_routing_error_for_column_transformer(method):
+     """Test that the right error is raised when metadata is not requested."""
+     X = np.array([[0, 1, 2], [2, 4, 6]]).T
+     y = [1, 2, 3]
+     sample_weight, metadata = [1], "a"
+     trs = ColumnTransformer([("trans", ConsumingTransformer(), [0])])
+
+     error_message = (
+         "[sample_weight, metadata] are passed but are not explicitly set as requested"
+         f" or not for ConsumingTransformer.{method}"
+     )
+     with pytest.raises(ValueError, match=re.escape(error_message)):
+         if method == "transform":
+             trs.fit(X, y)
+             trs.transform(X, sample_weight=sample_weight, metadata=metadata)
+         else:
+             getattr(trs, method)(X, y, sample_weight=sample_weight, metadata=metadata)
+
+
+ @pytest.mark.usefixtures("enable_slep006")
+ def test_get_metadata_routing_works_without_fit():
+     # Regression test for https://github.com/scikit-learn/scikit-learn/issues/28186
+     # Make sure ct.get_metadata_routing() works without having called fit.
+     ct = ColumnTransformer([("trans", ConsumingTransformer(), [0])])
+     ct.get_metadata_routing()
+
+
+ @pytest.mark.usefixtures("enable_slep006")
+ def test_remainder_request_always_present():
+     # Test that the remainder's request is always present.
+     ct = ColumnTransformer(
+         [("trans", StandardScaler(), [0])],
+         remainder=ConsumingTransformer()
+         .set_fit_request(metadata=True)
+         .set_transform_request(metadata=True),
+     )
+     router = ct.get_metadata_routing()
+     assert router.consumes("fit", ["metadata"]) == set(["metadata"])
+
+
+ @pytest.mark.usefixtures("enable_slep006")
+ def test_unused_transformer_request_present():
+     # Test that the request of a transformer is always present even when not
+     # used due to no selected columns.
+     ct = ColumnTransformer(
+         [
+             (
+                 "trans",
+                 ConsumingTransformer()
+                 .set_fit_request(metadata=True)
+                 .set_transform_request(metadata=True),
+                 lambda X: [],
+             )
+         ]
+     )
+     router = ct.get_metadata_routing()
+     assert router.consumes("fit", ["metadata"]) == set(["metadata"])
+
+
+ # End of Metadata Routing Tests
+ # =============================
llmeval-env/lib/python3.10/site-packages/sklearn/compose/tests/test_target.py ADDED
@@ -0,0 +1,387 @@
+ import numpy as np
+ import pytest
+
+ from sklearn import datasets
+ from sklearn.base import BaseEstimator, TransformerMixin, clone
+ from sklearn.compose import TransformedTargetRegressor
+ from sklearn.dummy import DummyRegressor
+ from sklearn.linear_model import LinearRegression, OrthogonalMatchingPursuit
+ from sklearn.pipeline import Pipeline
+ from sklearn.preprocessing import FunctionTransformer, StandardScaler
+ from sklearn.utils._testing import assert_allclose, assert_no_warnings
+
+ friedman = datasets.make_friedman1(random_state=0)
+
+
+ def test_transform_target_regressor_error():
+     X, y = friedman
+     # provide a transformer and functions at the same time
+     regr = TransformedTargetRegressor(
+         regressor=LinearRegression(),
+         transformer=StandardScaler(),
+         func=np.exp,
+         inverse_func=np.log,
+     )
+     with pytest.raises(
+         ValueError,
+         match="'transformer' and functions 'func'/'inverse_func' cannot both be set.",
+     ):
+         regr.fit(X, y)
+     # fit with sample_weight with a regressor which does not support it
+     sample_weight = np.ones((y.shape[0],))
+     regr = TransformedTargetRegressor(
+         regressor=OrthogonalMatchingPursuit(), transformer=StandardScaler()
+     )
+     with pytest.raises(
+         TypeError,
+         match=r"fit\(\) got an unexpected " "keyword argument 'sample_weight'",
+     ):
+         regr.fit(X, y, sample_weight=sample_weight)
+     # func is given but inverse_func is not
+     regr = TransformedTargetRegressor(func=np.exp)
+     with pytest.raises(
+         ValueError,
+         match="When 'func' is provided, 'inverse_func' must also be provided",
+     ):
+         regr.fit(X, y)
+
+
+ def test_transform_target_regressor_invertible():
+     X, y = friedman
+     regr = TransformedTargetRegressor(
+         regressor=LinearRegression(),
+         func=np.sqrt,
+         inverse_func=np.log,
+         check_inverse=True,
+     )
+     with pytest.warns(
+         UserWarning,
+         match=(
+             "The provided functions or"
+             " transformer are not strictly inverse of each other."
+         ),
+     ):
+         regr.fit(X, y)
+     regr = TransformedTargetRegressor(
+         regressor=LinearRegression(), func=np.sqrt, inverse_func=np.log
+     )
+     regr.set_params(check_inverse=False)
+     assert_no_warnings(regr.fit, X, y)
+
+
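+ # What check_inverse actually verifies, sketched: round-tripping a sample of
+ # y through func and then inverse_func should reproduce y, and np.sqrt
+ # followed by np.log clearly does not:
+ def _sketch_non_inverse_pair():
+     y = np.linspace(1.0, 10.0, 5)
+     roundtrip = np.log(np.sqrt(y))
+     assert not np.allclose(y, roundtrip)
+
+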
+ def _check_standard_scaled(y, y_pred):
+     y_mean = np.mean(y, axis=0)
+     y_std = np.std(y, axis=0)
+     assert_allclose((y - y_mean) / y_std, y_pred)
+
+
+ def _check_shifted_by_one(y, y_pred):
+     assert_allclose(y + 1, y_pred)
+
+
+ def test_transform_target_regressor_functions():
+     X, y = friedman
+     regr = TransformedTargetRegressor(
+         regressor=LinearRegression(), func=np.log, inverse_func=np.exp
+     )
+     y_pred = regr.fit(X, y).predict(X)
+     # check the transformer output
+     y_tran = regr.transformer_.transform(y.reshape(-1, 1)).squeeze()
+     assert_allclose(np.log(y), y_tran)
+     assert_allclose(
+         y, regr.transformer_.inverse_transform(y_tran.reshape(-1, 1)).squeeze()
+     )
+     assert y.shape == y_pred.shape
+     assert_allclose(y_pred, regr.inverse_func(regr.regressor_.predict(X)))
+     # check the regressor output
+     lr = LinearRegression().fit(X, regr.func(y))
+     assert_allclose(regr.regressor_.coef_.ravel(), lr.coef_.ravel())
+
+
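+ # The equivalence asserted above, stated directly: fitting
+ # TransformedTargetRegressor(func=f, inverse_func=g) matches fitting the
+ # inner regressor on f(y) and mapping its predictions back through g.
+ # Illustrative sketch on the same friedman data:
+ def _sketch_target_transform_equivalence():
+     X, y = friedman
+     ttr = TransformedTargetRegressor(
+         regressor=LinearRegression(), func=np.log, inverse_func=np.exp
+     ).fit(X, y)
+     manual = np.exp(LinearRegression().fit(X, np.log(y)).predict(X))
+     assert_allclose(ttr.predict(X), manual)
+
+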
+ def test_transform_target_regressor_functions_multioutput():
+     X = friedman[0]
+     y = np.vstack((friedman[1], friedman[1] ** 2 + 1)).T
+     regr = TransformedTargetRegressor(
+         regressor=LinearRegression(), func=np.log, inverse_func=np.exp
+     )
+     y_pred = regr.fit(X, y).predict(X)
+     # check the transformer output
+     y_tran = regr.transformer_.transform(y)
+     assert_allclose(np.log(y), y_tran)
+     assert_allclose(y, regr.transformer_.inverse_transform(y_tran))
+     assert y.shape == y_pred.shape
+     assert_allclose(y_pred, regr.inverse_func(regr.regressor_.predict(X)))
+     # check the regressor output
+     lr = LinearRegression().fit(X, regr.func(y))
+     assert_allclose(regr.regressor_.coef_.ravel(), lr.coef_.ravel())
+
+
+ @pytest.mark.parametrize(
+     "X,y", [friedman, (friedman[0], np.vstack((friedman[1], friedman[1] ** 2 + 1)).T)]
+ )
+ def test_transform_target_regressor_1d_transformer(X, y):
+     # All transformers in scikit-learn expect 2D data. FunctionTransformer with
+     # validate=False lifts this constraint without checking that the input is a
+     # 2D array. We check the consistency of the data shape using 1D and 2D y
+     # arrays.
+     transformer = FunctionTransformer(
+         func=lambda x: x + 1, inverse_func=lambda x: x - 1
+     )
+     regr = TransformedTargetRegressor(
+         regressor=LinearRegression(), transformer=transformer
+     )
+     y_pred = regr.fit(X, y).predict(X)
+     assert y.shape == y_pred.shape
+     # consistency forward transform
+     y_tran = regr.transformer_.transform(y)
+     _check_shifted_by_one(y, y_tran)
+     assert y.shape == y_pred.shape
+     # consistency inverse transform
+     assert_allclose(y, regr.transformer_.inverse_transform(y_tran).squeeze())
+     # consistency of the regressor
+     lr = LinearRegression()
+     transformer2 = clone(transformer)
+     lr.fit(X, transformer2.fit_transform(y))
+     y_lr_pred = lr.predict(X)
+     assert_allclose(y_pred, transformer2.inverse_transform(y_lr_pred))
+     assert_allclose(regr.regressor_.coef_, lr.coef_)
+
+
+ @pytest.mark.parametrize(
+     "X,y", [friedman, (friedman[0], np.vstack((friedman[1], friedman[1] ** 2 + 1)).T)]
+ )
+ def test_transform_target_regressor_2d_transformer(X, y):
+     # Check consistency with a transformer accepting only 2D arrays and a
+     # 1D/2D y array.
+     transformer = StandardScaler()
+     regr = TransformedTargetRegressor(
+         regressor=LinearRegression(), transformer=transformer
+     )
+     y_pred = regr.fit(X, y).predict(X)
+     assert y.shape == y_pred.shape
+     # consistency forward transform
+     if y.ndim == 1:  # create a 2D array and squeeze results
+         y_tran = regr.transformer_.transform(y.reshape(-1, 1))
+     else:
+         y_tran = regr.transformer_.transform(y)
+     _check_standard_scaled(y, y_tran.squeeze())
+     assert y.shape == y_pred.shape
+     # consistency inverse transform
+     assert_allclose(y, regr.transformer_.inverse_transform(y_tran).squeeze())
+     # consistency of the regressor
+     lr = LinearRegression()
+     transformer2 = clone(transformer)
+     if y.ndim == 1:  # create a 2D array and squeeze results
+         lr.fit(X, transformer2.fit_transform(y.reshape(-1, 1)).squeeze())
+         y_lr_pred = lr.predict(X).reshape(-1, 1)
+         y_pred2 = transformer2.inverse_transform(y_lr_pred).squeeze()
+     else:
+         lr.fit(X, transformer2.fit_transform(y))
+         y_lr_pred = lr.predict(X)
+         y_pred2 = transformer2.inverse_transform(y_lr_pred)
+
+     assert_allclose(y_pred, y_pred2)
+     assert_allclose(regr.regressor_.coef_, lr.coef_)
+
+
+ def test_transform_target_regressor_2d_transformer_multioutput():
+     # Check consistency with a transformer accepting only 2D arrays and a 2D y
+     # array.
+     X = friedman[0]
+     y = np.vstack((friedman[1], friedman[1] ** 2 + 1)).T
+     transformer = StandardScaler()
+     regr = TransformedTargetRegressor(
+         regressor=LinearRegression(), transformer=transformer
+     )
+     y_pred = regr.fit(X, y).predict(X)
+     assert y.shape == y_pred.shape
+     # consistency forward transform
+     y_tran = regr.transformer_.transform(y)
+     _check_standard_scaled(y, y_tran)
+     assert y.shape == y_pred.shape
+     # consistency inverse transform
+     assert_allclose(y, regr.transformer_.inverse_transform(y_tran).squeeze())
+     # consistency of the regressor
+     lr = LinearRegression()
+     transformer2 = clone(transformer)
+     lr.fit(X, transformer2.fit_transform(y))
+     y_lr_pred = lr.predict(X)
+     assert_allclose(y_pred, transformer2.inverse_transform(y_lr_pred))
+     assert_allclose(regr.regressor_.coef_, lr.coef_)
+
+
+ def test_transform_target_regressor_3d_target():
+     # Non-regression test for:
+     # https://github.com/scikit-learn/scikit-learn/issues/18866
+     # Check with a 3D target with a transformer that reshapes the target
+     X = friedman[0]
+     y = np.tile(friedman[1].reshape(-1, 1, 1), [1, 3, 2])
+
+     def flatten_data(data):
+         return data.reshape(data.shape[0], -1)
+
+     def unflatten_data(data):
+         return data.reshape(data.shape[0], -1, 2)
+
+     transformer = FunctionTransformer(func=flatten_data, inverse_func=unflatten_data)
+     regr = TransformedTargetRegressor(
+         regressor=LinearRegression(), transformer=transformer
+     )
+     y_pred = regr.fit(X, y).predict(X)
+     assert y.shape == y_pred.shape
+
+
+ def test_transform_target_regressor_multi_to_single():
+     X = friedman[0]
+     y = np.transpose([friedman[1], (friedman[1] ** 2 + 1)])
+
+     def func(y):
+         out = np.sqrt(y[:, 0] ** 2 + y[:, 1] ** 2)
+         return out[:, np.newaxis]
+
+     def inverse_func(y):
+         return y
+
+     tt = TransformedTargetRegressor(
+         func=func, inverse_func=inverse_func, check_inverse=False
+     )
+     tt.fit(X, y)
+     y_pred_2d_func = tt.predict(X)
+     assert y_pred_2d_func.shape == (100, 1)
+
+     # force the function to return only a 1D array
+     def func(y):
+         return np.sqrt(y[:, 0] ** 2 + y[:, 1] ** 2)
+
+     tt = TransformedTargetRegressor(
+         func=func, inverse_func=inverse_func, check_inverse=False
+     )
+     tt.fit(X, y)
+     y_pred_1d_func = tt.predict(X)
+     assert y_pred_1d_func.shape == (100, 1)
+
+     assert_allclose(y_pred_1d_func, y_pred_2d_func)
+
+
+ class DummyCheckerArrayTransformer(TransformerMixin, BaseEstimator):
+     def fit(self, X, y=None):
+         assert isinstance(X, np.ndarray)
+         return self
+
+     def transform(self, X):
+         assert isinstance(X, np.ndarray)
+         return X
+
+     def inverse_transform(self, X):
+         assert isinstance(X, np.ndarray)
+         return X
+
+
+ class DummyCheckerListRegressor(DummyRegressor):
+     def fit(self, X, y, sample_weight=None):
+         assert isinstance(X, list)
+         return super().fit(X, y, sample_weight)
+
+     def predict(self, X):
+         assert isinstance(X, list)
+         return super().predict(X)
+
+
+ def test_transform_target_regressor_ensure_y_array():
+     # check that the target ``y`` passed to the transformer is always a
+     # numpy array. Similarly, if ``X`` is passed as a list, check that the
+     # regressor receives it as is.
+     X, y = friedman
+     tt = TransformedTargetRegressor(
+         transformer=DummyCheckerArrayTransformer(),
+         regressor=DummyCheckerListRegressor(),
+         check_inverse=False,
+     )
+     tt.fit(X.tolist(), y.tolist())
+     tt.predict(X.tolist())
+     with pytest.raises(AssertionError):
+         tt.fit(X, y.tolist())
+     with pytest.raises(AssertionError):
+         tt.predict(X)
+
+
+ class DummyTransformer(TransformerMixin, BaseEstimator):
+     """Dummy transformer that counts how many times fit was called."""
+
+     def __init__(self, fit_counter=0):
+         self.fit_counter = fit_counter
+
+     def fit(self, X, y=None):
+         self.fit_counter += 1
+         return self
+
+     def transform(self, X):
+         return X
+
+     def inverse_transform(self, X):
+         return X
+
+
+ @pytest.mark.parametrize("check_inverse", [False, True])
+ def test_transform_target_regressor_count_fit(check_inverse):
+     # regression test for gh-issue #11618
+     # check that fit is called only a single time on the transformer
+     X, y = friedman
+     ttr = TransformedTargetRegressor(
+         transformer=DummyTransformer(), check_inverse=check_inverse
+     )
+     ttr.fit(X, y)
+     assert ttr.transformer_.fit_counter == 1
+
+
+ class DummyRegressorWithExtraFitParams(DummyRegressor):
+     def fit(self, X, y, sample_weight=None, check_input=True):
+         # in the test below we force this to False and make sure it is
+         # actually passed to the regressor
+         assert not check_input
+         return super().fit(X, y, sample_weight)
+
+
+ def test_transform_target_regressor_pass_fit_parameters():
+     X, y = friedman
+     regr = TransformedTargetRegressor(
+         regressor=DummyRegressorWithExtraFitParams(), transformer=DummyTransformer()
+     )
+
+     regr.fit(X, y, check_input=False)
+     assert regr.transformer_.fit_counter == 1
+
+
+ def test_transform_target_regressor_route_pipeline():
+     X, y = friedman
+
+     regr = TransformedTargetRegressor(
+         regressor=DummyRegressorWithExtraFitParams(), transformer=DummyTransformer()
+     )
+     estimators = [("normalize", StandardScaler()), ("est", regr)]
+
+     pip = Pipeline(estimators)
+     pip.fit(X, y, **{"est__check_input": False})
+
+     assert regr.transformer_.fit_counter == 1
+
+
+ class DummyRegressorWithExtraPredictParams(DummyRegressor):
+     def predict(self, X, check_input=True):
+         # in the test below we make sure the check_input parameter is
+         # passed as False
+         self.predict_called = True
+         assert not check_input
+         return super().predict(X)
+
+
+ def test_transform_target_regressor_pass_extra_predict_parameters():
+     # Checks that predict kwargs are passed to the regressor.
+     X, y = friedman
+     regr = TransformedTargetRegressor(
+         regressor=DummyRegressorWithExtraPredictParams(), transformer=DummyTransformer()
+     )
+
+     regr.fit(X, y)
+     regr.predict(X, check_input=False)
+     assert regr.regressor_.predict_called
llmeval-env/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/api-v1-jdf-292.json.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:255c16f33ed2967fe100cd8011a7e69f789603724b1ec2ecf91dfeb72067c190
+ size 306
llmeval-env/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/api-v1-jdf-40981.json.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:255c16f33ed2967fe100cd8011a7e69f789603724b1ec2ecf91dfeb72067c190
+ size 306
llmeval-env/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/api-v1-jdl-dn-australian-l-2-dv-1-s-dact.json.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8ef6025425fdfc5f736555ea385252af5bcbf62383615db82489366d4f96a0a7
+ size 327
llmeval-env/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/data-v1-dl-49822.arff.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b7ee24adabd4aaed6419b43fe9d3f86d55fcf4bee0f1698ae21d86c2701314e3
+ size 2532
llmeval-env/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/api-v1-jd-561.json.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a1d38fdd601b67bb9c6d16152f53ddf166a0cfcfef4fa86438e899bfe449226c
+ size 1798