diff --git a/.gitattributes b/.gitattributes index fa3d231abe794d3ee6d10a6bb67e6d826101152e..f1ed04ab2f5754ec73d810a025f9f0289a2c8e6a 100644 --- a/.gitattributes +++ b/.gitattributes @@ -85,3 +85,4 @@ llmeval-env/lib/python3.10/site-packages/numpy.libs/libgfortran-040039e1.so.5.0. llmeval-env/lib/python3.10/site-packages/lxml/objectify.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text llmeval-env/lib/python3.10/site-packages/tokenizers/tokenizers.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text llmeval-env/lib/python3.10/site-packages/numpy.libs/libopenblas64_p-r0-0cf96a72.3.23.dev.so filter=lfs diff=lfs merge=lfs -text +llmeval-env/lib/python3.10/site-packages/regex/_regex.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text diff --git a/llmeval-env/lib/python3.10/site-packages/regex/_regex.cpython-310-x86_64-linux-gnu.so b/llmeval-env/lib/python3.10/site-packages/regex/_regex.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..f98c6927043bbcfe6fcb62c397b1a686a0d8649d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/regex/_regex.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7836accb6f19aadd3c2a5066acfb2f86fcdff510bb6d3efb3832ea3f26e4cc13 +size 2503320 diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/__check_build/__init__.py b/llmeval-env/lib/python3.10/site-packages/sklearn/__check_build/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3895a0e430082238ac6f9995cf0fd08d73e2d53d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/__check_build/__init__.py @@ -0,0 +1,47 @@ +""" Module to give helpful messages to the user that did not +compile scikit-learn properly. +""" +import os + +INPLACE_MSG = """ +It appears that you are importing a local scikit-learn source tree. For +this, you need to have an inplace install. Maybe you are in the source +directory and you need to try from another location.""" + +STANDARD_MSG = """ +If you have used an installer, please check that it is suited for your +Python version, your operating system and your platform.""" + + +def raise_build_error(e): + # Raise a comprehensible error and list the contents of the + # directory to help debugging on the mailing list. + local_dir = os.path.split(__file__)[0] + msg = STANDARD_MSG + if local_dir == "sklearn/__check_build": + # Picking up the local install: this will work only if the + # install is an 'inplace build' + msg = INPLACE_MSG + dir_content = list() + for i, filename in enumerate(os.listdir(local_dir)): + if (i + 1) % 3: + dir_content.append(filename.ljust(26)) + else: + dir_content.append(filename + "\n") + raise ImportError("""%s +___________________________________________________________________________ +Contents of %s: +%s +___________________________________________________________________________ +It seems that scikit-learn has not been built correctly. + +If you have installed scikit-learn from source, please do not forget +to build the package before using it: run `python setup.py install` or +`make` in the source directory. 
+%s""" % (e, local_dir, "".join(dir_content).strip(), msg)) + + +try: + from ._check_build import check_build # noqa +except ImportError as e: + raise_build_error(e) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/__check_build/_check_build.cpython-310-x86_64-linux-gnu.so b/llmeval-env/lib/python3.10/site-packages/sklearn/__check_build/_check_build.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..37d5061af8966be53c262499c4539c45defa06ce Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/__check_build/_check_build.cpython-310-x86_64-linux-gnu.so differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c0a680dc80d7741b196cf59c12ce2aecf3219fa0 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/_dict_vectorizer.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/_dict_vectorizer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6ece0240b2bea267843a6f9664aafa48a3ccdf24 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/_dict_vectorizer.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/_hash.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/_hash.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..361d69954ee4e01659dafd7c040c9bb827b5e1ed Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/_hash.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/image.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/image.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1fcd1ad7a6fd50a780003cff390b03c0c1895e85 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/image.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/feature_extraction/tests/__init__.py b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_extraction/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/feature_extraction/tests/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_extraction/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4784ad48029c866cfe1b65a473403895beb2b7f3 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_extraction/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/feature_extraction/tests/__pycache__/test_dict_vectorizer.cpython-310.pyc 
b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_extraction/tests/__pycache__/test_dict_vectorizer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8812f2d92e95fb10430bce2bbd47a4c674772f0a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_extraction/tests/__pycache__/test_dict_vectorizer.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/feature_extraction/tests/__pycache__/test_feature_hasher.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_extraction/tests/__pycache__/test_feature_hasher.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7d9d37a6b436574aa397ca2b0a0b4b20f2a322b5 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_extraction/tests/__pycache__/test_feature_hasher.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/feature_extraction/tests/__pycache__/test_image.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_extraction/tests/__pycache__/test_image.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f159c76fcb6697fbdf4d435198d05713cdc259a7 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_extraction/tests/__pycache__/test_image.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/feature_extraction/tests/__pycache__/test_text.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_extraction/tests/__pycache__/test_text.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e333cb50fad782053193f35128843297e6b4ae20 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_extraction/tests/__pycache__/test_text.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/feature_extraction/tests/test_feature_hasher.py b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_extraction/tests/test_feature_hasher.py new file mode 100644 index 0000000000000000000000000000000000000000..276d0d48b077022559c775eab90abf363ffc6989 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_extraction/tests/test_feature_hasher.py @@ -0,0 +1,160 @@ +import numpy as np +import pytest +from numpy.testing import assert_array_equal + +from sklearn.feature_extraction import FeatureHasher +from sklearn.feature_extraction._hashing_fast import transform as _hashing_transform + + +def test_feature_hasher_dicts(): + feature_hasher = FeatureHasher(n_features=16) + assert "dict" == feature_hasher.input_type + + raw_X = [{"foo": "bar", "dada": 42, "tzara": 37}, {"foo": "baz", "gaga": "string1"}] + X1 = FeatureHasher(n_features=16).transform(raw_X) + gen = (iter(d.items()) for d in raw_X) + X2 = FeatureHasher(n_features=16, input_type="pair").transform(gen) + assert_array_equal(X1.toarray(), X2.toarray()) + + +def test_feature_hasher_strings(): + # mix byte and Unicode strings; note that "foo" is a duplicate in row 0 + raw_X = [ + ["foo", "bar", "baz", "foo".encode("ascii")], + ["bar".encode("ascii"), "baz", "quux"], + ] + + for lg_n_features in (7, 9, 11, 16, 22): + n_features = 2**lg_n_features + + it = (x for x in raw_X) # iterable + + feature_hasher = FeatureHasher( + n_features=n_features, input_type="string", alternate_sign=False + ) + X = feature_hasher.transform(it) + + assert X.shape[0] == len(raw_X) + assert 
X.shape[1] == n_features + + assert X[0].sum() == 4 + assert X[1].sum() == 3 + + assert X.nnz == 6 + + +@pytest.mark.parametrize( + "raw_X", + [ + ["my_string", "another_string"], + (x for x in ["my_string", "another_string"]), + ], + ids=["list", "generator"], +) +def test_feature_hasher_single_string(raw_X): + """FeatureHasher raises error when a sample is a single string. + + Non-regression test for gh-13199. + """ + msg = "Samples can not be a single string" + + feature_hasher = FeatureHasher(n_features=10, input_type="string") + with pytest.raises(ValueError, match=msg): + feature_hasher.transform(raw_X) + + +def test_hashing_transform_seed(): + # check the influence of the seed when computing the hashes + raw_X = [ + ["foo", "bar", "baz", "foo".encode("ascii")], + ["bar".encode("ascii"), "baz", "quux"], + ] + + raw_X_ = (((f, 1) for f in x) for x in raw_X) + indices, indptr, _ = _hashing_transform(raw_X_, 2**7, str, False) + + raw_X_ = (((f, 1) for f in x) for x in raw_X) + indices_0, indptr_0, _ = _hashing_transform(raw_X_, 2**7, str, False, seed=0) + assert_array_equal(indices, indices_0) + assert_array_equal(indptr, indptr_0) + + raw_X_ = (((f, 1) for f in x) for x in raw_X) + indices_1, _, _ = _hashing_transform(raw_X_, 2**7, str, False, seed=1) + with pytest.raises(AssertionError): + assert_array_equal(indices, indices_1) + + +def test_feature_hasher_pairs(): + raw_X = ( + iter(d.items()) + for d in [{"foo": 1, "bar": 2}, {"baz": 3, "quux": 4, "foo": -1}] + ) + feature_hasher = FeatureHasher(n_features=16, input_type="pair") + x1, x2 = feature_hasher.transform(raw_X).toarray() + x1_nz = sorted(np.abs(x1[x1 != 0])) + x2_nz = sorted(np.abs(x2[x2 != 0])) + assert [1, 2] == x1_nz + assert [1, 3, 4] == x2_nz + + +def test_feature_hasher_pairs_with_string_values(): + raw_X = ( + iter(d.items()) + for d in [{"foo": 1, "bar": "a"}, {"baz": "abc", "quux": 4, "foo": -1}] + ) + feature_hasher = FeatureHasher(n_features=16, input_type="pair") + x1, x2 = feature_hasher.transform(raw_X).toarray() + x1_nz = sorted(np.abs(x1[x1 != 0])) + x2_nz = sorted(np.abs(x2[x2 != 0])) + assert [1, 1] == x1_nz + assert [1, 1, 4] == x2_nz + + raw_X = (iter(d.items()) for d in [{"bax": "abc"}, {"bax": "abc"}]) + x1, x2 = feature_hasher.transform(raw_X).toarray() + x1_nz = np.abs(x1[x1 != 0]) + x2_nz = np.abs(x2[x2 != 0]) + assert [1] == x1_nz + assert [1] == x2_nz + assert_array_equal(x1, x2) + + +def test_hash_empty_input(): + n_features = 16 + raw_X = [[], (), iter(range(0))] + + feature_hasher = FeatureHasher(n_features=n_features, input_type="string") + X = feature_hasher.transform(raw_X) + + assert_array_equal(X.toarray(), np.zeros((len(raw_X), n_features))) + + +def test_hasher_zeros(): + # Assert that no zeros are materialized in the output. 
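Note: the FeatureHasher tests above all follow one shape: construct a hasher with a fixed n_features, transform raw samples, and assert properties of the resulting sparse matrix. A minimal usage sketch (the inputs and printed values are illustrative, not taken from the tests):

from sklearn.feature_extraction import FeatureHasher

hasher = FeatureHasher(n_features=16, input_type="dict")
X = hasher.transform([{"foo": 1, "bar": 2}, {"baz": 3}])
print(X.shape)  # (2, 16): output width is fixed regardless of how many features appear
print(X.nnz)    # at most 3 here; hash collisions can merge or cancel entries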
+ X = FeatureHasher().transform([{"foo": 0}]) + assert X.data.shape == (0,) + + +def test_hasher_alternate_sign(): + X = [list("Thequickbrownfoxjumped")] + + Xt = FeatureHasher(alternate_sign=True, input_type="string").fit_transform(X) + assert Xt.data.min() < 0 and Xt.data.max() > 0 + + Xt = FeatureHasher(alternate_sign=False, input_type="string").fit_transform(X) + assert Xt.data.min() > 0 + + +def test_hash_collisions(): + X = [list("Thequickbrownfoxjumped")] + + Xt = FeatureHasher( + alternate_sign=True, n_features=1, input_type="string" + ).fit_transform(X) + # check that some of the hashed tokens are added + # with an opposite sign and cancel out + assert abs(Xt.data[0]) < len(X[0]) + + Xt = FeatureHasher( + alternate_sign=False, n_features=1, input_type="string" + ).fit_transform(X) + assert Xt.data[0] == len(X[0]) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/feature_extraction/tests/test_text.py b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_extraction/tests/test_text.py new file mode 100644 index 0000000000000000000000000000000000000000..7c7cac85ccc6ba3deeec862246f2118b6131fcf2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_extraction/tests/test_text.py @@ -0,0 +1,1655 @@ +import pickle +import re +import warnings +from collections import defaultdict +from collections.abc import Mapping +from functools import partial +from io import StringIO +from itertools import product + +import numpy as np +import pytest +from numpy.testing import assert_array_almost_equal, assert_array_equal +from scipy import sparse + +from sklearn.base import clone +from sklearn.feature_extraction.text import ( + ENGLISH_STOP_WORDS, + CountVectorizer, + HashingVectorizer, + TfidfTransformer, + TfidfVectorizer, + strip_accents_ascii, + strip_accents_unicode, + strip_tags, +) +from sklearn.model_selection import GridSearchCV, cross_val_score, train_test_split +from sklearn.pipeline import Pipeline +from sklearn.svm import LinearSVC +from sklearn.utils import _IS_WASM, IS_PYPY +from sklearn.utils._testing import ( + assert_allclose_dense_sparse, + assert_almost_equal, + fails_if_pypy, + skip_if_32bit, +) +from sklearn.utils.fixes import CSC_CONTAINERS, CSR_CONTAINERS + +JUNK_FOOD_DOCS = ( + "the pizza pizza beer copyright", + "the pizza burger beer copyright", + "the the pizza beer beer copyright", + "the burger beer beer copyright", + "the coke burger coke copyright", + "the coke burger burger", +) + +NOTJUNK_FOOD_DOCS = ( + "the salad celeri copyright", + "the salad salad sparkling water copyright", + "the the celeri celeri copyright", + "the tomato tomato salad water", + "the tomato salad water copyright", +) + +ALL_FOOD_DOCS = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS + + +def uppercase(s): + return strip_accents_unicode(s).upper() + + +def strip_eacute(s): + return s.replace("é", "e") + + +def split_tokenize(s): + return s.split() + + +def lazy_analyze(s): + return ["the_ultimate_feature"] + + +def test_strip_accents(): + # check some classical latin accentuated symbols + a = "àáâãäåçèéêë" + expected = "aaaaaaceeee" + assert strip_accents_unicode(a) == expected + + a = "ìíîïñòóôõöùúûüý" + expected = "iiiinooooouuuuy" + assert strip_accents_unicode(a) == expected + + # check some arabic + a = "\u0625" # alef with a hamza below: إ + expected = "\u0627" # simple alef: ا + assert strip_accents_unicode(a) == expected + + # mix letters accentuated and not + a = "this is à test" + expected = "this is a test" + assert strip_accents_unicode(a) == expected + + # 
strings that are already decomposed + a = "o\u0308" # o with diaeresis + expected = "o" + assert strip_accents_unicode(a) == expected + + # combining marks by themselves + a = "\u0300\u0301\u0302\u0303" + expected = "" + assert strip_accents_unicode(a) == expected + + # Multiple combining marks on one character + a = "o\u0308\u0304" + expected = "o" + assert strip_accents_unicode(a) == expected + + +def test_to_ascii(): + # check some classical latin accentuated symbols + a = "àáâãäåçèéêë" + expected = "aaaaaaceeee" + assert strip_accents_ascii(a) == expected + + a = "ìíîïñòóôõöùúûüý" + expected = "iiiinooooouuuuy" + assert strip_accents_ascii(a) == expected + + # check some arabic + a = "\u0625" # halef with a hamza below + expected = "" # halef has no direct ascii match + assert strip_accents_ascii(a) == expected + + # mix letters accentuated and not + a = "this is à test" + expected = "this is a test" + assert strip_accents_ascii(a) == expected + + +@pytest.mark.parametrize("Vectorizer", (CountVectorizer, HashingVectorizer)) +def test_word_analyzer_unigrams(Vectorizer): + wa = Vectorizer(strip_accents="ascii").build_analyzer() + text = "J'ai mangé du kangourou ce midi, c'était pas très bon." + expected = [ + "ai", + "mange", + "du", + "kangourou", + "ce", + "midi", + "etait", + "pas", + "tres", + "bon", + ] + assert wa(text) == expected + + text = "This is a test, really.\n\n I met Harry yesterday." + expected = ["this", "is", "test", "really", "met", "harry", "yesterday"] + assert wa(text) == expected + + wa = Vectorizer(input="file").build_analyzer() + text = StringIO("This is a test with a file-like object!") + expected = ["this", "is", "test", "with", "file", "like", "object"] + assert wa(text) == expected + + # with custom preprocessor + wa = Vectorizer(preprocessor=uppercase).build_analyzer() + text = "J'ai mangé du kangourou ce midi, c'était pas très bon." + expected = [ + "AI", + "MANGE", + "DU", + "KANGOUROU", + "CE", + "MIDI", + "ETAIT", + "PAS", + "TRES", + "BON", + ] + assert wa(text) == expected + + # with custom tokenizer + wa = Vectorizer(tokenizer=split_tokenize, strip_accents="ascii").build_analyzer() + text = "J'ai mangé du kangourou ce midi, c'était pas très bon." + expected = [ + "j'ai", + "mange", + "du", + "kangourou", + "ce", + "midi,", + "c'etait", + "pas", + "tres", + "bon.", + ] + assert wa(text) == expected + + +def test_word_analyzer_unigrams_and_bigrams(): + wa = CountVectorizer( + analyzer="word", strip_accents="unicode", ngram_range=(1, 2) + ).build_analyzer() + + text = "J'ai mangé du kangourou ce midi, c'était pas très bon." + expected = [ + "ai", + "mange", + "du", + "kangourou", + "ce", + "midi", + "etait", + "pas", + "tres", + "bon", + "ai mange", + "mange du", + "du kangourou", + "kangourou ce", + "ce midi", + "midi etait", + "etait pas", + "pas tres", + "tres bon", + ] + assert wa(text) == expected + + +def test_unicode_decode_error(): + # decode_error default to strict, so this should fail + # First, encode (as bytes) a unicode string. + text = "J'ai mangé du kangourou ce midi, c'était pas très bon." + text_bytes = text.encode("utf-8") + + # Then let the Analyzer try to decode it as ascii. It should fail, + # because we have given it an incorrect encoding. 
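Note: for context on the accent-stripping assertions above, strip_accents_unicode applies NFKD normalization and removes combining marks, while strip_accents_ascii additionally drops any character without an ASCII equivalent. A quick illustration, with outputs taken from the tests themselves:

from sklearn.feature_extraction.text import strip_accents_ascii, strip_accents_unicode

print(strip_accents_unicode("àáâãäåçèéêë"))  # aaaaaaceeee
print(strip_accents_ascii("this is à test"))  # this is a test
print(strip_accents_ascii("\u0625"))          # empty string: no ASCII counterpart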
+ wa = CountVectorizer(ngram_range=(1, 2), encoding="ascii").build_analyzer() + with pytest.raises(UnicodeDecodeError): + wa(text_bytes) + + ca = CountVectorizer( + analyzer="char", ngram_range=(3, 6), encoding="ascii" + ).build_analyzer() + with pytest.raises(UnicodeDecodeError): + ca(text_bytes) + + +def test_char_ngram_analyzer(): + cnga = CountVectorizer( + analyzer="char", strip_accents="unicode", ngram_range=(3, 6) + ).build_analyzer() + + text = "J'ai mangé du kangourou ce midi, c'était pas très bon" + expected = ["j'a", "'ai", "ai ", "i m", " ma"] + assert cnga(text)[:5] == expected + expected = ["s tres", " tres ", "tres b", "res bo", "es bon"] + assert cnga(text)[-5:] == expected + + text = "This \n\tis a test, really.\n\n I met Harry yesterday" + expected = ["thi", "his", "is ", "s i", " is"] + assert cnga(text)[:5] == expected + + expected = [" yeste", "yester", "esterd", "sterda", "terday"] + assert cnga(text)[-5:] == expected + + cnga = CountVectorizer( + input="file", analyzer="char", ngram_range=(3, 6) + ).build_analyzer() + text = StringIO("This is a test with a file-like object!") + expected = ["thi", "his", "is ", "s i", " is"] + assert cnga(text)[:5] == expected + + +def test_char_wb_ngram_analyzer(): + cnga = CountVectorizer( + analyzer="char_wb", strip_accents="unicode", ngram_range=(3, 6) + ).build_analyzer() + + text = "This \n\tis a test, really.\n\n I met Harry yesterday" + expected = [" th", "thi", "his", "is ", " thi"] + assert cnga(text)[:5] == expected + + expected = ["yester", "esterd", "sterda", "terday", "erday "] + assert cnga(text)[-5:] == expected + + cnga = CountVectorizer( + input="file", analyzer="char_wb", ngram_range=(3, 6) + ).build_analyzer() + text = StringIO("A test with a file-like object!") + expected = [" a ", " te", "tes", "est", "st ", " tes"] + assert cnga(text)[:6] == expected + + +def test_word_ngram_analyzer(): + cnga = CountVectorizer( + analyzer="word", strip_accents="unicode", ngram_range=(3, 6) + ).build_analyzer() + + text = "This \n\tis a test, really.\n\n I met Harry yesterday" + expected = ["this is test", "is test really", "test really met"] + assert cnga(text)[:3] == expected + + expected = [ + "test really met harry yesterday", + "this is test really met harry", + "is test really met harry yesterday", + ] + assert cnga(text)[-3:] == expected + + cnga_file = CountVectorizer( + input="file", analyzer="word", ngram_range=(3, 6) + ).build_analyzer() + file = StringIO(text) + assert cnga_file(file) == cnga(text) + + +def test_countvectorizer_custom_vocabulary(): + vocab = {"pizza": 0, "beer": 1} + terms = set(vocab.keys()) + + # Try a few of the supported types. 
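Note: the loop that follows ("Try a few of the supported types") exercises CountVectorizer's acceptance of several vocabulary container types. For reference, the fixed-vocabulary behaviour in its simplest form, with values mirroring the test's vocab (a sketch, not part of the patch):

from sklearn.feature_extraction.text import CountVectorizer

vect = CountVectorizer(vocabulary={"pizza": 0, "beer": 1})
X = vect.fit_transform(["the pizza pizza beer copyright"])
print(vect.vocabulary_)  # {'pizza': 0, 'beer': 1}
print(X.toarray())       # [[2 1]]: counts only for the fixed vocabulary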
+ for typ in [dict, list, iter, partial(defaultdict, int)]: + v = typ(vocab) + vect = CountVectorizer(vocabulary=v) + vect.fit(JUNK_FOOD_DOCS) + if isinstance(v, Mapping): + assert vect.vocabulary_ == vocab + else: + assert set(vect.vocabulary_) == terms + X = vect.transform(JUNK_FOOD_DOCS) + assert X.shape[1] == len(terms) + v = typ(vocab) + vect = CountVectorizer(vocabulary=v) + inv = vect.inverse_transform(X) + assert len(inv) == X.shape[0] + + +def test_countvectorizer_custom_vocabulary_pipeline(): + what_we_like = ["pizza", "beer"] + pipe = Pipeline( + [ + ("count", CountVectorizer(vocabulary=what_we_like)), + ("tfidf", TfidfTransformer()), + ] + ) + X = pipe.fit_transform(ALL_FOOD_DOCS) + assert set(pipe.named_steps["count"].vocabulary_) == set(what_we_like) + assert X.shape[1] == len(what_we_like) + + +def test_countvectorizer_custom_vocabulary_repeated_indices(): + vocab = {"pizza": 0, "beer": 0} + msg = "Vocabulary contains repeated indices" + with pytest.raises(ValueError, match=msg): + vect = CountVectorizer(vocabulary=vocab) + vect.fit(["pasta_siziliana"]) + + +def test_countvectorizer_custom_vocabulary_gap_index(): + vocab = {"pizza": 1, "beer": 2} + with pytest.raises(ValueError, match="doesn't contain index"): + vect = CountVectorizer(vocabulary=vocab) + vect.fit(["pasta_verdura"]) + + +def test_countvectorizer_stop_words(): + cv = CountVectorizer() + cv.set_params(stop_words="english") + assert cv.get_stop_words() == ENGLISH_STOP_WORDS + cv.set_params(stop_words="_bad_str_stop_") + with pytest.raises(ValueError): + cv.get_stop_words() + cv.set_params(stop_words="_bad_unicode_stop_") + with pytest.raises(ValueError): + cv.get_stop_words() + stoplist = ["some", "other", "words"] + cv.set_params(stop_words=stoplist) + assert cv.get_stop_words() == set(stoplist) + + +def test_countvectorizer_empty_vocabulary(): + with pytest.raises(ValueError, match="empty vocabulary"): + vect = CountVectorizer(vocabulary=[]) + vect.fit(["foo"]) + + with pytest.raises(ValueError, match="empty vocabulary"): + v = CountVectorizer(max_df=1.0, stop_words="english") + # fit on stopwords only + v.fit(["to be or not to be", "and me too", "and so do you"]) + + +def test_fit_countvectorizer_twice(): + cv = CountVectorizer() + X1 = cv.fit_transform(ALL_FOOD_DOCS[:5]) + X2 = cv.fit_transform(ALL_FOOD_DOCS[5:]) + assert X1.shape[1] != X2.shape[1] + + +def test_countvectorizer_custom_token_pattern(): + """Check `get_feature_names_out()` when a custom token pattern is passed. + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/12971 + """ + corpus = [ + "This is the 1st document in my corpus.", + "This document is the 2nd sample.", + "And this is the 3rd one.", + "Is this the 4th document?", + ] + token_pattern = r"[0-9]{1,3}(?:st|nd|rd|th)\s\b(\w{2,})\b" + vectorizer = CountVectorizer(token_pattern=token_pattern) + vectorizer.fit_transform(corpus) + expected = ["document", "one", "sample"] + feature_names_out = vectorizer.get_feature_names_out() + assert_array_equal(feature_names_out, expected) + + +def test_countvectorizer_custom_token_pattern_with_several_group(): + """Check that we raise an error if token pattern capture several groups. 
+ Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/12971 + """ + corpus = [ + "This is the 1st document in my corpus.", + "This document is the 2nd sample.", + "And this is the 3rd one.", + "Is this the 4th document?", + ] + + token_pattern = r"([0-9]{1,3}(?:st|nd|rd|th))\s\b(\w{2,})\b" + err_msg = "More than 1 capturing group in token pattern" + vectorizer = CountVectorizer(token_pattern=token_pattern) + with pytest.raises(ValueError, match=err_msg): + vectorizer.fit(corpus) + + +def test_countvectorizer_uppercase_in_vocab(): + # Check that the check for uppercase in the provided vocabulary is only done at fit + # time and not at transform time (#21251) + vocabulary = ["Sample", "Upper", "Case", "Vocabulary"] + message = ( + "Upper case characters found in" + " vocabulary while 'lowercase'" + " is True. These entries will not" + " be matched with any documents" + ) + + vectorizer = CountVectorizer(lowercase=True, vocabulary=vocabulary) + + with pytest.warns(UserWarning, match=message): + vectorizer.fit(vocabulary) + + with warnings.catch_warnings(): + warnings.simplefilter("error", UserWarning) + vectorizer.transform(vocabulary) + + +def test_tf_transformer_feature_names_out(): + """Check get_feature_names_out for TfidfTransformer""" + X = [[1, 1, 1], [1, 1, 0], [1, 0, 0]] + tr = TfidfTransformer(smooth_idf=True, norm="l2").fit(X) + + feature_names_in = ["a", "c", "b"] + feature_names_out = tr.get_feature_names_out(feature_names_in) + assert_array_equal(feature_names_in, feature_names_out) + + +def test_tf_idf_smoothing(): + X = [[1, 1, 1], [1, 1, 0], [1, 0, 0]] + tr = TfidfTransformer(smooth_idf=True, norm="l2") + tfidf = tr.fit_transform(X).toarray() + assert (tfidf >= 0).all() + + # check normalization + assert_array_almost_equal((tfidf**2).sum(axis=1), [1.0, 1.0, 1.0]) + + # this is robust to features with only zeros + X = [[1, 1, 0], [1, 1, 0], [1, 0, 0]] + tr = TfidfTransformer(smooth_idf=True, norm="l2") + tfidf = tr.fit_transform(X).toarray() + assert (tfidf >= 0).all() + + +@pytest.mark.xfail( + _IS_WASM, + reason=( + "no floating point exceptions, see" + " https://github.com/numpy/numpy/pull/21895#issuecomment-1311525881" + ), +) +def test_tfidf_no_smoothing(): + X = [[1, 1, 1], [1, 1, 0], [1, 0, 0]] + tr = TfidfTransformer(smooth_idf=False, norm="l2") + tfidf = tr.fit_transform(X).toarray() + assert (tfidf >= 0).all() + + # check normalization + assert_array_almost_equal((tfidf**2).sum(axis=1), [1.0, 1.0, 1.0]) + + # the lack of smoothing make IDF fragile in the presence of feature with + # only zeros + X = [[1, 1, 0], [1, 1, 0], [1, 0, 0]] + tr = TfidfTransformer(smooth_idf=False, norm="l2") + + in_warning_message = "divide by zero" + with pytest.warns(RuntimeWarning, match=in_warning_message): + tr.fit_transform(X).toarray() + + +def test_sublinear_tf(): + X = [[1], [2], [3]] + tr = TfidfTransformer(sublinear_tf=True, use_idf=False, norm=None) + tfidf = tr.fit_transform(X).toarray() + assert tfidf[0] == 1 + assert tfidf[1] > tfidf[0] + assert tfidf[2] > tfidf[1] + assert tfidf[1] < 2 + assert tfidf[2] < 3 + + +def test_vectorizer(): + # raw documents as an iterator + train_data = iter(ALL_FOOD_DOCS[:-1]) + test_data = [ALL_FOOD_DOCS[-1]] + n_train = len(ALL_FOOD_DOCS) - 1 + + # test without vocabulary + v1 = CountVectorizer(max_df=0.5) + counts_train = v1.fit_transform(train_data) + if hasattr(counts_train, "tocsr"): + counts_train = counts_train.tocsr() + assert counts_train[0, v1.vocabulary_["pizza"]] == 2 + + # build a vectorizer v1 with the 
same vocabulary as the one fitted by v1 + v2 = CountVectorizer(vocabulary=v1.vocabulary_) + + # compare that the two vectorizer give the same output on the test sample + for v in (v1, v2): + counts_test = v.transform(test_data) + if hasattr(counts_test, "tocsr"): + counts_test = counts_test.tocsr() + + vocabulary = v.vocabulary_ + assert counts_test[0, vocabulary["salad"]] == 1 + assert counts_test[0, vocabulary["tomato"]] == 1 + assert counts_test[0, vocabulary["water"]] == 1 + + # stop word from the fixed list + assert "the" not in vocabulary + + # stop word found automatically by the vectorizer DF thresholding + # words that are high frequent across the complete corpus are likely + # to be not informative (either real stop words of extraction + # artifacts) + assert "copyright" not in vocabulary + + # not present in the sample + assert counts_test[0, vocabulary["coke"]] == 0 + assert counts_test[0, vocabulary["burger"]] == 0 + assert counts_test[0, vocabulary["beer"]] == 0 + assert counts_test[0, vocabulary["pizza"]] == 0 + + # test tf-idf + t1 = TfidfTransformer(norm="l1") + tfidf = t1.fit(counts_train).transform(counts_train).toarray() + assert len(t1.idf_) == len(v1.vocabulary_) + assert tfidf.shape == (n_train, len(v1.vocabulary_)) + + # test tf-idf with new data + tfidf_test = t1.transform(counts_test).toarray() + assert tfidf_test.shape == (len(test_data), len(v1.vocabulary_)) + + # test tf alone + t2 = TfidfTransformer(norm="l1", use_idf=False) + tf = t2.fit(counts_train).transform(counts_train).toarray() + assert not hasattr(t2, "idf_") + + # test idf transform with unlearned idf vector + t3 = TfidfTransformer(use_idf=True) + with pytest.raises(ValueError): + t3.transform(counts_train) + + # L1-normalized term frequencies sum to one + assert_array_almost_equal(np.sum(tf, axis=1), [1.0] * n_train) + + # test the direct tfidf vectorizer + # (equivalent to term count vectorizer + tfidf transformer) + train_data = iter(ALL_FOOD_DOCS[:-1]) + tv = TfidfVectorizer(norm="l1") + + tv.max_df = v1.max_df + tfidf2 = tv.fit_transform(train_data).toarray() + assert not tv.fixed_vocabulary_ + assert_array_almost_equal(tfidf, tfidf2) + + # test the direct tfidf vectorizer with new data + tfidf_test2 = tv.transform(test_data).toarray() + assert_array_almost_equal(tfidf_test, tfidf_test2) + + # test transform on unfitted vectorizer with empty vocabulary + v3 = CountVectorizer(vocabulary=None) + with pytest.raises(ValueError): + v3.transform(train_data) + + # ascii preprocessor? + v3.set_params(strip_accents="ascii", lowercase=False) + processor = v3.build_preprocessor() + text = "J'ai mangé du kangourou ce midi, c'était pas très bon." 
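Note: test_vectorizer above pins down the central contract that TfidfVectorizer is equivalent to CountVectorizer followed by TfidfTransformer when their parameters agree. The same equivalence in isolation (the documents are illustrative):

import numpy as np
from sklearn.feature_extraction.text import (
    CountVectorizer,
    TfidfTransformer,
    TfidfVectorizer,
)

docs = ["the pizza beer copyright", "the salad water copyright"]
counts = CountVectorizer().fit_transform(docs)
two_step = TfidfTransformer(norm="l1").fit_transform(counts).toarray()
one_step = TfidfVectorizer(norm="l1").fit_transform(docs).toarray()
np.testing.assert_array_almost_equal(two_step, one_step)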
+ expected = strip_accents_ascii(text) + result = processor(text) + assert expected == result + + # error on bad strip_accents param + v3.set_params(strip_accents="_gabbledegook_", preprocessor=None) + with pytest.raises(ValueError): + v3.build_preprocessor() + + # error with bad analyzer type + v3.set_params = "_invalid_analyzer_type_" + with pytest.raises(ValueError): + v3.build_analyzer() + + +def test_tfidf_vectorizer_setters(): + norm, use_idf, smooth_idf, sublinear_tf = "l2", False, False, False + tv = TfidfVectorizer( + norm=norm, use_idf=use_idf, smooth_idf=smooth_idf, sublinear_tf=sublinear_tf + ) + tv.fit(JUNK_FOOD_DOCS) + assert tv._tfidf.norm == norm + assert tv._tfidf.use_idf == use_idf + assert tv._tfidf.smooth_idf == smooth_idf + assert tv._tfidf.sublinear_tf == sublinear_tf + + # assigning value to `TfidfTransformer` should not have any effect until + # fitting + tv.norm = "l1" + tv.use_idf = True + tv.smooth_idf = True + tv.sublinear_tf = True + assert tv._tfidf.norm == norm + assert tv._tfidf.use_idf == use_idf + assert tv._tfidf.smooth_idf == smooth_idf + assert tv._tfidf.sublinear_tf == sublinear_tf + + tv.fit(JUNK_FOOD_DOCS) + assert tv._tfidf.norm == tv.norm + assert tv._tfidf.use_idf == tv.use_idf + assert tv._tfidf.smooth_idf == tv.smooth_idf + assert tv._tfidf.sublinear_tf == tv.sublinear_tf + + +@fails_if_pypy +def test_hashing_vectorizer(): + v = HashingVectorizer() + X = v.transform(ALL_FOOD_DOCS) + token_nnz = X.nnz + assert X.shape == (len(ALL_FOOD_DOCS), v.n_features) + assert X.dtype == v.dtype + + # By default the hashed values receive a random sign and l2 normalization + # makes the feature values bounded + assert np.min(X.data) > -1 + assert np.min(X.data) < 0 + assert np.max(X.data) > 0 + assert np.max(X.data) < 1 + + # Check that the rows are normalized + for i in range(X.shape[0]): + assert_almost_equal(np.linalg.norm(X[0].data, 2), 1.0) + + # Check vectorization with some non-default parameters + v = HashingVectorizer(ngram_range=(1, 2), norm="l1") + X = v.transform(ALL_FOOD_DOCS) + assert X.shape == (len(ALL_FOOD_DOCS), v.n_features) + assert X.dtype == v.dtype + + # ngrams generate more non zeros + ngrams_nnz = X.nnz + assert ngrams_nnz > token_nnz + assert ngrams_nnz < 2 * token_nnz + + # makes the feature values bounded + assert np.min(X.data) > -1 + assert np.max(X.data) < 1 + + # Check that the rows are normalized + for i in range(X.shape[0]): + assert_almost_equal(np.linalg.norm(X[0].data, 1), 1.0) + + +def test_feature_names(): + cv = CountVectorizer(max_df=0.5) + + # test for Value error on unfitted/empty vocabulary + with pytest.raises(ValueError): + cv.get_feature_names_out() + assert not cv.fixed_vocabulary_ + + # test for vocabulary learned from data + X = cv.fit_transform(ALL_FOOD_DOCS) + n_samples, n_features = X.shape + assert len(cv.vocabulary_) == n_features + + feature_names = cv.get_feature_names_out() + assert isinstance(feature_names, np.ndarray) + assert feature_names.dtype == object + + assert len(feature_names) == n_features + assert_array_equal( + [ + "beer", + "burger", + "celeri", + "coke", + "pizza", + "salad", + "sparkling", + "tomato", + "water", + ], + feature_names, + ) + + for idx, name in enumerate(feature_names): + assert idx == cv.vocabulary_.get(name) + + # test for custom vocabulary + vocab = [ + "beer", + "burger", + "celeri", + "coke", + "pizza", + "salad", + "sparkling", + "tomato", + "water", + ] + + cv = CountVectorizer(vocabulary=vocab) + feature_names = cv.get_feature_names_out() + assert_array_equal( + [ 
+ "beer", + "burger", + "celeri", + "coke", + "pizza", + "salad", + "sparkling", + "tomato", + "water", + ], + feature_names, + ) + assert cv.fixed_vocabulary_ + + for idx, name in enumerate(feature_names): + assert idx == cv.vocabulary_.get(name) + + +@pytest.mark.parametrize("Vectorizer", (CountVectorizer, TfidfVectorizer)) +def test_vectorizer_max_features(Vectorizer): + expected_vocabulary = {"burger", "beer", "salad", "pizza"} + expected_stop_words = { + "celeri", + "tomato", + "copyright", + "coke", + "sparkling", + "water", + "the", + } + + # test bounded number of extracted features + vectorizer = Vectorizer(max_df=0.6, max_features=4) + vectorizer.fit(ALL_FOOD_DOCS) + assert set(vectorizer.vocabulary_) == expected_vocabulary + assert vectorizer.stop_words_ == expected_stop_words + + +def test_count_vectorizer_max_features(): + # Regression test: max_features didn't work correctly in 0.14. + + cv_1 = CountVectorizer(max_features=1) + cv_3 = CountVectorizer(max_features=3) + cv_None = CountVectorizer(max_features=None) + + counts_1 = cv_1.fit_transform(JUNK_FOOD_DOCS).sum(axis=0) + counts_3 = cv_3.fit_transform(JUNK_FOOD_DOCS).sum(axis=0) + counts_None = cv_None.fit_transform(JUNK_FOOD_DOCS).sum(axis=0) + + features_1 = cv_1.get_feature_names_out() + features_3 = cv_3.get_feature_names_out() + features_None = cv_None.get_feature_names_out() + + # The most common feature is "the", with frequency 7. + assert 7 == counts_1.max() + assert 7 == counts_3.max() + assert 7 == counts_None.max() + + # The most common feature should be the same + assert "the" == features_1[np.argmax(counts_1)] + assert "the" == features_3[np.argmax(counts_3)] + assert "the" == features_None[np.argmax(counts_None)] + + +def test_vectorizer_max_df(): + test_data = ["abc", "dea", "eat"] + vect = CountVectorizer(analyzer="char", max_df=1.0) + vect.fit(test_data) + assert "a" in vect.vocabulary_.keys() + assert len(vect.vocabulary_.keys()) == 6 + assert len(vect.stop_words_) == 0 + + vect.max_df = 0.5 # 0.5 * 3 documents -> max_doc_count == 1.5 + vect.fit(test_data) + assert "a" not in vect.vocabulary_.keys() # {ae} ignored + assert len(vect.vocabulary_.keys()) == 4 # {bcdt} remain + assert "a" in vect.stop_words_ + assert len(vect.stop_words_) == 2 + + vect.max_df = 1 + vect.fit(test_data) + assert "a" not in vect.vocabulary_.keys() # {ae} ignored + assert len(vect.vocabulary_.keys()) == 4 # {bcdt} remain + assert "a" in vect.stop_words_ + assert len(vect.stop_words_) == 2 + + +def test_vectorizer_min_df(): + test_data = ["abc", "dea", "eat"] + vect = CountVectorizer(analyzer="char", min_df=1) + vect.fit(test_data) + assert "a" in vect.vocabulary_.keys() + assert len(vect.vocabulary_.keys()) == 6 + assert len(vect.stop_words_) == 0 + + vect.min_df = 2 + vect.fit(test_data) + assert "c" not in vect.vocabulary_.keys() # {bcdt} ignored + assert len(vect.vocabulary_.keys()) == 2 # {ae} remain + assert "c" in vect.stop_words_ + assert len(vect.stop_words_) == 4 + + vect.min_df = 0.8 # 0.8 * 3 documents -> min_doc_count == 2.4 + vect.fit(test_data) + assert "c" not in vect.vocabulary_.keys() # {bcdet} ignored + assert len(vect.vocabulary_.keys()) == 1 # {a} remains + assert "c" in vect.stop_words_ + assert len(vect.stop_words_) == 5 + + +def test_count_binary_occurrences(): + # by default multiple occurrences are counted as longs + test_data = ["aaabc", "abbde"] + vect = CountVectorizer(analyzer="char", max_df=1.0) + X = vect.fit_transform(test_data).toarray() + assert_array_equal(["a", "b", "c", "d", "e"], 
vect.get_feature_names_out()) + assert_array_equal([[3, 1, 1, 0, 0], [1, 2, 0, 1, 1]], X) + + # using boolean features, we can fetch the binary occurrence info + # instead. + vect = CountVectorizer(analyzer="char", max_df=1.0, binary=True) + X = vect.fit_transform(test_data).toarray() + assert_array_equal([[1, 1, 1, 0, 0], [1, 1, 0, 1, 1]], X) + + # check the ability to change the dtype + vect = CountVectorizer(analyzer="char", max_df=1.0, binary=True, dtype=np.float32) + X_sparse = vect.fit_transform(test_data) + assert X_sparse.dtype == np.float32 + + +@fails_if_pypy +def test_hashed_binary_occurrences(): + # by default multiple occurrences are counted as longs + test_data = ["aaabc", "abbde"] + vect = HashingVectorizer(alternate_sign=False, analyzer="char", norm=None) + X = vect.transform(test_data) + assert np.max(X[0:1].data) == 3 + assert np.max(X[1:2].data) == 2 + assert X.dtype == np.float64 + + # using boolean features, we can fetch the binary occurrence info + # instead. + vect = HashingVectorizer( + analyzer="char", alternate_sign=False, binary=True, norm=None + ) + X = vect.transform(test_data) + assert np.max(X.data) == 1 + assert X.dtype == np.float64 + + # check the ability to change the dtype + vect = HashingVectorizer( + analyzer="char", alternate_sign=False, binary=True, norm=None, dtype=np.float64 + ) + X = vect.transform(test_data) + assert X.dtype == np.float64 + + +@pytest.mark.parametrize("Vectorizer", (CountVectorizer, TfidfVectorizer)) +def test_vectorizer_inverse_transform(Vectorizer): + # raw documents + data = ALL_FOOD_DOCS + vectorizer = Vectorizer() + transformed_data = vectorizer.fit_transform(data) + inversed_data = vectorizer.inverse_transform(transformed_data) + assert isinstance(inversed_data, list) + + analyze = vectorizer.build_analyzer() + for doc, inversed_terms in zip(data, inversed_data): + terms = np.sort(np.unique(analyze(doc))) + inversed_terms = np.sort(np.unique(inversed_terms)) + assert_array_equal(terms, inversed_terms) + + assert sparse.issparse(transformed_data) + assert transformed_data.format == "csr" + + # Test that inverse_transform also works with numpy arrays and + # scipy + transformed_data2 = transformed_data.toarray() + inversed_data2 = vectorizer.inverse_transform(transformed_data2) + for terms, terms2 in zip(inversed_data, inversed_data2): + assert_array_equal(np.sort(terms), np.sort(terms2)) + + # Check that inverse_transform also works on non CSR sparse data: + transformed_data3 = transformed_data.tocsc() + inversed_data3 = vectorizer.inverse_transform(transformed_data3) + for terms, terms3 in zip(inversed_data, inversed_data3): + assert_array_equal(np.sort(terms), np.sort(terms3)) + + +def test_count_vectorizer_pipeline_grid_selection(): + # raw documents + data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS + + # label junk food as -1, the others as +1 + target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS) + + # split the dataset for model development and final evaluation + train_data, test_data, target_train, target_test = train_test_split( + data, target, test_size=0.2, random_state=0 + ) + + pipeline = Pipeline([("vect", CountVectorizer()), ("svc", LinearSVC(dual="auto"))]) + + parameters = { + "vect__ngram_range": [(1, 1), (1, 2)], + "svc__loss": ("hinge", "squared_hinge"), + } + + # find the best parameters for both the feature extraction and the + # classifier + grid_search = GridSearchCV(pipeline, parameters, n_jobs=1, cv=3) + + # Check that the best model found by grid search is 100% correct on the + # held out 
evaluation set. + pred = grid_search.fit(train_data, target_train).predict(test_data) + assert_array_equal(pred, target_test) + + # on this toy dataset bigram representation which is used in the last of + # the grid_search is considered the best estimator since they all converge + # to 100% accuracy models + assert grid_search.best_score_ == 1.0 + best_vectorizer = grid_search.best_estimator_.named_steps["vect"] + assert best_vectorizer.ngram_range == (1, 1) + + +def test_vectorizer_pipeline_grid_selection(): + # raw documents + data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS + + # label junk food as -1, the others as +1 + target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS) + + # split the dataset for model development and final evaluation + train_data, test_data, target_train, target_test = train_test_split( + data, target, test_size=0.1, random_state=0 + ) + + pipeline = Pipeline([("vect", TfidfVectorizer()), ("svc", LinearSVC(dual="auto"))]) + + parameters = { + "vect__ngram_range": [(1, 1), (1, 2)], + "vect__norm": ("l1", "l2"), + "svc__loss": ("hinge", "squared_hinge"), + } + + # find the best parameters for both the feature extraction and the + # classifier + grid_search = GridSearchCV(pipeline, parameters, n_jobs=1) + + # Check that the best model found by grid search is 100% correct on the + # held out evaluation set. + pred = grid_search.fit(train_data, target_train).predict(test_data) + assert_array_equal(pred, target_test) + + # on this toy dataset bigram representation which is used in the last of + # the grid_search is considered the best estimator since they all converge + # to 100% accuracy models + assert grid_search.best_score_ == 1.0 + best_vectorizer = grid_search.best_estimator_.named_steps["vect"] + assert best_vectorizer.ngram_range == (1, 1) + assert best_vectorizer.norm == "l2" + assert not best_vectorizer.fixed_vocabulary_ + + +def test_vectorizer_pipeline_cross_validation(): + # raw documents + data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS + + # label junk food as -1, the others as +1 + target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS) + + pipeline = Pipeline([("vect", TfidfVectorizer()), ("svc", LinearSVC(dual="auto"))]) + + cv_scores = cross_val_score(pipeline, data, target, cv=3) + assert_array_equal(cv_scores, [1.0, 1.0, 1.0]) + + +@fails_if_pypy +def test_vectorizer_unicode(): + # tests that the count vectorizer works with cyrillic. + document = ( + "Машинное обучение — обширный подраздел искусственного " + "интеллекта, изучающий методы построения алгоритмов, " + "способных обучаться." 
+ ) + + vect = CountVectorizer() + X_counted = vect.fit_transform([document]) + assert X_counted.shape == (1, 12) + + vect = HashingVectorizer(norm=None, alternate_sign=False) + X_hashed = vect.transform([document]) + assert X_hashed.shape == (1, 2**20) + + # No collisions on such a small dataset + assert X_counted.nnz == X_hashed.nnz + + # When norm is None and not alternate_sign, the tokens are counted up to + # collisions + assert_array_equal(np.sort(X_counted.data), np.sort(X_hashed.data)) + + +def test_tfidf_vectorizer_with_fixed_vocabulary(): + # non regression smoke test for inheritance issues + vocabulary = ["pizza", "celeri"] + vect = TfidfVectorizer(vocabulary=vocabulary) + X_1 = vect.fit_transform(ALL_FOOD_DOCS) + X_2 = vect.transform(ALL_FOOD_DOCS) + assert_array_almost_equal(X_1.toarray(), X_2.toarray()) + assert vect.fixed_vocabulary_ + + +def test_pickling_vectorizer(): + instances = [ + HashingVectorizer(), + HashingVectorizer(norm="l1"), + HashingVectorizer(binary=True), + HashingVectorizer(ngram_range=(1, 2)), + CountVectorizer(), + CountVectorizer(preprocessor=strip_tags), + CountVectorizer(analyzer=lazy_analyze), + CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS), + CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS), + TfidfVectorizer(), + TfidfVectorizer(analyzer=lazy_analyze), + TfidfVectorizer().fit(JUNK_FOOD_DOCS), + ] + + for orig in instances: + s = pickle.dumps(orig) + copy = pickle.loads(s) + assert type(copy) == orig.__class__ + assert copy.get_params() == orig.get_params() + if IS_PYPY and isinstance(orig, HashingVectorizer): + continue + else: + assert_allclose_dense_sparse( + copy.fit_transform(JUNK_FOOD_DOCS), + orig.fit_transform(JUNK_FOOD_DOCS), + ) + + +@pytest.mark.parametrize( + "factory", + [ + CountVectorizer.build_analyzer, + CountVectorizer.build_preprocessor, + CountVectorizer.build_tokenizer, + ], +) +def test_pickling_built_processors(factory): + """Tokenizers cannot be pickled + https://github.com/scikit-learn/scikit-learn/issues/12833 + """ + vec = CountVectorizer() + function = factory(vec) + text = "J'ai mangé du kangourou ce midi, c'était pas très bon." 
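Note: the pickling tests above and below check two invariants: an estimator's parameters survive the round trip, and a restored vectorizer reproduces the original's vocabulary and output. The core round trip, reduced to a minimal sketch:

import pickle
from sklearn.feature_extraction.text import CountVectorizer

vec = CountVectorizer().fit(["the pizza beer", "the salad water"])
restored = pickle.loads(pickle.dumps(vec))
assert restored.get_params() == vec.get_params()
assert restored.vocabulary_ == vec.vocabulary_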
+ roundtripped_function = pickle.loads(pickle.dumps(function)) + expected = function(text) + result = roundtripped_function(text) + assert result == expected + + +def test_countvectorizer_vocab_sets_when_pickling(): + # ensure that vocabulary of type set is coerced to a list to + # preserve iteration ordering after deserialization + rng = np.random.RandomState(0) + vocab_words = np.array( + [ + "beer", + "burger", + "celeri", + "coke", + "pizza", + "salad", + "sparkling", + "tomato", + "water", + ] + ) + for x in range(0, 100): + vocab_set = set(rng.choice(vocab_words, size=5, replace=False)) + cv = CountVectorizer(vocabulary=vocab_set) + unpickled_cv = pickle.loads(pickle.dumps(cv)) + cv.fit(ALL_FOOD_DOCS) + unpickled_cv.fit(ALL_FOOD_DOCS) + assert_array_equal( + cv.get_feature_names_out(), unpickled_cv.get_feature_names_out() + ) + + +def test_countvectorizer_vocab_dicts_when_pickling(): + rng = np.random.RandomState(0) + vocab_words = np.array( + [ + "beer", + "burger", + "celeri", + "coke", + "pizza", + "salad", + "sparkling", + "tomato", + "water", + ] + ) + for x in range(0, 100): + vocab_dict = dict() + words = rng.choice(vocab_words, size=5, replace=False) + for y in range(0, 5): + vocab_dict[words[y]] = y + cv = CountVectorizer(vocabulary=vocab_dict) + unpickled_cv = pickle.loads(pickle.dumps(cv)) + cv.fit(ALL_FOOD_DOCS) + unpickled_cv.fit(ALL_FOOD_DOCS) + assert_array_equal( + cv.get_feature_names_out(), unpickled_cv.get_feature_names_out() + ) + + +def test_stop_words_removal(): + # Ensure that deleting the stop_words_ attribute doesn't affect transform + + fitted_vectorizers = ( + TfidfVectorizer().fit(JUNK_FOOD_DOCS), + CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS), + CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS), + ) + + for vect in fitted_vectorizers: + vect_transform = vect.transform(JUNK_FOOD_DOCS).toarray() + + vect.stop_words_ = None + stop_None_transform = vect.transform(JUNK_FOOD_DOCS).toarray() + + delattr(vect, "stop_words_") + stop_del_transform = vect.transform(JUNK_FOOD_DOCS).toarray() + + assert_array_equal(stop_None_transform, vect_transform) + assert_array_equal(stop_del_transform, vect_transform) + + +def test_pickling_transformer(): + X = CountVectorizer().fit_transform(JUNK_FOOD_DOCS) + orig = TfidfTransformer().fit(X) + s = pickle.dumps(orig) + copy = pickle.loads(s) + assert type(copy) == orig.__class__ + assert_array_equal(copy.fit_transform(X).toarray(), orig.fit_transform(X).toarray()) + + +def test_transformer_idf_setter(): + X = CountVectorizer().fit_transform(JUNK_FOOD_DOCS) + orig = TfidfTransformer().fit(X) + copy = TfidfTransformer() + copy.idf_ = orig.idf_ + assert_array_equal(copy.transform(X).toarray(), orig.transform(X).toarray()) + + +def test_tfidf_vectorizer_setter(): + orig = TfidfVectorizer(use_idf=True) + orig.fit(JUNK_FOOD_DOCS) + copy = TfidfVectorizer(vocabulary=orig.vocabulary_, use_idf=True) + copy.idf_ = orig.idf_ + assert_array_equal( + copy.transform(JUNK_FOOD_DOCS).toarray(), + orig.transform(JUNK_FOOD_DOCS).toarray(), + ) + # `idf_` cannot be set with `use_idf=False` + copy = TfidfVectorizer(vocabulary=orig.vocabulary_, use_idf=False) + err_msg = "`idf_` cannot be set when `user_idf=False`." 
+ with pytest.raises(ValueError, match=err_msg): + copy.idf_ = orig.idf_ + + +def test_tfidfvectorizer_invalid_idf_attr(): + vect = TfidfVectorizer(use_idf=True) + vect.fit(JUNK_FOOD_DOCS) + copy = TfidfVectorizer(vocabulary=vect.vocabulary_, use_idf=True) + expected_idf_len = len(vect.idf_) + invalid_idf = [1.0] * (expected_idf_len + 1) + with pytest.raises(ValueError): + setattr(copy, "idf_", invalid_idf) + + +def test_non_unique_vocab(): + vocab = ["a", "b", "c", "a", "a"] + vect = CountVectorizer(vocabulary=vocab) + with pytest.raises(ValueError): + vect.fit([]) + + +@fails_if_pypy +def test_hashingvectorizer_nan_in_docs(): + # np.nan can appear when using pandas to load text fields from a csv file + # with missing values. + message = "np.nan is an invalid document, expected byte or unicode string." + exception = ValueError + + def func(): + hv = HashingVectorizer() + hv.fit_transform(["hello world", np.nan, "hello hello"]) + + with pytest.raises(exception, match=message): + func() + + +def test_tfidfvectorizer_binary(): + # Non-regression test: TfidfVectorizer used to ignore its "binary" param. + v = TfidfVectorizer(binary=True, use_idf=False, norm=None) + assert v.binary + + X = v.fit_transform(["hello world", "hello hello"]).toarray() + assert_array_equal(X.ravel(), [1, 1, 1, 0]) + X2 = v.transform(["hello world", "hello hello"]).toarray() + assert_array_equal(X2.ravel(), [1, 1, 1, 0]) + + +def test_tfidfvectorizer_export_idf(): + vect = TfidfVectorizer(use_idf=True) + vect.fit(JUNK_FOOD_DOCS) + assert_array_almost_equal(vect.idf_, vect._tfidf.idf_) + + +def test_vectorizer_vocab_clone(): + vect_vocab = TfidfVectorizer(vocabulary=["the"]) + vect_vocab_clone = clone(vect_vocab) + vect_vocab.fit(ALL_FOOD_DOCS) + vect_vocab_clone.fit(ALL_FOOD_DOCS) + assert vect_vocab_clone.vocabulary_ == vect_vocab.vocabulary_ + + +@pytest.mark.parametrize( + "Vectorizer", (CountVectorizer, TfidfVectorizer, HashingVectorizer) +) +def test_vectorizer_string_object_as_input(Vectorizer): + message = "Iterable over raw text documents expected, string object received." 
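Note: a pitfall the surrounding test guards against is that a bare Python string is itself iterable, so a single document passed as str would otherwise be silently treated as an iterable of one-character "documents". The vectorizers reject this explicitly, as sketched here:

from sklearn.feature_extraction.text import CountVectorizer

CountVectorizer().fit(["hello world"])    # fine: a list holding one document
try:
    CountVectorizer().fit("hello world")  # rejected with a ValueError
except ValueError as exc:
    print(exc)  # Iterable over raw text documents expected, string object received.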
+ vec = Vectorizer() + + with pytest.raises(ValueError, match=message): + vec.fit_transform("hello world!") + + with pytest.raises(ValueError, match=message): + vec.fit("hello world!") + vec.fit(["some text", "some other text"]) + + with pytest.raises(ValueError, match=message): + vec.transform("hello world!") + + +@pytest.mark.parametrize("X_dtype", [np.float32, np.float64]) +def test_tfidf_transformer_type(X_dtype): + X = sparse.rand(10, 20000, dtype=X_dtype, random_state=42) + X_trans = TfidfTransformer().fit_transform(X) + assert X_trans.dtype == X.dtype + + +@pytest.mark.parametrize( + "csc_container, csr_container", product(CSC_CONTAINERS, CSR_CONTAINERS) +) +def test_tfidf_transformer_sparse(csc_container, csr_container): + X = sparse.rand(10, 20000, dtype=np.float64, random_state=42) + X_csc = csc_container(X) + X_csr = csr_container(X) + + X_trans_csc = TfidfTransformer().fit_transform(X_csc) + X_trans_csr = TfidfTransformer().fit_transform(X_csr) + assert_allclose_dense_sparse(X_trans_csc, X_trans_csr) + assert X_trans_csc.format == X_trans_csr.format + + +@pytest.mark.parametrize( + "vectorizer_dtype, output_dtype, warning_expected", + [ + (np.int32, np.float64, True), + (np.int64, np.float64, True), + (np.float32, np.float32, False), + (np.float64, np.float64, False), + ], +) +def test_tfidf_vectorizer_type(vectorizer_dtype, output_dtype, warning_expected): + X = np.array(["numpy", "scipy", "sklearn"]) + vectorizer = TfidfVectorizer(dtype=vectorizer_dtype) + + warning_msg_match = "'dtype' should be used." + if warning_expected: + with pytest.warns(UserWarning, match=warning_msg_match): + X_idf = vectorizer.fit_transform(X) + else: + with warnings.catch_warnings(): + warnings.simplefilter("error", UserWarning) + X_idf = vectorizer.fit_transform(X) + assert X_idf.dtype == output_dtype + + +@pytest.mark.parametrize( + "vec", + [ + HashingVectorizer(ngram_range=(2, 1)), + CountVectorizer(ngram_range=(2, 1)), + TfidfVectorizer(ngram_range=(2, 1)), + ], +) +def test_vectorizers_invalid_ngram_range(vec): + # vectorizers could be initialized with invalid ngram range + # test for raising error message + invalid_range = vec.ngram_range + message = re.escape( + f"Invalid value for ngram_range={invalid_range} " + "lower boundary larger than the upper boundary." + ) + if isinstance(vec, HashingVectorizer) and IS_PYPY: + pytest.xfail(reason="HashingVectorizer is not supported on PyPy") + + with pytest.raises(ValueError, match=message): + vec.fit(["good news everyone"]) + + with pytest.raises(ValueError, match=message): + vec.fit_transform(["good news everyone"]) + + if isinstance(vec, HashingVectorizer): + with pytest.raises(ValueError, match=message): + vec.transform(["good news everyone"]) + + +def _check_stop_words_consistency(estimator): + stop_words = estimator.get_stop_words() + tokenize = estimator.build_tokenizer() + preprocess = estimator.build_preprocessor() + return estimator._check_stop_words_consistency(stop_words, preprocess, tokenize) + + +@fails_if_pypy +def test_vectorizer_stop_words_inconsistent(): + lstr = r"\['and', 'll', 've'\]" + message = ( + "Your stop_words may be inconsistent with your " + "preprocessing. Tokenizing the stop words generated " + "tokens %s not in stop_words." 
% lstr + ) + for vec in [CountVectorizer(), TfidfVectorizer(), HashingVectorizer()]: + vec.set_params(stop_words=["you've", "you", "you'll", "AND"]) + with pytest.warns(UserWarning, match=message): + vec.fit_transform(["hello world"]) + # reset stop word validation + del vec._stop_words_id + assert _check_stop_words_consistency(vec) is False + + # Only one warning per stop list + with warnings.catch_warnings(): + warnings.simplefilter("error", UserWarning) + vec.fit_transform(["hello world"]) + assert _check_stop_words_consistency(vec) is None + + # Test caching of inconsistency assessment + vec.set_params(stop_words=["you've", "you", "you'll", "blah", "AND"]) + with pytest.warns(UserWarning, match=message): + vec.fit_transform(["hello world"]) + + +@skip_if_32bit +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_countvectorizer_sort_features_64bit_sparse_indices(csr_container): + """ + Check that CountVectorizer._sort_features preserves the dtype of its sparse + feature matrix. + + This test is skipped on 32bit platforms, see: + https://github.com/scikit-learn/scikit-learn/pull/11295 + for more details. + """ + + X = csr_container((5, 5), dtype=np.int64) + + # force indices and indptr to int64. + INDICES_DTYPE = np.int64 + X.indices = X.indices.astype(INDICES_DTYPE) + X.indptr = X.indptr.astype(INDICES_DTYPE) + + vocabulary = {"scikit-learn": 0, "is": 1, "great!": 2} + + Xs = CountVectorizer()._sort_features(X, vocabulary) + + assert INDICES_DTYPE == Xs.indices.dtype + + +@fails_if_pypy +@pytest.mark.parametrize( + "Estimator", [CountVectorizer, TfidfVectorizer, HashingVectorizer] +) +def test_stop_word_validation_custom_preprocessor(Estimator): + data = [{"text": "some text"}] + + vec = Estimator() + assert _check_stop_words_consistency(vec) is True + + vec = Estimator(preprocessor=lambda x: x["text"], stop_words=["and"]) + assert _check_stop_words_consistency(vec) == "error" + # checks are cached + assert _check_stop_words_consistency(vec) is None + vec.fit_transform(data) + + class CustomEstimator(Estimator): + def build_preprocessor(self): + return lambda x: x["text"] + + vec = CustomEstimator(stop_words=["and"]) + assert _check_stop_words_consistency(vec) == "error" + + vec = Estimator( + tokenizer=lambda doc: re.compile(r"\w{1,}").findall(doc), stop_words=["and"] + ) + assert _check_stop_words_consistency(vec) is True + + +@pytest.mark.parametrize( + "Estimator", [CountVectorizer, TfidfVectorizer, HashingVectorizer] +) +@pytest.mark.parametrize( + "input_type, err_type, err_msg", + [ + ("filename", FileNotFoundError, ""), + ("file", AttributeError, "'str' object has no attribute 'read'"), + ], +) +def test_callable_analyzer_error(Estimator, input_type, err_type, err_msg): + if issubclass(Estimator, HashingVectorizer) and IS_PYPY: + pytest.xfail("HashingVectorizer is not supported on PyPy") + data = ["this is text, not file or filename"] + with pytest.raises(err_type, match=err_msg): + Estimator(analyzer=lambda x: x.split(), input=input_type).fit_transform(data) + + +@pytest.mark.parametrize( + "Estimator", + [ + CountVectorizer, + TfidfVectorizer, + pytest.param(HashingVectorizer, marks=fails_if_pypy), + ], +) +@pytest.mark.parametrize( + "analyzer", [lambda doc: open(doc, "r"), lambda doc: doc.read()] +) +@pytest.mark.parametrize("input_type", ["file", "filename"]) +def test_callable_analyzer_change_behavior(Estimator, analyzer, input_type): + data = ["this is text, not file or filename"] + with pytest.raises((FileNotFoundError, AttributeError)): + 
Estimator(analyzer=analyzer, input=input_type).fit_transform(data) + + +@pytest.mark.parametrize( + "Estimator", [CountVectorizer, TfidfVectorizer, HashingVectorizer] +) +def test_callable_analyzer_reraise_error(tmpdir, Estimator): + # check if a custom exception from the analyzer is shown to the user + def analyzer(doc): + raise Exception("testing") + + if issubclass(Estimator, HashingVectorizer) and IS_PYPY: + pytest.xfail("HashingVectorizer is not supported on PyPy") + + f = tmpdir.join("file.txt") + f.write("sample content\n") + + with pytest.raises(Exception, match="testing"): + Estimator(analyzer=analyzer, input="file").fit_transform([f]) + + +@pytest.mark.parametrize( + "Vectorizer", [CountVectorizer, HashingVectorizer, TfidfVectorizer] +) +@pytest.mark.parametrize( + ( + "stop_words, tokenizer, preprocessor, ngram_range, token_pattern," + "analyzer, unused_name, ovrd_name, ovrd_msg" + ), + [ + ( + ["you've", "you'll"], + None, + None, + (1, 1), + None, + "char", + "'stop_words'", + "'analyzer'", + "!= 'word'", + ), + ( + None, + lambda s: s.split(), + None, + (1, 1), + None, + "char", + "'tokenizer'", + "'analyzer'", + "!= 'word'", + ), + ( + None, + lambda s: s.split(), + None, + (1, 1), + r"\w+", + "word", + "'token_pattern'", + "'tokenizer'", + "is not None", + ), + ( + None, + None, + lambda s: s.upper(), + (1, 1), + r"\w+", + lambda s: s.upper(), + "'preprocessor'", + "'analyzer'", + "is callable", + ), + ( + None, + None, + None, + (1, 2), + None, + lambda s: s.upper(), + "'ngram_range'", + "'analyzer'", + "is callable", + ), + ( + None, + None, + None, + (1, 1), + r"\w+", + "char", + "'token_pattern'", + "'analyzer'", + "!= 'word'", + ), + ], +) +def test_unused_parameters_warn( + Vectorizer, + stop_words, + tokenizer, + preprocessor, + ngram_range, + token_pattern, + analyzer, + unused_name, + ovrd_name, + ovrd_msg, +): + train_data = JUNK_FOOD_DOCS + # setting parameter and checking for corresponding warning messages + vect = Vectorizer() + vect.set_params( + stop_words=stop_words, + tokenizer=tokenizer, + preprocessor=preprocessor, + ngram_range=ngram_range, + token_pattern=token_pattern, + analyzer=analyzer, + ) + msg = "The parameter %s will not be used since %s %s" % ( + unused_name, + ovrd_name, + ovrd_msg, + ) + with pytest.warns(UserWarning, match=msg): + vect.fit(train_data) + + +@pytest.mark.parametrize( + "Vectorizer, X", + ( + (HashingVectorizer, [{"foo": 1, "bar": 2}, {"foo": 3, "baz": 1}]), + (CountVectorizer, JUNK_FOOD_DOCS), + ), +) +def test_n_features_in(Vectorizer, X): + # For vectorizers, n_features_in_ does not make sense + vectorizer = Vectorizer() + assert not hasattr(vectorizer, "n_features_in_") + vectorizer.fit(X) + assert not hasattr(vectorizer, "n_features_in_") + + +def test_tie_breaking_sample_order_invariance(): + # Checks the sample order invariance when setting max_features + # non-regression test for #17939 + vec = CountVectorizer(max_features=1) + vocab1 = vec.fit(["hello", "world"]).vocabulary_ + vocab2 = vec.fit(["world", "hello"]).vocabulary_ + assert vocab1 == vocab2 + + +@fails_if_pypy +def test_nonnegative_hashing_vectorizer_result_indices(): + # add test for pr 19035 + hashing = HashingVectorizer(n_features=1000000, ngram_range=(2, 3)) + indices = hashing.transform(["22pcs efuture"]).indices + assert indices[0] >= 0 + + +@pytest.mark.parametrize( + "Estimator", [CountVectorizer, TfidfVectorizer, TfidfTransformer, HashingVectorizer] +) +def test_vectorizers_do_not_have_set_output(Estimator): + """Check that vectorizers do not define 
set_output.""" + est = Estimator() + assert not hasattr(est, "set_output") diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/inspection/__init__.py b/llmeval-env/lib/python3.10/site-packages/sklearn/inspection/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f8e08785e8358bd039e8179368db28483be2cd55 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/inspection/__init__.py @@ -0,0 +1,14 @@ +"""The :mod:`sklearn.inspection` module includes tools for model inspection.""" + + +from ._partial_dependence import partial_dependence +from ._permutation_importance import permutation_importance +from ._plot.decision_boundary import DecisionBoundaryDisplay +from ._plot.partial_dependence import PartialDependenceDisplay + +__all__ = [ + "partial_dependence", + "permutation_importance", + "PartialDependenceDisplay", + "DecisionBoundaryDisplay", +] diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/inspection/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/inspection/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4efe5c6db126a727bd852e881f365893aafe8453 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/inspection/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/inspection/__pycache__/_partial_dependence.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/inspection/__pycache__/_partial_dependence.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..29239ca43164120cb6924a951c71576d1e363ea0 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/inspection/__pycache__/_partial_dependence.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/inspection/__pycache__/_pd_utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/inspection/__pycache__/_pd_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9606665d5ad6fbedea524f22688885df98a7485d Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/inspection/__pycache__/_pd_utils.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/inspection/__pycache__/_permutation_importance.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/inspection/__pycache__/_permutation_importance.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0141918e2b600c8007ad053e1ef0a347decba1f1 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/inspection/__pycache__/_permutation_importance.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/inspection/_partial_dependence.py b/llmeval-env/lib/python3.10/site-packages/sklearn/inspection/_partial_dependence.py new file mode 100644 index 0000000000000000000000000000000000000000..4ad6094e02478a3dd579537b79355679a1a335f4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/inspection/_partial_dependence.py @@ -0,0 +1,743 @@ +"""Partial dependence plots for regression and classification models.""" + +# Authors: Peter Prettenhofer +# Trevor Stephens +# Nicolas Hug +# License: BSD 3 clause + +from collections.abc import Iterable + +import numpy as np +from scipy import sparse +from scipy.stats.mstats import mquantiles + +from ..base import is_classifier, is_regressor 
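+# The ensemble and tree estimators imported below are the ones eligible for
+# the fast 'recursion' method in partial_dependence(); all other estimators
+# fall back to the 'brute' method (see the method resolution further down).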
+from ..ensemble import RandomForestRegressor
+from ..ensemble._gb import BaseGradientBoosting
+from ..ensemble._hist_gradient_boosting.gradient_boosting import (
+    BaseHistGradientBoosting,
+)
+from ..exceptions import NotFittedError
+from ..tree import DecisionTreeRegressor
+from ..utils import (
+    Bunch,
+    _determine_key_type,
+    _get_column_indices,
+    _safe_assign,
+    _safe_indexing,
+    check_array,
+    check_matplotlib_support,  # noqa
+)
+from ..utils._param_validation import (
+    HasMethods,
+    Integral,
+    Interval,
+    StrOptions,
+    validate_params,
+)
+from ..utils.extmath import cartesian
+from ..utils.validation import _check_sample_weight, check_is_fitted
+from ._pd_utils import _check_feature_names, _get_feature_index
+
+__all__ = [
+    "partial_dependence",
+]
+
+
+def _grid_from_X(X, percentiles, is_categorical, grid_resolution):
+    """Generate a grid of points based on the percentiles of X.
+
+    The grid is a cartesian product between the columns of ``values``. The
+    jth column of ``values`` consists of ``grid_resolution`` equally spaced
+    points between the percentiles of the jth column of X.
+
+    If ``grid_resolution`` is bigger than the number of unique values in the
+    jth column of X, or if the feature is a categorical feature (by inspecting
+    `is_categorical`), then those unique values will be used instead.
+
+    Parameters
+    ----------
+    X : array-like of shape (n_samples, n_target_features)
+        The data.
+
+    percentiles : tuple of float
+        The percentiles which are used to construct the extreme values of
+        the grid. Must be in [0, 1].
+
+    is_categorical : list of bool
+        For each feature, tells whether it is categorical or not. If a feature
+        is categorical, then the values used will be the unique ones
+        (i.e. categories) instead of the percentiles.
+
+    grid_resolution : int
+        The number of equally spaced points to be placed on the grid for each
+        feature.
+
+    Returns
+    -------
+    grid : ndarray of shape (n_points, n_target_features)
+        A value for each feature at each point in the grid. ``n_points`` is
+        always ``<= grid_resolution ** X.shape[1]``.
+
+    values : list of 1d ndarrays
+        The values with which the grid has been created. The size of each
+        array ``values[j]`` is either ``grid_resolution``, or the number of
+        unique values in ``X[:, j]``, whichever is smaller.
+    """
+    if not isinstance(percentiles, Iterable) or len(percentiles) != 2:
+        raise ValueError("'percentiles' must be a sequence of 2 elements.")
+    if not all(0 <= x <= 1 for x in percentiles):
+        raise ValueError("'percentiles' values must be in [0, 1].")
+    if percentiles[0] >= percentiles[1]:
+        raise ValueError("percentiles[0] must be strictly less than percentiles[1].")
+
+    if grid_resolution <= 1:
+        raise ValueError("'grid_resolution' must be strictly greater than 1.")
+
+    values = []
+    # TODO: we should handle missing values (i.e. `np.nan`) specifically and store them
+    # in a different Bunch attribute.
+    for feature, is_cat in enumerate(is_categorical):
+        try:
+            uniques = np.unique(_safe_indexing(X, feature, axis=1))
+        except TypeError as exc:
+            # `np.unique` will fail in the presence of `np.nan` and `str` categories
+            # due to sorting. Temporarily, we reraise an error explaining the problem.
+            raise ValueError(
+                f"The column #{feature} contains mixed data types. Finding unique "
+                "categories fails due to sorting. It usually means that the column "
+                "contains `np.nan` values together with `str` categories. Such a use "
+                "case is not yet supported in scikit-learn."
+ ) from exc + if is_cat or uniques.shape[0] < grid_resolution: + # Use the unique values either because: + # - feature has low resolution use unique values + # - feature is categorical + axis = uniques + else: + # create axis based on percentiles and grid resolution + emp_percentiles = mquantiles( + _safe_indexing(X, feature, axis=1), prob=percentiles, axis=0 + ) + if np.allclose(emp_percentiles[0], emp_percentiles[1]): + raise ValueError( + "percentiles are too close to each other, " + "unable to build the grid. Please choose percentiles " + "that are further apart." + ) + axis = np.linspace( + emp_percentiles[0], + emp_percentiles[1], + num=grid_resolution, + endpoint=True, + ) + values.append(axis) + + return cartesian(values), values + + +def _partial_dependence_recursion(est, grid, features): + """Calculate partial dependence via the recursion method. + + The recursion method is in particular enabled for tree-based estimators. + + For each `grid` value, a weighted tree traversal is performed: if a split node + involves an input feature of interest, the corresponding left or right branch + is followed; otherwise both branches are followed, each branch being weighted + by the fraction of training samples that entered that branch. Finally, the + partial dependence is given by a weighted average of all the visited leaves + values. + + This method is more efficient in terms of speed than the `'brute'` method + (:func:`~sklearn.inspection._partial_dependence._partial_dependence_brute`). + However, here, the partial dependence computation is done explicitly with the + `X` used during training of `est`. + + Parameters + ---------- + est : BaseEstimator + A fitted estimator object implementing :term:`predict` or + :term:`decision_function`. Multioutput-multiclass classifiers are not + supported. Note that `'recursion'` is only supported for some tree-based + estimators (namely + :class:`~sklearn.ensemble.GradientBoostingClassifier`, + :class:`~sklearn.ensemble.GradientBoostingRegressor`, + :class:`~sklearn.ensemble.HistGradientBoostingClassifier`, + :class:`~sklearn.ensemble.HistGradientBoostingRegressor`, + :class:`~sklearn.tree.DecisionTreeRegressor`, + :class:`~sklearn.ensemble.RandomForestRegressor`, + ). + + grid : array-like of shape (n_points, n_target_features) + The grid of feature values for which the partial dependence is calculated. + Note that `n_points` is the number of points in the grid and `n_target_features` + is the number of features you are doing partial dependence at. + + features : array-like of {int, str} + The feature (e.g. `[0]`) or pair of interacting features + (e.g. `[(0, 1)]`) for which the partial dependency should be computed. + + Returns + ------- + averaged_predictions : array-like of shape (n_targets, n_points) + The averaged predictions for the given `grid` of features values. + Note that `n_targets` is the number of targets (e.g. 1 for binary + classification, `n_tasks` for multi-output regression, and `n_classes` for + multiclass classification) and `n_points` is the number of points in the `grid`. + """ + averaged_predictions = est._compute_partial_dependence_recursion(grid, features) + if averaged_predictions.ndim == 1: + # reshape to (1, n_points) for consistency with + # _partial_dependence_brute + averaged_predictions = averaged_predictions.reshape(1, -1) + + return averaged_predictions + + +def _partial_dependence_brute( + est, grid, features, X, response_method, sample_weight=None +): + """Calculate partial dependence via the brute force method. 
+ + The brute method explicitly averages the predictions of an estimator over a + grid of feature values. + + For each `grid` value, all the samples from `X` have their variables of + interest replaced by that specific `grid` value. The predictions are then made + and averaged across the samples. + + This method is slower than the `'recursion'` + (:func:`~sklearn.inspection._partial_dependence._partial_dependence_recursion`) + version for estimators with this second option. However, with the `'brute'` + force method, the average will be done with the given `X` and not the `X` + used during training, as it is done in the `'recursion'` version. Therefore + the average can always accept `sample_weight` (even when the estimator was + fitted without). + + Parameters + ---------- + est : BaseEstimator + A fitted estimator object implementing :term:`predict`, + :term:`predict_proba`, or :term:`decision_function`. + Multioutput-multiclass classifiers are not supported. + + grid : array-like of shape (n_points, n_target_features) + The grid of feature values for which the partial dependence is calculated. + Note that `n_points` is the number of points in the grid and `n_target_features` + is the number of features you are doing partial dependence at. + + features : array-like of {int, str} + The feature (e.g. `[0]`) or pair of interacting features + (e.g. `[(0, 1)]`) for which the partial dependency should be computed. + + X : array-like of shape (n_samples, n_features) + `X` is used to generate values for the complement features. That is, for + each value in `grid`, the method will average the prediction of each + sample from `X` having that grid value for `features`. + + response_method : {'auto', 'predict_proba', 'decision_function'}, \ + default='auto' + Specifies whether to use :term:`predict_proba` or + :term:`decision_function` as the target response. For regressors + this parameter is ignored and the response is always the output of + :term:`predict`. By default, :term:`predict_proba` is tried first + and we revert to :term:`decision_function` if it doesn't exist. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights are used to calculate weighted means when averaging the + model output. If `None`, then samples are equally weighted. Note that + `sample_weight` does not change the individual predictions. + + Returns + ------- + averaged_predictions : array-like of shape (n_targets, n_points) + The averaged predictions for the given `grid` of features values. + Note that `n_targets` is the number of targets (e.g. 1 for binary + classification, `n_tasks` for multi-output regression, and `n_classes` for + multiclass classification) and `n_points` is the number of points in the `grid`. + + predictions : array-like + The predictions for the given `grid` of features values over the samples + from `X`. For non-multioutput regression and binary classification the + shape is `(n_instances, n_points)` and for multi-output regression and + multiclass classification the shape is `(n_targets, n_instances, n_points)`, + where `n_targets` is the number of targets (`n_tasks` for multi-output + regression, and `n_classes` for multiclass classification), `n_instances` + is the number of instances in `X`, and `n_points` is the number of points + in the `grid`. + """ + predictions = [] + averaged_predictions = [] + + # define the prediction_method (predict, predict_proba, decision_function). 
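+    # Resolution order: regressors always use `predict`; for classifiers,
+    # response_method='auto' tries `predict_proba` first and falls back to
+    # `decision_function`, raising a ValueError below if neither exists.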
+ if is_regressor(est): + prediction_method = est.predict + else: + predict_proba = getattr(est, "predict_proba", None) + decision_function = getattr(est, "decision_function", None) + if response_method == "auto": + # try predict_proba, then decision_function if it doesn't exist + prediction_method = predict_proba or decision_function + else: + prediction_method = ( + predict_proba + if response_method == "predict_proba" + else decision_function + ) + if prediction_method is None: + if response_method == "auto": + raise ValueError( + "The estimator has no predict_proba and no " + "decision_function method." + ) + elif response_method == "predict_proba": + raise ValueError("The estimator has no predict_proba method.") + else: + raise ValueError("The estimator has no decision_function method.") + + X_eval = X.copy() + for new_values in grid: + for i, variable in enumerate(features): + _safe_assign(X_eval, new_values[i], column_indexer=variable) + + try: + # Note: predictions is of shape + # (n_points,) for non-multioutput regressors + # (n_points, n_tasks) for multioutput regressors + # (n_points, 1) for the regressors in cross_decomposition (I think) + # (n_points, 2) for binary classification + # (n_points, n_classes) for multiclass classification + pred = prediction_method(X_eval) + + predictions.append(pred) + # average over samples + averaged_predictions.append(np.average(pred, axis=0, weights=sample_weight)) + except NotFittedError as e: + raise ValueError("'estimator' parameter must be a fitted estimator") from e + + n_samples = X.shape[0] + + # reshape to (n_targets, n_instances, n_points) where n_targets is: + # - 1 for non-multioutput regression and binary classification (shape is + # already correct in those cases) + # - n_tasks for multi-output regression + # - n_classes for multiclass classification. + predictions = np.array(predictions).T + if is_regressor(est) and predictions.ndim == 2: + # non-multioutput regression, shape is (n_instances, n_points,) + predictions = predictions.reshape(n_samples, -1) + elif is_classifier(est) and predictions.shape[0] == 2: + # Binary classification, shape is (2, n_instances, n_points). + # we output the effect of **positive** class + predictions = predictions[1] + predictions = predictions.reshape(n_samples, -1) + + # reshape averaged_predictions to (n_targets, n_points) where n_targets is: + # - 1 for non-multioutput regression and binary classification (shape is + # already correct in those cases) + # - n_tasks for multi-output regression + # - n_classes for multiclass classification. + averaged_predictions = np.array(averaged_predictions).T + if is_regressor(est) and averaged_predictions.ndim == 1: + # non-multioutput regression, shape is (n_points,) + averaged_predictions = averaged_predictions.reshape(1, -1) + elif is_classifier(est) and averaged_predictions.shape[0] == 2: + # Binary classification, shape is (2, n_points). 
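+        # Row 0 corresponds to `classes_[0]` and row 1 to `classes_[1]`,
+        # i.e. the positive class;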
+ # we output the effect of **positive** class + averaged_predictions = averaged_predictions[1] + averaged_predictions = averaged_predictions.reshape(1, -1) + + return averaged_predictions, predictions + + +@validate_params( + { + "estimator": [ + HasMethods(["fit", "predict"]), + HasMethods(["fit", "predict_proba"]), + HasMethods(["fit", "decision_function"]), + ], + "X": ["array-like", "sparse matrix"], + "features": ["array-like", Integral, str], + "sample_weight": ["array-like", None], + "categorical_features": ["array-like", None], + "feature_names": ["array-like", None], + "response_method": [StrOptions({"auto", "predict_proba", "decision_function"})], + "percentiles": [tuple], + "grid_resolution": [Interval(Integral, 1, None, closed="left")], + "method": [StrOptions({"auto", "recursion", "brute"})], + "kind": [StrOptions({"average", "individual", "both"})], + }, + prefer_skip_nested_validation=True, +) +def partial_dependence( + estimator, + X, + features, + *, + sample_weight=None, + categorical_features=None, + feature_names=None, + response_method="auto", + percentiles=(0.05, 0.95), + grid_resolution=100, + method="auto", + kind="average", +): + """Partial dependence of ``features``. + + Partial dependence of a feature (or a set of features) corresponds to + the average response of an estimator for each possible value of the + feature. + + Read more in the :ref:`User Guide `. + + .. warning:: + + For :class:`~sklearn.ensemble.GradientBoostingClassifier` and + :class:`~sklearn.ensemble.GradientBoostingRegressor`, the + `'recursion'` method (used by default) will not account for the `init` + predictor of the boosting process. In practice, this will produce + the same values as `'brute'` up to a constant offset in the target + response, provided that `init` is a constant estimator (which is the + default). However, if `init` is not a constant estimator, the + partial dependence values are incorrect for `'recursion'` because the + offset will be sample-dependent. It is preferable to use the `'brute'` + method. Note that this only applies to + :class:`~sklearn.ensemble.GradientBoostingClassifier` and + :class:`~sklearn.ensemble.GradientBoostingRegressor`, not to + :class:`~sklearn.ensemble.HistGradientBoostingClassifier` and + :class:`~sklearn.ensemble.HistGradientBoostingRegressor`. + + Parameters + ---------- + estimator : BaseEstimator + A fitted estimator object implementing :term:`predict`, + :term:`predict_proba`, or :term:`decision_function`. + Multioutput-multiclass classifiers are not supported. + + X : {array-like, sparse matrix or dataframe} of shape (n_samples, n_features) + ``X`` is used to generate a grid of values for the target + ``features`` (where the partial dependence will be evaluated), and + also to generate values for the complement features when the + `method` is 'brute'. + + features : array-like of {int, str, bool} or int or str + The feature (e.g. `[0]`) or pair of interacting features + (e.g. `[(0, 1)]`) for which the partial dependency should be computed. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights are used to calculate weighted means when averaging the + model output. If `None`, then samples are equally weighted. If + `sample_weight` is not `None`, then `method` will be set to `'brute'`. + Note that `sample_weight` is ignored for `kind='individual'`. + + .. 
versionadded:: 1.3
+
+    categorical_features : array-like of shape (n_features,) or shape \
+            (n_categorical_features,), dtype={bool, int, str}, default=None
+        Indicates the categorical features.
+
+        - `None`: no feature will be considered categorical;
+        - boolean array-like: boolean mask of shape `(n_features,)`
+          indicating which features are categorical. Thus, this array has
+          the same length as `X.shape[1]`;
+        - integer or string array-like: integer indices or strings
+          indicating categorical features.
+
+        .. versionadded:: 1.2
+
+    feature_names : array-like of shape (n_features,), dtype=str, default=None
+        Name of each feature; `feature_names[i]` holds the name of the feature
+        with index `i`.
+        By default, the name of a feature corresponds to its numerical index
+        for a NumPy array and to its column name for a pandas dataframe.
+
+        .. versionadded:: 1.2
+
+    response_method : {'auto', 'predict_proba', 'decision_function'}, \
+            default='auto'
+        Specifies whether to use :term:`predict_proba` or
+        :term:`decision_function` as the target response. For regressors
+        this parameter is ignored and the response is always the output of
+        :term:`predict`. By default, :term:`predict_proba` is tried first
+        and we revert to :term:`decision_function` if it doesn't exist. If
+        ``method`` is 'recursion', the response is always the output of
+        :term:`decision_function`.
+
+    percentiles : tuple of float, default=(0.05, 0.95)
+        The lower and upper percentiles used to create the extreme values
+        for the grid. Must be in [0, 1].
+
+    grid_resolution : int, default=100
+        The number of equally spaced points on the grid, for each target
+        feature.
+
+    method : {'auto', 'recursion', 'brute'}, default='auto'
+        The method used to calculate the averaged predictions:
+
+        - `'recursion'` is only supported for some tree-based estimators
+          (namely
+          :class:`~sklearn.ensemble.GradientBoostingClassifier`,
+          :class:`~sklearn.ensemble.GradientBoostingRegressor`,
+          :class:`~sklearn.ensemble.HistGradientBoostingClassifier`,
+          :class:`~sklearn.ensemble.HistGradientBoostingRegressor`,
+          :class:`~sklearn.tree.DecisionTreeRegressor`,
+          :class:`~sklearn.ensemble.RandomForestRegressor`,
+          ) when `kind='average'`.
+          This is the more efficient option in terms of speed.
+          With this method, the target response of a
+          classifier is always the decision function, not the predicted
+          probabilities. Since the `'recursion'` method implicitly computes
+          the average of the Individual Conditional Expectation (ICE) by
+          design, it is not compatible with ICE and thus `kind` must be
+          `'average'`.
+
+        - `'brute'` is supported for any estimator, but is more
+          computationally intensive.
+
+        - `'auto'`: the `'recursion'` method is used for estimators that
+          support it, and `'brute'` is used otherwise. If `sample_weight`
+          is not `None`, then `'brute'` is used regardless of the estimator.
+
+        Please see :ref:`this note ` for
+        differences between the `'brute'` and `'recursion'` methods.
+
+    kind : {'average', 'individual', 'both'}, default='average'
+        Whether to return the partial dependence averaged across all the
+        samples in the dataset, one value per sample, or both.
+        See Returns below.
+
+        Note that the fast `method='recursion'` option is only available for
+        `kind='average'` and `sample_weight=None`. Computing individual
+        dependencies and doing weighted averages requires using the slower
+        `method='brute'`.
+
+        .. versionadded:: 0.24
+
+    Returns
+    -------
+    predictions : :class:`~sklearn.utils.Bunch`
+        Dictionary-like object, with the following attributes.
+ + individual : ndarray of shape (n_outputs, n_instances, \ + len(values[0]), len(values[1]), ...) + The predictions for all the points in the grid for all + samples in X. This is also known as Individual + Conditional Expectation (ICE). + Only available when `kind='individual'` or `kind='both'`. + + average : ndarray of shape (n_outputs, len(values[0]), \ + len(values[1]), ...) + The predictions for all the points in the grid, averaged + over all samples in X (or over the training data if + `method` is 'recursion'). + Only available when `kind='average'` or `kind='both'`. + + values : seq of 1d ndarrays + The values with which the grid has been created. + + .. deprecated:: 1.3 + The key `values` has been deprecated in 1.3 and will be removed + in 1.5 in favor of `grid_values`. See `grid_values` for details + about the `values` attribute. + + grid_values : seq of 1d ndarrays + The values with which the grid has been created. The generated + grid is a cartesian product of the arrays in `grid_values` where + `len(grid_values) == len(features)`. The size of each array + `grid_values[j]` is either `grid_resolution`, or the number of + unique values in `X[:, j]`, whichever is smaller. + + .. versionadded:: 1.3 + + `n_outputs` corresponds to the number of classes in a multi-class + setting, or to the number of tasks for multi-output regression. + For classical regression and binary classification `n_outputs==1`. + `n_values_feature_j` corresponds to the size `grid_values[j]`. + + See Also + -------- + PartialDependenceDisplay.from_estimator : Plot Partial Dependence. + PartialDependenceDisplay : Partial Dependence visualization. + + Examples + -------- + >>> X = [[0, 0, 2], [1, 0, 0]] + >>> y = [0, 1] + >>> from sklearn.ensemble import GradientBoostingClassifier + >>> gb = GradientBoostingClassifier(random_state=0).fit(X, y) + >>> partial_dependence(gb, features=[0], X=X, percentiles=(0, 1), + ... grid_resolution=2) # doctest: +SKIP + (array([[-4.52..., 4.52...]]), [array([ 0., 1.])]) + """ + check_is_fitted(estimator) + + if not (is_classifier(estimator) or is_regressor(estimator)): + raise ValueError("'estimator' must be a fitted regressor or classifier.") + + if is_classifier(estimator) and isinstance(estimator.classes_[0], np.ndarray): + raise ValueError("Multiclass-multioutput estimators are not supported") + + # Use check_array only on lists and other non-array-likes / sparse. Do not + # convert DataFrame into a NumPy array. + if not (hasattr(X, "__array__") or sparse.issparse(X)): + X = check_array(X, force_all_finite="allow-nan", dtype=object) + + if is_regressor(estimator) and response_method != "auto": + raise ValueError( + "The response_method parameter is ignored for regressors and " + "must be 'auto'." + ) + + if kind != "average": + if method == "recursion": + raise ValueError( + "The 'recursion' method only applies when 'kind' is set to 'average'" + ) + method = "brute" + + if method == "recursion" and sample_weight is not None: + raise ValueError( + "The 'recursion' method can only be applied when sample_weight is None." 
+        )
+
+    if method == "auto":
+        if sample_weight is not None:
+            method = "brute"
+        elif isinstance(estimator, BaseGradientBoosting) and estimator.init is None:
+            method = "recursion"
+        elif isinstance(
+            estimator,
+            (BaseHistGradientBoosting, DecisionTreeRegressor, RandomForestRegressor),
+        ):
+            method = "recursion"
+        else:
+            method = "brute"
+
+    if method == "recursion":
+        if not isinstance(
+            estimator,
+            (
+                BaseGradientBoosting,
+                BaseHistGradientBoosting,
+                DecisionTreeRegressor,
+                RandomForestRegressor,
+            ),
+        ):
+            supported_classes_recursion = (
+                "GradientBoostingClassifier",
+                "GradientBoostingRegressor",
+                "HistGradientBoostingClassifier",
+                "HistGradientBoostingRegressor",
+                "DecisionTreeRegressor",
+                "RandomForestRegressor",
+            )
+            raise ValueError(
+                "Only the following estimators support the 'recursion' "
+                "method: {}. Try using method='brute'.".format(
+                    ", ".join(supported_classes_recursion)
+                )
+            )
+        if response_method == "auto":
+            response_method = "decision_function"
+
+        if response_method != "decision_function":
+            raise ValueError(
+                "With the 'recursion' method, the response_method must be "
+                "'decision_function'. Got {}.".format(response_method)
+            )
+
+    if sample_weight is not None:
+        sample_weight = _check_sample_weight(sample_weight, X)
+
+    if _determine_key_type(features, accept_slice=False) == "int":
+        # _get_column_indices() supports negative indexing. Here, we limit
+        # the indexing to be positive. The upper bound will be checked
+        # by _get_column_indices()
+        if np.any(np.less(features, 0)):
+            raise ValueError("all features must be in [0, {}]".format(X.shape[1] - 1))
+
+    features_indices = np.asarray(
+        _get_column_indices(X, features), dtype=np.int32, order="C"
+    ).ravel()
+
+    feature_names = _check_feature_names(X, feature_names)
+
+    n_features = X.shape[1]
+    if categorical_features is None:
+        is_categorical = [False] * len(features_indices)
+    else:
+        categorical_features = np.asarray(categorical_features)
+        if categorical_features.dtype.kind == "b":
+            # categorical features provided as a list of booleans
+            if categorical_features.size != n_features:
+                raise ValueError(
+                    "When `categorical_features` is a boolean array-like, "
+                    "the array should be of shape (n_features,). Got "
+                    f"{categorical_features.size} elements while `X` contains "
+                    f"{n_features} features."
+                )
+            is_categorical = [categorical_features[idx] for idx in features_indices]
+        elif categorical_features.dtype.kind in ("i", "O", "U"):
+            # categorical features provided as a list of indices or feature names
+            categorical_features_idx = [
+                _get_feature_index(cat, feature_names=feature_names)
+                for cat in categorical_features
+            ]
+            is_categorical = [
+                idx in categorical_features_idx for idx in features_indices
+            ]
+        else:
+            raise ValueError(
+                "Expected `categorical_features` to be an array-like of boolean,"
+                f" integer, or string. Got {categorical_features.dtype} instead."
+            )
+
+    grid, values = _grid_from_X(
+        _safe_indexing(X, features_indices, axis=1),
+        percentiles,
+        is_categorical,
+        grid_resolution,
+    )
+
+    if method == "brute":
+        averaged_predictions, predictions = _partial_dependence_brute(
+            estimator, grid, features_indices, X, response_method, sample_weight
+        )
+
+        # reshape predictions to
+        # (n_outputs, n_instances, n_values_feature_0, n_values_feature_1, ...)
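+        # (the leading -1 infers n_outputs, X.shape[0] gives n_instances, and
+        # the trailing axes follow the size of each per-feature grid axis)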
+ predictions = predictions.reshape( + -1, X.shape[0], *[val.shape[0] for val in values] + ) + else: + averaged_predictions = _partial_dependence_recursion( + estimator, grid, features_indices + ) + + # reshape averaged_predictions to + # (n_outputs, n_values_feature_0, n_values_feature_1, ...) + averaged_predictions = averaged_predictions.reshape( + -1, *[val.shape[0] for val in values] + ) + pdp_results = Bunch() + + msg = ( + "Key: 'values', is deprecated in 1.3 and will be removed in 1.5. " + "Please use 'grid_values' instead." + ) + pdp_results._set_deprecated( + values, new_key="grid_values", deprecated_key="values", warning_message=msg + ) + + if kind == "average": + pdp_results["average"] = averaged_predictions + elif kind == "individual": + pdp_results["individual"] = predictions + else: # kind='both' + pdp_results["average"] = averaged_predictions + pdp_results["individual"] = predictions + + return pdp_results diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/inspection/_pd_utils.py b/llmeval-env/lib/python3.10/site-packages/sklearn/inspection/_pd_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..76f4d626fd53c3e669f29335e65e724e5e33e382 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/inspection/_pd_utils.py @@ -0,0 +1,64 @@ +def _check_feature_names(X, feature_names=None): + """Check feature names. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Input data. + + feature_names : None or array-like of shape (n_names,), dtype=str + Feature names to check or `None`. + + Returns + ------- + feature_names : list of str + Feature names validated. If `feature_names` is `None`, then a list of + feature names is provided, i.e. the column names of a pandas dataframe + or a generic list of feature names (e.g. `["x0", "x1", ...]`) for a + NumPy array. + """ + if feature_names is None: + if hasattr(X, "columns") and hasattr(X.columns, "tolist"): + # get the column names for a pandas dataframe + feature_names = X.columns.tolist() + else: + # define a list of numbered indices for a numpy array + feature_names = [f"x{i}" for i in range(X.shape[1])] + elif hasattr(feature_names, "tolist"): + # convert numpy array or pandas index to a list + feature_names = feature_names.tolist() + if len(set(feature_names)) != len(feature_names): + raise ValueError("feature_names should not contain duplicates.") + + return feature_names + + +def _get_feature_index(fx, feature_names=None): + """Get feature index. + + Parameters + ---------- + fx : int or str + Feature index or name. + + feature_names : list of str, default=None + All feature names from which to search the indices. + + Returns + ------- + idx : int + Feature index. + """ + if isinstance(fx, str): + if feature_names is None: + raise ValueError( + f"Cannot plot partial dependence for feature {fx!r} since " + "the list of feature names was not provided, neither as " + "column names of a pandas data-frame nor via the feature_names " + "parameter." 
+ ) + try: + return feature_names.index(fx) + except ValueError as e: + raise ValueError(f"Feature {fx!r} not in feature_names") from e + return fx diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/inspection/_permutation_importance.py b/llmeval-env/lib/python3.10/site-packages/sklearn/inspection/_permutation_importance.py new file mode 100644 index 0000000000000000000000000000000000000000..3d96acff9b91a52916b0a29ad45f8d86fad8a9e3 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/inspection/_permutation_importance.py @@ -0,0 +1,317 @@ +"""Permutation importance for estimators.""" + +import numbers + +import numpy as np + +from ..ensemble._bagging import _generate_indices +from ..metrics import check_scoring, get_scorer_names +from ..metrics._scorer import _check_multimetric_scoring, _MultimetricScorer +from ..model_selection._validation import _aggregate_score_dicts +from ..utils import Bunch, _safe_indexing, check_array, check_random_state +from ..utils._param_validation import ( + HasMethods, + Integral, + Interval, + RealNotInt, + StrOptions, + validate_params, +) +from ..utils.parallel import Parallel, delayed + + +def _weights_scorer(scorer, estimator, X, y, sample_weight): + if sample_weight is not None: + return scorer(estimator, X, y, sample_weight=sample_weight) + return scorer(estimator, X, y) + + +def _calculate_permutation_scores( + estimator, + X, + y, + sample_weight, + col_idx, + random_state, + n_repeats, + scorer, + max_samples, +): + """Calculate score when `col_idx` is permuted.""" + random_state = check_random_state(random_state) + + # Work on a copy of X to ensure thread-safety in case of threading based + # parallelism. Furthermore, making a copy is also useful when the joblib + # backend is 'loky' (default) or the old 'multiprocessing': in those cases, + # if X is large it will be automatically be backed by a readonly memory map + # (memmap). X.copy() on the other hand is always guaranteed to return a + # writable data-structure whose columns can be shuffled inplace. + if max_samples < X.shape[0]: + row_indices = _generate_indices( + random_state=random_state, + bootstrap=False, + n_population=X.shape[0], + n_samples=max_samples, + ) + X_permuted = _safe_indexing(X, row_indices, axis=0) + y = _safe_indexing(y, row_indices, axis=0) + if sample_weight is not None: + sample_weight = _safe_indexing(sample_weight, row_indices, axis=0) + else: + X_permuted = X.copy() + + scores = [] + shuffling_idx = np.arange(X_permuted.shape[0]) + for _ in range(n_repeats): + random_state.shuffle(shuffling_idx) + if hasattr(X_permuted, "iloc"): + col = X_permuted.iloc[shuffling_idx, col_idx] + col.index = X_permuted.index + X_permuted[X_permuted.columns[col_idx]] = col + else: + X_permuted[:, col_idx] = X_permuted[shuffling_idx, col_idx] + scores.append(_weights_scorer(scorer, estimator, X_permuted, y, sample_weight)) + + if isinstance(scores[0], dict): + scores = _aggregate_score_dicts(scores) + else: + scores = np.array(scores) + + return scores + + +def _create_importances_bunch(baseline_score, permuted_score): + """Compute the importances as the decrease in score. + + Parameters + ---------- + baseline_score : ndarray of shape (n_features,) + The baseline score without permutation. + permuted_score : ndarray of shape (n_features, n_repeats) + The permuted scores for the `n` repetitions. + + Returns + ------- + importances : :class:`~sklearn.utils.Bunch` + Dictionary-like object, with the following attributes. 
+    importances_mean : ndarray, shape (n_features, )
+        Mean of feature importance over `n_repeats`.
+    importances_std : ndarray, shape (n_features, )
+        Standard deviation over `n_repeats`.
+    importances : ndarray, shape (n_features, n_repeats)
+        Raw permutation importance scores.
+    """
+    importances = baseline_score - permuted_score
+    return Bunch(
+        importances_mean=np.mean(importances, axis=1),
+        importances_std=np.std(importances, axis=1),
+        importances=importances,
+    )
+
+
+@validate_params(
+    {
+        "estimator": [HasMethods(["fit"])],
+        "X": ["array-like"],
+        "y": ["array-like", None],
+        "scoring": [
+            StrOptions(set(get_scorer_names())),
+            callable,
+            list,
+            tuple,
+            dict,
+            None,
+        ],
+        "n_repeats": [Interval(Integral, 1, None, closed="left")],
+        "n_jobs": [Integral, None],
+        "random_state": ["random_state"],
+        "sample_weight": ["array-like", None],
+        "max_samples": [
+            Interval(Integral, 1, None, closed="left"),
+            Interval(RealNotInt, 0, 1, closed="right"),
+        ],
+    },
+    prefer_skip_nested_validation=True,
+)
+def permutation_importance(
+    estimator,
+    X,
+    y,
+    *,
+    scoring=None,
+    n_repeats=5,
+    n_jobs=None,
+    random_state=None,
+    sample_weight=None,
+    max_samples=1.0,
+):
+    """Permutation importance for feature evaluation [BRE]_.
+
+    The :term:`estimator` is required to be a fitted estimator. `X` can be the
+    data set used to train the estimator or a hold-out set. The permutation
+    importance of a feature is calculated as follows. First, a baseline metric,
+    defined by :term:`scoring`, is evaluated on a (potentially different)
+    dataset defined by `X`. Next, a feature column from the validation set
+    is permuted and the metric is evaluated again. The permutation importance
+    is defined to be the difference between the baseline metric and the metric
+    from permuting the feature column.
+
+    Read more in the :ref:`User Guide `.
+
+    Parameters
+    ----------
+    estimator : object
+        An estimator that has already been :term:`fitted` and is compatible
+        with :term:`scorer`.
+
+    X : ndarray or DataFrame, shape (n_samples, n_features)
+        Data on which permutation importance will be computed.
+
+    y : array-like or None, shape (n_samples, ) or (n_samples, n_classes)
+        Targets for supervised or `None` for unsupervised.
+
+    scoring : str, callable, list, tuple, or dict, default=None
+        Scorer to use.
+        If `scoring` represents a single score, one can use:
+
+        - a single string (see :ref:`scoring_parameter`);
+        - a callable (see :ref:`scoring`) that returns a single value.
+
+        If `scoring` represents multiple scores, one can use:
+
+        - a list or tuple of unique strings;
+        - a callable returning a dictionary where the keys are the metric
+          names and the values are the metric scores;
+        - a dictionary with metric names as keys and callables as values.
+
+        Passing multiple scores to `scoring` is more efficient than calling
+        `permutation_importance` for each of the scores as it reuses
+        predictions to avoid redundant computation.
+
+        If None, the estimator's default scorer is used.
+
+    n_repeats : int, default=5
+        Number of times to permute a feature.
+
+    n_jobs : int or None, default=None
+        Number of jobs to run in parallel. The permutation score is computed
+        for each column, and the computation is parallelized over the columns.
+        `None` means 1 unless in a :obj:`joblib.parallel_backend` context.
+        `-1` means using all processors. See :term:`Glossary `
+        for more details.
+ + random_state : int, RandomState instance, default=None + Pseudo-random number generator to control the permutations of each + feature. + Pass an int to get reproducible results across function calls. + See :term:`Glossary `. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights used in scoring. + + .. versionadded:: 0.24 + + max_samples : int or float, default=1.0 + The number of samples to draw from X to compute feature importance + in each repeat (without replacement). + + - If int, then draw `max_samples` samples. + - If float, then draw `max_samples * X.shape[0]` samples. + - If `max_samples` is equal to `1.0` or `X.shape[0]`, all samples + will be used. + + While using this option may provide less accurate importance estimates, + it keeps the method tractable when evaluating feature importance on + large datasets. In combination with `n_repeats`, this allows to control + the computational speed vs statistical accuracy trade-off of this method. + + .. versionadded:: 1.0 + + Returns + ------- + result : :class:`~sklearn.utils.Bunch` or dict of such instances + Dictionary-like object, with the following attributes. + + importances_mean : ndarray of shape (n_features, ) + Mean of feature importance over `n_repeats`. + importances_std : ndarray of shape (n_features, ) + Standard deviation over `n_repeats`. + importances : ndarray of shape (n_features, n_repeats) + Raw permutation importance scores. + + If there are multiple scoring metrics in the scoring parameter + `result` is a dict with scorer names as keys (e.g. 'roc_auc') and + `Bunch` objects like above as values. + + References + ---------- + .. [BRE] :doi:`L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, + 2001. <10.1023/A:1010933404324>` + + Examples + -------- + >>> from sklearn.linear_model import LogisticRegression + >>> from sklearn.inspection import permutation_importance + >>> X = [[1, 9, 9],[1, 9, 9],[1, 9, 9], + ... [0, 9, 9],[0, 9, 9],[0, 9, 9]] + >>> y = [1, 1, 1, 0, 0, 0] + >>> clf = LogisticRegression().fit(X, y) + >>> result = permutation_importance(clf, X, y, n_repeats=10, + ... random_state=0) + >>> result.importances_mean + array([0.4666..., 0. , 0. ]) + >>> result.importances_std + array([0.2211..., 0. , 0. ]) + """ + if not hasattr(X, "iloc"): + X = check_array(X, force_all_finite="allow-nan", dtype=None) + + # Precompute random seed from the random state to be used + # to get a fresh independent RandomState instance for each + # parallel call to _calculate_permutation_scores, irrespective of + # the fact that variables are shared or not depending on the active + # joblib backend (sequential, thread-based or process-based). 
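+    # Every parallel call below receives this same integer seed and rebuilds
+    # its own RandomState from it, so the permutations are reproducible no
+    # matter whether the backend shares memory between workers.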
+ random_state = check_random_state(random_state) + random_seed = random_state.randint(np.iinfo(np.int32).max + 1) + + if not isinstance(max_samples, numbers.Integral): + max_samples = int(max_samples * X.shape[0]) + elif max_samples > X.shape[0]: + raise ValueError("max_samples must be <= n_samples") + + if callable(scoring): + scorer = scoring + elif scoring is None or isinstance(scoring, str): + scorer = check_scoring(estimator, scoring=scoring) + else: + scorers_dict = _check_multimetric_scoring(estimator, scoring) + scorer = _MultimetricScorer(scorers=scorers_dict) + + baseline_score = _weights_scorer(scorer, estimator, X, y, sample_weight) + + scores = Parallel(n_jobs=n_jobs)( + delayed(_calculate_permutation_scores)( + estimator, + X, + y, + sample_weight, + col_idx, + random_seed, + n_repeats, + scorer, + max_samples, + ) + for col_idx in range(X.shape[1]) + ) + + if isinstance(baseline_score, dict): + return { + name: _create_importances_bunch( + baseline_score[name], + # unpack the permuted scores + np.array([scores[col_idx][name] for col_idx in range(X.shape[1])]), + ) + for name in baseline_score + } + else: + return _create_importances_bunch(baseline_score, np.array(scores)) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/inspection/_plot/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/inspection/_plot/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d3523f78b9ce6311db1e6e1b50014fff60d8abdd Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/inspection/_plot/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/inspection/_plot/__pycache__/decision_boundary.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/inspection/_plot/__pycache__/decision_boundary.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..133c9e2a61221e7cce9fd634e401caaa6acbe655 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/inspection/_plot/__pycache__/decision_boundary.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/inspection/_plot/__pycache__/partial_dependence.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/inspection/_plot/__pycache__/partial_dependence.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b6516ef5055c11ddc73dd0c76a4699bdec4c0566 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/inspection/_plot/__pycache__/partial_dependence.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/inspection/_plot/tests/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/inspection/_plot/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..096c84565b17876b9b9754992ecf79b66c06474e Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/inspection/_plot/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/inspection/tests/__init__.py b/llmeval-env/lib/python3.10/site-packages/sklearn/inspection/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git 
a/llmeval-env/lib/python3.10/site-packages/sklearn/inspection/tests/__pycache__/test_partial_dependence.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/inspection/tests/__pycache__/test_partial_dependence.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d36c277db39c028fbbe19e5f59041452b7ba4179 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/inspection/tests/__pycache__/test_partial_dependence.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/inspection/tests/__pycache__/test_permutation_importance.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/inspection/tests/__pycache__/test_permutation_importance.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7f2f720f4df0c9b72149bf27dad1856989e00356 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/inspection/tests/__pycache__/test_permutation_importance.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/inspection/tests/test_partial_dependence.py b/llmeval-env/lib/python3.10/site-packages/sklearn/inspection/tests/test_partial_dependence.py new file mode 100644 index 0000000000000000000000000000000000000000..0336dc4b827fea3510560dd406dab948da581817 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/inspection/tests/test_partial_dependence.py @@ -0,0 +1,958 @@ +""" +Testing for the partial dependence module. +""" +import warnings + +import numpy as np +import pytest + +import sklearn +from sklearn.base import BaseEstimator, ClassifierMixin, clone, is_regressor +from sklearn.cluster import KMeans +from sklearn.compose import make_column_transformer +from sklearn.datasets import load_iris, make_classification, make_regression +from sklearn.dummy import DummyClassifier +from sklearn.ensemble import ( + GradientBoostingClassifier, + GradientBoostingRegressor, + HistGradientBoostingClassifier, + HistGradientBoostingRegressor, + RandomForestRegressor, +) +from sklearn.exceptions import NotFittedError +from sklearn.inspection import partial_dependence +from sklearn.inspection._partial_dependence import ( + _grid_from_X, + _partial_dependence_brute, + _partial_dependence_recursion, +) +from sklearn.linear_model import LinearRegression, LogisticRegression, MultiTaskLasso +from sklearn.metrics import r2_score +from sklearn.pipeline import make_pipeline +from sklearn.preprocessing import ( + PolynomialFeatures, + RobustScaler, + StandardScaler, + scale, +) +from sklearn.tree import DecisionTreeRegressor +from sklearn.tree.tests.test_tree import assert_is_subtree +from sklearn.utils import _IS_32BIT +from sklearn.utils._testing import assert_allclose, assert_array_equal +from sklearn.utils.validation import check_random_state + +# toy sample +X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]] +y = [-1, -1, -1, 1, 1, 1] + + +# (X, y), n_targets <-- as expected in the output of partial_dep() +binary_classification_data = (make_classification(n_samples=50, random_state=0), 1) +multiclass_classification_data = ( + make_classification( + n_samples=50, n_classes=3, n_clusters_per_class=1, random_state=0 + ), + 3, +) +regression_data = (make_regression(n_samples=50, random_state=0), 1) +multioutput_regression_data = ( + make_regression(n_samples=50, n_targets=2, random_state=0), + 2, +) + +# iris +iris = load_iris() + + +@pytest.mark.parametrize( + "Estimator, method, data", + [ + (GradientBoostingClassifier, 
"auto", binary_classification_data), + (GradientBoostingClassifier, "auto", multiclass_classification_data), + (GradientBoostingClassifier, "brute", binary_classification_data), + (GradientBoostingClassifier, "brute", multiclass_classification_data), + (GradientBoostingRegressor, "auto", regression_data), + (GradientBoostingRegressor, "brute", regression_data), + (DecisionTreeRegressor, "brute", regression_data), + (LinearRegression, "brute", regression_data), + (LinearRegression, "brute", multioutput_regression_data), + (LogisticRegression, "brute", binary_classification_data), + (LogisticRegression, "brute", multiclass_classification_data), + (MultiTaskLasso, "brute", multioutput_regression_data), + ], +) +@pytest.mark.parametrize("grid_resolution", (5, 10)) +@pytest.mark.parametrize("features", ([1], [1, 2])) +@pytest.mark.parametrize("kind", ("average", "individual", "both")) +def test_output_shape(Estimator, method, data, grid_resolution, features, kind): + # Check that partial_dependence has consistent output shape for different + # kinds of estimators: + # - classifiers with binary and multiclass settings + # - regressors + # - multi-task regressors + + est = Estimator() + if hasattr(est, "n_estimators"): + est.set_params(n_estimators=2) # speed-up computations + + # n_target corresponds to the number of classes (1 for binary classif) or + # the number of tasks / outputs in multi task settings. It's equal to 1 for + # classical regression_data. + (X, y), n_targets = data + n_instances = X.shape[0] + + est.fit(X, y) + result = partial_dependence( + est, + X=X, + features=features, + method=method, + kind=kind, + grid_resolution=grid_resolution, + ) + pdp, axes = result, result["grid_values"] + + expected_pdp_shape = (n_targets, *[grid_resolution for _ in range(len(features))]) + expected_ice_shape = ( + n_targets, + n_instances, + *[grid_resolution for _ in range(len(features))], + ) + if kind == "average": + assert pdp.average.shape == expected_pdp_shape + elif kind == "individual": + assert pdp.individual.shape == expected_ice_shape + else: # 'both' + assert pdp.average.shape == expected_pdp_shape + assert pdp.individual.shape == expected_ice_shape + + expected_axes_shape = (len(features), grid_resolution) + assert axes is not None + assert np.asarray(axes).shape == expected_axes_shape + + +def test_grid_from_X(): + # tests for _grid_from_X: sanity check for output, and for shapes. + + # Make sure that the grid is a cartesian product of the input (it will use + # the unique values instead of the percentiles) + percentiles = (0.05, 0.95) + grid_resolution = 100 + is_categorical = [False, False] + X = np.asarray([[1, 2], [3, 4]]) + grid, axes = _grid_from_X(X, percentiles, is_categorical, grid_resolution) + assert_array_equal(grid, [[1, 2], [1, 4], [3, 2], [3, 4]]) + assert_array_equal(axes, X.T) + + # test shapes of returned objects depending on the number of unique values + # for a feature. 
+    rng = np.random.RandomState(0)
+    grid_resolution = 15
+
+    # n_unique_values > grid_resolution
+    X = rng.normal(size=(20, 2))
+    grid, axes = _grid_from_X(
+        X, percentiles, is_categorical, grid_resolution=grid_resolution
+    )
+    assert grid.shape == (grid_resolution * grid_resolution, X.shape[1])
+    assert np.asarray(axes).shape == (2, grid_resolution)
+
+    # n_unique_values < grid_resolution, will use actual values
+    n_unique_values = 12
+    X[n_unique_values - 1 :, 0] = 12345
+    rng.shuffle(X)  # just to make sure the order is irrelevant
+    grid, axes = _grid_from_X(
+        X, percentiles, is_categorical, grid_resolution=grid_resolution
+    )
+    assert grid.shape == (n_unique_values * grid_resolution, X.shape[1])
+    # axes is a list of arrays of different shapes
+    assert axes[0].shape == (n_unique_values,)
+    assert axes[1].shape == (grid_resolution,)
+
+
+@pytest.mark.parametrize(
+    "grid_resolution",
+    [
+        2,  # since n_categories > 2, we should not use quantiles resampling
+        100,
+    ],
+)
+def test_grid_from_X_with_categorical(grid_resolution):
+    """Check that `_grid_from_X` always samples from the categories and does
+    not depend on the percentiles.
+    """
+    pd = pytest.importorskip("pandas")
+    percentiles = (0.05, 0.95)
+    is_categorical = [True]
+    X = pd.DataFrame({"cat_feature": ["A", "B", "C", "A", "B", "D", "E"]})
+    grid, axes = _grid_from_X(
+        X, percentiles, is_categorical, grid_resolution=grid_resolution
+    )
+    assert grid.shape == (5, X.shape[1])
+    assert axes[0].shape == (5,)
+
+
+@pytest.mark.parametrize("grid_resolution", [3, 100])
+def test_grid_from_X_heterogeneous_type(grid_resolution):
+    """Check that `_grid_from_X` always samples from the categories for a
+    categorical column and does not depend on the percentiles, even with
+    heterogeneous dtypes.
+    """
+    pd = pytest.importorskip("pandas")
+    percentiles = (0.05, 0.95)
+    is_categorical = [True, False]
+    X = pd.DataFrame(
+        {
+            "cat": ["A", "B", "C", "A", "B", "D", "E", "A", "B", "D"],
+            "num": [1, 1, 1, 2, 5, 6, 6, 6, 6, 8],
+        }
+    )
+    nunique = X.nunique()
+
+    grid, axes = _grid_from_X(
+        X, percentiles, is_categorical, grid_resolution=grid_resolution
+    )
+    if grid_resolution == 3:
+        assert grid.shape == (15, 2)
+        assert axes[0].shape[0] == nunique["cat"]
+        assert axes[1].shape[0] == grid_resolution
+    else:
+        assert grid.shape == (25, 2)
+        assert axes[0].shape[0] == nunique["cat"]
+        assert axes[1].shape[0] == nunique["num"]
+
+
+@pytest.mark.parametrize(
+    "grid_resolution, percentiles, err_msg",
+    [
+        (2, (0, 0.0001), "percentiles are too close"),
+        (100, (1, 2, 3, 4), "'percentiles' must be a sequence of 2 elements"),
+        (100, 12345, "'percentiles' must be a sequence of 2 elements"),
+        (100, (-1, 0.95), r"'percentiles' values must be in \[0, 1\]"),
+        (100, (0.05, 2), r"'percentiles' values must be in \[0, 1\]"),
+        (100, (0.9, 0.1), r"percentiles\[0\] must be strictly less than"),
+        (1, (0.05, 0.95), "'grid_resolution' must be strictly greater than 1"),
+    ],
+)
+def test_grid_from_X_error(grid_resolution, percentiles, err_msg):
+    X = np.asarray([[1, 2], [3, 4]])
+    is_categorical = [False]
+    with pytest.raises(ValueError, match=err_msg):
+        _grid_from_X(X, percentiles, is_categorical, grid_resolution)
+
+
+@pytest.mark.parametrize("target_feature", range(5))
+@pytest.mark.parametrize(
+    "est, method",
+    [
+        (LinearRegression(), "brute"),
+        (GradientBoostingRegressor(random_state=0), "brute"),
+        (GradientBoostingRegressor(random_state=0), "recursion"),
+        (HistGradientBoostingRegressor(random_state=0), "brute"),
+        (HistGradientBoostingRegressor(random_state=0), "recursion"),
+    ],
+)
test_partial_dependence_helpers(est, method, target_feature): + # Check that what is returned by _partial_dependence_brute or + # _partial_dependence_recursion is equivalent to manually setting a target + # feature to a given value, and computing the average prediction over all + # samples. + # This also checks that the brute and recursion methods give the same + # output. + # Note that even on the trainset, the brute and the recursion methods + # aren't always strictly equivalent, in particular when the slow method + # generates unrealistic samples that have low mass in the joint + # distribution of the input features, and when some of the features are + # dependent. Hence the high tolerance on the checks. + + X, y = make_regression(random_state=0, n_features=5, n_informative=5) + # The 'init' estimator for GBDT (here the average prediction) isn't taken + # into account with the recursion method, for technical reasons. We set + # the mean to 0 so that this 'bug' doesn't have any effect. + y = y - y.mean() + est.fit(X, y) + + # target feature will be set to .5 and then to 123 + features = np.array([target_feature], dtype=np.int32) + grid = np.array([[0.5], [123]]) + + if method == "brute": + pdp, predictions = _partial_dependence_brute( + est, grid, features, X, response_method="auto" + ) + else: + pdp = _partial_dependence_recursion(est, grid, features) + + mean_predictions = [] + for val in (0.5, 123): + X_ = X.copy() + X_[:, target_feature] = val + mean_predictions.append(est.predict(X_).mean()) + + pdp = pdp[0]  # (shape is (1, 2) so make it (2,)) + + # allow for a greater margin of error with the recursion method + rtol = 1e-1 if method == "recursion" else 1e-3 + assert np.allclose(pdp, mean_predictions, rtol=rtol) + + +@pytest.mark.parametrize("seed", range(1)) +def test_recursion_decision_tree_vs_forest_and_gbdt(seed): + # Make sure that the recursion method gives the same results on a + # DecisionTreeRegressor and a GradientBoostingRegressor or a + # RandomForestRegressor with 1 tree and equivalent parameters. + + rng = np.random.RandomState(seed) + + # Purely random dataset to avoid correlated features + n_samples = 1000 + n_features = 5 + X = rng.randn(n_samples, n_features) + y = rng.randn(n_samples) * 10 + + # The 'init' estimator for GBDT (here the average prediction) isn't taken + # into account with the recursion method, for technical reasons. We set + # the mean to 0 so that this 'bug' doesn't have any effect. + y = y - y.mean() + + # set max_depth not too high to avoid splits with same gain but different + # features + max_depth = 5 + + tree_seed = 0 + forest = RandomForestRegressor( + n_estimators=1, + max_features=None, + bootstrap=False, + max_depth=max_depth, + random_state=tree_seed, + ) + # The forest will use ensemble.base._set_random_states to set the + # random_state of the tree sub-estimator. We simulate this here to have + # equivalent estimators.
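+ # (Sketch of the simulated seeding, assuming _set_random_states draws one + # integer from the parent RNG exactly as the line below does: + #     derived = check_random_state(tree_seed).randint(np.iinfo(np.int32).max) + # so a DecisionTreeRegressor(random_state=derived) matches the forest's tree.)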
+ equiv_random_state = check_random_state(tree_seed).randint(np.iinfo(np.int32).max) + gbdt = GradientBoostingRegressor( + n_estimators=1, + learning_rate=1, + criterion="squared_error", + max_depth=max_depth, + random_state=equiv_random_state, + ) + tree = DecisionTreeRegressor(max_depth=max_depth, random_state=equiv_random_state) + + forest.fit(X, y) + gbdt.fit(X, y) + tree.fit(X, y) + + # sanity check: if the trees aren't the same, the PD values won't be equal + try: + assert_is_subtree(tree.tree_, gbdt[0, 0].tree_) + assert_is_subtree(tree.tree_, forest[0].tree_) + except AssertionError: + # For some reason the trees aren't exactly equal on 32bits, so the PDs + # cannot be equal either. See + # https://github.com/scikit-learn/scikit-learn/issues/8853 + assert _IS_32BIT, "this should only fail on 32 bit platforms" + return + + grid = rng.randn(50).reshape(-1, 1) + for f in range(n_features): + features = np.array([f], dtype=np.int32) + + pdp_forest = _partial_dependence_recursion(forest, grid, features) + pdp_gbdt = _partial_dependence_recursion(gbdt, grid, features) + pdp_tree = _partial_dependence_recursion(tree, grid, features) + + np.testing.assert_allclose(pdp_gbdt, pdp_tree) + np.testing.assert_allclose(pdp_forest, pdp_tree) + + +@pytest.mark.parametrize( + "est", + ( + GradientBoostingClassifier(random_state=0), + HistGradientBoostingClassifier(random_state=0), + ), +) +@pytest.mark.parametrize("target_feature", (0, 1, 2, 3, 4, 5)) +def test_recursion_decision_function(est, target_feature): + # Make sure the recursion method (implicitly uses decision_function) has + # the same result as using brute method with + # response_method=decision_function + + X, y = make_classification(n_classes=2, n_clusters_per_class=1, random_state=1) + assert np.mean(y) == 0.5  # make sure the init estimator predicts 0 anyway + + est.fit(X, y) + + preds_1 = partial_dependence( + est, + X, + [target_feature], + response_method="decision_function", + method="recursion", + kind="average", + ) + preds_2 = partial_dependence( + est, + X, + [target_feature], + response_method="decision_function", + method="brute", + kind="average", + ) + + assert_allclose(preds_1["average"], preds_2["average"], atol=1e-7) + + +@pytest.mark.parametrize( + "est", + ( + LinearRegression(), + GradientBoostingRegressor(random_state=0), + HistGradientBoostingRegressor( + random_state=0, min_samples_leaf=1, max_leaf_nodes=None, max_iter=1 + ), + DecisionTreeRegressor(random_state=0), + ), +) +@pytest.mark.parametrize("power", (1, 2)) +def test_partial_dependence_easy_target(est, power): + # If the target y only depends on one feature in an obvious way (linear or + # quadratic) then the partial dependence for that feature should reflect + # it. + # We here fit a linear regression model (with polynomial features if + # needed) and compute r_squared to check that the partial dependence + # correctly reflects the target.
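+ # (Reasoning sketch: with y = X[:, target] ** power, the average partial + # dependence of that feature should trace g(v) ~= v ** power on the grid, so + # regressing the PD curve on PolynomialFeatures(degree=power) of the grid + # values, as done below, must recover it almost exactly, hence r2 > 0.99.)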
+ + rng = np.random.RandomState(0) + n_samples = 200 + target_variable = 2 + X = rng.normal(size=(n_samples, 5)) + y = X[:, target_variable] ** power + + est.fit(X, y) + + pdp = partial_dependence( + est, features=[target_variable], X=X, grid_resolution=1000, kind="average" + ) + + new_X = pdp["grid_values"][0].reshape(-1, 1) + new_y = pdp["average"][0] + # add polynomial features if needed + new_X = PolynomialFeatures(degree=power).fit_transform(new_X) + + lr = LinearRegression().fit(new_X, new_y) + r2 = r2_score(new_y, lr.predict(new_X)) + + assert r2 > 0.99 + + +@pytest.mark.parametrize( + "Estimator", + ( + sklearn.tree.DecisionTreeClassifier, + sklearn.tree.ExtraTreeClassifier, + sklearn.ensemble.ExtraTreesClassifier, + sklearn.neighbors.KNeighborsClassifier, + sklearn.neighbors.RadiusNeighborsClassifier, + sklearn.ensemble.RandomForestClassifier, + ), +) +def test_multiclass_multioutput(Estimator): + # Make sure error is raised for multiclass-multioutput classifiers + + # make multiclass-multioutput dataset + X, y = make_classification(n_classes=3, n_clusters_per_class=1, random_state=0) + y = np.array([y, y]).T + + est = Estimator() + est.fit(X, y) + + with pytest.raises( + ValueError, match="Multiclass-multioutput estimators are not supported" + ): + partial_dependence(est, X, [0]) + + +class NoPredictProbaNoDecisionFunction(ClassifierMixin, BaseEstimator): + def fit(self, X, y): + # simulate that we have some classes + self.classes_ = [0, 1] + return self + + +@pytest.mark.filterwarnings("ignore:A Bunch will be returned") +@pytest.mark.parametrize( + "estimator, params, err_msg", + [ + ( + KMeans(random_state=0, n_init="auto"), + {"features": [0]}, + "'estimator' must be a fitted regressor or classifier", + ), + ( + LinearRegression(), + {"features": [0], "response_method": "predict_proba"}, + "The response_method parameter is ignored for regressors", + ), + ( + GradientBoostingClassifier(random_state=0), + { + "features": [0], + "response_method": "predict_proba", + "method": "recursion", + }, + "'recursion' method, the response_method must be 'decision_function'", + ), + ( + GradientBoostingClassifier(random_state=0), + {"features": [0], "response_method": "predict_proba", "method": "auto"}, + "'recursion' method, the response_method must be 'decision_function'", + ), + ( + LinearRegression(), + {"features": [0], "method": "recursion", "kind": "individual"}, + "The 'recursion' method only applies when 'kind' is set to 'average'", + ), + ( + LinearRegression(), + {"features": [0], "method": "recursion", "kind": "both"}, + "The 'recursion' method only applies when 'kind' is set to 'average'", + ), + ( + LinearRegression(), + {"features": [0], "method": "recursion"}, + "Only the following estimators support the 'recursion' method:", + ), + ], +) +def test_partial_dependence_error(estimator, params, err_msg): + X, y = make_classification(random_state=0) + estimator.fit(X, y) + + with pytest.raises(ValueError, match=err_msg): + partial_dependence(estimator, X, **params) + + +@pytest.mark.parametrize( + "estimator", [LinearRegression(), GradientBoostingClassifier(random_state=0)] +) +@pytest.mark.parametrize("features", [-1, 10000]) +def test_partial_dependence_unknown_feature_indices(estimator, features): + X, y = make_classification(random_state=0) + estimator.fit(X, y) + + err_msg = "all features must be in" + with pytest.raises(ValueError, match=err_msg): + partial_dependence(estimator, X, [features]) + + +@pytest.mark.parametrize( + "estimator", [LinearRegression(), 
GradientBoostingClassifier(random_state=0)] +) +def test_partial_dependence_unknown_feature_string(estimator): + pd = pytest.importorskip("pandas") + X, y = make_classification(random_state=0) + df = pd.DataFrame(X) + estimator.fit(df, y) + + features = ["random"] + err_msg = "A given column is not a column of the dataframe" + with pytest.raises(ValueError, match=err_msg): + partial_dependence(estimator, df, features) + + +@pytest.mark.parametrize( + "estimator", [LinearRegression(), GradientBoostingClassifier(random_state=0)] +) +def test_partial_dependence_X_list(estimator): + # check that array-like objects are accepted + X, y = make_classification(random_state=0) + estimator.fit(X, y) + partial_dependence(estimator, list(X), [0], kind="average") + + +def test_warning_recursion_non_constant_init(): + # make sure that passing a non-constant init parameter to a GBDT and using + # recursion method yields a warning. + + gbc = GradientBoostingClassifier(init=DummyClassifier(), random_state=0) + gbc.fit(X, y) + + with pytest.warns( + UserWarning, match="Using recursion method with a non-constant init predictor" + ): + partial_dependence(gbc, X, [0], method="recursion", kind="average") + + with pytest.warns( + UserWarning, match="Using recursion method with a non-constant init predictor" + ): + partial_dependence(gbc, X, [0], method="recursion", kind="average") + + +def test_partial_dependence_sample_weight_of_fitted_estimator(): + # Test near perfect correlation between partial dependence and diagonal + # when sample weights emphasize y = x predictions + # non-regression test for #13193 + # TODO: extend to HistGradientBoosting once sample_weight is supported + N = 1000 + rng = np.random.RandomState(123456) + mask = rng.randint(2, size=N, dtype=bool) + + x = rng.rand(N) + # set y = x on mask and y = -x outside + y = x.copy() + y[~mask] = -y[~mask] + X = np.c_[mask, x] + # sample weights to emphasize data points where y = x + sample_weight = np.ones(N) + sample_weight[mask] = 1000.0 + + clf = GradientBoostingRegressor(n_estimators=10, random_state=1) + clf.fit(X, y, sample_weight=sample_weight) + + pdp = partial_dependence(clf, X, features=[1], kind="average") + + assert np.corrcoef(pdp["average"], pdp["grid_values"])[0, 1] > 0.99 + + +def test_hist_gbdt_sw_not_supported(): + # TODO: remove/fix when PDP supports HGBT with sample weights + clf = HistGradientBoostingRegressor(random_state=1) + clf.fit(X, y, sample_weight=np.ones(len(X))) + + with pytest.raises( + NotImplementedError, match="does not support partial dependence" + ): + partial_dependence(clf, X, features=[1]) + + +def test_partial_dependence_pipeline(): + # check that partial_dependence supports pipelines + iris = load_iris() + + scaler = StandardScaler() + clf = DummyClassifier(random_state=42) + pipe = make_pipeline(scaler, clf) + + clf.fit(scaler.fit_transform(iris.data), iris.target) + pipe.fit(iris.data, iris.target) + + features = 0 + pdp_pipe = partial_dependence( + pipe, iris.data, features=[features], grid_resolution=10, kind="average" + ) + pdp_clf = partial_dependence( + clf, + scaler.transform(iris.data), + features=[features], + grid_resolution=10, + kind="average", + ) + assert_allclose(pdp_pipe["average"], pdp_clf["average"]) + assert_allclose( + pdp_pipe["grid_values"][0], + pdp_clf["grid_values"][0] * scaler.scale_[features] + scaler.mean_[features], + ) + + +@pytest.mark.parametrize( + "estimator", + [ + LogisticRegression(max_iter=1000, random_state=0), + GradientBoostingClassifier(random_state=0,
n_estimators=5), + ], + ids=["estimator-brute", "estimator-recursion"], +) +@pytest.mark.parametrize( + "preprocessor", + [ + None, + make_column_transformer( + (StandardScaler(), [iris.feature_names[i] for i in (0, 2)]), + (RobustScaler(), [iris.feature_names[i] for i in (1, 3)]), + ), + make_column_transformer( + (StandardScaler(), [iris.feature_names[i] for i in (0, 2)]), + remainder="passthrough", + ), + ], + ids=["None", "column-transformer", "column-transformer-passthrough"], +) +@pytest.mark.parametrize( + "features", + [[0, 2], [iris.feature_names[i] for i in (0, 2)]], + ids=["features-integer", "features-string"], +) +def test_partial_dependence_dataframe(estimator, preprocessor, features): + # check that partial_dependence supports dataframes and pipelines + # including a column transformer + pd = pytest.importorskip("pandas") + df = pd.DataFrame(scale(iris.data), columns=iris.feature_names) + + pipe = make_pipeline(preprocessor, estimator) + pipe.fit(df, iris.target) + pdp_pipe = partial_dependence( + pipe, df, features=features, grid_resolution=10, kind="average" + ) + + # the column transformer will reorder the columns when transforming; + # we mixed the index to be sure that we are computing the partial + # dependence of the right columns + if preprocessor is not None: + X_proc = clone(preprocessor).fit_transform(df) + features_clf = [0, 1] + else: + X_proc = df + features_clf = [0, 2] + + clf = clone(estimator).fit(X_proc, iris.target) + pdp_clf = partial_dependence( + clf, + X_proc, + features=features_clf, + method="brute", + grid_resolution=10, + kind="average", + ) + + assert_allclose(pdp_pipe["average"], pdp_clf["average"]) + if preprocessor is not None: + scaler = preprocessor.named_transformers_["standardscaler"] + assert_allclose( + pdp_pipe["grid_values"][1], + pdp_clf["grid_values"][1] * scaler.scale_[1] + scaler.mean_[1], + ) + else: + assert_allclose(pdp_pipe["grid_values"][1], pdp_clf["grid_values"][1]) + + +@pytest.mark.parametrize( + "features, expected_pd_shape", + [ + (0, (3, 10)), + (iris.feature_names[0], (3, 10)), + ([0, 2], (3, 10, 10)), + ([iris.feature_names[i] for i in (0, 2)], (3, 10, 10)), + ([True, False, True, False], (3, 10, 10)), + ], + ids=["scalar-int", "scalar-str", "list-int", "list-str", "mask"], +) +def test_partial_dependence_feature_type(features, expected_pd_shape): + # check all possible feature types supported in PDP + pd = pytest.importorskip("pandas") + df = pd.DataFrame(iris.data, columns=iris.feature_names) + + preprocessor = make_column_transformer( + (StandardScaler(), [iris.feature_names[i] for i in (0, 2)]), + (RobustScaler(), [iris.feature_names[i] for i in (1, 3)]), + ) + pipe = make_pipeline( + preprocessor, LogisticRegression(max_iter=1000, random_state=0) + ) + pipe.fit(df, iris.target) + pdp_pipe = partial_dependence( + pipe, df, features=features, grid_resolution=10, kind="average" + ) + assert pdp_pipe["average"].shape == expected_pd_shape + assert len(pdp_pipe["grid_values"]) == len(pdp_pipe["average"].shape) - 1 + + +@pytest.mark.parametrize( + "estimator", + [ + LinearRegression(), + LogisticRegression(), + GradientBoostingRegressor(), + GradientBoostingClassifier(), + ], +) +def test_partial_dependence_unfitted(estimator): + X = iris.data + preprocessor = make_column_transformer( + (StandardScaler(), [0, 2]), (RobustScaler(), [1, 3]) + ) + pipe = make_pipeline(preprocessor, estimator) + with pytest.raises(NotFittedError, match="is not fitted yet"): + partial_dependence(pipe, X, features=[0, 2], grid_resolution=10) +
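+ # (Same contract for the bare estimator below; as a minimal sketch under the + # same setup, calling pipe.fit(X, iris.target) beforehand would instead make + # the partial_dependence call above succeed.)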
with pytest.raises(NotFittedError, match="is not fitted yet"): + partial_dependence(estimator, X, features=[0, 2], grid_resolution=10) + + +@pytest.mark.parametrize( + "Estimator, data", + [ + (LinearRegression, multioutput_regression_data), + (LogisticRegression, binary_classification_data), + ], +) +def test_kind_average_and_average_of_individual(Estimator, data): + est = Estimator() + (X, y), n_targets = data + est.fit(X, y) + + pdp_avg = partial_dependence(est, X=X, features=[1, 2], kind="average") + pdp_ind = partial_dependence(est, X=X, features=[1, 2], kind="individual") + avg_ind = np.mean(pdp_ind["individual"], axis=1) + assert_allclose(avg_ind, pdp_avg["average"]) + + +@pytest.mark.parametrize( + "Estimator, data", + [ + (LinearRegression, multioutput_regression_data), + (LogisticRegression, binary_classification_data), + ], +) +def test_partial_dependence_kind_individual_ignores_sample_weight(Estimator, data): + """Check that `sample_weight` does not have any effect on reported ICE.""" + est = Estimator() + (X, y), n_targets = data + sample_weight = np.arange(X.shape[0]) + est.fit(X, y) + + pdp_nsw = partial_dependence(est, X=X, features=[1, 2], kind="individual") + pdp_sw = partial_dependence( + est, X=X, features=[1, 2], kind="individual", sample_weight=sample_weight + ) + assert_allclose(pdp_nsw["individual"], pdp_sw["individual"]) + assert_allclose(pdp_nsw["grid_values"], pdp_sw["grid_values"]) + + +@pytest.mark.parametrize( + "estimator", + [ + LinearRegression(), + LogisticRegression(), + RandomForestRegressor(), + GradientBoostingClassifier(), + ], +) +@pytest.mark.parametrize("non_null_weight_idx", [0, 1, -1]) +def test_partial_dependence_non_null_weight_idx(estimator, non_null_weight_idx): + """Check that if we pass a `sample_weight` of zeros with only one index + whose sample weight equals one, then the average `partial_dependence` with + this `sample_weight` is equal to the individual `partial_dependence` of the + corresponding index.
+ """ + X, y = iris.data, iris.target + preprocessor = make_column_transformer( + (StandardScaler(), [0, 2]), (RobustScaler(), [1, 3]) + ) + pipe = make_pipeline(preprocessor, estimator).fit(X, y) + + sample_weight = np.zeros_like(y) + sample_weight[non_null_weight_idx] = 1 + pdp_sw = partial_dependence( + pipe, + X, + [2, 3], + kind="average", + sample_weight=sample_weight, + grid_resolution=10, + ) + pdp_ind = partial_dependence(pipe, X, [2, 3], kind="individual", grid_resolution=10) + output_dim = 1 if is_regressor(pipe) else len(np.unique(y)) + for i in range(output_dim): + assert_allclose( + pdp_ind["individual"][i][non_null_weight_idx], + pdp_sw["average"][i], + ) + + +@pytest.mark.parametrize( + "Estimator, data", + [ + (LinearRegression, multioutput_regression_data), + (LogisticRegression, binary_classification_data), + ], +) +def test_partial_dependence_equivalence_equal_sample_weight(Estimator, data): + """Check that `sample_weight=None` is equivalent to having equal weights.""" + + est = Estimator() + (X, y), n_targets = data + est.fit(X, y) + + sample_weight, params = None, {"X": X, "features": [1, 2], "kind": "average"} + pdp_sw_none = partial_dependence(est, **params, sample_weight=sample_weight) + sample_weight = np.ones(len(y)) + pdp_sw_unit = partial_dependence(est, **params, sample_weight=sample_weight) + assert_allclose(pdp_sw_none["average"], pdp_sw_unit["average"]) + sample_weight = 2 * np.ones(len(y)) + pdp_sw_doubling = partial_dependence(est, **params, sample_weight=sample_weight) + assert_allclose(pdp_sw_none["average"], pdp_sw_doubling["average"]) + + +def test_partial_dependence_sample_weight_size_error(): + """Check that we raise an error when the size of `sample_weight` is not + consistent with `X` and `y`. + """ + est = LogisticRegression() + (X, y), n_targets = binary_classification_data + sample_weight = np.ones_like(y) + est.fit(X, y) + + with pytest.raises(ValueError, match="sample_weight.shape =="): + partial_dependence( + est, X, features=[0], sample_weight=sample_weight[1:], grid_resolution=10 + ) + + +def test_partial_dependence_sample_weight_with_recursion(): + """Check that we raise an error when `sample_weight` is provided with + `"recursion"` method. + """ + est = RandomForestRegressor() + (X, y), n_targets = regression_data + sample_weight = np.ones_like(y) + est.fit(X, y, sample_weight=sample_weight) + + with pytest.raises(ValueError, match="'recursion' method can only be applied when"): + partial_dependence( + est, X, features=[0], method="recursion", sample_weight=sample_weight + ) + + +# TODO(1.5): Remove when bunch values is deprecated in 1.5 +def test_partial_dependence_bunch_values_deprecated(): + """Test that deprecation warning is raised when values is accessed.""" + + est = LogisticRegression() + (X, y), _ = binary_classification_data + est.fit(X, y) + + pdp_avg = partial_dependence(est, X=X, features=[1, 2], kind="average") + + msg = ( + "Key: 'values', is deprecated in 1.3 and will be " + "removed in 1.5. 
Please use 'grid_values' instead" + ) + + with warnings.catch_warnings(): + # Does not raise warnings with "grid_values" + warnings.simplefilter("error", FutureWarning) + grid_values = pdp_avg["grid_values"] + + with pytest.warns(FutureWarning, match=msg): + # Warns for "values" + values = pdp_avg["values"] + + # "values" and "grid_values" are the same object + assert values is grid_values + + +def test_mixed_type_categorical(): + """Check that we raise a proper error when a column has mixed types and + the sorting of `np.unique` will fail.""" + X = np.array(["A", "B", "C", np.nan], dtype=object).reshape(-1, 1) + y = np.array([0, 1, 0, 1]) + + from sklearn.preprocessing import OrdinalEncoder + + clf = make_pipeline( + OrdinalEncoder(encoded_missing_value=-1), + LogisticRegression(), + ).fit(X, y) + with pytest.raises(ValueError, match="The column #0 contains mixed data types"): + partial_dependence(clf, X, features=[0]) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/inspection/tests/test_pd_utils.py b/llmeval-env/lib/python3.10/site-packages/sklearn/inspection/tests/test_pd_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..5dea3834a77a70891a4efab25a560d09a49a13e1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/inspection/tests/test_pd_utils.py @@ -0,0 +1,47 @@ +import numpy as np +import pytest + +from sklearn.inspection._pd_utils import _check_feature_names, _get_feature_index +from sklearn.utils._testing import _convert_container + + +@pytest.mark.parametrize( + "feature_names, array_type, expected_feature_names", + [ + (None, "array", ["x0", "x1", "x2"]), + (None, "dataframe", ["a", "b", "c"]), + (np.array(["a", "b", "c"]), "array", ["a", "b", "c"]), + ], +) +def test_check_feature_names(feature_names, array_type, expected_feature_names): + X = np.random.randn(10, 3) + column_names = ["a", "b", "c"] + X = _convert_container(X, constructor_name=array_type, columns_name=column_names) + feature_names_validated = _check_feature_names(X, feature_names) + assert feature_names_validated == expected_feature_names + + +def test_check_feature_names_error(): + X = np.random.randn(10, 3) + feature_names = ["a", "b", "c", "a"] + msg = "feature_names should not contain duplicates." 
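+ # (Contract sketch, grounded in the parametrized cases above: + # _check_feature_names(X, None) falls back to generated names such as + # ["x0", "x1", "x2"] for a plain array and to the column names for a + # dataframe, while duplicated names raise the ValueError matched below.)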
+ with pytest.raises(ValueError, match=msg): + _check_feature_names(X, feature_names) + + +@pytest.mark.parametrize("fx, idx", [(0, 0), (1, 1), ("a", 0), ("b", 1), ("c", 2)]) +def test_get_feature_index(fx, idx): + feature_names = ["a", "b", "c"] + assert _get_feature_index(fx, feature_names) == idx + + +@pytest.mark.parametrize( + "fx, feature_names, err_msg", + [ + ("a", None, "Cannot plot partial dependence for feature 'a'"), + ("d", ["a", "b", "c"], "Feature 'd' not in feature_names"), + ], +) +def test_get_feature_names_error(fx, feature_names, err_msg): + with pytest.raises(ValueError, match=err_msg): + _get_feature_index(fx, feature_names) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/inspection/tests/test_permutation_importance.py b/llmeval-env/lib/python3.10/site-packages/sklearn/inspection/tests/test_permutation_importance.py new file mode 100644 index 0000000000000000000000000000000000000000..2869e84c78bf872647eb786c05a93ce190bc5689 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/inspection/tests/test_permutation_importance.py @@ -0,0 +1,542 @@ +import numpy as np +import pytest +from numpy.testing import assert_allclose + +from sklearn.compose import ColumnTransformer +from sklearn.datasets import ( + load_diabetes, + load_iris, + make_classification, + make_regression, +) +from sklearn.dummy import DummyClassifier +from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor +from sklearn.impute import SimpleImputer +from sklearn.inspection import permutation_importance +from sklearn.linear_model import LinearRegression, LogisticRegression +from sklearn.metrics import ( + get_scorer, + mean_squared_error, + r2_score, +) +from sklearn.model_selection import train_test_split +from sklearn.pipeline import make_pipeline +from sklearn.preprocessing import KBinsDiscretizer, OneHotEncoder, StandardScaler, scale +from sklearn.utils import parallel_backend +from sklearn.utils._testing import _convert_container + + +@pytest.mark.parametrize("n_jobs", [1, 2]) +@pytest.mark.parametrize("max_samples", [0.5, 1.0]) +@pytest.mark.parametrize("sample_weight", [None, "ones"]) +def test_permutation_importance_correlated_feature_regression( + n_jobs, max_samples, sample_weight +): + # Make sure that features highly correlated to the target have a higher + # importance + rng = np.random.RandomState(42) + n_repeats = 5 + + X, y = load_diabetes(return_X_y=True) + y_with_little_noise = (y + rng.normal(scale=0.001, size=y.shape[0])).reshape(-1, 1) + + X = np.hstack([X, y_with_little_noise]) + + weights = np.ones_like(y) if sample_weight == "ones" else sample_weight + clf = RandomForestRegressor(n_estimators=10, random_state=42) + clf.fit(X, y) + + result = permutation_importance( + clf, + X, + y, + sample_weight=weights, + n_repeats=n_repeats, + random_state=rng, + n_jobs=n_jobs, + max_samples=max_samples, + ) + + assert result.importances.shape == (X.shape[1], n_repeats) + + # the feature correlated with y was added as the last column and should + # have the highest importance + assert np.all(result.importances_mean[-1] > result.importances_mean[:-1]) + + +@pytest.mark.parametrize("n_jobs", [1, 2]) +@pytest.mark.parametrize("max_samples", [0.5, 1.0]) +def test_permutation_importance_correlated_feature_regression_pandas( + n_jobs, max_samples +): + pd = pytest.importorskip("pandas") + + # Make sure that features highly correlated to the target have a higher + # importance + rng = np.random.RandomState(42) + n_repeats = 5 + + dataset = load_iris() + X, y
= dataset.data, dataset.target + y_with_little_noise = (y + rng.normal(scale=0.001, size=y.shape[0])).reshape(-1, 1) + + # Adds feature correlated with y as the last column + X = pd.DataFrame(X, columns=dataset.feature_names) + X["correlated_feature"] = y_with_little_noise + + clf = RandomForestClassifier(n_estimators=10, random_state=42) + clf.fit(X, y) + + result = permutation_importance( + clf, + X, + y, + n_repeats=n_repeats, + random_state=rng, + n_jobs=n_jobs, + max_samples=max_samples, + ) + + assert result.importances.shape == (X.shape[1], n_repeats) + + # the feature correlated with y was added as the last column and should + # have the highest importance + assert np.all(result.importances_mean[-1] > result.importances_mean[:-1]) + + +@pytest.mark.parametrize("n_jobs", [1, 2]) +@pytest.mark.parametrize("max_samples", [0.5, 1.0]) +def test_robustness_to_high_cardinality_noisy_feature(n_jobs, max_samples, seed=42): + # Permutation variable importance should not be affected by the high + # cardinality bias of traditional feature importances, especially when + # computed on a held-out test set: + rng = np.random.RandomState(seed) + n_repeats = 5 + n_samples = 1000 + n_classes = 5 + n_informative_features = 2 + n_noise_features = 1 + n_features = n_informative_features + n_noise_features + + # Generate a multiclass classification dataset and a set of informative + # binary features that can be used to predict some classes of y exactly + # while leaving some classes unexplained to make the problem harder. + classes = np.arange(n_classes) + y = rng.choice(classes, size=n_samples) + X = np.hstack([(y == c).reshape(-1, 1) for c in classes[:n_informative_features]]) + X = X.astype(np.float32) + + # Not all target classes are explained by the binary class indicator + # features: + assert n_informative_features < n_classes + + # Add n_noise_features other noisy features with high cardinality + # (numerical) values that can be used to overfit the training data. + X = np.concatenate([X, rng.randn(n_samples, n_noise_features)], axis=1) + assert X.shape == (n_samples, n_features) + + # Split the dataset to be able to evaluate on a held-out test set. The + # test size should be large enough for importance measurements to be + # stable: + X_train, X_test, y_train, y_test = train_test_split( + X, y, test_size=0.5, random_state=rng + ) + clf = RandomForestClassifier(n_estimators=5, random_state=rng) + clf.fit(X_train, y_train) + + # Variable importances computed by impurity decrease on the tree node + # splits often use the noisy features in splits. This can give the + # misleading impression that high cardinality noisy variables are the most + # important: + tree_importances = clf.feature_importances_ + informative_tree_importances = tree_importances[:n_informative_features] + noisy_tree_importances = tree_importances[n_informative_features:] + assert informative_tree_importances.max() < noisy_tree_importances.min() + + # Let's check that permutation-based feature importances do not have this + # problem.
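+ # (Definition sketch: for each feature j, permutation importance is computed + # roughly as + #     baseline = scorer(clf, X_test, y_test) + #     imp[j] = baseline - mean(scorer(clf, X_test with column j shuffled, y_test)) + # over n_repeats shuffles, so a feature the model ignores stays near 0.)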
+ r = permutation_importance( + clf, + X_test, + y_test, + n_repeats=n_repeats, + random_state=rng, + n_jobs=n_jobs, + max_samples=max_samples, + ) + + assert r.importances.shape == (X.shape[1], n_repeats) + + # Split the importances between informative and noisy features + informative_importances = r.importances_mean[:n_informative_features] + noisy_importances = r.importances_mean[n_informative_features:] + + # Because we do not have a binary variable explaining each target class, + # the RF model will have to use the random variable to make some + # (overfitting) splits (as max_depth is not set). Therefore the noisy + # variables will be non-zero but with small values oscillating around + # zero: + assert max(np.abs(noisy_importances)) > 1e-7 + assert noisy_importances.max() < 0.05 + + # The binary features correlated with y should have a higher importance + # than the high cardinality noisy features. + # The maximum test accuracy is 2 / 5 == 0.4, each informative feature + # contributing a bit more than 0.2 of accuracy. + assert informative_importances.min() > 0.15 + + +def test_permutation_importance_mixed_types(): + rng = np.random.RandomState(42) + n_repeats = 4 + + # Last column is correlated with y + X = np.array([[1.0, 2.0, 3.0, np.nan], [2, 1, 2, 1]]).T + y = np.array([0, 1, 0, 1]) + + clf = make_pipeline(SimpleImputer(), LogisticRegression(solver="lbfgs")) + clf.fit(X, y) + result = permutation_importance(clf, X, y, n_repeats=n_repeats, random_state=rng) + + assert result.importances.shape == (X.shape[1], n_repeats) + + # the feature correlated with y is the last column and should + # have the highest importance + assert np.all(result.importances_mean[-1] > result.importances_mean[:-1]) + + # use another random state + rng = np.random.RandomState(0) + result2 = permutation_importance(clf, X, y, n_repeats=n_repeats, random_state=rng) + assert result2.importances.shape == (X.shape[1], n_repeats) + + assert not np.allclose(result.importances, result2.importances) + + # the feature correlated with y is the last column and should + # have the highest importance + assert np.all(result2.importances_mean[-1] > result2.importances_mean[:-1]) + + +def test_permutation_importance_mixed_types_pandas(): + pd = pytest.importorskip("pandas") + rng = np.random.RandomState(42) + n_repeats = 5 + + # Last column is correlated with y + X = pd.DataFrame({"col1": [1.0, 2.0, 3.0, np.nan], "col2": ["a", "b", "a", "b"]}) + y = np.array([0, 1, 0, 1]) + + num_preprocess = make_pipeline(SimpleImputer(), StandardScaler()) + preprocess = ColumnTransformer( + [("num", num_preprocess, ["col1"]), ("cat", OneHotEncoder(), ["col2"])] + ) + clf = make_pipeline(preprocess, LogisticRegression(solver="lbfgs")) + clf.fit(X, y) + + result = permutation_importance(clf, X, y, n_repeats=n_repeats, random_state=rng) + + assert result.importances.shape == (X.shape[1], n_repeats) + # the feature correlated with y is the last column and should + # have the highest importance + assert np.all(result.importances_mean[-1] > result.importances_mean[:-1]) + + +def test_permutation_importance_linear_regression(): + X, y = make_regression(n_samples=500, n_features=10, random_state=0) + + X = scale(X) + y = scale(y) + + lr = LinearRegression().fit(X, y) + + # this relationship can be computed in closed form + expected_importances = 2 * lr.coef_**2 + results = permutation_importance( + lr, X, y, n_repeats=50, scoring="neg_mean_squared_error" + ) + assert_allclose( + expected_importances, results.importances_mean,
rtol=1e-1, atol=1e-6 + ) + + +@pytest.mark.parametrize("max_samples", [500, 1.0]) +def test_permutation_importance_equivalence_sequential_parallel(max_samples): + # regression test to make sure that sequential and parallel calls will + # output the same results. + # Also tests that max_samples equal to number of samples is equivalent to 1.0 + X, y = make_regression(n_samples=500, n_features=10, random_state=0) + lr = LinearRegression().fit(X, y) + + importance_sequential = permutation_importance( + lr, X, y, n_repeats=5, random_state=0, n_jobs=1, max_samples=max_samples + ) + + # First check that the problem is structured enough and that the model is + # complex enough to not yield trivial, constant importances: + imp_min = importance_sequential["importances"].min() + imp_max = importance_sequential["importances"].max() + assert imp_max - imp_min > 0.3 + + # Then actually check that parallelism does not impact the results, + # either with shared memory (threading) or with isolated memory + # via process-based parallelism using the default backend + # ('loky' or 'multiprocessing') depending on the joblib version: + + # process-based parallelism (by default): + importance_processes = permutation_importance( + lr, X, y, n_repeats=5, random_state=0, n_jobs=2 + ) + assert_allclose( + importance_processes["importances"], importance_sequential["importances"] + ) + + # thread-based parallelism: + with parallel_backend("threading"): + importance_threading = permutation_importance( + lr, X, y, n_repeats=5, random_state=0, n_jobs=2 + ) + assert_allclose( + importance_threading["importances"], importance_sequential["importances"] + ) + + +@pytest.mark.parametrize("n_jobs", [None, 1, 2]) +@pytest.mark.parametrize("max_samples", [0.5, 1.0]) +def test_permutation_importance_equivalence_array_dataframe(n_jobs, max_samples): + # This test checks that the column shuffling logic has the same behavior + # on both a dataframe and a simple numpy array. + pd = pytest.importorskip("pandas") + + # regression test to make sure that sequential and parallel calls will + # output the same results.
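+ # (Setup sketch: the same data is presented twice, once as a float ndarray + # and once as a DataFrame whose extra column keeps a pandas Categorical + # dtype, so equal importances below confirm the shuffling logic does not + # depend on the container type.)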
+ X, y = make_regression(n_samples=100, n_features=5, random_state=0) + X_df = pd.DataFrame(X) + + # Add a categorical feature that is statistically linked to y: + binner = KBinsDiscretizer(n_bins=3, encode="ordinal") + cat_column = binner.fit_transform(y.reshape(-1, 1)) + + # Concatenate the extra column to the numpy array: integers will be + # cast to float values + X = np.hstack([X, cat_column]) + assert X.dtype.kind == "f" + + # Insert extra column as a non-numpy-native dtype (while keeping backward + # compat for old pandas versions): + if hasattr(pd, "Categorical"): + cat_column = pd.Categorical(cat_column.ravel()) + else: + cat_column = cat_column.ravel() + new_col_idx = len(X_df.columns) + X_df[new_col_idx] = cat_column + assert X_df[new_col_idx].dtype == cat_column.dtype + + # Stitch an arbitrary index to the dataframe: + X_df.index = np.arange(len(X_df)).astype(str) + + rf = RandomForestRegressor(n_estimators=5, max_depth=3, random_state=0) + rf.fit(X, y) + + n_repeats = 3 + importance_array = permutation_importance( + rf, + X, + y, + n_repeats=n_repeats, + random_state=0, + n_jobs=n_jobs, + max_samples=max_samples, + ) + + # First check that the problem is structured enough and that the model is + # complex enough to not yield trivial, constant importances: + imp_min = importance_array["importances"].min() + imp_max = importance_array["importances"].max() + assert imp_max - imp_min > 0.3 + + # Now check that importances computed on the dataframe match the values + # of those computed on the array with the same data. + importance_dataframe = permutation_importance( + rf, + X_df, + y, + n_repeats=n_repeats, + random_state=0, + n_jobs=n_jobs, + max_samples=max_samples, + ) + assert_allclose( + importance_array["importances"], importance_dataframe["importances"] + ) + + +@pytest.mark.parametrize("input_type", ["array", "dataframe"]) +def test_permutation_importance_large_memmaped_data(input_type): + # Smoke, non-regression test for: + # https://github.com/scikit-learn/scikit-learn/issues/15810 + n_samples, n_features = int(5e4), 4 + X, y = make_classification( + n_samples=n_samples, n_features=n_features, random_state=0 + ) + assert X.nbytes > 1e6  # trigger joblib memmapping + + X = _convert_container(X, input_type) + clf = DummyClassifier(strategy="prior").fit(X, y) + + # Actual smoke test: should not raise any error: + n_repeats = 5 + r = permutation_importance(clf, X, y, n_repeats=n_repeats, n_jobs=2) + + # Auxiliary check: DummyClassifier is feature independent: + # permuting a feature should not change the predictions + expected_importances = np.zeros((n_features, n_repeats)) + assert_allclose(expected_importances, r.importances) + + +def test_permutation_importance_sample_weight(): + # Creating data with 2 features and 1000 samples, where the target + # variable is a linear combination of the two features, such that + # in half of the samples the impact of feature 1 is twice the impact of + # feature 2, and vice versa on the other half of the samples.
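+ # (Construction sketch matching the comment above: + #     y[:half] = 2 * x0 + x1 and y[half:] = x0 + 2 * x1, + # so with uniform weights the two features are symmetric (importance ratio + # ~= 1), while weights concentrated on the first half push the ratio to 2.)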
+ rng = np.random.RandomState(1) + n_samples = 1000 + n_features = 2 + n_half_samples = n_samples // 2 + x = rng.normal(0.0, 0.001, (n_samples, n_features)) + y = np.zeros(n_samples) + y[:n_half_samples] = 2 * x[:n_half_samples, 0] + x[:n_half_samples, 1] + y[n_half_samples:] = x[n_half_samples:, 0] + 2 * x[n_half_samples:, 1] + + # Fitting linear regression with perfect prediction + lr = LinearRegression(fit_intercept=False) + lr.fit(x, y) + + # When all samples are weighted with the same weights, the ratio of + # the two features' importances should equal 1 in expectation (when using + # mean absolute error as the loss function). + pi = permutation_importance( + lr, x, y, random_state=1, scoring="neg_mean_absolute_error", n_repeats=200 + ) + x1_x2_imp_ratio_w_none = pi.importances_mean[0] / pi.importances_mean[1] + assert x1_x2_imp_ratio_w_none == pytest.approx(1, 0.01) + + # When passing a vector of ones as the sample_weight, results should be + # the same as in the case that sample_weight=None. + w = np.ones(n_samples) + pi = permutation_importance( + lr, + x, + y, + random_state=1, + scoring="neg_mean_absolute_error", + n_repeats=200, + sample_weight=w, + ) + x1_x2_imp_ratio_w_ones = pi.importances_mean[0] / pi.importances_mean[1] + assert x1_x2_imp_ratio_w_ones == pytest.approx(x1_x2_imp_ratio_w_none, 0.01) + + # When the ratio between the weights of the first half of the samples and + # the second half of the samples approaches infinity, the ratio of + # the two features' importances should equal 2 in expectation (when using + # mean absolute error as the loss function). + w = np.hstack( + [np.repeat(10.0**10, n_half_samples), np.repeat(1.0, n_half_samples)] + ) + lr.fit(x, y, w) + pi = permutation_importance( + lr, + x, + y, + random_state=1, + scoring="neg_mean_absolute_error", + n_repeats=200, + sample_weight=w, + ) + x1_x2_imp_ratio_w = pi.importances_mean[0] / pi.importances_mean[1] + assert x1_x2_imp_ratio_w / x1_x2_imp_ratio_w_none == pytest.approx(2, 0.01) + + +def test_permutation_importance_no_weights_scoring_function(): + # Creating a scorer function that does not take sample_weight + def my_scorer(estimator, X, y): + return 1 + + # Creating some data and estimator for the permutation test + x = np.array([[1, 2], [3, 4]]) + y = np.array([1, 2]) + w = np.array([1, 1]) + lr = LinearRegression() + lr.fit(x, y) + + # test that permutation_importance does not raise an error when + # sample_weight is None + try: + permutation_importance(lr, x, y, random_state=1, scoring=my_scorer, n_repeats=1) + except TypeError: + pytest.fail( + "permutation_test raised an error when using a scorer " + "function that does not accept sample_weight even though " + "sample_weight was None" + ) + + # test that permutation_importance raises an exception when sample_weight + # is not None + with pytest.raises(TypeError): + permutation_importance( + lr, x, y, random_state=1, scoring=my_scorer, n_repeats=1, sample_weight=w + ) + + +@pytest.mark.parametrize( + "list_single_scorer, multi_scorer", + [ + (["r2", "neg_mean_squared_error"], ["r2", "neg_mean_squared_error"]), + ( + ["r2", "neg_mean_squared_error"], + { + "r2": get_scorer("r2"), + "neg_mean_squared_error": get_scorer("neg_mean_squared_error"), + }, + ), + ( + ["r2", "neg_mean_squared_error"], + lambda estimator, X, y: { + "r2": r2_score(y, estimator.predict(X)), + "neg_mean_squared_error": -mean_squared_error(y, estimator.predict(X)), + }, + ), + ], +) +def test_permutation_importance_multi_metric(list_single_scorer, multi_scorer): + # Test
permutation importance when scoring contains multiple scorers + + # Creating some data and estimator for the permutation test + x, y = make_regression(n_samples=500, n_features=10, random_state=0) + lr = LinearRegression().fit(x, y) + + multi_importance = permutation_importance( + lr, x, y, random_state=1, scoring=multi_scorer, n_repeats=2 + ) + assert set(multi_importance.keys()) == set(list_single_scorer) + + for scorer in list_single_scorer: + multi_result = multi_importance[scorer] + single_result = permutation_importance( + lr, x, y, random_state=1, scoring=scorer, n_repeats=2 + ) + + assert_allclose(multi_result.importances, single_result.importances) + + +def test_permutation_importance_max_samples_error(): + """Check that a proper error message is raised when `max_samples` is not + set to a valid input value. + """ + X = np.array([(1.0, 2.0, 3.0, 4.0)]).T + y = np.array([0, 1, 0, 1]) + + clf = LogisticRegression() + clf.fit(X, y) + + err_msg = r"max_samples must be <= n_samples" + + with pytest.raises(ValueError, match=err_msg): + permutation_importance(clf, X, y, max_samples=5) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..75367cb0ccf19db5e83a6170476e565027646e79 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/_arpack.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/_arpack.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a7a5f22df8108e1a03de7f70e2ca2a553ab27d1d Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/_arpack.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/_array_api.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/_array_api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f2e2662ea89a9aa079b634f5c0ac619b07d9439b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/_array_api.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/_available_if.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/_available_if.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d7b85588b577da1c5410b944ba1f199b5142b1b6 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/_available_if.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/_bunch.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/_bunch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4a5237998c46e137ae82ad7dc2eba6c69d371eef Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/_bunch.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/_encode.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/_encode.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..42723ae299a417298ed261695db81ac92650fb09 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/_encode.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/_estimator_html_repr.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/_estimator_html_repr.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..620cbdf7140e9e14d8a82d1eaf1d43d01836c47f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/_estimator_html_repr.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/_joblib.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/_joblib.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f9b133f3864d4bfbb8ff725ea84099a0e05d7c2e Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/_joblib.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/_mask.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/_mask.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..71f5c822263660267fdff2ae579db4e2d280f813 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/_mask.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/_metadata_requests.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/_metadata_requests.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bfad865219c7bab6f88f57c56f1041c357b17ff6 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/_metadata_requests.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/_mocking.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/_mocking.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..52fcf6cf32c9923302368d7878eca4c8a78ec405 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/_mocking.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/_param_validation.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/_param_validation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dbec9f4cbd870244e7c1038afa7b7376be7a79a9 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/_param_validation.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/_plotting.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/_plotting.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f5017bd1862d7ce2617df232c5ec9aee72800d7d Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/_plotting.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/_pprint.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/_pprint.cpython-310.pyc new 
file mode 100644 index 0000000000000000000000000000000000000000..eb81f26c2780feb515df75c918ffb66434b1af80 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/_pprint.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/_response.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/_response.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3e4fa0c8489024067f157d99d27d57b966f531a9 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/_response.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/_set_output.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/_set_output.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fe8bfe2ce809e8974aef72d04c00ff25eb875246 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/_set_output.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/_show_versions.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/_show_versions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0e5e1945da67e4569ad71a54c042e1244f0a8980 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/_show_versions.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/_tags.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/_tags.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7fcb06a9eabcad0e07406f42b085a82cca69ad29 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/_tags.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/_testing.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/_testing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..38195b8640cd5be568619778ce282ef07c634b72 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/_testing.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/class_weight.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/class_weight.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..07b94f7a1659f545aef6a134d077751f303fe9fe Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/class_weight.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/deprecation.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/deprecation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..03e74f74183ba51e9fd84c74e07ce19b3298087b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/deprecation.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/discovery.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/discovery.cpython-310.pyc new file mode 
100644 index 0000000000000000000000000000000000000000..5ec38d218687825ab5e1b11d211e55cdaf75ac1c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/discovery.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/estimator_checks.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/estimator_checks.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d4a67136c22c2b33081ab37bd13238eb1e348187 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/estimator_checks.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/extmath.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/extmath.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d2d7806fdeda4061b04dc3e6f5c038aef6ced527 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/extmath.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/fixes.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/fixes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ed4ec36244e12a7f1554dd6d0d046177fa3ac830 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/fixes.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/graph.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/graph.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1359b8893ddf20063c81b5e731e851d37966ccd2 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/graph.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/metadata_routing.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/metadata_routing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c1cbf9016d68c8fddf31ef171fa269e31b5acb06 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/metadata_routing.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/metaestimators.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/metaestimators.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..260df7f956c2f4f471e99224b3d2fd2ed0a86fe4 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/metaestimators.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/multiclass.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/multiclass.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f6949821d8cfb756f4fb23a473e3e3497158dd32 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/multiclass.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/optimize.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/optimize.cpython-310.pyc new file mode 100644 
index 0000000000000000000000000000000000000000..27ac048f71e2930143fbe520e2ed1f94c5d45bd4 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/optimize.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/parallel.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/parallel.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2554fe6c1d0b7e216848e1b86284b4c03660e5d6 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/parallel.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/random.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/random.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..496058c42ceff23fec68915e598f5ee7a5eea155 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/random.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/sparsefuncs.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/sparsefuncs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9eb1347d8bad055882f19fcca7cd0086b19393dd Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/sparsefuncs.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/stats.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/stats.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b82b0cf5d988537c978e604c03a289d655e7de75 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/stats.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/validation.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/validation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3aa800b88ffa45c6b77c62935dd59f1dc21cd8ab Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/__pycache__/validation.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_bunch.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_bunch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..203ca78d81a52d4ba54cf1e785fdf7cff1642ced Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_bunch.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_cython_blas.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_cython_blas.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1ff89d6f4ca38b8f0a07be6b9a53aacec0fb7237 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_cython_blas.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_deprecation.cpython-310.pyc 
b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_deprecation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..68716a8c4f07569c35cab1f576eec634dd90a67d Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_deprecation.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_estimator_checks.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_estimator_checks.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1a295a65bca11eac5a039fadcc05a9f543aaadc1 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_estimator_checks.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_metaestimators.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_metaestimators.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fdd50f38660017f76edd899b06f4a40eb018c5c6 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_metaestimators.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_optimize.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_optimize.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..350d42df72ebbf0428fed233748ac71432f670f1 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_optimize.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_pprint.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_pprint.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5dbb00b51ff6eeaf376801b6aa7f5bd7c65f6dda Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_pprint.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_show_versions.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_show_versions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c24040619bef32574f758f55e1fdfaa76fd246d5 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_show_versions.cpython-310.pyc differ