diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__pycache__/_lfw.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__pycache__/_lfw.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9090363858ab91279a816f737ca101d1bbda7594 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__pycache__/_lfw.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__pycache__/_olivetti_faces.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__pycache__/_olivetti_faces.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8f07ff0e1619c881306e021da013653a4affd76a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__pycache__/_olivetti_faces.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__pycache__/_samples_generator.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__pycache__/_samples_generator.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..846eaa768acc11939eece4d7cc2be591df44f492 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__pycache__/_samples_generator.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/data/linnerud_physiological.csv b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/data/linnerud_physiological.csv new file mode 100644 index 0000000000000000000000000000000000000000..68bd0cd595695d59bc968844e7ef39d3593364db --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/data/linnerud_physiological.csv @@ -0,0 +1,21 @@ +Weight Waist Pulse +191 36 50 +189 37 52 +193 38 58 +162 35 62 +189 35 46 +182 36 56 +211 38 56 +167 34 60 +176 31 74 +154 33 56 +169 34 50 +166 33 52 +154 34 64 +247 46 50 +193 36 46 +202 37 62 +176 37 54 +157 32 52 +156 33 54 +138 33 68 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/descr/__init__.py b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/descr/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/descr/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/descr/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cb338f0a7d20ee008f5d23970544f8f09d6421f5 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/descr/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/descr/diabetes.rst b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/descr/diabetes.rst new file mode 100644 index 0000000000000000000000000000000000000000..b977c36cf29a0bbb0a75553bcf51263faf0c5942 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/descr/diabetes.rst @@ -0,0 +1,38 @@ +.. _diabetes_dataset: + +Diabetes dataset +---------------- + +Ten baseline variables, age, sex, body mass index, average blood +pressure, and six blood serum measurements were obtained for each of n = +442 diabetes patients, as well as the response of interest, a +quantitative measure of disease progression one year after baseline. 
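A minimal sketch of loading this dataset with scikit-learn's bundled loader (``load_diabetes`` ships with scikit-learn; the shapes below follow the description above)::

    from sklearn.datasets import load_diabetes

    diabetes = load_diabetes()
    assert diabetes.data.shape == (442, 10)  # 442 patients, 10 baseline variables
    assert diabetes.target.shape == (442,)   # disease progression after one year
    print(diabetes.feature_names)
    # ['age', 'sex', 'bmi', 'bp', 's1', 's2', 's3', 's4', 's5', 's6']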
+ +**Data Set Characteristics:** + +:Number of Instances: 442 + +:Number of Attributes: First 10 columns are numeric predictive values + +:Target: Column 11 is a quantitative measure of disease progression one year after baseline + +:Attribute Information: + - age age in years + - sex + - bmi body mass index + - bp average blood pressure + - s1 tc, total serum cholesterol + - s2 ldl, low-density lipoproteins + - s3 hdl, high-density lipoproteins + - s4 tch, total cholesterol / HDL + - s5 ltg, possibly log of serum triglycerides level + - s6 glu, blood sugar level + +Note: Each of these 10 feature variables has been mean centered and scaled by the standard deviation times the square root of `n_samples` (i.e. the sum of squares of each column totals 1). + +Source URL: +https://www4.stat.ncsu.edu/~boos/var.select/diabetes.html + +For more information see: +Bradley Efron, Trevor Hastie, Iain Johnstone and Robert Tibshirani (2004) "Least Angle Regression," Annals of Statistics (with discussion), 407-499. +(https://web.stanford.edu/~hastie/Papers/LARS/LeastAngle_2002.pdf) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/descr/digits.rst b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/descr/digits.rst new file mode 100644 index 0000000000000000000000000000000000000000..3b07233721d69bacc9841b7ca3ae4d627268a419 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/descr/digits.rst @@ -0,0 +1,50 @@ +.. _digits_dataset: + +Optical recognition of handwritten digits dataset +-------------------------------------------------- + +**Data Set Characteristics:** + +:Number of Instances: 1797 +:Number of Attributes: 64 +:Attribute Information: 8x8 image of integer pixels in the range 0..16. +:Missing Attribute Values: None +:Creator: E. Alpaydin (alpaydin '@' boun.edu.tr) +:Date: July, 1998 + +This is a copy of the test set of the UCI ML hand-written digits dataset +https://archive.ics.uci.edu/ml/datasets/Optical+Recognition+of+Handwritten+Digits + +The data set contains images of hand-written digits: 10 classes where +each class refers to a digit. + +Preprocessing programs made available by NIST were used to extract +normalized bitmaps of handwritten digits from a preprinted form. From a +total of 43 people, 30 contributed to the training set and a different 13 +to the test set. 32x32 bitmaps are divided into nonoverlapping blocks of +4x4 and the number of on pixels is counted in each block. This generates +an input matrix of 8x8 where each element is an integer in the range +0..16. This reduces dimensionality and gives invariance to small +distortions. + +For info on NIST preprocessing routines, see M. D. Garris, J. L. Blue, G. +T. Candela, D. L. Dimmick, J. Geist, P. J. Grother, S. A. Janet, and C. +L. Wilson, NIST Form-Based Handprint Recognition System, NISTIR 5469, +1994. + +|details-start| +**References** +|details-split| + +- C. Kaynak (1995) Methods of Combining Multiple Classifiers and Their + Applications to Handwritten Digit Recognition, MSc Thesis, Institute of + Graduate Studies in Science and Engineering, Bogazici University. +- E. Alpaydin, C. Kaynak (1998) Cascading Classifiers, Kybernetika. +- Ken Tang and Ponnuthurai N. Suganthan and Xi Yao and A. Kai Qin. + Linear dimensionality reduction using relevance weighted LDA. School of + Electrical and Electronic Engineering Nanyang Technological University. + 2005. +- Claudio Gentile. A New Approximate Maximal Margin Classification + Algorithm. NIPS. 2000. + +|details-end|
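A minimal sketch of loading the bundled copy with ``load_digits`` (shapes and value range follow the description above)::

    from sklearn.datasets import load_digits

    digits = load_digits()
    assert digits.data.shape == (1797, 64)      # flattened 8x8 grids of block counts
    assert digits.images.shape == (1797, 8, 8)  # the same data kept as 2D images
    assert digits.data.min() >= 0 and digits.data.max() <= 16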
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/descr/linnerud.rst b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/descr/linnerud.rst new file mode 100644 index 0000000000000000000000000000000000000000..108611a4722ad84516743aa1b989f196263d6a59 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/descr/linnerud.rst @@ -0,0 +1,28 @@ +.. _linnerrud_dataset: + +Linnerrud dataset +----------------- + +**Data Set Characteristics:** + +:Number of Instances: 20 +:Number of Attributes: 3 +:Missing Attribute Values: None + +The Linnerud dataset is a multi-output regression dataset. It consists of three +exercise (data) and three physiological (target) variables collected from +twenty middle-aged men in a fitness club: + +- *physiological* - CSV containing 20 observations on 3 physiological variables: + Weight, Waist and Pulse. +- *exercise* - CSV containing 20 observations on 3 exercise variables: + Chins, Situps and Jumps. + +|details-start| +**References** +|details-split| + +* Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris: + Editions Technip. + +|details-end| diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/descr/olivetti_faces.rst b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/descr/olivetti_faces.rst new file mode 100644 index 0000000000000000000000000000000000000000..060c866213e8ebed53f9fc3653c99ab8641e28bd --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/descr/olivetti_faces.rst @@ -0,0 +1,44 @@ +.. _olivetti_faces_dataset: + +The Olivetti faces dataset +-------------------------- + +`This dataset contains a set of face images`_ taken between April 1992 and +April 1994 at AT&T Laboratories Cambridge. The +:func:`sklearn.datasets.fetch_olivetti_faces` function is the data +fetching / caching function that downloads the data +archive from AT&T. + +.. _This dataset contains a set of face images: https://cam-orl.co.uk/facedatabase.html + +As described on the original website: + + There are ten different images of each of 40 distinct subjects. For some + subjects, the images were taken at different times, varying the lighting, + facial expressions (open / closed eyes, smiling / not smiling) and facial + details (glasses / no glasses). All the images were taken against a dark + homogeneous background with the subjects in an upright, frontal position + (with tolerance for some side movement). + +**Data Set Characteristics:** + +================= ===================== +Classes 40 +Samples total 400 +Dimensionality 4096 +Features real, between 0 and 1 +================= ===================== + +Each image is quantized to 256 grey levels and stored as unsigned 8-bit +integers; the loader will convert these to floating point values on the +interval [0, 1], which are easier to work with for many algorithms. + +The "target" for this database is an integer from 0 to 39 indicating the +identity of the person pictured; however, with only 10 examples per class, this +relatively small dataset is more interesting from an unsupervised or +semi-supervised perspective. + +The original images are 92 x 112 pixels, while the version available here +consists of 64 x 64 images. + +When using these images, please give credit to AT&T Laboratories Cambridge.
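A minimal sketch of fetching the data (``fetch_olivetti_faces`` downloads and caches the archive on first use; shapes and ranges follow the description above)::

    from sklearn.datasets import fetch_olivetti_faces

    faces = fetch_olivetti_faces()
    assert faces.data.shape == (400, 4096)      # 400 samples, 64 x 64 pixels each
    assert faces.images.shape == (400, 64, 64)  # the same data, unflattened
    assert faces.target.min() == 0 and faces.target.max() == 39  # subject identity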
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/descr/species_distributions.rst b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/descr/species_distributions.rst new file mode 100644 index 0000000000000000000000000000000000000000..a2c2243de55676615721b04312cd62b81d369cfb --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/descr/species_distributions.rst @@ -0,0 +1,36 @@ +.. _species_distribution_dataset: + +Species distribution dataset +---------------------------- + +This dataset represents the geographic distribution of two species in Central and +South America. The two species are: + +- "Bradypus variegatus", the Brown-throated Sloth. + +- "Microryzomys minutus", also known as the Forest Small Rice Rat, a rodent + that lives in Peru, Colombia, Ecuador, and Venezuela. + +This is not a typical dataset: the loader does not return a +:class:`~sklearn.datasets.base.Bunch` containing the attributes `data` and +`target`. Instead, it exposes the information needed to build a "density" map of +the different species. + +The grid for the map can be built using the attributes `x_left_lower_corner`, +`y_left_lower_corner`, `Nx`, `Ny` and `grid_size`, which respectively correspond +to the x and y coordinates of the lower left corner of the grid, the number of +points along the x- and y-axis and the size of the step on the grid. + +The density at each location of the grid is contained in the `coverage` attribute. + +Finally, the `train` and `test` attributes contain information regarding the +presence of a species at specific locations. + +The dataset is provided by Phillips et al. (2006). + +.. topic:: References + + * "Maximum entropy modeling of species geographic distributions", + S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling, + 190:231-259, 2006. diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/descr/wine_data.rst b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/descr/wine_data.rst new file mode 100644 index 0000000000000000000000000000000000000000..0325af6233c173764e646935bf0200788e044754 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/descr/wine_data.rst @@ -0,0 +1,98 @@ +.. _wine_dataset: + +Wine recognition dataset +------------------------ + +**Data Set Characteristics:** + +:Number of Instances: 178 +:Number of Attributes: 13 numeric, predictive attributes and the class +:Attribute Information: + - Alcohol + - Malic acid + - Ash + - Alcalinity of ash + - Magnesium + - Total phenols + - Flavanoids + - Nonflavanoid phenols + - Proanthocyanins + - Color intensity + - Hue + - OD280/OD315 of diluted wines + - Proline + - class: + - class_0 + - class_1 + - class_2 + +:Summary Statistics: + +============================= ==== ===== ======= ===== + Min Max Mean SD +============================= ==== ===== ======= ===== +Alcohol: 11.0 14.8 13.0 0.8 +Malic Acid: 0.74 5.80 2.34 1.12 +Ash: 1.36 3.23 2.36 0.27 +Alcalinity of Ash: 10.6 30.0 19.5 3.3 +Magnesium: 70.0 162.0 99.7 14.3 +Total Phenols: 0.98 3.88 2.29 0.63 +Flavanoids: 0.34 5.08 2.03 1.00 +Nonflavanoid Phenols: 0.13 0.66 0.36 0.12 +Proanthocyanins: 0.41 3.58 1.59 0.57 +Colour Intensity: 1.3 13.0 5.1 2.3 +Hue: 0.48 1.71 0.96 0.23 +OD280/OD315 of diluted wines: 1.27 4.00 2.61 0.71 +Proline: 278 1680 746 315 +============================= ==== ===== ======= ===== + +:Missing Attribute Values: None +:Class Distribution: class_0 (59), class_1 (71), class_2 (48) +:Creator: R.A. Fisher
+:Donor: Michael Marshall (MARSHALL%PLU@io.arc.nasa.gov) +:Date: July, 1988 + +This is a copy of the UCI ML Wine recognition dataset. +https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data + +The data are the results of a chemical analysis of wines grown in the same +region in Italy by three different cultivators. There are thirteen different +measurements taken for different constituents found in the three types of +wine. + +Original Owners: + +Forina, M. et al, PARVUS - +An Extendible Package for Data Exploration, Classification and Correlation. +Institute of Pharmaceutical and Food Analysis and Technologies, +Via Brigata Salerno, 16147 Genoa, Italy. + +Citation: + +Lichman, M. (2013). UCI Machine Learning Repository +[https://archive.ics.uci.edu/ml]. Irvine, CA: University of California, +School of Information and Computer Science. + +|details-start| +**References** +|details-split| + +(1) S. Aeberhard, D. Coomans and O. de Vel, +Comparison of Classifiers in High Dimensional Settings, +Tech. Rep. no. 92-02, (1992), Dept. of Computer Science and Dept. of +Mathematics and Statistics, James Cook University of North Queensland. +(Also submitted to Technometrics). + +The data was used with many others for comparing various +classifiers. The classes are separable, though only RDA +has achieved 100% correct classification. +(RDA : 100%, QDA 99.4%, LDA 98.9%, 1NN 96.1% (z-transformed data)) +(All results using the leave-one-out technique) + +(2) S. Aeberhard, D. Coomans and O. de Vel, +"THE CLASSIFICATION PERFORMANCE OF RDA" +Tech. Rep. no. 92-01, (1992), Dept. of Computer Science and Dept. of +Mathematics and Statistics, James Cook University of North Queensland. +(Also submitted to Journal of Chemometrics). + +|details-end| diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/images/README.txt b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/images/README.txt new file mode 100644 index 0000000000000000000000000000000000000000..a95a5d42500d45079dedc65c12fd9aff32337ec4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/images/README.txt @@ -0,0 +1,21 @@ +Image: china.jpg +Released under a creative commons license. [1] +Attribution: Some rights reserved by danielbuechele [2] +Retrieved 21st August, 2011 from [3] by Robert Layton + +[1] https://creativecommons.org/licenses/by/2.0/ +[2] https://www.flickr.com/photos/danielbuechele/ +[3] https://www.flickr.com/photos/danielbuechele/6061409035/sizes/z/in/photostream/ + + +Image: flower.jpg +Released under a creative commons license. [1]
+Attribution: Some rights reserved by vultilion [2] +Retrieved 21st August, 2011 from [3] by Robert Layton + +[1] https://creativecommons.org/licenses/by/2.0/ +[2] https://www.flickr.com/photos/vultilion/ +[3] https://www.flickr.com/photos/vultilion/6056698931/sizes/z/in/photostream/ + + + diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/images/__init__.py b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/images/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/images/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/images/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..74dbbef6a055257fbc0acbbe34b576048d37978c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/images/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/__init__.py b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..29b5ffc42377f810bfd74fe9eb1741fee4566435 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_20news.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_20news.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..455a45a2a4842fec07d50e2567e7d0e43676b4d0 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_20news.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_arff_parser.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_arff_parser.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..518f606f546ef0cbc3976b7684e3ad05ffe05fd3 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_arff_parser.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_base.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cf657b4f50e3e729bc1761e2e9b301e457f298e8 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_base.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_california_housing.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_california_housing.cpython-310.pyc new file mode 100644 index
0000000000000000000000000000000000000000..9e553bee51789baafb3ea70df00180dc49971c1a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_california_housing.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_common.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..076a1261fa3ae603b905b933d7144db10d2a6be6 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_common.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_covtype.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_covtype.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..69588b28a3b1027edf4ca394eabdfae6d0f79ed1 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_covtype.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_kddcup99.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_kddcup99.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..16450cec8c0057c0d4662cd6270607a69ee07e38 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_kddcup99.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_lfw.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_lfw.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4e5398e2ce1181d50b0b90dcf7dd4f57e5299085 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_lfw.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_olivetti_faces.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_olivetti_faces.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bddf96486abdbd735fd5a8a3b3f24745676bbebe Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_olivetti_faces.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_openml.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_openml.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4594814153851b46d3282d58184bc26b4b5140cc Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_openml.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_rcv1.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_rcv1.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1d6b7c081c73f544488d46b8346deff82a137333 Binary files /dev/null and 
b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_rcv1.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_samples_generator.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_samples_generator.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1685e3860eacecb47197997948d2b793168627ac Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_samples_generator.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_svmlight_format.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_svmlight_format.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..14fc4bb59327db79753d116b05e062d0e500a790 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_svmlight_format.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/__init__.py b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..547ca941e228d4ae4b8e4bf7f401b48288a53f46 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/__init__.py b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bb39194e635a4ac62c7ad137132c75cc333ee41d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1/__init__.py b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3d60a71dbe50e5aebef45501ac8925536729c017 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1/__pycache__/__init__.cpython-310.pyc differ diff --git 
a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1119/__init__.py b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1119/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1119/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1119/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4c66025ae100536eed31c182de83118fa70f6c16 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1119/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1590/__init__.py b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1590/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1590/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1590/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5e27a6d34bef4436e3c73e77d4c0cf5824f43eec Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1590/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_2/__init__.py b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_2/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_2/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_2/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e38bcecbc181cb060bfce14f8b4a23703c59378f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_2/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/__init__.py b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..88a8b5639f360e36770ed372f5269453f9c93991 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_3/__init__.py b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_3/__init__.py new file mode 100644 
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_3/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_3/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d3de3aae48c017b65c734bce56c445842c7fef56 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_3/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40589/__init__.py b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40589/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40675/__init__.py b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40675/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40675/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40675/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f15bae84625ce2b1645b181e35a5e483b4d7ac09 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40675/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40945/__init__.py b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40945/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40966/__init__.py b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40966/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40966/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40966/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9fadd980fac244b65f1de66b51992c592a6e9e8a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40966/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_42585/__init__.py b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_42585/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_42585/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_42585/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..7250177512429e3f427027e7472ef378ca852e66 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_42585/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/__init__.py b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b1d1c3ce38febe859b840470b587f4d5cb010964 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_61/__init__.py b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_61/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_61/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_61/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d26dc0cae3a296649892cc798e9cbf70c4cc9bf5 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_61/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/svmlight_classification.txt b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/svmlight_classification.txt new file mode 100644 index 0000000000000000000000000000000000000000..a3c4a3364cac126a91738c780ff668156f151611 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/svmlight_classification.txt @@ -0,0 +1,9 @@ +# comment +# note: the next line contains a tab +1.0 3:2.5 11:-5.2 16:1.5 # and an inline comment +2.0 6:1.0 13:-3 +# another comment +3.0 21:27 +4.0 2:1.234567890123456e10 # double precision value +1.0 # empty line, all zeros +2.0 3:0 # explicit zeros diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/svmlight_invalid.txt b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/svmlight_invalid.txt new file mode 100644 index 0000000000000000000000000000000000000000..05601f6ca6eef3276c6c16c0983262836023eb78 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/svmlight_invalid.txt @@ -0,0 +1,3 @@ +python 2:2.5 10:-5.2 15:1.5 +2.0 5:1.0 12:-3 +3.0 20:27 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/svmlight_invalid_order.txt b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/svmlight_invalid_order.txt new file mode 100644 index 0000000000000000000000000000000000000000..2160abf15ea4298d728e4fb2bed37655c8bbb7ed --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/svmlight_invalid_order.txt @@ -0,0 +1 @@ +-1 
5:2.5 2:-5.2 15:1.5 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/svmlight_multilabel.txt b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/svmlight_multilabel.txt new file mode 100644 index 0000000000000000000000000000000000000000..a8194e5fef163ba9fa255e8f5c3ed9e593793769 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/svmlight_multilabel.txt @@ -0,0 +1,5 @@ +# multilabel dataset in SVMlight format +1,0 2:2.5 10:-5.2 15:1.5 +2 5:1.0 12:-3 + 2:3.5 11:26 +1,2 20:27 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/test_20news.py b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/test_20news.py new file mode 100644 index 0000000000000000000000000000000000000000..4072d9c8ec67f2ba147e56bafc9e91c2d3485639 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/test_20news.py @@ -0,0 +1,142 @@ +"""Test the 20news downloader, if the data is available, +or if specifically requested via environment variable +(e.g. for CI jobs).""" +from functools import partial +from unittest.mock import patch + +import numpy as np +import pytest +import scipy.sparse as sp + +from sklearn.datasets.tests.test_common import ( + check_as_frame, + check_pandas_dependency_message, + check_return_X_y, +) +from sklearn.preprocessing import normalize +from sklearn.utils._testing import assert_allclose_dense_sparse + + +def test_20news(fetch_20newsgroups_fxt): + data = fetch_20newsgroups_fxt(subset="all", shuffle=False) + assert data.DESCR.startswith(".. _20newsgroups_dataset:") + + # Extract a reduced dataset + data2cats = fetch_20newsgroups_fxt( + subset="all", categories=data.target_names[-1:-3:-1], shuffle=False + ) + # Check that the ordering of the target_names is the same + # as the ordering in the full dataset + assert data2cats.target_names == data.target_names[-2:] + # Assert that we have only 0 and 1 as labels + assert np.unique(data2cats.target).tolist() == [0, 1] + + # Check that the number of filenames is consistent with data/target + assert len(data2cats.filenames) == len(data2cats.target) + assert len(data2cats.filenames) == len(data2cats.data) + + # Check that the first entry of the reduced dataset corresponds to + # the first entry of the corresponding category in the full dataset + entry1 = data2cats.data[0] + category = data2cats.target_names[data2cats.target[0]] + label = data.target_names.index(category) + entry2 = data.data[np.where(data.target == label)[0][0]] + assert entry1 == entry2 + + # check that return_X_y option + X, y = fetch_20newsgroups_fxt(subset="all", shuffle=False, return_X_y=True) + assert len(X) == len(data.data) + assert y.shape == data.target.shape + + +def test_20news_length_consistency(fetch_20newsgroups_fxt): + """Checks the length consistencies within the bunch + + This is a non-regression test for a bug present in 0.16.1. 
+ """ + # Extract the full dataset + data = fetch_20newsgroups_fxt(subset="all") + assert len(data["data"]) == len(data.data) + assert len(data["target"]) == len(data.target) + assert len(data["filenames"]) == len(data.filenames) + + +def test_20news_vectorized(fetch_20newsgroups_vectorized_fxt): + # test subset = train + bunch = fetch_20newsgroups_vectorized_fxt(subset="train") + assert sp.issparse(bunch.data) and bunch.data.format == "csr" + assert bunch.data.shape == (11314, 130107) + assert bunch.target.shape[0] == 11314 + assert bunch.data.dtype == np.float64 + assert bunch.DESCR.startswith(".. _20newsgroups_dataset:") + + # test subset = test + bunch = fetch_20newsgroups_vectorized_fxt(subset="test") + assert sp.issparse(bunch.data) and bunch.data.format == "csr" + assert bunch.data.shape == (7532, 130107) + assert bunch.target.shape[0] == 7532 + assert bunch.data.dtype == np.float64 + assert bunch.DESCR.startswith(".. _20newsgroups_dataset:") + + # test return_X_y option + fetch_func = partial(fetch_20newsgroups_vectorized_fxt, subset="test") + check_return_X_y(bunch, fetch_func) + + # test subset = all + bunch = fetch_20newsgroups_vectorized_fxt(subset="all") + assert sp.issparse(bunch.data) and bunch.data.format == "csr" + assert bunch.data.shape == (11314 + 7532, 130107) + assert bunch.target.shape[0] == 11314 + 7532 + assert bunch.data.dtype == np.float64 + assert bunch.DESCR.startswith(".. _20newsgroups_dataset:") + + +def test_20news_normalization(fetch_20newsgroups_vectorized_fxt): + X = fetch_20newsgroups_vectorized_fxt(normalize=False) + X_ = fetch_20newsgroups_vectorized_fxt(normalize=True) + X_norm = X_["data"][:100] + X = X["data"][:100] + + assert_allclose_dense_sparse(X_norm, normalize(X)) + assert np.allclose(np.linalg.norm(X_norm.todense(), axis=1), 1) + + +def test_20news_as_frame(fetch_20newsgroups_vectorized_fxt): + pd = pytest.importorskip("pandas") + + bunch = fetch_20newsgroups_vectorized_fxt(as_frame=True) + check_as_frame(bunch, fetch_20newsgroups_vectorized_fxt) + + frame = bunch.frame + assert frame.shape == (11314, 130108) + assert all([isinstance(col, pd.SparseDtype) for col in bunch.data.dtypes]) + + # Check a small subset of features + for expected_feature in [ + "beginner", + "beginners", + "beginning", + "beginnings", + "begins", + "begley", + "begone", + ]: + assert expected_feature in frame.keys() + assert "category_class" in frame.keys() + assert bunch.target.name == "category_class" + + +def test_as_frame_no_pandas(fetch_20newsgroups_vectorized_fxt, hide_available_pandas): + check_pandas_dependency_message(fetch_20newsgroups_vectorized_fxt) + + +def test_outdated_pickle(fetch_20newsgroups_vectorized_fxt): + with patch("os.path.exists") as mock_is_exist: + with patch("joblib.load") as mock_load: + # mock that the dataset was cached + mock_is_exist.return_value = True + # mock that we have an outdated pickle with only X and y returned + mock_load.return_value = ("X", "y") + err_msg = "The cached dataset located in" + with pytest.raises(ValueError, match=err_msg): + fetch_20newsgroups_vectorized_fxt(as_frame=True) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/test_arff_parser.py b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/test_arff_parser.py new file mode 100644 index 0000000000000000000000000000000000000000..b675439cd2e9d1bdd5b1e5105322d9a36a4b4e54 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/test_arff_parser.py @@ -0,0 +1,272 @@ +import textwrap +from io 
import BytesIO + +import pytest + +from sklearn.datasets._arff_parser import ( + _liac_arff_parser, + _pandas_arff_parser, + _post_process_frame, + load_arff_from_gzip_file, +) + + +@pytest.mark.parametrize( + "feature_names, target_names", + [ + ( + [ + "col_int_as_integer", + "col_int_as_numeric", + "col_float_as_real", + "col_float_as_numeric", + ], + ["col_categorical", "col_string"], + ), + ( + [ + "col_int_as_integer", + "col_int_as_numeric", + "col_float_as_real", + "col_float_as_numeric", + ], + ["col_categorical"], + ), + ( + [ + "col_int_as_integer", + "col_int_as_numeric", + "col_float_as_real", + "col_float_as_numeric", + ], + [], + ), + ], +) +def test_post_process_frame(feature_names, target_names): + """Check the behaviour of the post-processing function for splitting a dataframe.""" + pd = pytest.importorskip("pandas") + + X_original = pd.DataFrame( + { + "col_int_as_integer": [1, 2, 3], + "col_int_as_numeric": [1, 2, 3], + "col_float_as_real": [1.0, 2.0, 3.0], + "col_float_as_numeric": [1.0, 2.0, 3.0], + "col_categorical": ["a", "b", "c"], + "col_string": ["a", "b", "c"], + } + ) + + X, y = _post_process_frame(X_original, feature_names, target_names) + assert isinstance(X, pd.DataFrame) + if len(target_names) >= 2: + assert isinstance(y, pd.DataFrame) + elif len(target_names) == 1: + assert isinstance(y, pd.Series) + else: + assert y is None + + +def test_load_arff_from_gzip_file_error_parser(): + """An error will be raised if the parser is not known.""" + # None of the input parameters are required to be accurate since the check + # of the parser will be carried out first. + + err_msg = "Unknown parser: 'xxx'. Should be 'liac-arff' or 'pandas'" + with pytest.raises(ValueError, match=err_msg): + load_arff_from_gzip_file("xxx", "xxx", "xxx", "xxx", "xxx", "xxx") + + +@pytest.mark.parametrize("parser_func", [_liac_arff_parser, _pandas_arff_parser]) +def test_pandas_arff_parser_strip_single_quotes(parser_func): + """Check that we properly strip single quotes from the data.""" + pd = pytest.importorskip("pandas") + + arff_file = BytesIO(textwrap.dedent(""" + @relation 'toy' + @attribute 'cat_single_quote' {'A', 'B', 'C'} + @attribute 'str_single_quote' string + @attribute 'str_nested_quote' string + @attribute 'class' numeric + @data + 'A','some text','\"expect double quotes\"',0 + """).encode("utf-8")) + + columns_info = { + "cat_single_quote": { + "data_type": "nominal", + "name": "cat_single_quote", + }, + "str_single_quote": { + "data_type": "string", + "name": "str_single_quote", + }, + "str_nested_quote": { + "data_type": "string", + "name": "str_nested_quote", + }, + "class": { + "data_type": "numeric", + "name": "class", + }, + } + + feature_names = [ + "cat_single_quote", + "str_single_quote", + "str_nested_quote", + ] + target_names = ["class"] + + # We don't strip single quotes for string columns with the pandas parser. 
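+    # liac-arff strips the quotes while parsing (following the ARFF spec),
+    # whereas the pandas parser keeps the raw token, surrounding quotes
+    # included; expected_values below therefore depends on parser_func.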
+ expected_values = { + "cat_single_quote": "A", + "str_single_quote": ( + "some text" if parser_func is _liac_arff_parser else "'some text'" + ), + "str_nested_quote": ( + '"expect double quotes"' + if parser_func is _liac_arff_parser + else "'\"expect double quotes\"'" + ), + "class": 0, + } + + _, _, frame, _ = parser_func( + arff_file, + output_arrays_type="pandas", + openml_columns_info=columns_info, + feature_names_to_select=feature_names, + target_names_to_select=target_names, + ) + + assert frame.columns.tolist() == feature_names + target_names + pd.testing.assert_series_equal(frame.iloc[0], pd.Series(expected_values, name=0)) + + +@pytest.mark.parametrize("parser_func", [_liac_arff_parser, _pandas_arff_parser]) +def test_pandas_arff_parser_strip_double_quotes(parser_func): + """Check that we properly strip double quotes from the data.""" + pd = pytest.importorskip("pandas") + + arff_file = BytesIO(textwrap.dedent(""" + @relation 'toy' + @attribute 'cat_double_quote' {"A", "B", "C"} + @attribute 'str_double_quote' string + @attribute 'str_nested_quote' string + @attribute 'class' numeric + @data + "A","some text","\'expect double quotes\'",0 + """).encode("utf-8")) + + columns_info = { + "cat_double_quote": { + "data_type": "nominal", + "name": "cat_double_quote", + }, + "str_double_quote": { + "data_type": "string", + "name": "str_double_quote", + }, + "str_nested_quote": { + "data_type": "string", + "name": "str_nested_quote", + }, + "class": { + "data_type": "numeric", + "name": "class", + }, + } + + feature_names = [ + "cat_double_quote", + "str_double_quote", + "str_nested_quote", + ] + target_names = ["class"] + + expected_values = { + "cat_double_quote": "A", + "str_double_quote": "some text", + "str_nested_quote": "'expect double quotes'", + "class": 0, + } + + _, _, frame, _ = parser_func( + arff_file, + output_arrays_type="pandas", + openml_columns_info=columns_info, + feature_names_to_select=feature_names, + target_names_to_select=target_names, + ) + + assert frame.columns.tolist() == feature_names + target_names + pd.testing.assert_series_equal(frame.iloc[0], pd.Series(expected_values, name=0)) + + +@pytest.mark.parametrize( + "parser_func", + [ + # internal quotes are not considered to follow the ARFF spec in LIAC ARFF + pytest.param(_liac_arff_parser, marks=pytest.mark.xfail), + _pandas_arff_parser, + ], +) +def test_pandas_arff_parser_strip_no_quotes(parser_func): + """Check that we properly parse with no quotes characters.""" + pd = pytest.importorskip("pandas") + + arff_file = BytesIO(textwrap.dedent(""" + @relation 'toy' + @attribute 'cat_without_quote' {A, B, C} + @attribute 'str_without_quote' string + @attribute 'str_internal_quote' string + @attribute 'class' numeric + @data + A,some text,'internal' quote,0 + """).encode("utf-8")) + + columns_info = { + "cat_without_quote": { + "data_type": "nominal", + "name": "cat_without_quote", + }, + "str_without_quote": { + "data_type": "string", + "name": "str_without_quote", + }, + "str_internal_quote": { + "data_type": "string", + "name": "str_internal_quote", + }, + "class": { + "data_type": "numeric", + "name": "class", + }, + } + + feature_names = [ + "cat_without_quote", + "str_without_quote", + "str_internal_quote", + ] + target_names = ["class"] + + expected_values = { + "cat_without_quote": "A", + "str_without_quote": "some text", + "str_internal_quote": "'internal' quote", + "class": 0, + } + + _, _, frame, _ = parser_func( + arff_file, + output_arrays_type="pandas", + openml_columns_info=columns_info, + 
feature_names_to_select=feature_names, + target_names_to_select=target_names, + ) + + assert frame.columns.tolist() == feature_names + target_names + pd.testing.assert_series_equal(frame.iloc[0], pd.Series(expected_values, name=0)) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/test_base.py b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/test_base.py new file mode 100644 index 0000000000000000000000000000000000000000..0a1190060a0555f822f3bd4736e22849a773a52b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/test_base.py @@ -0,0 +1,365 @@ +import os +import shutil +import tempfile +import warnings +from functools import partial +from importlib import resources +from pathlib import Path +from pickle import dumps, loads + +import numpy as np +import pytest + +from sklearn.datasets import ( + clear_data_home, + get_data_home, + load_breast_cancer, + load_diabetes, + load_digits, + load_files, + load_iris, + load_linnerud, + load_sample_image, + load_sample_images, + load_wine, +) +from sklearn.datasets._base import ( + load_csv_data, + load_gzip_compressed_csv_data, +) +from sklearn.datasets.tests.test_common import check_as_frame +from sklearn.preprocessing import scale +from sklearn.utils import Bunch + + +class _DummyPath: + """Minimal class that implements the os.PathLike interface.""" + + def __init__(self, path): + self.path = path + + def __fspath__(self): + return self.path + + +def _remove_dir(path): + if os.path.isdir(path): + shutil.rmtree(path) + + +@pytest.fixture(scope="module") +def data_home(tmpdir_factory): + tmp_file = str(tmpdir_factory.mktemp("scikit_learn_data_home_test")) + yield tmp_file + _remove_dir(tmp_file) + + +@pytest.fixture(scope="module") +def load_files_root(tmpdir_factory): + tmp_file = str(tmpdir_factory.mktemp("scikit_learn_load_files_test")) + yield tmp_file + _remove_dir(tmp_file) + + +@pytest.fixture +def test_category_dir_1(load_files_root): + test_category_dir1 = tempfile.mkdtemp(dir=load_files_root) + sample_file = tempfile.NamedTemporaryFile(dir=test_category_dir1, delete=False) + sample_file.write(b"Hello World!\n") + sample_file.close() + yield str(test_category_dir1) + _remove_dir(test_category_dir1) + + +@pytest.fixture +def test_category_dir_2(load_files_root): + test_category_dir2 = tempfile.mkdtemp(dir=load_files_root) + yield str(test_category_dir2) + _remove_dir(test_category_dir2) + + +@pytest.mark.parametrize("path_container", [None, Path, _DummyPath]) +def test_data_home(path_container, data_home): + # get_data_home will point to a pre-existing folder + if path_container is not None: + data_home = path_container(data_home) + returned_home = get_data_home(data_home=data_home) + assert returned_home == os.fspath(data_home) + data_home = returned_home + assert os.path.exists(data_home) + + # clear_data_home will delete both the content and the folder itself + if path_container is not None: + data_home = path_container(data_home) + clear_data_home(data_home=data_home) + assert not os.path.exists(data_home) + + # if the folder is missing it will be created again + data_home = get_data_home(data_home=data_home) + assert os.path.exists(data_home) + + +def test_default_empty_load_files(load_files_root): + res = load_files(load_files_root) + assert len(res.filenames) == 0 + assert len(res.target_names) == 0 + assert res.DESCR is None + + +def test_default_load_files(test_category_dir_1, test_category_dir_2, load_files_root): + res = load_files(load_files_root) + assert len(res.filenames) == 1 + assert
len(res.target_names) == 2 + assert res.DESCR is None + assert res.data == [b"Hello World!\n"] + + +def test_load_files_w_categories_desc_and_encoding( + test_category_dir_1, test_category_dir_2, load_files_root +): + category = os.path.abspath(test_category_dir_1).split(os.sep).pop() + res = load_files( + load_files_root, description="test", categories=[category], encoding="utf-8" + ) + + assert len(res.filenames) == 1 + assert len(res.target_names) == 1 + assert res.DESCR == "test" + assert res.data == ["Hello World!\n"] + + +def test_load_files_wo_load_content( + test_category_dir_1, test_category_dir_2, load_files_root +): + res = load_files(load_files_root, load_content=False) + assert len(res.filenames) == 1 + assert len(res.target_names) == 2 + assert res.DESCR is None + assert res.get("data") is None + + +@pytest.mark.parametrize("allowed_extensions", ([".txt"], [".txt", ".json"])) +def test_load_files_allowed_extensions(tmp_path, allowed_extensions): + """Check the behaviour of `allowed_extension` in `load_files`.""" + d = tmp_path / "sub" + d.mkdir() + files = ("file1.txt", "file2.json", "file3.json", "file4.md") + paths = [d / f for f in files] + for p in paths: + p.write_bytes(b"hello") + res = load_files(tmp_path, allowed_extensions=allowed_extensions) + assert set([str(p) for p in paths if p.suffix in allowed_extensions]) == set( + res.filenames + ) + + +@pytest.mark.parametrize( + "filename, expected_n_samples, expected_n_features, expected_target_names", + [ + ("wine_data.csv", 178, 13, ["class_0", "class_1", "class_2"]), + ("iris.csv", 150, 4, ["setosa", "versicolor", "virginica"]), + ("breast_cancer.csv", 569, 30, ["malignant", "benign"]), + ], +) +def test_load_csv_data( + filename, expected_n_samples, expected_n_features, expected_target_names +): + actual_data, actual_target, actual_target_names = load_csv_data(filename) + assert actual_data.shape[0] == expected_n_samples + assert actual_data.shape[1] == expected_n_features + assert actual_target.shape[0] == expected_n_samples + np.testing.assert_array_equal(actual_target_names, expected_target_names) + + +def test_load_csv_data_with_descr(): + data_file_name = "iris.csv" + descr_file_name = "iris.rst" + + res_without_descr = load_csv_data(data_file_name=data_file_name) + res_with_descr = load_csv_data( + data_file_name=data_file_name, descr_file_name=descr_file_name + ) + assert len(res_with_descr) == 4 + assert len(res_without_descr) == 3 + + np.testing.assert_array_equal(res_with_descr[0], res_without_descr[0]) + np.testing.assert_array_equal(res_with_descr[1], res_without_descr[1]) + np.testing.assert_array_equal(res_with_descr[2], res_without_descr[2]) + + assert res_with_descr[-1].startswith(".. 
_iris_dataset:") + + +@pytest.mark.parametrize( + "filename, kwargs, expected_shape", + [ + ("diabetes_data_raw.csv.gz", {}, [442, 10]), + ("diabetes_target.csv.gz", {}, [442]), + ("digits.csv.gz", {"delimiter": ","}, [1797, 65]), + ], +) +def test_load_gzip_compressed_csv_data(filename, kwargs, expected_shape): + actual_data = load_gzip_compressed_csv_data(filename, **kwargs) + assert actual_data.shape == tuple(expected_shape) + + +def test_load_gzip_compressed_csv_data_with_descr(): + data_file_name = "diabetes_target.csv.gz" + descr_file_name = "diabetes.rst" + + expected_data = load_gzip_compressed_csv_data(data_file_name=data_file_name) + actual_data, descr = load_gzip_compressed_csv_data( + data_file_name=data_file_name, + descr_file_name=descr_file_name, + ) + + np.testing.assert_array_equal(actual_data, expected_data) + assert descr.startswith(".. _diabetes_dataset:") + + +def test_load_sample_images(): + try: + res = load_sample_images() + assert len(res.images) == 2 + assert len(res.filenames) == 2 + images = res.images + + # assert is china image + assert np.all(images[0][0, 0, :] == np.array([174, 201, 231], dtype=np.uint8)) + # assert is flower image + assert np.all(images[1][0, 0, :] == np.array([2, 19, 13], dtype=np.uint8)) + assert res.DESCR + except ImportError: + warnings.warn("Could not load sample images, PIL is not available.") + + +def test_load_sample_image(): + try: + china = load_sample_image("china.jpg") + assert china.dtype == "uint8" + assert china.shape == (427, 640, 3) + except ImportError: + warnings.warn("Could not load sample images, PIL is not available.") + + +def test_load_diabetes_raw(): + """Test to check that we load a scaled version by default but that we can + get an unscaled version when setting `scaled=False`.""" + diabetes_raw = load_diabetes(scaled=False) + assert diabetes_raw.data.shape == (442, 10) + assert diabetes_raw.target.size, 442 + assert len(diabetes_raw.feature_names) == 10 + assert diabetes_raw.DESCR + + diabetes_default = load_diabetes() + + np.testing.assert_allclose( + scale(diabetes_raw.data) / (442**0.5), diabetes_default.data, atol=1e-04 + ) + + +@pytest.mark.parametrize( + "loader_func, data_shape, target_shape, n_target, has_descr, filenames", + [ + (load_breast_cancer, (569, 30), (569,), 2, True, ["filename"]), + (load_wine, (178, 13), (178,), 3, True, []), + (load_iris, (150, 4), (150,), 3, True, ["filename"]), + ( + load_linnerud, + (20, 3), + (20, 3), + 3, + True, + ["data_filename", "target_filename"], + ), + (load_diabetes, (442, 10), (442,), None, True, []), + (load_digits, (1797, 64), (1797,), 10, True, []), + (partial(load_digits, n_class=9), (1617, 64), (1617,), 10, True, []), + ], +) +def test_loader(loader_func, data_shape, target_shape, n_target, has_descr, filenames): + bunch = loader_func() + + assert isinstance(bunch, Bunch) + assert bunch.data.shape == data_shape + assert bunch.target.shape == target_shape + if hasattr(bunch, "feature_names"): + assert len(bunch.feature_names) == data_shape[1] + if n_target is not None: + assert len(bunch.target_names) == n_target + if has_descr: + assert bunch.DESCR + if filenames: + assert "data_module" in bunch + assert all( + [ + f in bunch + and (resources.files(bunch["data_module"]) / bunch[f]).is_file() + for f in filenames + ] + ) + + +@pytest.mark.parametrize( + "loader_func, data_dtype, target_dtype", + [ + (load_breast_cancer, np.float64, int), + (load_diabetes, np.float64, np.float64), + (load_digits, np.float64, int), + (load_iris, np.float64, int), + 
(load_linnerud, np.float64, np.float64), + (load_wine, np.float64, int), + ], +) +def test_toy_dataset_frame_dtype(loader_func, data_dtype, target_dtype): + default_result = loader_func() + check_as_frame( + default_result, + loader_func, + expected_data_dtype=data_dtype, + expected_target_dtype=target_dtype, + ) + + +def test_loads_dumps_bunch(): + bunch = Bunch(x="x") + bunch_from_pkl = loads(dumps(bunch)) + bunch_from_pkl.x = "y" + assert bunch_from_pkl["x"] == bunch_from_pkl.x + + +def test_bunch_pickle_generated_with_0_16_and_read_with_0_17(): + bunch = Bunch(key="original") + # This reproduces a problem when Bunch pickles have been created + # with scikit-learn 0.16 and are read with 0.17. Basically there + # is a surprising behaviour because reading bunch.key uses + # bunch.__dict__ (which is non empty for 0.16 Bunch objects) + # whereas assigning into bunch.key uses bunch.__setattr__. See + # https://github.com/scikit-learn/scikit-learn/issues/6196 for + # more details + bunch.__dict__["key"] = "set from __dict__" + bunch_from_pkl = loads(dumps(bunch)) + # After loading from pickle the __dict__ should have been ignored + assert bunch_from_pkl.key == "original" + assert bunch_from_pkl["key"] == "original" + # Making sure that changing the attr does change the value + # associated with __getitem__ as well + bunch_from_pkl.key = "changed" + assert bunch_from_pkl.key == "changed" + assert bunch_from_pkl["key"] == "changed" + + +def test_bunch_dir(): + # check that dir (important for autocomplete) shows attributes + data = load_iris() + assert "data" in dir(data) + + +def test_load_boston_error(): + """Check that we raise the ethical warning when trying to import `load_boston`.""" + msg = "The Boston housing prices dataset has an ethical problem" + with pytest.raises(ImportError, match=msg): + from sklearn.datasets import load_boston # noqa + + # other non-existing function should raise the usual import error + msg = "cannot import name 'non_existing_function' from 'sklearn.datasets'" + with pytest.raises(ImportError, match=msg): + from sklearn.datasets import non_existing_function # noqa diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/test_california_housing.py b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/test_california_housing.py new file mode 100644 index 0000000000000000000000000000000000000000..ef6fc95db80bfe46bb712113474ebb6bde4d3912 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/test_california_housing.py @@ -0,0 +1,37 @@ +"""Test the california_housing loader, if the data is available, +or if specifically requested via environment variable +(e.g. for CI jobs).""" +from functools import partial + +import pytest + +from sklearn.datasets.tests.test_common import check_return_X_y + + +def test_fetch(fetch_california_housing_fxt): + data = fetch_california_housing_fxt() + assert (20640, 8) == data.data.shape + assert (20640,) == data.target.shape + assert data.DESCR.startswith(".. 
_california_housing_dataset:")
+
+    # test return_X_y option
+    fetch_func = partial(fetch_california_housing_fxt)
+    check_return_X_y(data, fetch_func)
+
+
+def test_fetch_asframe(fetch_california_housing_fxt):
+    pd = pytest.importorskip("pandas")
+    bunch = fetch_california_housing_fxt(as_frame=True)
+    frame = bunch.frame
+    assert hasattr(bunch, "frame")
+    assert frame.shape == (20640, 9)
+    assert isinstance(bunch.data, pd.DataFrame)
+    assert isinstance(bunch.target, pd.Series)
+
+
+def test_pandas_dependency_message(fetch_california_housing_fxt, hide_available_pandas):
+    # Check that pandas is imported lazily and that an informative error
+    # message is raised when pandas is missing:
+    expected_msg = "fetch_california_housing with as_frame=True requires pandas"
+    with pytest.raises(ImportError, match=expected_msg):
+        fetch_california_housing_fxt(as_frame=True)
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/test_common.py b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/test_common.py
new file mode 100644
index 0000000000000000000000000000000000000000..8048a31041ddcc4926649ad8225fc11954e0eb57
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/test_common.py
@@ -0,0 +1,135 @@
+"""Test loaders for common functionality."""
+import inspect
+import os
+
+import numpy as np
+import pytest
+
+import sklearn.datasets
+
+
+def is_pillow_installed():
+    try:
+        import PIL  # noqa
+
+        return True
+    except ImportError:
+        return False
+
+
+FETCH_PYTEST_MARKERS = {
+    "return_X_y": {
+        "fetch_20newsgroups": pytest.mark.xfail(
+            reason="X is a list and does not have a shape argument"
+        ),
+        "fetch_openml": pytest.mark.xfail(
+            reason="fetch_openml requires a dataset name or id"
+        ),
+        "fetch_lfw_people": pytest.mark.skipif(
+            not is_pillow_installed(), reason="pillow is not installed"
+        ),
+    },
+    "as_frame": {
+        "fetch_openml": pytest.mark.xfail(
+            reason="fetch_openml requires a dataset name or id"
+        ),
+    },
+}
+
+
+def check_pandas_dependency_message(fetch_func):
+    try:
+        import pandas  # noqa
+
+        pytest.skip("This test requires pandas to not be installed")
+    except ImportError:
+        # Check that pandas is imported lazily and that an informative error
+        # message is raised when pandas is missing:
+        name = fetch_func.__name__
+        expected_msg = f"{name} with as_frame=True requires pandas"
+        with pytest.raises(ImportError, match=expected_msg):
+            fetch_func(as_frame=True)
+
+
+def check_return_X_y(bunch, dataset_func):
+    X_y_tuple = dataset_func(return_X_y=True)
+    assert isinstance(X_y_tuple, tuple)
+    assert X_y_tuple[0].shape == bunch.data.shape
+    assert X_y_tuple[1].shape == bunch.target.shape
+
+
+def check_as_frame(
+    bunch, dataset_func, expected_data_dtype=None, expected_target_dtype=None
+):
+    pd = pytest.importorskip("pandas")
+    frame_bunch = dataset_func(as_frame=True)
+    assert hasattr(frame_bunch, "frame")
+    assert isinstance(frame_bunch.frame, pd.DataFrame)
+    assert isinstance(frame_bunch.data, pd.DataFrame)
+    assert frame_bunch.data.shape == bunch.data.shape
+    if frame_bunch.target.ndim > 1:
+        assert isinstance(frame_bunch.target, pd.DataFrame)
+    else:
+        assert isinstance(frame_bunch.target, pd.Series)
+    assert frame_bunch.target.shape[0] == bunch.target.shape[0]
+    if expected_data_dtype is not None:
+        assert np.all(frame_bunch.data.dtypes == expected_data_dtype)
+    if expected_target_dtype is not None:
+        assert np.all(frame_bunch.target.dtypes == expected_target_dtype)
+
+    # Test for return_X_y and as_frame=True
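+    # Combining the two options should hand the pandas containers back
+    # directly as an (X, y) tuple rather than a Bunch; only the container
+    # types need to be verified here.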
+    frame_X, frame_y = dataset_func(as_frame=True, return_X_y=True)
+    assert isinstance(frame_X, pd.DataFrame)
+    if frame_y.ndim > 1:
+        assert isinstance(frame_y, pd.DataFrame)
+    else:
+        assert isinstance(frame_y, pd.Series)
+
+
+def _skip_network_tests():
+    return os.environ.get("SKLEARN_SKIP_NETWORK_TESTS", "1") == "1"
+
+
+def _generate_func_supporting_param(param, dataset_type=("load", "fetch")):
+    markers_fetch = FETCH_PYTEST_MARKERS.get(param, {})
+    for name, obj in inspect.getmembers(sklearn.datasets):
+        if not inspect.isfunction(obj):
+            continue
+
+        is_dataset_type = any([name.startswith(t) for t in dataset_type])
+        is_support_param = param in inspect.signature(obj).parameters
+        if is_dataset_type and is_support_param:
+            # skip fetchers when network-dependent tests are disabled
+            marks = [
+                pytest.mark.skipif(
+                    condition=name.startswith("fetch") and _skip_network_tests(),
+                    reason="Skip because fetcher requires network access",
+                )
+            ]
+            if name in markers_fetch:
+                marks.append(markers_fetch[name])
+
+            yield pytest.param(name, obj, marks=marks)
+
+
+@pytest.mark.parametrize(
+    "name, dataset_func", _generate_func_supporting_param("return_X_y")
+)
+def test_common_check_return_X_y(name, dataset_func):
+    bunch = dataset_func()
+    check_return_X_y(bunch, dataset_func)
+
+
+@pytest.mark.parametrize(
+    "name, dataset_func", _generate_func_supporting_param("as_frame")
+)
+def test_common_check_as_frame(name, dataset_func):
+    bunch = dataset_func()
+    check_as_frame(bunch, dataset_func)
+
+
+@pytest.mark.parametrize(
+    "name, dataset_func", _generate_func_supporting_param("as_frame")
+)
+def test_common_check_pandas_dependency(name, dataset_func):
+    check_pandas_dependency_message(dataset_func)
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/test_covtype.py b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/test_covtype.py
new file mode 100644
index 0000000000000000000000000000000000000000..e44fdaae69ec3ec7f1c7bc8c77fc1f6a15d5f331
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/test_covtype.py
@@ -0,0 +1,54 @@
+"""Test the covtype loader, if the data is available,
+or if specifically requested via environment variable
+(e.g. for CI jobs)."""
+from functools import partial
+
+import pytest
+
+from sklearn.datasets.tests.test_common import check_return_X_y
+
+
+def test_fetch(fetch_covtype_fxt, global_random_seed):
+    data1 = fetch_covtype_fxt(shuffle=True, random_state=global_random_seed)
+    data2 = fetch_covtype_fxt(shuffle=True, random_state=global_random_seed + 1)
+
+    X1, X2 = data1["data"], data2["data"]
+    assert (581012, 54) == X1.shape
+    assert X1.shape == X2.shape
+
+    assert X1.sum() == X2.sum()
+
+    y1, y2 = data1["target"], data2["target"]
+    assert (X1.shape[0],) == y1.shape
+    assert (X1.shape[0],) == y2.shape
+
+    descr_prefix = ".. 
_covtype_dataset:" + assert data1.DESCR.startswith(descr_prefix) + assert data2.DESCR.startswith(descr_prefix) + + # test return_X_y option + fetch_func = partial(fetch_covtype_fxt) + check_return_X_y(data1, fetch_func) + + +def test_fetch_asframe(fetch_covtype_fxt): + pytest.importorskip("pandas") + + bunch = fetch_covtype_fxt(as_frame=True) + assert hasattr(bunch, "frame") + frame = bunch.frame + assert frame.shape == (581012, 55) + assert bunch.data.shape == (581012, 54) + assert bunch.target.shape == (581012,) + + column_names = set(frame.columns) + + # enumerated names are added correctly + assert set(f"Wilderness_Area_{i}" for i in range(4)) < column_names + assert set(f"Soil_Type_{i}" for i in range(40)) < column_names + + +def test_pandas_dependency_message(fetch_covtype_fxt, hide_available_pandas): + expected_msg = "fetch_covtype with as_frame=True requires pandas" + with pytest.raises(ImportError, match=expected_msg): + fetch_covtype_fxt(as_frame=True) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/test_kddcup99.py b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/test_kddcup99.py new file mode 100644 index 0000000000000000000000000000000000000000..5f6e9c83a30b8d419880f3d15fffb0fe83f2b559 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/test_kddcup99.py @@ -0,0 +1,89 @@ +"""Test kddcup99 loader, if the data is available, +or if specifically requested via environment variable +(e.g. for CI jobs). + +Only 'percent10' mode is tested, as the full data +is too big to use in unit-testing. +""" + +from functools import partial + +import pytest + +from sklearn.datasets.tests.test_common import ( + check_as_frame, + check_pandas_dependency_message, + check_return_X_y, +) + + +@pytest.mark.parametrize("as_frame", [True, False]) +@pytest.mark.parametrize( + "subset, n_samples, n_features", + [ + (None, 494021, 41), + ("SA", 100655, 41), + ("SF", 73237, 4), + ("http", 58725, 3), + ("smtp", 9571, 3), + ], +) +def test_fetch_kddcup99_percent10( + fetch_kddcup99_fxt, as_frame, subset, n_samples, n_features +): + data = fetch_kddcup99_fxt(subset=subset, as_frame=as_frame) + assert data.data.shape == (n_samples, n_features) + assert data.target.shape == (n_samples,) + if as_frame: + assert data.frame.shape == (n_samples, n_features + 1) + assert data.DESCR.startswith(".. 
_kddcup99_dataset:")
+
+
+def test_fetch_kddcup99_return_X_y(fetch_kddcup99_fxt):
+    fetch_func = partial(fetch_kddcup99_fxt, subset="smtp")
+    data = fetch_func()
+    check_return_X_y(data, fetch_func)
+
+
+def test_fetch_kddcup99_as_frame(fetch_kddcup99_fxt):
+    bunch = fetch_kddcup99_fxt()
+    check_as_frame(bunch, fetch_kddcup99_fxt)
+
+
+def test_fetch_kddcup99_shuffle(fetch_kddcup99_fxt):
+    dataset = fetch_kddcup99_fxt(
+        random_state=0,
+        subset="SA",
+        percent10=True,
+    )
+    dataset_shuffled = fetch_kddcup99_fxt(
+        random_state=0,
+        subset="SA",
+        shuffle=True,
+        percent10=True,
+    )
+    assert set(dataset["target"]) == set(dataset_shuffled["target"])
+    assert dataset_shuffled.data.shape == dataset.data.shape
+    assert dataset_shuffled.target.shape == dataset.target.shape
+
+
+def test_pandas_dependency_message(fetch_kddcup99_fxt, hide_available_pandas):
+    check_pandas_dependency_message(fetch_kddcup99_fxt)
+
+
+def test_corrupted_file_error_message(fetch_kddcup99_fxt, tmp_path):
+    """Check that a nice error message is raised when cache is corrupted."""
+    kddcup99_dir = tmp_path / "kddcup99_10-py3"
+    kddcup99_dir.mkdir()
+    samples_path = kddcup99_dir / "samples"
+
+    with samples_path.open("wb") as f:
+        f.write(b"THIS IS CORRUPTED")
+
+    msg = (
+        "The cache for fetch_kddcup99 is invalid, please "
+        f"delete {str(kddcup99_dir)} and run the fetch_kddcup99 again"
+    )
+
+    with pytest.raises(OSError, match=msg):
+        fetch_kddcup99_fxt(data_home=str(tmp_path))
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/test_lfw.py b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/test_lfw.py
new file mode 100644
index 0000000000000000000000000000000000000000..92edb99ce3b0b0a158c74f64812aaa997e7b36dc
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/test_lfw.py
@@ -0,0 +1,241 @@
+"""These tests for LFW require medium-sized data downloading and processing.
+
+If the data has not already been downloaded by running the examples,
+the tests won't run (they are skipped).
+
+If the tests are run, the first execution will be long (typically a bit
+more than a couple of minutes) but as the dataset loader is leveraging
+joblib, successive runs will be fast (less than 200ms).
+""" + +import os +import random +import shutil +import tempfile +from functools import partial + +import numpy as np +import pytest + +from sklearn.datasets import fetch_lfw_pairs, fetch_lfw_people +from sklearn.datasets.tests.test_common import check_return_X_y +from sklearn.utils._testing import assert_array_equal + +SCIKIT_LEARN_DATA = None +SCIKIT_LEARN_EMPTY_DATA = None +LFW_HOME = None + +FAKE_NAMES = [ + "Abdelatif_Smith", + "Abhati_Kepler", + "Camara_Alvaro", + "Chen_Dupont", + "John_Lee", + "Lin_Bauman", + "Onur_Lopez", +] + + +def setup_module(): + """Test fixture run once and common to all tests of this module""" + Image = pytest.importorskip("PIL.Image") + + global SCIKIT_LEARN_DATA, SCIKIT_LEARN_EMPTY_DATA, LFW_HOME + + SCIKIT_LEARN_DATA = tempfile.mkdtemp(prefix="scikit_learn_lfw_test_") + LFW_HOME = os.path.join(SCIKIT_LEARN_DATA, "lfw_home") + + SCIKIT_LEARN_EMPTY_DATA = tempfile.mkdtemp(prefix="scikit_learn_empty_test_") + + if not os.path.exists(LFW_HOME): + os.makedirs(LFW_HOME) + + random_state = random.Random(42) + np_rng = np.random.RandomState(42) + + # generate some random jpeg files for each person + counts = {} + for name in FAKE_NAMES: + folder_name = os.path.join(LFW_HOME, "lfw_funneled", name) + if not os.path.exists(folder_name): + os.makedirs(folder_name) + + n_faces = np_rng.randint(1, 5) + counts[name] = n_faces + for i in range(n_faces): + file_path = os.path.join(folder_name, name + "_%04d.jpg" % i) + uniface = np_rng.randint(0, 255, size=(250, 250, 3)) + img = Image.fromarray(uniface.astype(np.uint8)) + img.save(file_path) + + # add some random file pollution to test robustness + with open(os.path.join(LFW_HOME, "lfw_funneled", ".test.swp"), "wb") as f: + f.write(b"Text file to be ignored by the dataset loader.") + + # generate some pairing metadata files using the same format as LFW + with open(os.path.join(LFW_HOME, "pairsDevTrain.txt"), "wb") as f: + f.write(b"10\n") + more_than_two = [name for name, count in counts.items() if count >= 2] + for i in range(5): + name = random_state.choice(more_than_two) + first, second = random_state.sample(range(counts[name]), 2) + f.write(("%s\t%d\t%d\n" % (name, first, second)).encode()) + + for i in range(5): + first_name, second_name = random_state.sample(FAKE_NAMES, 2) + first_index = np_rng.choice(np.arange(counts[first_name])) + second_index = np_rng.choice(np.arange(counts[second_name])) + f.write( + ( + "%s\t%d\t%s\t%d\n" + % (first_name, first_index, second_name, second_index) + ).encode() + ) + + with open(os.path.join(LFW_HOME, "pairsDevTest.txt"), "wb") as f: + f.write(b"Fake place holder that won't be tested") + + with open(os.path.join(LFW_HOME, "pairs.txt"), "wb") as f: + f.write(b"Fake place holder that won't be tested") + + +def teardown_module(): + """Test fixture (clean up) run once after all tests of this module""" + if os.path.isdir(SCIKIT_LEARN_DATA): + shutil.rmtree(SCIKIT_LEARN_DATA) + if os.path.isdir(SCIKIT_LEARN_EMPTY_DATA): + shutil.rmtree(SCIKIT_LEARN_EMPTY_DATA) + + +def test_load_empty_lfw_people(): + with pytest.raises(OSError): + fetch_lfw_people(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False) + + +def test_load_fake_lfw_people(): + lfw_people = fetch_lfw_people( + data_home=SCIKIT_LEARN_DATA, min_faces_per_person=3, download_if_missing=False + ) + + # The data is croped around the center as a rectangular bounding box + # around the face. 
Colors are converted to gray levels: + assert lfw_people.images.shape == (10, 62, 47) + assert lfw_people.data.shape == (10, 2914) + + # the target is array of person integer ids + assert_array_equal(lfw_people.target, [2, 0, 1, 0, 2, 0, 2, 1, 1, 2]) + + # names of the persons can be found using the target_names array + expected_classes = ["Abdelatif Smith", "Abhati Kepler", "Onur Lopez"] + assert_array_equal(lfw_people.target_names, expected_classes) + + # It is possible to ask for the original data without any croping or color + # conversion and not limit on the number of picture per person + lfw_people = fetch_lfw_people( + data_home=SCIKIT_LEARN_DATA, + resize=None, + slice_=None, + color=True, + download_if_missing=False, + ) + assert lfw_people.images.shape == (17, 250, 250, 3) + assert lfw_people.DESCR.startswith(".. _labeled_faces_in_the_wild_dataset:") + + # the ids and class names are the same as previously + assert_array_equal( + lfw_people.target, [0, 0, 1, 6, 5, 6, 3, 6, 0, 3, 6, 1, 2, 4, 5, 1, 2] + ) + assert_array_equal( + lfw_people.target_names, + [ + "Abdelatif Smith", + "Abhati Kepler", + "Camara Alvaro", + "Chen Dupont", + "John Lee", + "Lin Bauman", + "Onur Lopez", + ], + ) + + # test return_X_y option + fetch_func = partial( + fetch_lfw_people, + data_home=SCIKIT_LEARN_DATA, + resize=None, + slice_=None, + color=True, + download_if_missing=False, + ) + check_return_X_y(lfw_people, fetch_func) + + +def test_load_fake_lfw_people_too_restrictive(): + with pytest.raises(ValueError): + fetch_lfw_people( + data_home=SCIKIT_LEARN_DATA, + min_faces_per_person=100, + download_if_missing=False, + ) + + +def test_load_empty_lfw_pairs(): + with pytest.raises(OSError): + fetch_lfw_pairs(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False) + + +def test_load_fake_lfw_pairs(): + lfw_pairs_train = fetch_lfw_pairs( + data_home=SCIKIT_LEARN_DATA, download_if_missing=False + ) + + # The data is croped around the center as a rectangular bounding box + # around the face. Colors are converted to gray levels: + assert lfw_pairs_train.pairs.shape == (10, 2, 62, 47) + + # the target is whether the person is the same or not + assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0]) + + # names of the persons can be found using the target_names array + expected_classes = ["Different persons", "Same person"] + assert_array_equal(lfw_pairs_train.target_names, expected_classes) + + # It is possible to ask for the original data without any croping or color + # conversion + lfw_pairs_train = fetch_lfw_pairs( + data_home=SCIKIT_LEARN_DATA, + resize=None, + slice_=None, + color=True, + download_if_missing=False, + ) + assert lfw_pairs_train.pairs.shape == (10, 2, 250, 250, 3) + + # the ids and class names are the same as previously + assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0]) + assert_array_equal(lfw_pairs_train.target_names, expected_classes) + + assert lfw_pairs_train.DESCR.startswith(".. _labeled_faces_in_the_wild_dataset:") + + +def test_fetch_lfw_people_internal_cropping(): + """Check that we properly crop the images. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/24942 + """ + # If cropping was not done properly and we don't resize the images, the images would + # have their original size (250x250) and the image would not fit in the NumPy array + # pre-allocated based on `slice_` parameter. 
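+    # For the `slice_` used below, a properly cropped image must therefore
+    # have shape (195 - 70, 172 - 78) == (125, 94), which the final assertion
+    # recomputes directly from the slice bounds.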
+ slice_ = (slice(70, 195), slice(78, 172)) + lfw = fetch_lfw_people( + data_home=SCIKIT_LEARN_DATA, + min_faces_per_person=3, + download_if_missing=False, + resize=None, + slice_=slice_, + ) + assert lfw.images[0].shape == ( + slice_[0].stop - slice_[0].start, + slice_[1].stop - slice_[1].start, + ) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/test_olivetti_faces.py b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/test_olivetti_faces.py new file mode 100644 index 0000000000000000000000000000000000000000..e5d6c853aa454ff31dd1edfccee4993c1e133c4d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/test_olivetti_faces.py @@ -0,0 +1,26 @@ +"""Test Olivetti faces fetcher, if the data is available, +or if specifically requested via environment variable +(e.g. for CI jobs).""" + +import numpy as np + +from sklearn.datasets.tests.test_common import check_return_X_y +from sklearn.utils import Bunch +from sklearn.utils._testing import assert_array_equal + + +def test_olivetti_faces(fetch_olivetti_faces_fxt): + data = fetch_olivetti_faces_fxt(shuffle=True, random_state=0) + + assert isinstance(data, Bunch) + for expected_keys in ("data", "images", "target", "DESCR"): + assert expected_keys in data.keys() + + assert data.data.shape == (400, 4096) + assert data.images.shape == (400, 64, 64) + assert data.target.shape == (400,) + assert_array_equal(np.unique(np.sort(data.target)), np.arange(40)) + assert data.DESCR.startswith(".. _olivetti_faces_dataset:") + + # test the return_X_y option + check_return_X_y(data, fetch_olivetti_faces_fxt) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/test_openml.py b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/test_openml.py new file mode 100644 index 0000000000000000000000000000000000000000..3ff2557aa4f9efae21d819507ada5115ec277f0e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/test_openml.py @@ -0,0 +1,1657 @@ +"""Test the openml loader.""" +import gzip +import json +import os +import re +from functools import partial +from importlib import resources +from io import BytesIO +from urllib.error import HTTPError + +import numpy as np +import pytest +import scipy.sparse + +import sklearn +from sklearn import config_context +from sklearn.datasets import fetch_openml as fetch_openml_orig +from sklearn.datasets._openml import ( + _OPENML_PREFIX, + _get_local_path, + _open_openml_url, + _retry_with_clean_cache, +) +from sklearn.utils import Bunch, check_pandas_support +from sklearn.utils._testing import ( + SkipTest, + assert_allclose, + assert_array_equal, + fails_if_pypy, +) + +OPENML_TEST_DATA_MODULE = "sklearn.datasets.tests.data.openml" +# if True, urlopen will be monkey patched to only use local files +test_offline = True + + +class _MockHTTPResponse: + def __init__(self, data, is_gzip): + self.data = data + self.is_gzip = is_gzip + + def read(self, amt=-1): + return self.data.read(amt) + + def close(self): + self.data.close() + + def info(self): + if self.is_gzip: + return {"Content-Encoding": "gzip"} + return {} + + def __iter__(self): + return iter(self.data) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + return False + + +# Disable the disk-based cache when testing `fetch_openml`: +# the mock data in sklearn/datasets/tests/data/openml/ is not always consistent +# with the version on openml.org. 
If one were to load the dataset outside of +# the tests, it may result in data that does not represent openml.org. +fetch_openml = partial(fetch_openml_orig, data_home=None) + + +def _monkey_patch_webbased_functions(context, data_id, gzip_response): + # monkey patches the urlopen function. Important note: Do NOT use this + # in combination with a regular cache directory, as the files that are + # stored as cache should not be mixed up with real openml datasets + url_prefix_data_description = "https://api.openml.org/api/v1/json/data/" + url_prefix_data_features = "https://api.openml.org/api/v1/json/data/features/" + url_prefix_download_data = "https://api.openml.org/data/v1/" + url_prefix_data_list = "https://api.openml.org/api/v1/json/data/list/" + + path_suffix = ".gz" + read_fn = gzip.open + + data_module = OPENML_TEST_DATA_MODULE + "." + f"id_{data_id}" + + def _file_name(url, suffix): + output = ( + re.sub(r"\W", "-", url[len("https://api.openml.org/") :]) + + suffix + + path_suffix + ) + # Shorten the filenames to have better compatibility with windows 10 + # and filenames > 260 characters + return ( + output.replace("-json-data-list", "-jdl") + .replace("-json-data-features", "-jdf") + .replace("-json-data-qualities", "-jdq") + .replace("-json-data", "-jd") + .replace("-data_name", "-dn") + .replace("-download", "-dl") + .replace("-limit", "-l") + .replace("-data_version", "-dv") + .replace("-status", "-s") + .replace("-deactivated", "-dact") + .replace("-active", "-act") + ) + + def _mock_urlopen_shared(url, has_gzip_header, expected_prefix, suffix): + assert url.startswith(expected_prefix) + + data_file_name = _file_name(url, suffix) + data_file_path = resources.files(data_module) / data_file_name + + with data_file_path.open("rb") as f: + if has_gzip_header and gzip_response: + fp = BytesIO(f.read()) + return _MockHTTPResponse(fp, True) + else: + decompressed_f = read_fn(f, "rb") + fp = BytesIO(decompressed_f.read()) + return _MockHTTPResponse(fp, False) + + def _mock_urlopen_data_description(url, has_gzip_header): + return _mock_urlopen_shared( + url=url, + has_gzip_header=has_gzip_header, + expected_prefix=url_prefix_data_description, + suffix=".json", + ) + + def _mock_urlopen_data_features(url, has_gzip_header): + return _mock_urlopen_shared( + url=url, + has_gzip_header=has_gzip_header, + expected_prefix=url_prefix_data_features, + suffix=".json", + ) + + def _mock_urlopen_download_data(url, has_gzip_header): + return _mock_urlopen_shared( + url=url, + has_gzip_header=has_gzip_header, + expected_prefix=url_prefix_download_data, + suffix=".arff", + ) + + def _mock_urlopen_data_list(url, has_gzip_header): + assert url.startswith(url_prefix_data_list) + + data_file_name = _file_name(url, ".json") + data_file_path = resources.files(data_module) / data_file_name + + # load the file itself, to simulate a http error + with data_file_path.open("rb") as f: + decompressed_f = read_fn(f, "rb") + decoded_s = decompressed_f.read().decode("utf-8") + json_data = json.loads(decoded_s) + if "error" in json_data: + raise HTTPError( + url=None, code=412, msg="Simulated mock error", hdrs=None, fp=BytesIO() + ) + + with data_file_path.open("rb") as f: + if has_gzip_header: + fp = BytesIO(f.read()) + return _MockHTTPResponse(fp, True) + else: + decompressed_f = read_fn(f, "rb") + fp = BytesIO(decompressed_f.read()) + return _MockHTTPResponse(fp, False) + + def _mock_urlopen(request, *args, **kwargs): + url = request.get_full_url() + has_gzip_header = request.get_header("Accept-encoding") == "gzip" 
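+        # Dispatch on the URL prefix. The generic data-description prefix is
+        # matched last because the data-list and data-features URLs also
+        # start with it.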
+        if url.startswith(url_prefix_data_list):
+            return _mock_urlopen_data_list(url, has_gzip_header)
+        elif url.startswith(url_prefix_data_features):
+            return _mock_urlopen_data_features(url, has_gzip_header)
+        elif url.startswith(url_prefix_download_data):
+            return _mock_urlopen_download_data(url, has_gzip_header)
+        elif url.startswith(url_prefix_data_description):
+            return _mock_urlopen_data_description(url, has_gzip_header)
+        else:
+            raise ValueError("Unknown mocking URL pattern: %s" % url)
+
+    # XXX: Global variable
+    if test_offline:
+        context.setattr(sklearn.datasets._openml, "urlopen", _mock_urlopen)
+
+
+###############################################################################
+# Test the behaviour of `fetch_openml` depending on the input parameters.
+
+
+# Known failure of PyPy for OpenML. See the following issue:
+# https://github.com/scikit-learn/scikit-learn/issues/18906
+@fails_if_pypy
+@pytest.mark.parametrize(
+    "data_id, dataset_params, n_samples, n_features, n_targets",
+    [
+        # iris
+        (61, {"data_id": 61}, 150, 4, 1),
+        (61, {"name": "iris", "version": 1}, 150, 4, 1),
+        # anneal
+        (2, {"data_id": 2}, 11, 38, 1),
+        (2, {"name": "anneal", "version": 1}, 11, 38, 1),
+        # cpu
+        (561, {"data_id": 561}, 209, 7, 1),
+        (561, {"name": "cpu", "version": 1}, 209, 7, 1),
+        # emotions
+        (40589, {"data_id": 40589}, 13, 72, 6),
+        # adult-census
+        (1119, {"data_id": 1119}, 10, 14, 1),
+        (1119, {"name": "adult-census"}, 10, 14, 1),
+        # miceprotein
+        (40966, {"data_id": 40966}, 7, 77, 1),
+        (40966, {"name": "MiceProtein"}, 7, 77, 1),
+        # titanic
+        (40945, {"data_id": 40945}, 1309, 13, 1),
+    ],
)
+@pytest.mark.parametrize("parser", ["liac-arff", "pandas"])
+@pytest.mark.parametrize("gzip_response", [True, False])
+def test_fetch_openml_as_frame_true(
+    monkeypatch,
+    data_id,
+    dataset_params,
+    n_samples,
+    n_features,
+    n_targets,
+    parser,
+    gzip_response,
+):
+    """Check the behaviour of `fetch_openml` with `as_frame=True`.
+
+    Fetch by ID and/or name (depending on whether the file was previously
+    cached).
+    """
+    pd = pytest.importorskip("pandas")
+
+    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=gzip_response)
+    bunch = fetch_openml(
+        as_frame=True,
+        cache=False,
+        parser=parser,
+        **dataset_params,
+    )
+
+    assert int(bunch.details["id"]) == data_id
+    assert isinstance(bunch, Bunch)
+
+    assert isinstance(bunch.frame, pd.DataFrame)
+    assert bunch.frame.shape == (n_samples, n_features + n_targets)
+
+    assert isinstance(bunch.data, pd.DataFrame)
+    assert bunch.data.shape == (n_samples, n_features)
+
+    if n_targets == 1:
+        assert isinstance(bunch.target, pd.Series)
+        assert bunch.target.shape == (n_samples,)
+    else:
+        assert isinstance(bunch.target, pd.DataFrame)
+        assert bunch.target.shape == (n_samples, n_targets)
+
+    assert bunch.categories is None
+
+
+# Known failure of PyPy for OpenML. 
See the following issue: +# https://github.com/scikit-learn/scikit-learn/issues/18906 +@fails_if_pypy +@pytest.mark.parametrize( + "data_id, dataset_params, n_samples, n_features, n_targets", + [ + # iris + (61, {"data_id": 61}, 150, 4, 1), + (61, {"name": "iris", "version": 1}, 150, 4, 1), + # anneal + (2, {"data_id": 2}, 11, 38, 1), + (2, {"name": "anneal", "version": 1}, 11, 38, 1), + # cpu + (561, {"data_id": 561}, 209, 7, 1), + (561, {"name": "cpu", "version": 1}, 209, 7, 1), + # emotions + (40589, {"data_id": 40589}, 13, 72, 6), + # adult-census + (1119, {"data_id": 1119}, 10, 14, 1), + (1119, {"name": "adult-census"}, 10, 14, 1), + # miceprotein + (40966, {"data_id": 40966}, 7, 77, 1), + (40966, {"name": "MiceProtein"}, 7, 77, 1), + ], +) +@pytest.mark.parametrize("parser", ["liac-arff", "pandas"]) +def test_fetch_openml_as_frame_false( + monkeypatch, + data_id, + dataset_params, + n_samples, + n_features, + n_targets, + parser, +): + """Check the behaviour of `fetch_openml` with `as_frame=False`. + + Fetch both by ID and/or name + version. + """ + pytest.importorskip("pandas") + + _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=True) + bunch = fetch_openml( + as_frame=False, + cache=False, + parser=parser, + **dataset_params, + ) + assert int(bunch.details["id"]) == data_id + assert isinstance(bunch, Bunch) + + assert bunch.frame is None + + assert isinstance(bunch.data, np.ndarray) + assert bunch.data.shape == (n_samples, n_features) + + assert isinstance(bunch.target, np.ndarray) + if n_targets == 1: + assert bunch.target.shape == (n_samples,) + else: + assert bunch.target.shape == (n_samples, n_targets) + + assert isinstance(bunch.categories, dict) + + +# Known failure of PyPy for OpenML. See the following issue: +# https://github.com/scikit-learn/scikit-learn/issues/18906 +@fails_if_pypy +@pytest.mark.parametrize("data_id", [61, 1119, 40945]) +def test_fetch_openml_consistency_parser(monkeypatch, data_id): + """Check the consistency of the LIAC-ARFF and pandas parsers.""" + pd = pytest.importorskip("pandas") + + _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=True) + bunch_liac = fetch_openml( + data_id=data_id, + as_frame=True, + cache=False, + parser="liac-arff", + ) + bunch_pandas = fetch_openml( + data_id=data_id, + as_frame=True, + cache=False, + parser="pandas", + ) + + # The data frames for the input features should match up to some numerical + # dtype conversions (e.g. float64 <=> Int64) due to limitations of the + # LIAC-ARFF parser. + data_liac, data_pandas = bunch_liac.data, bunch_pandas.data + + def convert_numerical_dtypes(series): + pandas_series = data_pandas[series.name] + if pd.api.types.is_numeric_dtype(pandas_series): + return series.astype(pandas_series.dtype) + else: + return series + + data_liac_with_fixed_dtypes = data_liac.apply(convert_numerical_dtypes) + pd.testing.assert_frame_equal(data_liac_with_fixed_dtypes, data_pandas) + + # Let's also check that the .frame attributes also match + frame_liac, frame_pandas = bunch_liac.frame, bunch_pandas.frame + + # Note that the .frame attribute is a superset of the .data attribute: + pd.testing.assert_frame_equal(frame_pandas[bunch_pandas.feature_names], data_pandas) + + # However the remaining columns, typically the target(s), are not necessarily + # dtyped similarly by both parsers due to limitations of the LIAC-ARFF parser. 
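+    # (e.g. an integer-valued target can come back as float64 from liac-arff
+    # but as a nullable Int64 from pandas).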
+ # Therefore, extra dtype conversions are required for those columns: + + def convert_numerical_and_categorical_dtypes(series): + pandas_series = frame_pandas[series.name] + if pd.api.types.is_numeric_dtype(pandas_series): + return series.astype(pandas_series.dtype) + elif isinstance(pandas_series.dtype, pd.CategoricalDtype): + # Compare categorical features by converting categorical liac uses + # strings to denote the categories, we rename the categories to make + # them comparable to the pandas parser. Fixing this behavior in + # LIAC-ARFF would allow to check the consistency in the future but + # we do not plan to maintain the LIAC-ARFF on the long term. + return series.cat.rename_categories(pandas_series.cat.categories) + else: + return series + + frame_liac_with_fixed_dtypes = frame_liac.apply( + convert_numerical_and_categorical_dtypes + ) + pd.testing.assert_frame_equal(frame_liac_with_fixed_dtypes, frame_pandas) + + +# Known failure of PyPy for OpenML. See the following issue: +# https://github.com/scikit-learn/scikit-learn/issues/18906 +@fails_if_pypy +@pytest.mark.parametrize("parser", ["liac-arff", "pandas"]) +def test_fetch_openml_equivalence_array_dataframe(monkeypatch, parser): + """Check the equivalence of the dataset when using `as_frame=False` and + `as_frame=True`. + """ + pytest.importorskip("pandas") + + data_id = 61 + _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=True) + bunch_as_frame_true = fetch_openml( + data_id=data_id, + as_frame=True, + cache=False, + parser=parser, + ) + + bunch_as_frame_false = fetch_openml( + data_id=data_id, + as_frame=False, + cache=False, + parser=parser, + ) + + assert_allclose(bunch_as_frame_false.data, bunch_as_frame_true.data) + assert_array_equal(bunch_as_frame_false.target, bunch_as_frame_true.target) + + +# Known failure of PyPy for OpenML. See the following issue: +# https://github.com/scikit-learn/scikit-learn/issues/18906 +@fails_if_pypy +@pytest.mark.parametrize("parser", ["liac-arff", "pandas"]) +def test_fetch_openml_iris_pandas(monkeypatch, parser): + """Check fetching on a numerical only dataset with string labels.""" + pd = pytest.importorskip("pandas") + CategoricalDtype = pd.api.types.CategoricalDtype + data_id = 61 + data_shape = (150, 4) + target_shape = (150,) + frame_shape = (150, 5) + + target_dtype = CategoricalDtype( + ["Iris-setosa", "Iris-versicolor", "Iris-virginica"] + ) + data_dtypes = [np.float64] * 4 + data_names = ["sepallength", "sepalwidth", "petallength", "petalwidth"] + target_name = "class" + + _monkey_patch_webbased_functions(monkeypatch, data_id, True) + + bunch = fetch_openml( + data_id=data_id, + as_frame=True, + cache=False, + parser=parser, + ) + data = bunch.data + target = bunch.target + frame = bunch.frame + + assert isinstance(data, pd.DataFrame) + assert np.all(data.dtypes == data_dtypes) + assert data.shape == data_shape + assert np.all(data.columns == data_names) + assert np.all(bunch.feature_names == data_names) + assert bunch.target_names == [target_name] + + assert isinstance(target, pd.Series) + assert target.dtype == target_dtype + assert target.shape == target_shape + assert target.name == target_name + assert target.index.is_unique + + assert isinstance(frame, pd.DataFrame) + assert frame.shape == frame_shape + assert np.all(frame.dtypes == data_dtypes + [target_dtype]) + assert frame.index.is_unique + + +# Known failure of PyPy for OpenML. 
See the following issue: +# https://github.com/scikit-learn/scikit-learn/issues/18906 +@fails_if_pypy +@pytest.mark.parametrize("parser", ["liac-arff", "pandas"]) +@pytest.mark.parametrize("target_column", ["petalwidth", ["petalwidth", "petallength"]]) +def test_fetch_openml_forcing_targets(monkeypatch, parser, target_column): + """Check that we can force the target to not be the default target.""" + pd = pytest.importorskip("pandas") + + data_id = 61 + _monkey_patch_webbased_functions(monkeypatch, data_id, True) + bunch_forcing_target = fetch_openml( + data_id=data_id, + as_frame=True, + cache=False, + target_column=target_column, + parser=parser, + ) + bunch_default = fetch_openml( + data_id=data_id, + as_frame=True, + cache=False, + parser=parser, + ) + + pd.testing.assert_frame_equal(bunch_forcing_target.frame, bunch_default.frame) + if isinstance(target_column, list): + pd.testing.assert_index_equal( + bunch_forcing_target.target.columns, pd.Index(target_column) + ) + assert bunch_forcing_target.data.shape == (150, 3) + else: + assert bunch_forcing_target.target.name == target_column + assert bunch_forcing_target.data.shape == (150, 4) + + +# Known failure of PyPy for OpenML. See the following issue: +# https://github.com/scikit-learn/scikit-learn/issues/18906 +@fails_if_pypy +@pytest.mark.parametrize("data_id", [61, 2, 561, 40589, 1119]) +@pytest.mark.parametrize("parser", ["liac-arff", "pandas"]) +def test_fetch_openml_equivalence_frame_return_X_y(monkeypatch, data_id, parser): + """Check the behaviour of `return_X_y=True` when `as_frame=True`.""" + pd = pytest.importorskip("pandas") + + _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=True) + bunch = fetch_openml( + data_id=data_id, + as_frame=True, + cache=False, + return_X_y=False, + parser=parser, + ) + X, y = fetch_openml( + data_id=data_id, + as_frame=True, + cache=False, + return_X_y=True, + parser=parser, + ) + + pd.testing.assert_frame_equal(bunch.data, X) + if isinstance(y, pd.Series): + pd.testing.assert_series_equal(bunch.target, y) + else: + pd.testing.assert_frame_equal(bunch.target, y) + + +# Known failure of PyPy for OpenML. See the following issue: +# https://github.com/scikit-learn/scikit-learn/issues/18906 +@fails_if_pypy +@pytest.mark.parametrize("data_id", [61, 561, 40589, 1119]) +@pytest.mark.parametrize("parser", ["liac-arff", "pandas"]) +def test_fetch_openml_equivalence_array_return_X_y(monkeypatch, data_id, parser): + """Check the behaviour of `return_X_y=True` when `as_frame=False`.""" + pytest.importorskip("pandas") + + _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=True) + bunch = fetch_openml( + data_id=data_id, + as_frame=False, + cache=False, + return_X_y=False, + parser=parser, + ) + X, y = fetch_openml( + data_id=data_id, + as_frame=False, + cache=False, + return_X_y=True, + parser=parser, + ) + + assert_array_equal(bunch.data, X) + assert_array_equal(bunch.target, y) + + +# Known failure of PyPy for OpenML. See the following issue: +# https://github.com/scikit-learn/scikit-learn/issues/18906 +@fails_if_pypy +def test_fetch_openml_difference_parsers(monkeypatch): + """Check the difference between liac-arff and pandas parser.""" + pytest.importorskip("pandas") + + data_id = 1119 + _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=True) + # When `as_frame=False`, the categories will be ordinally encoded with + # liac-arff parser while this is not the case with pandas parser. 
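+    # The assertions below check exactly that: the liac-arff array is fully
+    # numeric (dtype kind "f"), whereas the pandas array keeps the original
+    # strings and therefore has object dtype.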
+ as_frame = False + bunch_liac_arff = fetch_openml( + data_id=data_id, + as_frame=as_frame, + cache=False, + parser="liac-arff", + ) + bunch_pandas = fetch_openml( + data_id=data_id, + as_frame=as_frame, + cache=False, + parser="pandas", + ) + + assert bunch_liac_arff.data.dtype.kind == "f" + assert bunch_pandas.data.dtype == "O" + + +############################################################################### +# Test the ARFF parsing on several dataset to check if detect the correct +# types (categories, integers, floats). + + +@pytest.fixture(scope="module") +def datasets_column_names(): + """Returns the columns names for each dataset.""" + return { + 61: ["sepallength", "sepalwidth", "petallength", "petalwidth", "class"], + 2: [ + "family", + "product-type", + "steel", + "carbon", + "hardness", + "temper_rolling", + "condition", + "formability", + "strength", + "non-ageing", + "surface-finish", + "surface-quality", + "enamelability", + "bc", + "bf", + "bt", + "bw%2Fme", + "bl", + "m", + "chrom", + "phos", + "cbond", + "marvi", + "exptl", + "ferro", + "corr", + "blue%2Fbright%2Fvarn%2Fclean", + "lustre", + "jurofm", + "s", + "p", + "shape", + "thick", + "width", + "len", + "oil", + "bore", + "packing", + "class", + ], + 561: ["vendor", "MYCT", "MMIN", "MMAX", "CACH", "CHMIN", "CHMAX", "class"], + 40589: [ + "Mean_Acc1298_Mean_Mem40_Centroid", + "Mean_Acc1298_Mean_Mem40_Rolloff", + "Mean_Acc1298_Mean_Mem40_Flux", + "Mean_Acc1298_Mean_Mem40_MFCC_0", + "Mean_Acc1298_Mean_Mem40_MFCC_1", + "Mean_Acc1298_Mean_Mem40_MFCC_2", + "Mean_Acc1298_Mean_Mem40_MFCC_3", + "Mean_Acc1298_Mean_Mem40_MFCC_4", + "Mean_Acc1298_Mean_Mem40_MFCC_5", + "Mean_Acc1298_Mean_Mem40_MFCC_6", + "Mean_Acc1298_Mean_Mem40_MFCC_7", + "Mean_Acc1298_Mean_Mem40_MFCC_8", + "Mean_Acc1298_Mean_Mem40_MFCC_9", + "Mean_Acc1298_Mean_Mem40_MFCC_10", + "Mean_Acc1298_Mean_Mem40_MFCC_11", + "Mean_Acc1298_Mean_Mem40_MFCC_12", + "Mean_Acc1298_Std_Mem40_Centroid", + "Mean_Acc1298_Std_Mem40_Rolloff", + "Mean_Acc1298_Std_Mem40_Flux", + "Mean_Acc1298_Std_Mem40_MFCC_0", + "Mean_Acc1298_Std_Mem40_MFCC_1", + "Mean_Acc1298_Std_Mem40_MFCC_2", + "Mean_Acc1298_Std_Mem40_MFCC_3", + "Mean_Acc1298_Std_Mem40_MFCC_4", + "Mean_Acc1298_Std_Mem40_MFCC_5", + "Mean_Acc1298_Std_Mem40_MFCC_6", + "Mean_Acc1298_Std_Mem40_MFCC_7", + "Mean_Acc1298_Std_Mem40_MFCC_8", + "Mean_Acc1298_Std_Mem40_MFCC_9", + "Mean_Acc1298_Std_Mem40_MFCC_10", + "Mean_Acc1298_Std_Mem40_MFCC_11", + "Mean_Acc1298_Std_Mem40_MFCC_12", + "Std_Acc1298_Mean_Mem40_Centroid", + "Std_Acc1298_Mean_Mem40_Rolloff", + "Std_Acc1298_Mean_Mem40_Flux", + "Std_Acc1298_Mean_Mem40_MFCC_0", + "Std_Acc1298_Mean_Mem40_MFCC_1", + "Std_Acc1298_Mean_Mem40_MFCC_2", + "Std_Acc1298_Mean_Mem40_MFCC_3", + "Std_Acc1298_Mean_Mem40_MFCC_4", + "Std_Acc1298_Mean_Mem40_MFCC_5", + "Std_Acc1298_Mean_Mem40_MFCC_6", + "Std_Acc1298_Mean_Mem40_MFCC_7", + "Std_Acc1298_Mean_Mem40_MFCC_8", + "Std_Acc1298_Mean_Mem40_MFCC_9", + "Std_Acc1298_Mean_Mem40_MFCC_10", + "Std_Acc1298_Mean_Mem40_MFCC_11", + "Std_Acc1298_Mean_Mem40_MFCC_12", + "Std_Acc1298_Std_Mem40_Centroid", + "Std_Acc1298_Std_Mem40_Rolloff", + "Std_Acc1298_Std_Mem40_Flux", + "Std_Acc1298_Std_Mem40_MFCC_0", + "Std_Acc1298_Std_Mem40_MFCC_1", + "Std_Acc1298_Std_Mem40_MFCC_2", + "Std_Acc1298_Std_Mem40_MFCC_3", + "Std_Acc1298_Std_Mem40_MFCC_4", + "Std_Acc1298_Std_Mem40_MFCC_5", + "Std_Acc1298_Std_Mem40_MFCC_6", + "Std_Acc1298_Std_Mem40_MFCC_7", + "Std_Acc1298_Std_Mem40_MFCC_8", + "Std_Acc1298_Std_Mem40_MFCC_9", + "Std_Acc1298_Std_Mem40_MFCC_10", + "Std_Acc1298_Std_Mem40_MFCC_11", 
+ "Std_Acc1298_Std_Mem40_MFCC_12", + "BH_LowPeakAmp", + "BH_LowPeakBPM", + "BH_HighPeakAmp", + "BH_HighPeakBPM", + "BH_HighLowRatio", + "BHSUM1", + "BHSUM2", + "BHSUM3", + "amazed.suprised", + "happy.pleased", + "relaxing.calm", + "quiet.still", + "sad.lonely", + "angry.aggresive", + ], + 1119: [ + "age", + "workclass", + "fnlwgt:", + "education:", + "education-num:", + "marital-status:", + "occupation:", + "relationship:", + "race:", + "sex:", + "capital-gain:", + "capital-loss:", + "hours-per-week:", + "native-country:", + "class", + ], + 40966: [ + "DYRK1A_N", + "ITSN1_N", + "BDNF_N", + "NR1_N", + "NR2A_N", + "pAKT_N", + "pBRAF_N", + "pCAMKII_N", + "pCREB_N", + "pELK_N", + "pERK_N", + "pJNK_N", + "PKCA_N", + "pMEK_N", + "pNR1_N", + "pNR2A_N", + "pNR2B_N", + "pPKCAB_N", + "pRSK_N", + "AKT_N", + "BRAF_N", + "CAMKII_N", + "CREB_N", + "ELK_N", + "ERK_N", + "GSK3B_N", + "JNK_N", + "MEK_N", + "TRKA_N", + "RSK_N", + "APP_N", + "Bcatenin_N", + "SOD1_N", + "MTOR_N", + "P38_N", + "pMTOR_N", + "DSCR1_N", + "AMPKA_N", + "NR2B_N", + "pNUMB_N", + "RAPTOR_N", + "TIAM1_N", + "pP70S6_N", + "NUMB_N", + "P70S6_N", + "pGSK3B_N", + "pPKCG_N", + "CDK5_N", + "S6_N", + "ADARB1_N", + "AcetylH3K9_N", + "RRP1_N", + "BAX_N", + "ARC_N", + "ERBB4_N", + "nNOS_N", + "Tau_N", + "GFAP_N", + "GluR3_N", + "GluR4_N", + "IL1B_N", + "P3525_N", + "pCASP9_N", + "PSD95_N", + "SNCA_N", + "Ubiquitin_N", + "pGSK3B_Tyr216_N", + "SHH_N", + "BAD_N", + "BCL2_N", + "pS6_N", + "pCFOS_N", + "SYP_N", + "H3AcK18_N", + "EGR1_N", + "H3MeK4_N", + "CaNA_N", + "class", + ], + 40945: [ + "pclass", + "survived", + "name", + "sex", + "age", + "sibsp", + "parch", + "ticket", + "fare", + "cabin", + "embarked", + "boat", + "body", + "home.dest", + ], + } + + +@pytest.fixture(scope="module") +def datasets_missing_values(): + return { + 61: {}, + 2: { + "family": 11, + "temper_rolling": 9, + "condition": 2, + "formability": 4, + "non-ageing": 10, + "surface-finish": 11, + "enamelability": 11, + "bc": 11, + "bf": 10, + "bt": 11, + "bw%2Fme": 8, + "bl": 9, + "m": 11, + "chrom": 11, + "phos": 11, + "cbond": 10, + "marvi": 11, + "exptl": 11, + "ferro": 11, + "corr": 11, + "blue%2Fbright%2Fvarn%2Fclean": 11, + "lustre": 8, + "jurofm": 11, + "s": 11, + "p": 11, + "oil": 10, + "packing": 11, + }, + 561: {}, + 40589: {}, + 1119: {}, + 40966: {"BCL2_N": 7}, + 40945: { + "age": 263, + "fare": 1, + "cabin": 1014, + "embarked": 2, + "boat": 823, + "body": 1188, + "home.dest": 564, + }, + } + + +# Known failure of PyPy for OpenML. 
See the following issue: +# https://github.com/scikit-learn/scikit-learn/issues/18906 +@fails_if_pypy +@pytest.mark.parametrize( + "data_id, parser, expected_n_categories, expected_n_floats, expected_n_ints", + [ + # iris dataset + (61, "liac-arff", 1, 4, 0), + (61, "pandas", 1, 4, 0), + # anneal dataset + (2, "liac-arff", 33, 6, 0), + (2, "pandas", 33, 2, 4), + # cpu dataset + (561, "liac-arff", 1, 7, 0), + (561, "pandas", 1, 0, 7), + # emotions dataset + (40589, "liac-arff", 6, 72, 0), + (40589, "pandas", 6, 69, 3), + # adult-census dataset + (1119, "liac-arff", 9, 6, 0), + (1119, "pandas", 9, 0, 6), + # miceprotein + (40966, "liac-arff", 1, 77, 0), + (40966, "pandas", 1, 77, 0), + # titanic + (40945, "liac-arff", 3, 6, 0), + (40945, "pandas", 3, 3, 3), + ], +) +@pytest.mark.parametrize("gzip_response", [True, False]) +def test_fetch_openml_types_inference( + monkeypatch, + data_id, + parser, + expected_n_categories, + expected_n_floats, + expected_n_ints, + gzip_response, + datasets_column_names, + datasets_missing_values, +): + """Check that `fetch_openml` infer the right number of categories, integers, and + floats.""" + pd = pytest.importorskip("pandas") + CategoricalDtype = pd.api.types.CategoricalDtype + + _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=gzip_response) + + bunch = fetch_openml( + data_id=data_id, + as_frame=True, + cache=False, + parser=parser, + ) + frame = bunch.frame + + n_categories = len( + [dtype for dtype in frame.dtypes if isinstance(dtype, CategoricalDtype)] + ) + n_floats = len([dtype for dtype in frame.dtypes if dtype.kind == "f"]) + n_ints = len([dtype for dtype in frame.dtypes if dtype.kind == "i"]) + + assert n_categories == expected_n_categories + assert n_floats == expected_n_floats + assert n_ints == expected_n_ints + + assert frame.columns.tolist() == datasets_column_names[data_id] + + frame_feature_to_n_nan = frame.isna().sum().to_dict() + for name, n_missing in frame_feature_to_n_nan.items(): + expected_missing = datasets_missing_values[data_id].get(name, 0) + assert n_missing == expected_missing + + +############################################################################### +# Test some more specific behaviour + + +@pytest.mark.parametrize( + "params, err_msg", + [ + ( + {"parser": "unknown"}, + "The 'parser' parameter of fetch_openml must be a str among", + ), + ( + {"as_frame": "unknown"}, + "The 'as_frame' parameter of fetch_openml must be an instance", + ), + ], +) +def test_fetch_openml_validation_parameter(monkeypatch, params, err_msg): + data_id = 1119 + _monkey_patch_webbased_functions(monkeypatch, data_id, True) + with pytest.raises(ValueError, match=err_msg): + fetch_openml(data_id=data_id, **params) + + +@pytest.mark.parametrize( + "params", + [ + {"as_frame": True, "parser": "auto"}, + {"as_frame": "auto", "parser": "auto"}, + {"as_frame": False, "parser": "pandas"}, + {"as_frame": False, "parser": "auto"}, + ], +) +def test_fetch_openml_requires_pandas_error(monkeypatch, params): + """Check that we raise the proper errors when we require pandas.""" + data_id = 1119 + try: + check_pandas_support("test_fetch_openml_requires_pandas") + except ImportError: + _monkey_patch_webbased_functions(monkeypatch, data_id, True) + err_msg = "requires pandas to be installed. 
Alternatively, explicitly" + with pytest.raises(ImportError, match=err_msg): + fetch_openml(data_id=data_id, **params) + else: + raise SkipTest("This test requires pandas to not be installed.") + + +@pytest.mark.filterwarnings("ignore:Version 1 of dataset Australian is inactive") +@pytest.mark.parametrize( + "params, err_msg", + [ + ( + {"parser": "pandas"}, + "Sparse ARFF datasets cannot be loaded with parser='pandas'", + ), + ( + {"as_frame": True}, + "Sparse ARFF datasets cannot be loaded with as_frame=True.", + ), + ( + {"parser": "pandas", "as_frame": True}, + "Sparse ARFF datasets cannot be loaded with as_frame=True.", + ), + ], +) +def test_fetch_openml_sparse_arff_error(monkeypatch, params, err_msg): + """Check that we raise the expected error for sparse ARFF datasets and + a wrong set of incompatible parameters. + """ + pytest.importorskip("pandas") + data_id = 292 + + _monkey_patch_webbased_functions(monkeypatch, data_id, True) + with pytest.raises(ValueError, match=err_msg): + fetch_openml( + data_id=data_id, + cache=False, + **params, + ) + + +# Known failure of PyPy for OpenML. See the following issue: +# https://github.com/scikit-learn/scikit-learn/issues/18906 +@fails_if_pypy +@pytest.mark.filterwarnings("ignore:Version 1 of dataset Australian is inactive") +@pytest.mark.parametrize( + "data_id, data_type", + [ + (61, "dataframe"), # iris dataset version 1 + (292, "sparse"), # Australian dataset version 1 + ], +) +def test_fetch_openml_auto_mode(monkeypatch, data_id, data_type): + """Check the auto mode of `fetch_openml`.""" + pd = pytest.importorskip("pandas") + + _monkey_patch_webbased_functions(monkeypatch, data_id, True) + data = fetch_openml(data_id=data_id, as_frame="auto", cache=False) + klass = pd.DataFrame if data_type == "dataframe" else scipy.sparse.csr_matrix + assert isinstance(data.data, klass) + + +# Known failure of PyPy for OpenML. See the following issue: +# https://github.com/scikit-learn/scikit-learn/issues/18906 +@fails_if_pypy +def test_convert_arff_data_dataframe_warning_low_memory_pandas(monkeypatch): + """Check that we raise a warning regarding the working memory when using + LIAC-ARFF parser.""" + pytest.importorskip("pandas") + + data_id = 1119 + _monkey_patch_webbased_functions(monkeypatch, data_id, True) + + msg = "Could not adhere to working_memory config." + with pytest.warns(UserWarning, match=msg): + with config_context(working_memory=1e-6): + fetch_openml( + data_id=data_id, + as_frame=True, + cache=False, + parser="liac-arff", + ) + + +@pytest.mark.parametrize("gzip_response", [True, False]) +def test_fetch_openml_iris_warn_multiple_version(monkeypatch, gzip_response): + """Check that a warning is raised when multiple versions exist and no version is + requested.""" + data_id = 61 + data_name = "iris" + + _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response) + + msg = re.escape( + "Multiple active versions of the dataset matching the name" + " iris exist. Versions may be fundamentally different, " + "returning version 1. 
Available versions:\n" + "- version 1, status: active\n" + " url: https://www.openml.org/search?type=data&id=61\n" + "- version 3, status: active\n" + " url: https://www.openml.org/search?type=data&id=969\n" + ) + with pytest.warns(UserWarning, match=msg): + fetch_openml( + name=data_name, + as_frame=False, + cache=False, + parser="liac-arff", + ) + + +@pytest.mark.parametrize("gzip_response", [True, False]) +def test_fetch_openml_no_target(monkeypatch, gzip_response): + """Check that we can get a dataset without target.""" + data_id = 61 + target_column = None + expected_observations = 150 + expected_features = 5 + + _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response) + data = fetch_openml( + data_id=data_id, + target_column=target_column, + cache=False, + as_frame=False, + parser="liac-arff", + ) + assert data.data.shape == (expected_observations, expected_features) + assert data.target is None + + +@pytest.mark.parametrize("gzip_response", [True, False]) +@pytest.mark.parametrize("parser", ["liac-arff", "pandas"]) +def test_missing_values_pandas(monkeypatch, gzip_response, parser): + """check that missing values in categories are compatible with pandas + categorical""" + pytest.importorskip("pandas") + + data_id = 42585 + _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=gzip_response) + penguins = fetch_openml( + data_id=data_id, + cache=False, + as_frame=True, + parser=parser, + ) + + cat_dtype = penguins.data.dtypes["sex"] + # there are nans in the categorical + assert penguins.data["sex"].isna().any() + assert_array_equal(cat_dtype.categories, ["FEMALE", "MALE", "_"]) + + +@pytest.mark.parametrize("gzip_response", [True, False]) +@pytest.mark.parametrize( + "dataset_params", + [ + {"data_id": 40675}, + {"data_id": None, "name": "glass2", "version": 1}, + ], +) +def test_fetch_openml_inactive(monkeypatch, gzip_response, dataset_params): + """Check that we raise a warning when the dataset is inactive.""" + data_id = 40675 + _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response) + msg = "Version 1 of dataset glass2 is inactive," + with pytest.warns(UserWarning, match=msg): + glass2 = fetch_openml( + cache=False, as_frame=False, parser="liac-arff", **dataset_params + ) + assert glass2.data.shape == (163, 9) + assert glass2.details["id"] == "40675" + + +@pytest.mark.parametrize("gzip_response", [True, False]) +@pytest.mark.parametrize( + "data_id, params, err_type, err_msg", + [ + (40675, {"name": "glass2"}, ValueError, "No active dataset glass2 found"), + ( + 61, + {"data_id": 61, "target_column": ["sepalwidth", "class"]}, + ValueError, + "Can only handle homogeneous multi-target datasets", + ), + ( + 40945, + {"data_id": 40945, "as_frame": False}, + ValueError, + ( + "STRING attributes are not supported for array representation. 
Try" + " as_frame=True" + ), + ), + ( + 2, + {"data_id": 2, "target_column": "family", "as_frame": True}, + ValueError, + "Target column 'family'", + ), + ( + 2, + {"data_id": 2, "target_column": "family", "as_frame": False}, + ValueError, + "Target column 'family'", + ), + ( + 61, + {"data_id": 61, "target_column": "undefined"}, + KeyError, + "Could not find target_column='undefined'", + ), + ( + 61, + {"data_id": 61, "target_column": ["undefined", "class"]}, + KeyError, + "Could not find target_column='undefined'", + ), + ], +) +@pytest.mark.parametrize("parser", ["liac-arff", "pandas"]) +def test_fetch_openml_error( + monkeypatch, gzip_response, data_id, params, err_type, err_msg, parser +): + _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response) + if params.get("as_frame", True) or parser == "pandas": + pytest.importorskip("pandas") + with pytest.raises(err_type, match=err_msg): + fetch_openml(cache=False, parser=parser, **params) + + +@pytest.mark.parametrize( + "params, err_type, err_msg", + [ + ( + {"data_id": -1, "name": None, "version": "version"}, + ValueError, + "The 'version' parameter of fetch_openml must be an int in the range", + ), + ( + {"data_id": -1, "name": "nAmE"}, + ValueError, + "The 'data_id' parameter of fetch_openml must be an int in the range", + ), + ( + {"data_id": -1, "name": "nAmE", "version": "version"}, + ValueError, + "The 'version' parameter of fetch_openml must be an int", + ), + ( + {}, + ValueError, + "Neither name nor data_id are provided. Please provide name or data_id.", + ), + ], +) +def test_fetch_openml_raises_illegal_argument(params, err_type, err_msg): + with pytest.raises(err_type, match=err_msg): + fetch_openml(**params) + + +@pytest.mark.parametrize("gzip_response", [True, False]) +def test_warn_ignore_attribute(monkeypatch, gzip_response): + data_id = 40966 + expected_row_id_msg = "target_column='{}' has flag is_row_identifier." + expected_ignore_msg = "target_column='{}' has flag is_ignore." + _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response) + # single column test + target_col = "MouseID" + msg = expected_row_id_msg.format(target_col) + with pytest.warns(UserWarning, match=msg): + fetch_openml( + data_id=data_id, + target_column=target_col, + cache=False, + as_frame=False, + parser="liac-arff", + ) + target_col = "Genotype" + msg = expected_ignore_msg.format(target_col) + with pytest.warns(UserWarning, match=msg): + fetch_openml( + data_id=data_id, + target_column=target_col, + cache=False, + as_frame=False, + parser="liac-arff", + ) + # multi column test + target_col = "MouseID" + msg = expected_row_id_msg.format(target_col) + with pytest.warns(UserWarning, match=msg): + fetch_openml( + data_id=data_id, + target_column=[target_col, "class"], + cache=False, + as_frame=False, + parser="liac-arff", + ) + target_col = "Genotype" + msg = expected_ignore_msg.format(target_col) + with pytest.warns(UserWarning, match=msg): + fetch_openml( + data_id=data_id, + target_column=[target_col, "class"], + cache=False, + as_frame=False, + parser="liac-arff", + ) + + +@pytest.mark.parametrize("gzip_response", [True, False]) +def test_dataset_with_openml_error(monkeypatch, gzip_response): + data_id = 1 + _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response) + msg = "OpenML registered a problem with the dataset. It might be unusable. 
Error:" + with pytest.warns(UserWarning, match=msg): + fetch_openml(data_id=data_id, cache=False, as_frame=False, parser="liac-arff") + + +@pytest.mark.parametrize("gzip_response", [True, False]) +def test_dataset_with_openml_warning(monkeypatch, gzip_response): + data_id = 3 + _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response) + msg = "OpenML raised a warning on the dataset. It might be unusable. Warning:" + with pytest.warns(UserWarning, match=msg): + fetch_openml(data_id=data_id, cache=False, as_frame=False, parser="liac-arff") + + +def test_fetch_openml_overwrite_default_params_read_csv(monkeypatch): + """Check that we can overwrite the default parameters of `read_csv`.""" + pytest.importorskip("pandas") + data_id = 1590 + _monkey_patch_webbased_functions(monkeypatch, data_id=data_id, gzip_response=False) + + common_params = { + "data_id": data_id, + "as_frame": True, + "cache": False, + "parser": "pandas", + } + + # By default, the initial spaces are skipped. We checked that setting the parameter + # `skipinitialspace` to False will have an effect. + adult_without_spaces = fetch_openml(**common_params) + adult_with_spaces = fetch_openml( + **common_params, read_csv_kwargs={"skipinitialspace": False} + ) + assert all( + cat.startswith(" ") for cat in adult_with_spaces.frame["class"].cat.categories + ) + assert not any( + cat.startswith(" ") + for cat in adult_without_spaces.frame["class"].cat.categories + ) + + +############################################################################### +# Test cache, retry mechanisms, checksum, etc. + + +@pytest.mark.parametrize("gzip_response", [True, False]) +def test_open_openml_url_cache(monkeypatch, gzip_response, tmpdir): + data_id = 61 + + _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response) + openml_path = sklearn.datasets._openml._DATA_FILE.format(data_id) + cache_directory = str(tmpdir.mkdir("scikit_learn_data")) + # first fill the cache + response1 = _open_openml_url(openml_path, cache_directory) + # assert file exists + location = _get_local_path(openml_path, cache_directory) + assert os.path.isfile(location) + # redownload, to utilize cache + response2 = _open_openml_url(openml_path, cache_directory) + assert response1.read() == response2.read() + + +@pytest.mark.parametrize("write_to_disk", [True, False]) +def test_open_openml_url_unlinks_local_path(monkeypatch, tmpdir, write_to_disk): + data_id = 61 + openml_path = sklearn.datasets._openml._DATA_FILE.format(data_id) + cache_directory = str(tmpdir.mkdir("scikit_learn_data")) + location = _get_local_path(openml_path, cache_directory) + + def _mock_urlopen(request, *args, **kwargs): + if write_to_disk: + with open(location, "w") as f: + f.write("") + raise ValueError("Invalid request") + + monkeypatch.setattr(sklearn.datasets._openml, "urlopen", _mock_urlopen) + + with pytest.raises(ValueError, match="Invalid request"): + _open_openml_url(openml_path, cache_directory) + + assert not os.path.exists(location) + + +def test_retry_with_clean_cache(tmpdir): + data_id = 61 + openml_path = sklearn.datasets._openml._DATA_FILE.format(data_id) + cache_directory = str(tmpdir.mkdir("scikit_learn_data")) + location = _get_local_path(openml_path, cache_directory) + os.makedirs(os.path.dirname(location)) + + with open(location, "w") as f: + f.write("") + + @_retry_with_clean_cache(openml_path, cache_directory) + def _load_data(): + # The first call will raise an error since location exists + if os.path.exists(location): + raise Exception("File exist!") + return 1 + 
+
+
+def test_retry_with_clean_cache(tmpdir):
+    data_id = 61
+    openml_path = sklearn.datasets._openml._DATA_FILE.format(data_id)
+    cache_directory = str(tmpdir.mkdir("scikit_learn_data"))
+    location = _get_local_path(openml_path, cache_directory)
+    os.makedirs(os.path.dirname(location))
+
+    with open(location, "w") as f:
+        f.write("")
+
+    @_retry_with_clean_cache(openml_path, cache_directory)
+    def _load_data():
+        # The first call will raise an error since location exists
+        if os.path.exists(location):
+            raise Exception("File exists!")
+        return 1
+
+    warn_msg = "Invalid cache, redownloading file"
+    with pytest.warns(RuntimeWarning, match=warn_msg):
+        result = _load_data()
+    assert result == 1
+
+
+def test_retry_with_clean_cache_http_error(tmpdir):
+    data_id = 61
+    openml_path = sklearn.datasets._openml._DATA_FILE.format(data_id)
+    cache_directory = str(tmpdir.mkdir("scikit_learn_data"))
+
+    @_retry_with_clean_cache(openml_path, cache_directory)
+    def _load_data():
+        raise HTTPError(
+            url=None, code=412, msg="Simulated mock error", hdrs=None, fp=BytesIO()
+        )
+
+    error_msg = "Simulated mock error"
+    with pytest.raises(HTTPError, match=error_msg):
+        _load_data()
+
+
+@pytest.mark.parametrize("gzip_response", [True, False])
+def test_fetch_openml_cache(monkeypatch, gzip_response, tmpdir):
+    def _mock_urlopen_raise(request, *args, **kwargs):
+        raise ValueError(
+            "This mechanism intends to test correct cache "
+            "handling. As such, urlopen should never be "
+            "accessed. URL: %s"
+            % request.get_full_url()
+        )
+
+    data_id = 61
+    cache_directory = str(tmpdir.mkdir("scikit_learn_data"))
+    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
+    X_fetched, y_fetched = fetch_openml(
+        data_id=data_id,
+        cache=True,
+        data_home=cache_directory,
+        return_X_y=True,
+        as_frame=False,
+        parser="liac-arff",
+    )
+
+    monkeypatch.setattr(sklearn.datasets._openml, "urlopen", _mock_urlopen_raise)
+
+    X_cached, y_cached = fetch_openml(
+        data_id=data_id,
+        cache=True,
+        data_home=cache_directory,
+        return_X_y=True,
+        as_frame=False,
+        parser="liac-arff",
+    )
+    np.testing.assert_array_equal(X_fetched, X_cached)
+    np.testing.assert_array_equal(y_fetched, y_cached)
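+
+
+# Context for the checksum test below: fetch_openml compares the md5 digest of
+# the downloaded payload against the checksum announced in the dataset
+# metadata, so flipping even a single byte must be detected.  The following
+# self-contained sketch (added for illustration; it is not part of the
+# upstream suite and calls no sklearn API) shows that property directly.
+def test_md5_detects_single_byte_corruption():
+    import hashlib
+
+    payload = b"openml arff payload"
+    corrupted = bytearray(payload)
+    corrupted[-1] = 37  # same one-byte corruption pattern as the test below
+    digest_original = hashlib.md5(payload).hexdigest()
+    digest_corrupted = hashlib.md5(bytes(corrupted)).hexdigest()
+    assert digest_original != digest_corrupted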
+
+
+# Known failure of PyPy for OpenML. See the following issue:
+# https://github.com/scikit-learn/scikit-learn/issues/18906
+@fails_if_pypy
+@pytest.mark.parametrize(
+    "as_frame, parser",
+    [
+        (True, "liac-arff"),
+        (False, "liac-arff"),
+        (True, "pandas"),
+        (False, "pandas"),
+    ],
+)
+def test_fetch_openml_verify_checksum(monkeypatch, as_frame, cache, tmpdir, parser):
+    """Check that the checksum is working as expected."""
+    if as_frame or parser == "pandas":
+        pytest.importorskip("pandas")
+
+    data_id = 2
+    _monkey_patch_webbased_functions(monkeypatch, data_id, True)
+
+    # create a temporary modified arff file
+    original_data_module = OPENML_TEST_DATA_MODULE + "." + f"id_{data_id}"
+    original_data_file_name = "data-v1-dl-1666876.arff.gz"
+    original_data_path = resources.files(original_data_module) / original_data_file_name
+    corrupt_copy_path = tmpdir / "test_invalid_checksum.arff"
+    with original_data_path.open("rb") as orig_file:
+        orig_gzip = gzip.open(orig_file, "rb")
+        data = bytearray(orig_gzip.read())
+        data[len(data) - 1] = 37
+
+    with gzip.GzipFile(corrupt_copy_path, "wb") as modified_gzip:
+        modified_gzip.write(data)
+
+    # Requests are already mocked by monkey_patch_webbased_functions.
+    # We want to reuse that mock for all requests except file download,
+    # hence creating a thin mock over the original mock
+    mocked_openml_url = sklearn.datasets._openml.urlopen
+
+    def swap_file_mock(request, *args, **kwargs):
+        url = request.get_full_url()
+        if url.endswith("data/v1/download/1666876"):
+            with open(corrupt_copy_path, "rb") as f:
+                corrupted_data = f.read()
+            return _MockHTTPResponse(BytesIO(corrupted_data), is_gzip=True)
+        else:
+            return mocked_openml_url(request)
+
+    monkeypatch.setattr(sklearn.datasets._openml, "urlopen", swap_file_mock)
+
+    # validate failed checksum
+    with pytest.raises(ValueError) as exc:
+        sklearn.datasets.fetch_openml(
+            data_id=data_id, cache=False, as_frame=as_frame, parser=parser
+        )
+    # exception message should have file-path
+    assert exc.match("1666876")
+
+
+def test_open_openml_url_retry_on_network_error(monkeypatch):
+    def _mock_urlopen_network_error(request, *args, **kwargs):
+        raise HTTPError(
+            url=None, code=404, msg="Simulated network error", hdrs=None, fp=BytesIO()
+        )
+
+    monkeypatch.setattr(
+        sklearn.datasets._openml, "urlopen", _mock_urlopen_network_error
+    )
+
+    invalid_openml_url = "invalid-url"
+
+    with pytest.warns(
+        UserWarning,
+        match=re.escape(
+            "A network error occurred while downloading"
+            f" {_OPENML_PREFIX + invalid_openml_url}. Retrying..."
+        ),
+    ) as record:
+        with pytest.raises(HTTPError, match="Simulated network error"):
+            _open_openml_url(invalid_openml_url, None, delay=0)
+        assert len(record) == 3
+
+
+###############################################################################
+# Non-regression tests
+
+
+@pytest.mark.parametrize("gzip_response", [True, False])
+@pytest.mark.parametrize("parser", ("liac-arff", "pandas"))
+def test_fetch_openml_with_ignored_feature(monkeypatch, gzip_response, parser):
+    """Check that we can load the "zoo" dataset.
+
+    Non-regression test for:
+    https://github.com/scikit-learn/scikit-learn/issues/14340
+    """
+    if parser == "pandas":
+        pytest.importorskip("pandas")
+    data_id = 62
+    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
+
+    dataset = sklearn.datasets.fetch_openml(
+        data_id=data_id, cache=False, as_frame=False, parser=parser
+    )
+    assert dataset is not None
+    # The dataset has 17 features, including 1 ignored (animal),
+    # so we assert that we don't have the ignored feature in the final Bunch
+    assert dataset["data"].shape == (101, 16)
+    assert "animal" not in dataset["feature_names"]
+
+
+def test_fetch_openml_strip_quotes(monkeypatch):
+    """Check that we strip the single quotes when used as a string delimiter.
+ + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/23381 + """ + pd = pytest.importorskip("pandas") + data_id = 40966 + _monkey_patch_webbased_functions(monkeypatch, data_id=data_id, gzip_response=False) + + common_params = {"as_frame": True, "cache": False, "data_id": data_id} + mice_pandas = fetch_openml(parser="pandas", **common_params) + mice_liac_arff = fetch_openml(parser="liac-arff", **common_params) + pd.testing.assert_series_equal(mice_pandas.target, mice_liac_arff.target) + assert not mice_pandas.target.str.startswith("'").any() + assert not mice_pandas.target.str.endswith("'").any() + + # similar behaviour should be observed when the column is not the target + mice_pandas = fetch_openml(parser="pandas", target_column="NUMB_N", **common_params) + mice_liac_arff = fetch_openml( + parser="liac-arff", target_column="NUMB_N", **common_params + ) + pd.testing.assert_series_equal( + mice_pandas.frame["class"], mice_liac_arff.frame["class"] + ) + assert not mice_pandas.frame["class"].str.startswith("'").any() + assert not mice_pandas.frame["class"].str.endswith("'").any() + + +def test_fetch_openml_leading_whitespace(monkeypatch): + """Check that we can strip leading whitespace in pandas parser. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/25311 + """ + pd = pytest.importorskip("pandas") + data_id = 1590 + _monkey_patch_webbased_functions(monkeypatch, data_id=data_id, gzip_response=False) + + common_params = {"as_frame": True, "cache": False, "data_id": data_id} + adult_pandas = fetch_openml(parser="pandas", **common_params) + adult_liac_arff = fetch_openml(parser="liac-arff", **common_params) + pd.testing.assert_series_equal( + adult_pandas.frame["class"], adult_liac_arff.frame["class"] + ) + + +def test_fetch_openml_quotechar_escapechar(monkeypatch): + """Check that we can handle escapechar and single/double quotechar. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/25478 + """ + pd = pytest.importorskip("pandas") + data_id = 42074 + _monkey_patch_webbased_functions(monkeypatch, data_id=data_id, gzip_response=False) + + common_params = {"as_frame": True, "cache": False, "data_id": data_id} + adult_pandas = fetch_openml(parser="pandas", **common_params) + adult_liac_arff = fetch_openml(parser="liac-arff", **common_params) + pd.testing.assert_frame_equal(adult_pandas.frame, adult_liac_arff.frame) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/test_rcv1.py b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/test_rcv1.py new file mode 100644 index 0000000000000000000000000000000000000000..fbb9d67015a308e32a7415ff20ca97c23c006835 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/test_rcv1.py @@ -0,0 +1,71 @@ +"""Test the rcv1 loader, if the data is available, +or if specifically requested via environment variable +(e.g. 
for CI jobs)."""
+
+from functools import partial
+
+import numpy as np
+import scipy.sparse as sp
+
+from sklearn.datasets.tests.test_common import check_return_X_y
+from sklearn.utils._testing import assert_almost_equal, assert_array_equal
+
+
+def test_fetch_rcv1(fetch_rcv1_fxt, global_random_seed):
+    data1 = fetch_rcv1_fxt(shuffle=False)
+    X1, Y1 = data1.data, data1.target
+    cat_list, s1 = data1.target_names.tolist(), data1.sample_id
+
+    # test sparsity
+    assert sp.issparse(X1)
+    assert sp.issparse(Y1)
+    assert 60915113 == X1.data.size
+    assert 2606875 == Y1.data.size
+
+    # test shapes
+    assert (804414, 47236) == X1.shape
+    assert (804414, 103) == Y1.shape
+    assert (804414,) == s1.shape
+    assert 103 == len(cat_list)
+
+    # test descr
+    assert data1.DESCR.startswith(".. _rcv1_dataset:")
+
+    # test ordering of categories
+    first_categories = ["C11", "C12", "C13", "C14", "C15", "C151"]
+    assert_array_equal(first_categories, cat_list[:6])
+
+    # test number of samples for some categories
+    some_categories = ("GMIL", "E143", "CCAT")
+    number_non_zero_in_cat = (5, 1206, 381327)
+    for num, cat in zip(number_non_zero_in_cat, some_categories):
+        j = cat_list.index(cat)
+        assert num == Y1[:, j].data.size
+
+    # test shuffling and subset
+    data2 = fetch_rcv1_fxt(
+        shuffle=True, subset="train", random_state=global_random_seed
+    )
+    X2, Y2 = data2.data, data2.target
+    s2 = data2.sample_id
+
+    # test return_X_y option
+    fetch_func = partial(fetch_rcv1_fxt, shuffle=False, subset="train")
+    check_return_X_y(data2, fetch_func)
+
+    # The first 23149 samples are the training samples
+    assert_array_equal(np.sort(s1[:23149]), np.sort(s2))
+
+    # test some precise values
+    some_sample_ids = (2286, 3274, 14042)
+    for sample_id in some_sample_ids:
+        idx1 = s1.tolist().index(sample_id)
+        idx2 = s2.tolist().index(sample_id)
+
+        feature_values_1 = X1[idx1, :].toarray()
+        feature_values_2 = X2[idx2, :].toarray()
+        assert_almost_equal(feature_values_1, feature_values_2)
+
+        target_values_1 = Y1[idx1, :].toarray()
+        target_values_2 = Y2[idx2, :].toarray()
+        assert_almost_equal(target_values_1, target_values_2)
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/test_samples_generator.py b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/test_samples_generator.py
new file mode 100644
index 0000000000000000000000000000000000000000..9a9cc41d7229c3288e7a399a887266e054adca40
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/test_samples_generator.py
@@ -0,0 +1,742 @@
+import re
+from collections import defaultdict
+from functools import partial
+
+import numpy as np
+import pytest
+import scipy.sparse as sp
+
+from sklearn.datasets import (
+    make_biclusters,
+    make_blobs,
+    make_checkerboard,
+    make_circles,
+    make_classification,
+    make_friedman1,
+    make_friedman2,
+    make_friedman3,
+    make_hastie_10_2,
+    make_low_rank_matrix,
+    make_moons,
+    make_multilabel_classification,
+    make_regression,
+    make_s_curve,
+    make_sparse_coded_signal,
+    make_sparse_spd_matrix,
+    make_sparse_uncorrelated,
+    make_spd_matrix,
+    make_swiss_roll,
+)
+from sklearn.utils._testing import (
+    assert_allclose,
+    assert_allclose_dense_sparse,
+    assert_almost_equal,
+    assert_array_almost_equal,
+    assert_array_equal,
+    ignore_warnings,
+)
+from sklearn.utils.validation import assert_all_finite
+
+
+def test_make_classification():
+    weights = [0.1, 0.25]
+    X, y = make_classification(
+        n_samples=100,
+        n_features=20,
+        n_informative=5,
+        n_redundant=1,
+        n_repeated=1,
+
n_classes=3, + n_clusters_per_class=1, + hypercube=False, + shift=None, + scale=None, + weights=weights, + random_state=0, + ) + + assert weights == [0.1, 0.25] + assert X.shape == (100, 20), "X shape mismatch" + assert y.shape == (100,), "y shape mismatch" + assert np.unique(y).shape == (3,), "Unexpected number of classes" + assert sum(y == 0) == 10, "Unexpected number of samples in class #0" + assert sum(y == 1) == 25, "Unexpected number of samples in class #1" + assert sum(y == 2) == 65, "Unexpected number of samples in class #2" + + # Test for n_features > 30 + X, y = make_classification( + n_samples=2000, + n_features=31, + n_informative=31, + n_redundant=0, + n_repeated=0, + hypercube=True, + scale=0.5, + random_state=0, + ) + + assert X.shape == (2000, 31), "X shape mismatch" + assert y.shape == (2000,), "y shape mismatch" + assert ( + np.unique(X.view([("", X.dtype)] * X.shape[1])) + .view(X.dtype) + .reshape(-1, X.shape[1]) + .shape[0] + == 2000 + ), "Unexpected number of unique rows" + + +def test_make_classification_informative_features(): + """Test the construction of informative features in make_classification + + Also tests `n_clusters_per_class`, `n_classes`, `hypercube` and + fully-specified `weights`. + """ + # Create very separate clusters; check that vertices are unique and + # correspond to classes + class_sep = 1e6 + make = partial( + make_classification, + class_sep=class_sep, + n_redundant=0, + n_repeated=0, + flip_y=0, + shift=0, + scale=1, + shuffle=False, + ) + + for n_informative, weights, n_clusters_per_class in [ + (2, [1], 1), + (2, [1 / 3] * 3, 1), + (2, [1 / 4] * 4, 1), + (2, [1 / 2] * 2, 2), + (2, [3 / 4, 1 / 4], 2), + (10, [1 / 3] * 3, 10), + (int(64), [1], 1), + ]: + n_classes = len(weights) + n_clusters = n_classes * n_clusters_per_class + n_samples = n_clusters * 50 + + for hypercube in (False, True): + X, y = make( + n_samples=n_samples, + n_classes=n_classes, + weights=weights, + n_features=n_informative, + n_informative=n_informative, + n_clusters_per_class=n_clusters_per_class, + hypercube=hypercube, + random_state=0, + ) + + assert X.shape == (n_samples, n_informative) + assert y.shape == (n_samples,) + + # Cluster by sign, viewed as strings to allow uniquing + signs = np.sign(X) + signs = signs.view(dtype="|S{0}".format(signs.strides[0])).ravel() + unique_signs, cluster_index = np.unique(signs, return_inverse=True) + + assert ( + len(unique_signs) == n_clusters + ), "Wrong number of clusters, or not in distinct quadrants" + + clusters_by_class = defaultdict(set) + for cluster, cls in zip(cluster_index, y): + clusters_by_class[cls].add(cluster) + for clusters in clusters_by_class.values(): + assert ( + len(clusters) == n_clusters_per_class + ), "Wrong number of clusters per class" + assert len(clusters_by_class) == n_classes, "Wrong number of classes" + + assert_array_almost_equal( + np.bincount(y) / len(y) // weights, + [1] * n_classes, + err_msg="Wrong number of samples per class", + ) + + # Ensure on vertices of hypercube + for cluster in range(len(unique_signs)): + centroid = X[cluster_index == cluster].mean(axis=0) + if hypercube: + assert_array_almost_equal( + np.abs(centroid) / class_sep, + np.ones(n_informative), + decimal=5, + err_msg="Clusters are not centered on hypercube vertices", + ) + else: + with pytest.raises(AssertionError): + assert_array_almost_equal( + np.abs(centroid) / class_sep, + np.ones(n_informative), + decimal=5, + err_msg=( + "Clusters should not be centered on hypercube vertices" + ), + ) + + with 
pytest.raises(ValueError): + make(n_features=2, n_informative=2, n_classes=5, n_clusters_per_class=1) + with pytest.raises(ValueError): + make(n_features=2, n_informative=2, n_classes=3, n_clusters_per_class=2) + + +@pytest.mark.parametrize( + "weights, err_type, err_msg", + [ + ([], ValueError, "Weights specified but incompatible with number of classes."), + ( + [0.25, 0.75, 0.1], + ValueError, + "Weights specified but incompatible with number of classes.", + ), + ( + np.array([]), + ValueError, + "Weights specified but incompatible with number of classes.", + ), + ( + np.array([0.25, 0.75, 0.1]), + ValueError, + "Weights specified but incompatible with number of classes.", + ), + ( + np.random.random(3), + ValueError, + "Weights specified but incompatible with number of classes.", + ), + ], +) +def test_make_classification_weights_type(weights, err_type, err_msg): + with pytest.raises(err_type, match=err_msg): + make_classification(weights=weights) + + +@pytest.mark.parametrize("kwargs", [{}, {"n_classes": 3, "n_informative": 3}]) +def test_make_classification_weights_array_or_list_ok(kwargs): + X1, y1 = make_classification(weights=[0.1, 0.9], random_state=0, **kwargs) + X2, y2 = make_classification(weights=np.array([0.1, 0.9]), random_state=0, **kwargs) + assert_almost_equal(X1, X2) + assert_almost_equal(y1, y2) + + +def test_make_multilabel_classification_return_sequences(): + for allow_unlabeled, min_length in zip((True, False), (0, 1)): + X, Y = make_multilabel_classification( + n_samples=100, + n_features=20, + n_classes=3, + random_state=0, + return_indicator=False, + allow_unlabeled=allow_unlabeled, + ) + assert X.shape == (100, 20), "X shape mismatch" + if not allow_unlabeled: + assert max([max(y) for y in Y]) == 2 + assert min([len(y) for y in Y]) == min_length + assert max([len(y) for y in Y]) <= 3 + + +def test_make_multilabel_classification_return_indicator(): + for allow_unlabeled, min_length in zip((True, False), (0, 1)): + X, Y = make_multilabel_classification( + n_samples=25, + n_features=20, + n_classes=3, + random_state=0, + allow_unlabeled=allow_unlabeled, + ) + assert X.shape == (25, 20), "X shape mismatch" + assert Y.shape == (25, 3), "Y shape mismatch" + assert np.all(np.sum(Y, axis=0) > min_length) + + # Also test return_distributions and return_indicator with True + X2, Y2, p_c, p_w_c = make_multilabel_classification( + n_samples=25, + n_features=20, + n_classes=3, + random_state=0, + allow_unlabeled=allow_unlabeled, + return_distributions=True, + ) + + assert_array_almost_equal(X, X2) + assert_array_equal(Y, Y2) + assert p_c.shape == (3,) + assert_almost_equal(p_c.sum(), 1) + assert p_w_c.shape == (20, 3) + assert_almost_equal(p_w_c.sum(axis=0), [1] * 3) + + +def test_make_multilabel_classification_return_indicator_sparse(): + for allow_unlabeled, min_length in zip((True, False), (0, 1)): + X, Y = make_multilabel_classification( + n_samples=25, + n_features=20, + n_classes=3, + random_state=0, + return_indicator="sparse", + allow_unlabeled=allow_unlabeled, + ) + assert X.shape == (25, 20), "X shape mismatch" + assert Y.shape == (25, 3), "Y shape mismatch" + assert sp.issparse(Y) + + +def test_make_hastie_10_2(): + X, y = make_hastie_10_2(n_samples=100, random_state=0) + assert X.shape == (100, 10), "X shape mismatch" + assert y.shape == (100,), "y shape mismatch" + assert np.unique(y).shape == (2,), "Unexpected number of classes" + + +def test_make_regression(): + X, y, c = make_regression( + n_samples=100, + n_features=10, + n_informative=3, + effective_rank=5, 
+ coef=True, + bias=0.0, + noise=1.0, + random_state=0, + ) + + assert X.shape == (100, 10), "X shape mismatch" + assert y.shape == (100,), "y shape mismatch" + assert c.shape == (10,), "coef shape mismatch" + assert sum(c != 0.0) == 3, "Unexpected number of informative features" + + # Test that y ~= np.dot(X, c) + bias + N(0, 1.0). + assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1) + + # Test with small number of features. + X, y = make_regression(n_samples=100, n_features=1) # n_informative=3 + assert X.shape == (100, 1) + + +def test_make_regression_multitarget(): + X, y, c = make_regression( + n_samples=100, + n_features=10, + n_informative=3, + n_targets=3, + coef=True, + noise=1.0, + random_state=0, + ) + + assert X.shape == (100, 10), "X shape mismatch" + assert y.shape == (100, 3), "y shape mismatch" + assert c.shape == (10, 3), "coef shape mismatch" + assert_array_equal(sum(c != 0.0), 3, "Unexpected number of informative features") + + # Test that y ~= np.dot(X, c) + bias + N(0, 1.0) + assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1) + + +def test_make_blobs(): + cluster_stds = np.array([0.05, 0.2, 0.4]) + cluster_centers = np.array([[0.0, 0.0], [1.0, 1.0], [0.0, 1.0]]) + X, y = make_blobs( + random_state=0, + n_samples=50, + n_features=2, + centers=cluster_centers, + cluster_std=cluster_stds, + ) + + assert X.shape == (50, 2), "X shape mismatch" + assert y.shape == (50,), "y shape mismatch" + assert np.unique(y).shape == (3,), "Unexpected number of blobs" + for i, (ctr, std) in enumerate(zip(cluster_centers, cluster_stds)): + assert_almost_equal((X[y == i] - ctr).std(), std, 1, "Unexpected std") + + +def test_make_blobs_n_samples_list(): + n_samples = [50, 30, 20] + X, y = make_blobs(n_samples=n_samples, n_features=2, random_state=0) + + assert X.shape == (sum(n_samples), 2), "X shape mismatch" + assert all( + np.bincount(y, minlength=len(n_samples)) == n_samples + ), "Incorrect number of samples per blob" + + +def test_make_blobs_n_samples_list_with_centers(): + n_samples = [20, 20, 20] + centers = np.array([[0.0, 0.0], [1.0, 1.0], [0.0, 1.0]]) + cluster_stds = np.array([0.05, 0.2, 0.4]) + X, y = make_blobs( + n_samples=n_samples, centers=centers, cluster_std=cluster_stds, random_state=0 + ) + + assert X.shape == (sum(n_samples), 2), "X shape mismatch" + assert all( + np.bincount(y, minlength=len(n_samples)) == n_samples + ), "Incorrect number of samples per blob" + for i, (ctr, std) in enumerate(zip(centers, cluster_stds)): + assert_almost_equal((X[y == i] - ctr).std(), std, 1, "Unexpected std") + + +@pytest.mark.parametrize( + "n_samples", [[5, 3, 0], np.array([5, 3, 0]), tuple([5, 3, 0])] +) +def test_make_blobs_n_samples_centers_none(n_samples): + centers = None + X, y = make_blobs(n_samples=n_samples, centers=centers, random_state=0) + + assert X.shape == (sum(n_samples), 2), "X shape mismatch" + assert all( + np.bincount(y, minlength=len(n_samples)) == n_samples + ), "Incorrect number of samples per blob" + + +def test_make_blobs_return_centers(): + n_samples = [10, 20] + n_features = 3 + X, y, centers = make_blobs( + n_samples=n_samples, n_features=n_features, return_centers=True, random_state=0 + ) + + assert centers.shape == (len(n_samples), n_features) + + +def test_make_blobs_error(): + n_samples = [20, 20, 20] + centers = np.array([[0.0, 0.0], [1.0, 1.0], [0.0, 1.0]]) + cluster_stds = np.array([0.05, 0.2, 0.4]) + wrong_centers_msg = re.escape( + "Length of `n_samples` not consistent with number of centers. 
" + f"Got n_samples = {n_samples} and centers = {centers[:-1]}" + ) + with pytest.raises(ValueError, match=wrong_centers_msg): + make_blobs(n_samples, centers=centers[:-1]) + wrong_std_msg = re.escape( + "Length of `clusters_std` not consistent with number of centers. " + f"Got centers = {centers} and cluster_std = {cluster_stds[:-1]}" + ) + with pytest.raises(ValueError, match=wrong_std_msg): + make_blobs(n_samples, centers=centers, cluster_std=cluster_stds[:-1]) + wrong_type_msg = "Parameter `centers` must be array-like. Got {!r} instead".format( + 3 + ) + with pytest.raises(ValueError, match=wrong_type_msg): + make_blobs(n_samples, centers=3) + + +def test_make_friedman1(): + X, y = make_friedman1(n_samples=5, n_features=10, noise=0.0, random_state=0) + + assert X.shape == (5, 10), "X shape mismatch" + assert y.shape == (5,), "y shape mismatch" + + assert_array_almost_equal( + y, + 10 * np.sin(np.pi * X[:, 0] * X[:, 1]) + + 20 * (X[:, 2] - 0.5) ** 2 + + 10 * X[:, 3] + + 5 * X[:, 4], + ) + + +def test_make_friedman2(): + X, y = make_friedman2(n_samples=5, noise=0.0, random_state=0) + + assert X.shape == (5, 4), "X shape mismatch" + assert y.shape == (5,), "y shape mismatch" + + assert_array_almost_equal( + y, (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + ) + + +def test_make_friedman3(): + X, y = make_friedman3(n_samples=5, noise=0.0, random_state=0) + + assert X.shape == (5, 4), "X shape mismatch" + assert y.shape == (5,), "y shape mismatch" + + assert_array_almost_equal( + y, np.arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]) + ) + + +def test_make_low_rank_matrix(): + X = make_low_rank_matrix( + n_samples=50, + n_features=25, + effective_rank=5, + tail_strength=0.01, + random_state=0, + ) + + assert X.shape == (50, 25), "X shape mismatch" + + from numpy.linalg import svd + + u, s, v = svd(X) + assert sum(s) - 5 < 0.1, "X rank is not approximately 5" + + +def test_make_sparse_coded_signal(): + Y, D, X = make_sparse_coded_signal( + n_samples=5, + n_components=8, + n_features=10, + n_nonzero_coefs=3, + random_state=0, + ) + assert Y.shape == (5, 10), "Y shape mismatch" + assert D.shape == (8, 10), "D shape mismatch" + assert X.shape == (5, 8), "X shape mismatch" + for row in X: + assert len(np.flatnonzero(row)) == 3, "Non-zero coefs mismatch" + assert_allclose(Y, X @ D) + assert_allclose(np.sqrt((D**2).sum(axis=1)), np.ones(D.shape[0])) + + +# TODO(1.5): remove +@ignore_warnings(category=FutureWarning) +def test_make_sparse_coded_signal_transposed(): + Y, D, X = make_sparse_coded_signal( + n_samples=5, + n_components=8, + n_features=10, + n_nonzero_coefs=3, + random_state=0, + data_transposed=True, + ) + assert Y.shape == (10, 5), "Y shape mismatch" + assert D.shape == (10, 8), "D shape mismatch" + assert X.shape == (8, 5), "X shape mismatch" + for col in X.T: + assert len(np.flatnonzero(col)) == 3, "Non-zero coefs mismatch" + assert_allclose(Y, D @ X) + assert_allclose(np.sqrt((D**2).sum(axis=0)), np.ones(D.shape[1])) + + +# TODO(1.5): remove +def test_make_sparse_code_signal_deprecation_warning(): + """Check the message for future deprecation.""" + warn_msg = "data_transposed was deprecated in version 1.3" + with pytest.warns(FutureWarning, match=warn_msg): + make_sparse_coded_signal( + n_samples=1, + n_components=1, + n_features=1, + n_nonzero_coefs=1, + random_state=0, + data_transposed=True, + ) + + +def test_make_sparse_uncorrelated(): + X, y = make_sparse_uncorrelated(n_samples=5, n_features=10, random_state=0) + + assert X.shape == 
(5, 10), "X shape mismatch" + assert y.shape == (5,), "y shape mismatch" + + +def test_make_spd_matrix(): + X = make_spd_matrix(n_dim=5, random_state=0) + + assert X.shape == (5, 5), "X shape mismatch" + assert_array_almost_equal(X, X.T) + + from numpy.linalg import eig + + eigenvalues, _ = eig(X) + assert np.all(eigenvalues > 0), "X is not positive-definite" + + +@pytest.mark.parametrize("norm_diag", [True, False]) +@pytest.mark.parametrize( + "sparse_format", [None, "bsr", "coo", "csc", "csr", "dia", "dok", "lil"] +) +def test_make_sparse_spd_matrix(norm_diag, sparse_format, global_random_seed): + n_dim = 5 + X = make_sparse_spd_matrix( + n_dim=n_dim, + norm_diag=norm_diag, + sparse_format=sparse_format, + random_state=global_random_seed, + ) + + assert X.shape == (n_dim, n_dim), "X shape mismatch" + if sparse_format is None: + assert not sp.issparse(X) + assert_allclose(X, X.T) + Xarr = X + else: + assert sp.issparse(X) and X.format == sparse_format + assert_allclose_dense_sparse(X, X.T) + Xarr = X.toarray() + + from numpy.linalg import eig + + # Do not use scipy.sparse.linalg.eigs because it cannot find all eigenvalues + eigenvalues, _ = eig(Xarr) + assert np.all(eigenvalues > 0), "X is not positive-definite" + + if norm_diag: + # Check that leading diagonal elements are 1 + assert_array_almost_equal(Xarr.diagonal(), np.ones(n_dim)) + + +# TODO(1.6): remove +def test_make_sparse_spd_matrix_deprecation_warning(): + """Check the message for future deprecation.""" + warn_msg = "dim was deprecated in version 1.4" + with pytest.warns(FutureWarning, match=warn_msg): + make_sparse_spd_matrix( + dim=1, + ) + + error_msg = "`dim` and `n_dim` cannot be both specified" + with pytest.raises(ValueError, match=error_msg): + make_sparse_spd_matrix( + dim=1, + n_dim=1, + ) + + X = make_sparse_spd_matrix() + assert X.shape[1] == 1 + + +@pytest.mark.parametrize("hole", [False, True]) +def test_make_swiss_roll(hole): + X, t = make_swiss_roll(n_samples=5, noise=0.0, random_state=0, hole=hole) + + assert X.shape == (5, 3) + assert t.shape == (5,) + assert_array_almost_equal(X[:, 0], t * np.cos(t)) + assert_array_almost_equal(X[:, 2], t * np.sin(t)) + + +def test_make_s_curve(): + X, t = make_s_curve(n_samples=5, noise=0.0, random_state=0) + + assert X.shape == (5, 3), "X shape mismatch" + assert t.shape == (5,), "t shape mismatch" + assert_array_almost_equal(X[:, 0], np.sin(t)) + assert_array_almost_equal(X[:, 2], np.sign(t) * (np.cos(t) - 1)) + + +def test_make_biclusters(): + X, rows, cols = make_biclusters( + shape=(100, 100), n_clusters=4, shuffle=True, random_state=0 + ) + assert X.shape == (100, 100), "X shape mismatch" + assert rows.shape == (4, 100), "rows shape mismatch" + assert cols.shape == ( + 4, + 100, + ), "columns shape mismatch" + assert_all_finite(X) + assert_all_finite(rows) + assert_all_finite(cols) + + X2, _, _ = make_biclusters( + shape=(100, 100), n_clusters=4, shuffle=True, random_state=0 + ) + assert_array_almost_equal(X, X2) + + +def test_make_checkerboard(): + X, rows, cols = make_checkerboard( + shape=(100, 100), n_clusters=(20, 5), shuffle=True, random_state=0 + ) + assert X.shape == (100, 100), "X shape mismatch" + assert rows.shape == (100, 100), "rows shape mismatch" + assert cols.shape == ( + 100, + 100, + ), "columns shape mismatch" + + X, rows, cols = make_checkerboard( + shape=(100, 100), n_clusters=2, shuffle=True, random_state=0 + ) + assert_all_finite(X) + assert_all_finite(rows) + assert_all_finite(cols) + + X1, _, _ = make_checkerboard( + shape=(100, 100), 
n_clusters=2, shuffle=True, random_state=0
+    )
+    X2, _, _ = make_checkerboard(
+        shape=(100, 100), n_clusters=2, shuffle=True, random_state=0
+    )
+    assert_array_almost_equal(X1, X2)
+
+
+def test_make_moons():
+    X, y = make_moons(3, shuffle=False)
+    for x, label in zip(X, y):
+        center = [0.0, 0.0] if label == 0 else [1.0, 0.5]
+        dist_sqr = ((x - center) ** 2).sum()
+        assert_almost_equal(
+            dist_sqr, 1.0, err_msg="Point is not on expected unit circle"
+        )
+
+
+def test_make_moons_unbalanced():
+    X, y = make_moons(n_samples=(7, 5))
+    assert (
+        np.sum(y == 0) == 7 and np.sum(y == 1) == 5
+    ), "Number of samples in a moon is wrong"
+    assert X.shape == (12, 2), "X shape mismatch"
+    assert y.shape == (12,), "y shape mismatch"
+
+    with pytest.raises(
+        ValueError,
+        match=r"`n_samples` can be either an int " r"or a two-element tuple.",
+    ):
+        make_moons(n_samples=(10,))
+
+
+def test_make_circles():
+    factor = 0.3
+
+    for n_samples, n_outer, n_inner in [(7, 3, 4), (8, 4, 4)]:
+        # Testing odd and even case, because in the past make_circles always
+        # created an even number of samples.
+        X, y = make_circles(n_samples, shuffle=False, noise=None, factor=factor)
+        assert X.shape == (n_samples, 2), "X shape mismatch"
+        assert y.shape == (n_samples,), "y shape mismatch"
+        center = [0.0, 0.0]
+        for x, label in zip(X, y):
+            dist_sqr = ((x - center) ** 2).sum()
+            dist_exp = 1.0 if label == 0 else factor**2
+            assert_almost_equal(
+                dist_sqr, dist_exp, err_msg="Point is not on expected circle"
+            )
+
+        assert X[y == 0].shape == (
+            n_outer,
+            2,
+        ), "Samples not correctly distributed across circles."
+        assert X[y == 1].shape == (
+            n_inner,
+            2,
+        ), "Samples not correctly distributed across circles."
+
+
+def test_make_circles_unbalanced():
+    X, y = make_circles(n_samples=(2, 8))
+
+    assert np.sum(y == 0) == 2, "Number of samples in inner circle is wrong"
+    assert np.sum(y == 1) == 8, "Number of samples in outer circle is wrong"
+    assert X.shape == (10, 2), "X shape mismatch"
+    assert y.shape == (10,), "y shape mismatch"
+
+    with pytest.raises(
+        ValueError,
+        match="When a tuple, n_samples must have exactly two elements.",
+    ):
+        make_circles(n_samples=(10,))
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/test_svmlight_format.py b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/test_svmlight_format.py
new file mode 100644
index 0000000000000000000000000000000000000000..5c641dd79cc6396cca2201adc499d144a1a4df62
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/test_svmlight_format.py
@@ -0,0 +1,616 @@
+import gzip
+import os
+import shutil
+from bz2 import BZ2File
+from importlib import resources
+from io import BytesIO
+from tempfile import NamedTemporaryFile
+
+import numpy as np
+import pytest
+import scipy.sparse as sp
+
+import sklearn
+from sklearn.datasets import dump_svmlight_file, load_svmlight_file, load_svmlight_files
+from sklearn.utils._testing import (
+    assert_allclose,
+    assert_array_almost_equal,
+    assert_array_equal,
+    create_memmap_backed_data,
+    fails_if_pypy,
+)
+from sklearn.utils.fixes import CSR_CONTAINERS
+
+TEST_DATA_MODULE = "sklearn.datasets.tests.data"
+datafile = "svmlight_classification.txt"
+multifile = "svmlight_multilabel.txt"
+invalidfile = "svmlight_invalid.txt"
+invalidfile2 = "svmlight_invalid_order.txt"
+
+pytestmark = fails_if_pypy
+
+
+def _svmlight_local_test_file_path(filename):
+    return resources.files(TEST_DATA_MODULE) / filename
+
+
+def _load_svmlight_local_test_file(filename, **kwargs):
+    """
+    Helper to load resource `filename` with `importlib.resources`
+    """
+    data_path = _svmlight_local_test_file_path(filename)
+    with data_path.open("rb") as f:
+        return load_svmlight_file(f, **kwargs)
+
+
+def test_load_svmlight_file():
+    X, y = _load_svmlight_local_test_file(datafile)
+
+    # test X's shape
+    assert X.indptr.shape[0] == 7
+    assert X.shape[0] == 6
+    assert X.shape[1] == 21
+    assert y.shape[0] == 6
+
+    # test X's non-zero values
+    for i, j, val in (
+        (0, 2, 2.5),
+        (0, 10, -5.2),
+        (0, 15, 1.5),
+        (1, 5, 1.0),
+        (1, 12, -3),
+        (2, 20, 27),
+    ):
+        assert X[i, j] == val
+
+    # test X's zero values
+    assert X[0, 3] == 0
+    assert X[0, 5] == 0
+    assert X[1, 8] == 0
+    assert X[1, 16] == 0
+    assert X[2, 18] == 0
+
+    # test can change X's values
+    X[0, 2] *= 2
+    assert X[0, 2] == 5
+
+    # test y
+    assert_array_equal(y, [1, 2, 3, 4, 1, 2])
+
+
+def test_load_svmlight_file_fd():
+    # test loading from file descriptor
+
+    # GH20081: testing equality between path-based and
+    # fd-based load_svmlight_file
+
+    data_path = resources.files(TEST_DATA_MODULE) / datafile
+    data_path = str(data_path)
+    X1, y1 = load_svmlight_file(data_path)
+
+    fd = os.open(data_path, os.O_RDONLY)
+    try:
+        X2, y2 = load_svmlight_file(fd)
+        assert_array_almost_equal(X1.data, X2.data)
+        assert_array_almost_equal(y1, y2)
+    finally:
+        os.close(fd)
+
+
+def test_load_svmlight_pathlib():
+    # test loading from a pathlib.Path object
+    data_path = _svmlight_local_test_file_path(datafile)
+    X1, y1 = load_svmlight_file(str(data_path))
+    X2, y2 = load_svmlight_file(data_path)
+
+    assert_allclose(X1.data, X2.data)
+    assert_allclose(y1, y2)
+
+
+def test_load_svmlight_file_multilabel():
+    X, y = _load_svmlight_local_test_file(multifile, multilabel=True)
+    assert y == [(0, 1), (2,), (), (1, 2)]
+
+
+def test_load_svmlight_files():
+    data_path = _svmlight_local_test_file_path(datafile)
+    X_train, y_train, X_test, y_test = load_svmlight_files(
+        [str(data_path)] * 2, dtype=np.float32
+    )
+    assert_array_equal(X_train.toarray(), X_test.toarray())
+    assert_array_almost_equal(y_train, y_test)
+    assert X_train.dtype == np.float32
+    assert X_test.dtype == np.float32
+
+    X1, y1, X2, y2, X3, y3 = load_svmlight_files([str(data_path)] * 3, dtype=np.float64)
+    assert X1.dtype == X2.dtype
+    assert X2.dtype == X3.dtype
+    assert X3.dtype == np.float64
+
+
+def test_load_svmlight_file_n_features():
+    X, y = _load_svmlight_local_test_file(datafile, n_features=22)
+
+    # test X's shape
+    assert X.indptr.shape[0] == 7
+    assert X.shape[0] == 6
+    assert X.shape[1] == 22
+
+    # test X's non-zero values
+    for i, j, val in ((0, 2, 2.5), (0, 10, -5.2), (1, 5, 1.0), (1, 12, -3)):
+        assert X[i, j] == val
+
+    # 21 features in file
+    with pytest.raises(ValueError):
+        _load_svmlight_local_test_file(datafile, n_features=20)
+
+
+def test_load_compressed():
+    X, y = _load_svmlight_local_test_file(datafile)
+
+    with NamedTemporaryFile(prefix="sklearn-test", suffix=".gz") as tmp:
+        tmp.close()  # necessary under windows
+        with _svmlight_local_test_file_path(datafile).open("rb") as f:
+            with gzip.open(tmp.name, "wb") as fh_out:
+                shutil.copyfileobj(f, fh_out)
+        Xgz, ygz = load_svmlight_file(tmp.name)
+        # because we "close" it manually and write to it,
+        # we need to remove it manually.
+ os.remove(tmp.name) + assert_array_almost_equal(X.toarray(), Xgz.toarray()) + assert_array_almost_equal(y, ygz) + + with NamedTemporaryFile(prefix="sklearn-test", suffix=".bz2") as tmp: + tmp.close() # necessary under windows + with _svmlight_local_test_file_path(datafile).open("rb") as f: + with BZ2File(tmp.name, "wb") as fh_out: + shutil.copyfileobj(f, fh_out) + Xbz, ybz = load_svmlight_file(tmp.name) + # because we "close" it manually and write to it, + # we need to remove it manually. + os.remove(tmp.name) + assert_array_almost_equal(X.toarray(), Xbz.toarray()) + assert_array_almost_equal(y, ybz) + + +def test_load_invalid_file(): + with pytest.raises(ValueError): + _load_svmlight_local_test_file(invalidfile) + + +def test_load_invalid_order_file(): + with pytest.raises(ValueError): + _load_svmlight_local_test_file(invalidfile2) + + +def test_load_zero_based(): + f = BytesIO(b"-1 4:1.\n1 0:1\n") + with pytest.raises(ValueError): + load_svmlight_file(f, zero_based=False) + + +def test_load_zero_based_auto(): + data1 = b"-1 1:1 2:2 3:3\n" + data2 = b"-1 0:0 1:1\n" + + f1 = BytesIO(data1) + X, y = load_svmlight_file(f1, zero_based="auto") + assert X.shape == (1, 3) + + f1 = BytesIO(data1) + f2 = BytesIO(data2) + X1, y1, X2, y2 = load_svmlight_files([f1, f2], zero_based="auto") + assert X1.shape == (1, 4) + assert X2.shape == (1, 4) + + +def test_load_with_qid(): + # load svmfile with qid attribute + data = b""" + 3 qid:1 1:0.53 2:0.12 + 2 qid:1 1:0.13 2:0.1 + 7 qid:2 1:0.87 2:0.12""" + X, y = load_svmlight_file(BytesIO(data), query_id=False) + assert_array_equal(y, [3, 2, 7]) + assert_array_equal(X.toarray(), [[0.53, 0.12], [0.13, 0.1], [0.87, 0.12]]) + res1 = load_svmlight_files([BytesIO(data)], query_id=True) + res2 = load_svmlight_file(BytesIO(data), query_id=True) + for X, y, qid in (res1, res2): + assert_array_equal(y, [3, 2, 7]) + assert_array_equal(qid, [1, 1, 2]) + assert_array_equal(X.toarray(), [[0.53, 0.12], [0.13, 0.1], [0.87, 0.12]]) + + +@pytest.mark.skip( + "testing the overflow of 32 bit sparse indexing requires a large amount of memory" +) +def test_load_large_qid(): + """ + load large libsvm / svmlight file with qid attribute. 
Tests 64-bit query ID + """ + data = b"\n".join( + ( + "3 qid:{0} 1:0.53 2:0.12\n2 qid:{0} 1:0.13 2:0.1".format(i).encode() + for i in range(1, 40 * 1000 * 1000) + ) + ) + X, y, qid = load_svmlight_file(BytesIO(data), query_id=True) + assert_array_equal(y[-4:], [3, 2, 3, 2]) + assert_array_equal(np.unique(qid), np.arange(1, 40 * 1000 * 1000)) + + +def test_load_invalid_file2(): + with pytest.raises(ValueError): + data_path = _svmlight_local_test_file_path(datafile) + invalid_path = _svmlight_local_test_file_path(invalidfile) + load_svmlight_files([str(data_path), str(invalid_path), str(data_path)]) + + +def test_not_a_filename(): + # in python 3 integers are valid file opening arguments (taken as unix + # file descriptors) + with pytest.raises(TypeError): + load_svmlight_file(0.42) + + +def test_invalid_filename(): + with pytest.raises(OSError): + load_svmlight_file("trou pic nic douille") + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_dump(csr_container): + X_sparse, y_dense = _load_svmlight_local_test_file(datafile) + X_dense = X_sparse.toarray() + y_sparse = csr_container(np.atleast_2d(y_dense)) + + # slicing a csr_matrix can unsort its .indices, so test that we sort + # those correctly + X_sliced = X_sparse[np.arange(X_sparse.shape[0])] + y_sliced = y_sparse[np.arange(y_sparse.shape[0])] + + for X in (X_sparse, X_dense, X_sliced): + for y in (y_sparse, y_dense, y_sliced): + for zero_based in (True, False): + for dtype in [np.float32, np.float64, np.int32, np.int64]: + f = BytesIO() + # we need to pass a comment to get the version info in; + # LibSVM doesn't grok comments so they're not put in by + # default anymore. + + if sp.issparse(y) and y.shape[0] == 1: + # make sure y's shape is: (n_samples, n_labels) + # when it is sparse + y = y.T + + # Note: with dtype=np.int32 we are performing unsafe casts, + # where X.astype(dtype) overflows. The result is + # then platform dependent and X_dense.astype(dtype) may be + # different from X_sparse.astype(dtype).asarray(). 
+ X_input = X.astype(dtype) + + dump_svmlight_file( + X_input, y, f, comment="test", zero_based=zero_based + ) + f.seek(0) + + comment = f.readline() + comment = str(comment, "utf-8") + + assert "scikit-learn %s" % sklearn.__version__ in comment + + comment = f.readline() + comment = str(comment, "utf-8") + + assert ["one", "zero"][zero_based] + "-based" in comment + + X2, y2 = load_svmlight_file(f, dtype=dtype, zero_based=zero_based) + assert X2.dtype == dtype + assert_array_equal(X2.sorted_indices().indices, X2.indices) + + X2_dense = X2.toarray() + if sp.issparse(X_input): + X_input_dense = X_input.toarray() + else: + X_input_dense = X_input + + if dtype == np.float32: + # allow a rounding error at the last decimal place + assert_array_almost_equal(X_input_dense, X2_dense, 4) + assert_array_almost_equal( + y_dense.astype(dtype, copy=False), y2, 4 + ) + else: + # allow a rounding error at the last decimal place + assert_array_almost_equal(X_input_dense, X2_dense, 15) + assert_array_almost_equal( + y_dense.astype(dtype, copy=False), y2, 15 + ) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_dump_multilabel(csr_container): + X = [[1, 0, 3, 0, 5], [0, 0, 0, 0, 0], [0, 5, 0, 1, 0]] + y_dense = [[0, 1, 0], [1, 0, 1], [1, 1, 0]] + y_sparse = csr_container(y_dense) + for y in [y_dense, y_sparse]: + f = BytesIO() + dump_svmlight_file(X, y, f, multilabel=True) + f.seek(0) + # make sure it dumps multilabel correctly + assert f.readline() == b"1 0:1 2:3 4:5\n" + assert f.readline() == b"0,2 \n" + assert f.readline() == b"0,1 1:5 3:1\n" + + +def test_dump_concise(): + one = 1 + two = 2.1 + three = 3.01 + exact = 1.000000000000001 + # loses the last decimal place + almost = 1.0000000000000001 + X = [ + [one, two, three, exact, almost], + [1e9, 2e18, 3e27, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + ] + y = [one, two, three, exact, almost] + f = BytesIO() + dump_svmlight_file(X, y, f) + f.seek(0) + # make sure it's using the most concise format possible + assert f.readline() == b"1 0:1 1:2.1 2:3.01 3:1.000000000000001 4:1\n" + assert f.readline() == b"2.1 0:1000000000 1:2e+18 2:3e+27\n" + assert f.readline() == b"3.01 \n" + assert f.readline() == b"1.000000000000001 \n" + assert f.readline() == b"1 \n" + f.seek(0) + # make sure it's correct too :) + X2, y2 = load_svmlight_file(f) + assert_array_almost_equal(X, X2.toarray()) + assert_array_almost_equal(y, y2) + + +def test_dump_comment(): + X, y = _load_svmlight_local_test_file(datafile) + X = X.toarray() + + f = BytesIO() + ascii_comment = "This is a comment\nspanning multiple lines." 
+ dump_svmlight_file(X, y, f, comment=ascii_comment, zero_based=False) + f.seek(0) + + X2, y2 = load_svmlight_file(f, zero_based=False) + assert_array_almost_equal(X, X2.toarray()) + assert_array_almost_equal(y, y2) + + # XXX we have to update this to support Python 3.x + utf8_comment = b"It is true that\n\xc2\xbd\xc2\xb2 = \xc2\xbc" + f = BytesIO() + with pytest.raises(UnicodeDecodeError): + dump_svmlight_file(X, y, f, comment=utf8_comment) + + unicode_comment = utf8_comment.decode("utf-8") + f = BytesIO() + dump_svmlight_file(X, y, f, comment=unicode_comment, zero_based=False) + f.seek(0) + + X2, y2 = load_svmlight_file(f, zero_based=False) + assert_array_almost_equal(X, X2.toarray()) + assert_array_almost_equal(y, y2) + + f = BytesIO() + with pytest.raises(ValueError): + dump_svmlight_file(X, y, f, comment="I've got a \0.") + + +def test_dump_invalid(): + X, y = _load_svmlight_local_test_file(datafile) + + f = BytesIO() + y2d = [y] + with pytest.raises(ValueError): + dump_svmlight_file(X, y2d, f) + + f = BytesIO() + with pytest.raises(ValueError): + dump_svmlight_file(X, y[:-1], f) + + +def test_dump_query_id(): + # test dumping a file with query_id + X, y = _load_svmlight_local_test_file(datafile) + X = X.toarray() + query_id = np.arange(X.shape[0]) // 2 + f = BytesIO() + dump_svmlight_file(X, y, f, query_id=query_id, zero_based=True) + + f.seek(0) + X1, y1, query_id1 = load_svmlight_file(f, query_id=True, zero_based=True) + assert_array_almost_equal(X, X1.toarray()) + assert_array_almost_equal(y, y1) + assert_array_almost_equal(query_id, query_id1) + + +def test_load_with_long_qid(): + # load svmfile with longint qid attribute + data = b""" + 1 qid:0 0:1 1:2 2:3 + 0 qid:72048431380967004 0:1440446648 1:72048431380967004 2:236784985 + 0 qid:-9223372036854775807 0:1440446648 1:72048431380967004 2:236784985 + 3 qid:9223372036854775807 0:1440446648 1:72048431380967004 2:236784985""" + X, y, qid = load_svmlight_file(BytesIO(data), query_id=True) + + true_X = [ + [1, 2, 3], + [1440446648, 72048431380967004, 236784985], + [1440446648, 72048431380967004, 236784985], + [1440446648, 72048431380967004, 236784985], + ] + + true_y = [1, 0, 0, 3] + trueQID = [0, 72048431380967004, -9223372036854775807, 9223372036854775807] + assert_array_equal(y, true_y) + assert_array_equal(X.toarray(), true_X) + assert_array_equal(qid, trueQID) + + f = BytesIO() + dump_svmlight_file(X, y, f, query_id=qid, zero_based=True) + f.seek(0) + X, y, qid = load_svmlight_file(f, query_id=True, zero_based=True) + assert_array_equal(y, true_y) + assert_array_equal(X.toarray(), true_X) + assert_array_equal(qid, trueQID) + + f.seek(0) + X, y = load_svmlight_file(f, query_id=False, zero_based=True) + assert_array_equal(y, true_y) + assert_array_equal(X.toarray(), true_X) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_load_zeros(csr_container): + f = BytesIO() + true_X = csr_container(np.zeros(shape=(3, 4))) + true_y = np.array([0, 1, 0]) + dump_svmlight_file(true_X, true_y, f) + + for zero_based in ["auto", True, False]: + f.seek(0) + X, y = load_svmlight_file(f, n_features=4, zero_based=zero_based) + assert_array_almost_equal(y, true_y) + assert_array_almost_equal(X.toarray(), true_X.toarray()) + + +@pytest.mark.parametrize("sparsity", [0, 0.1, 0.5, 0.99, 1]) +@pytest.mark.parametrize("n_samples", [13, 101]) +@pytest.mark.parametrize("n_features", [2, 7, 41]) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_load_with_offsets(sparsity, n_samples, n_features, csr_container): + rng = 
np.random.RandomState(0)
+    X = rng.uniform(low=0.0, high=1.0, size=(n_samples, n_features))
+    if sparsity:
+        X[X < sparsity] = 0.0
+    X = csr_container(X)
+    y = rng.randint(low=0, high=2, size=n_samples)
+
+    f = BytesIO()
+    dump_svmlight_file(X, y, f)
+    f.seek(0)
+
+    size = len(f.getvalue())
+
+    # put some marks that are likely to happen anywhere in a row
+    mark_0 = 0
+    mark_1 = size // 3
+    length_0 = mark_1 - mark_0
+    mark_2 = 4 * size // 5
+    length_1 = mark_2 - mark_1
+
+    # load the original sparse matrix into 3 independent CSR matrices
+    X_0, y_0 = load_svmlight_file(
+        f, n_features=n_features, offset=mark_0, length=length_0
+    )
+    X_1, y_1 = load_svmlight_file(
+        f, n_features=n_features, offset=mark_1, length=length_1
+    )
+    X_2, y_2 = load_svmlight_file(f, n_features=n_features, offset=mark_2)
+
+    y_concat = np.concatenate([y_0, y_1, y_2])
+    X_concat = sp.vstack([X_0, X_1, X_2])
+    assert_array_almost_equal(y, y_concat)
+    assert_array_almost_equal(X.toarray(), X_concat.toarray())
+
+
+@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
+def test_load_offset_exhaustive_splits(csr_container):
+    rng = np.random.RandomState(0)
+    X = np.array(
+        [
+            [0, 0, 0, 0, 0, 0],
+            [1, 2, 3, 4, 0, 6],
+            [1, 2, 3, 4, 0, 6],
+            [0, 0, 0, 0, 0, 0],
+            [1, 0, 3, 0, 0, 0],
+            [0, 0, 0, 0, 0, 1],
+            [1, 0, 0, 0, 0, 0],
+        ]
+    )
+    X = csr_container(X)
+    n_samples, n_features = X.shape
+    y = rng.randint(low=0, high=2, size=n_samples)
+    query_id = np.arange(n_samples) // 2
+
+    f = BytesIO()
+    dump_svmlight_file(X, y, f, query_id=query_id)
+    f.seek(0)
+
+    size = len(f.getvalue())
+
+    # load the same data in 2 parts with all the possible byte offsets to
+    # locate the split so as to test for particular boundary cases
+    for mark in range(size):
+        f.seek(0)
+        X_0, y_0, q_0 = load_svmlight_file(
+            f, n_features=n_features, query_id=True, offset=0, length=mark
+        )
+        X_1, y_1, q_1 = load_svmlight_file(
+            f, n_features=n_features, query_id=True, offset=mark, length=-1
+        )
+        q_concat = np.concatenate([q_0, q_1])
+        y_concat = np.concatenate([y_0, y_1])
+        X_concat = sp.vstack([X_0, X_1])
+        assert_array_almost_equal(y, y_concat)
+        assert_array_equal(query_id, q_concat)
+        assert_array_almost_equal(X.toarray(), X_concat.toarray())
+
+
+def test_load_with_offsets_error():
+    with pytest.raises(ValueError, match="n_features is required"):
+        _load_svmlight_local_test_file(datafile, offset=3, length=3)
+
+
+@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
+def test_multilabel_y_explicit_zeros(tmp_path, csr_container):
+    """
+    Ensure that if y contains explicit zeros (i.e. elements of y.data equal to
+    0) then those explicit zeros are not encoded.
+    """
+    save_path = str(tmp_path / "svm_explicit_zero")
+    rng = np.random.RandomState(42)
+    X = rng.randn(3, 5).astype(np.float64)
+    indptr = np.array([0, 2, 3, 6])
+    indices = np.array([0, 2, 2, 0, 1, 2])
+    # The first and last element are explicit zeros.
+    data = np.array([0, 1, 1, 1, 1, 0])
+    y = csr_container((data, indices, indptr), shape=(3, 3))
+    # y as a dense array would look like
+    # [[0, 0, 1],
+    #  [0, 0, 1],
+    #  [1, 1, 0]]
+
+    dump_svmlight_file(X, y, save_path, multilabel=True)
+
+    _, y_load = load_svmlight_file(save_path, multilabel=True)
+    y_true = [(2.0,), (2.0,), (0.0, 1.0)]
+    assert y_load == y_true
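+
+
+# A minimal dump/load round-trip sketch (added for illustration; it is not
+# part of the upstream suite): the svmlight format stores only non-zero
+# entries, so a dense matrix written with dump_svmlight_file should come back,
+# as CSR, with exactly the original values.
+def test_dump_load_roundtrip_sketch():
+    X = np.array([[1.0, 0.0, 2.5], [0.0, 3.0, 0.0]])
+    y = np.array([0.0, 1.0])
+    f = BytesIO()
+    dump_svmlight_file(X, y, f, zero_based=True)
+    f.seek(0)
+    X2, y2 = load_svmlight_file(f, n_features=3, zero_based=True)
+    assert_array_almost_equal(X, X2.toarray())
+    assert_array_almost_equal(y, y2)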
+
+
+def test_dump_read_only(tmp_path):
+    """Ensure that there is no ValueError when dumping a read-only `X`.
+
+    Non-regression test for:
+    https://github.com/scikit-learn/scikit-learn/issues/28026
+    """
+    rng = np.random.RandomState(42)
+    X = rng.randn(5, 2)
+    y = rng.randn(5)
+
+    # Convert to memmap-backed arrays, which are read-only
+    X, y = create_memmap_backed_data([X, y])
+
+    save_path = str(tmp_path / "svm_read_only")
+    dump_svmlight_file(X, y, save_path)
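+
+
+# Companion sketch to the test above (added for illustration; not part of the
+# upstream suite): besides not raising, dumping read-only inputs should also
+# round-trip.  Assumes create_memmap_backed_data opens the memmaps read-only,
+# as the comment above states.
+def test_dump_read_only_roundtrip(tmp_path):
+    rng = np.random.RandomState(42)
+    X = rng.randn(5, 2)
+    y = rng.randn(5)
+    X, y = create_memmap_backed_data([X, y])
+    assert not X.flags.writeable  # in-place modification would raise
+    save_path = str(tmp_path / "svm_read_only_roundtrip")
+    dump_svmlight_file(X, y, save_path)
+    X2, y2 = load_svmlight_file(save_path)
+    assert_array_almost_equal(X, X2.toarray())
+    assert_array_almost_equal(y, y2)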