diff --git a/ckpts/universal/global_step40/zero/17.post_attention_layernorm.weight/exp_avg.pt b/ckpts/universal/global_step40/zero/17.post_attention_layernorm.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..a4a9828f9c9446705ae2d07c727584578444cdac --- /dev/null +++ b/ckpts/universal/global_step40/zero/17.post_attention_layernorm.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:66b98c071654fe4be7652e90d5a47ba2e1d74f367a2dda69a2c23b018c2f07fe +size 9372 diff --git a/ckpts/universal/global_step40/zero/17.post_attention_layernorm.weight/fp32.pt b/ckpts/universal/global_step40/zero/17.post_attention_layernorm.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..f0a7f572c4a66e19da23eb37b87bb44dfd44673c --- /dev/null +++ b/ckpts/universal/global_step40/zero/17.post_attention_layernorm.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dc2953903facfdb139199004fc6c3711f8fba6bc6cf251d016623fb4cabdbb08 +size 9293 diff --git a/ckpts/universal/global_step40/zero/25.attention.dense.weight/exp_avg.pt b/ckpts/universal/global_step40/zero/25.attention.dense.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..b99eadf273b9134c8e3c992e9bf9f96d0af03cfb --- /dev/null +++ b/ckpts/universal/global_step40/zero/25.attention.dense.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fb2d62594d78b56c8c7deef4a0fb4709e206d5ee0c30690fd1972a44939cbc8d +size 16778396 diff --git a/venv/lib/python3.10/site-packages/sklearn/__check_build/__init__.py b/venv/lib/python3.10/site-packages/sklearn/__check_build/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3895a0e430082238ac6f9995cf0fd08d73e2d53d --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/__check_build/__init__.py @@ -0,0 +1,47 @@ +""" Module to give helpful messages to the user that did not +compile scikit-learn properly. +""" +import os + +INPLACE_MSG = """ +It appears that you are importing a local scikit-learn source tree. For +this, you need to have an inplace install. Maybe you are in the source +directory and you need to try from another location.""" + +STANDARD_MSG = """ +If you have used an installer, please check that it is suited for your +Python version, your operating system and your platform.""" + + +def raise_build_error(e): + # Raise a comprehensible error and list the contents of the + # directory to help debugging on the mailing list. + local_dir = os.path.split(__file__)[0] + msg = STANDARD_MSG + if local_dir == "sklearn/__check_build": + # Picking up the local install: this will work only if the + # install is an 'inplace build' + msg = INPLACE_MSG + dir_content = list() + for i, filename in enumerate(os.listdir(local_dir)): + if (i + 1) % 3: + dir_content.append(filename.ljust(26)) + else: + dir_content.append(filename + "\n") + raise ImportError("""%s +___________________________________________________________________________ +Contents of %s: +%s +___________________________________________________________________________ +It seems that scikit-learn has not been built correctly. + +If you have installed scikit-learn from source, please do not forget +to build the package before using it: run `python setup.py install` or +`make` in the source directory. 
+%s""" % (e, local_dir, "".join(dir_content).strip(), msg)) + + +try: + from ._check_build import check_build # noqa +except ImportError as e: + raise_build_error(e) diff --git a/venv/lib/python3.10/site-packages/sklearn/__check_build/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/__check_build/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..94c0e7f95722a600ed24e5df361596cf6298ff82 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/__check_build/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/__check_build/_check_build.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/sklearn/__check_build/_check_build.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..37d5061af8966be53c262499c4539c45defa06ce Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/__check_build/_check_build.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/sklearn/externals/__init__.py b/venv/lib/python3.10/site-packages/sklearn/externals/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..97cda1858d5655b4179183372d271299298c62be --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/externals/__init__.py @@ -0,0 +1,5 @@ + +""" +External, bundled dependencies. + +""" diff --git a/venv/lib/python3.10/site-packages/sklearn/externals/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/externals/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3cbbf28b2a2cf74356ebb4d878d806047c156e21 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/externals/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/externals/_arff.py b/venv/lib/python3.10/site-packages/sklearn/externals/_arff.py new file mode 100644 index 0000000000000000000000000000000000000000..7c9d51d0702ff5cbe70b80d405747e37a5e6cb1d --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/externals/_arff.py @@ -0,0 +1,1107 @@ +# ============================================================================= +# Federal University of Rio Grande do Sul (UFRGS) +# Connectionist Artificial Intelligence Laboratory (LIAC) +# Renato de Pontes Pereira - rppereira@inf.ufrgs.br +# ============================================================================= +# Copyright (c) 2011 Renato de Pontes Pereira, renato.ppontes at gmail dot com +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. +# ============================================================================= + +''' +The liac-arff module implements functions to read and write ARFF files in +Python. It was created in the Connectionist Artificial Intelligence Laboratory +(LIAC), which takes place at the Federal University of Rio Grande do Sul +(UFRGS), in Brazil. + +ARFF (Attribute-Relation File Format) is an file format specially created for +describe datasets which are commonly used for machine learning experiments and +software. This file format was created to be used in Weka, the best +representative software for machine learning automated experiments. + +An ARFF file can be divided into two sections: header and data. The Header +describes the metadata of the dataset, including a general description of the +dataset, its name and its attributes. The source below is an example of a +header section in a XOR dataset:: + + % + % XOR Dataset + % + % Created by Renato Pereira + % rppereira@inf.ufrgs.br + % http://inf.ufrgs.br/~rppereira + % + % + @RELATION XOR + + @ATTRIBUTE input1 REAL + @ATTRIBUTE input2 REAL + @ATTRIBUTE y REAL + +The Data section of an ARFF file describes the observations of the dataset, in +the case of XOR dataset:: + + @DATA + 0.0,0.0,0.0 + 0.0,1.0,1.0 + 1.0,0.0,1.0 + 1.0,1.0,0.0 + % + % + % + +Notice that several lines are starting with an ``%`` symbol, denoting a +comment, thus, lines with ``%`` at the beginning will be ignored, except by the +description part at the beginning of the file. The declarations ``@RELATION``, +``@ATTRIBUTE``, and ``@DATA`` are all case insensitive and obligatory. + +For more information and details about the ARFF file description, consult +http://www.cs.waikato.ac.nz/~ml/weka/arff.html + + +ARFF Files in Python +~~~~~~~~~~~~~~~~~~~~ + +This module uses built-ins python objects to represent a deserialized ARFF +file. A dictionary is used as the container of the data and metadata of ARFF, +and have the following keys: + +- **description**: (OPTIONAL) a string with the description of the dataset. +- **relation**: (OBLIGATORY) a string with the name of the dataset. +- **attributes**: (OBLIGATORY) a list of attributes with the following + template:: + + (attribute_name, attribute_type) + + the attribute_name is a string, and attribute_type must be an string + or a list of strings. +- **data**: (OBLIGATORY) a list of data instances. Each data instance must be + a list with values, depending on the attributes. + +The above keys must follow the case which were described, i.e., the keys are +case sensitive. The attribute type ``attribute_type`` must be one of these +strings (they are not case sensitive): ``NUMERIC``, ``INTEGER``, ``REAL`` or +``STRING``. For nominal attributes, the ``atribute_type`` must be a list of +strings. 
+ +In this format, the XOR dataset presented above can be represented as a python +object as:: + + xor_dataset = { + 'description': 'XOR Dataset', + 'relation': 'XOR', + 'attributes': [ + ('input1', 'REAL'), + ('input2', 'REAL'), + ('y', 'REAL'), + ], + 'data': [ + [0.0, 0.0, 0.0], + [0.0, 1.0, 1.0], + [1.0, 0.0, 1.0], + [1.0, 1.0, 0.0] + ] + } + + +Features +~~~~~~~~ + +This module provides several features, including: + +- Read and write ARFF files using python built-in structures, such dictionaries + and lists; +- Supports `scipy.sparse.coo `_ + and lists of dictionaries as used by SVMLight +- Supports the following attribute types: NUMERIC, REAL, INTEGER, STRING, and + NOMINAL; +- Has an interface similar to other built-in modules such as ``json``, or + ``zipfile``; +- Supports read and write the descriptions of files; +- Supports missing values and names with spaces; +- Supports unicode values and names; +- Fully compatible with Python 2.7+, Python 3.5+, pypy and pypy3; +- Under `MIT License `_ + +''' +__author__ = 'Renato de Pontes Pereira, Matthias Feurer, Joel Nothman' +__author_email__ = ('renato.ppontes@gmail.com, ' + 'feurerm@informatik.uni-freiburg.de, ' + 'joel.nothman@gmail.com') +__version__ = '2.4.0' + +import re +import csv +from typing import TYPE_CHECKING +from typing import Optional, List, Dict, Any, Iterator, Union, Tuple + +# CONSTANTS =================================================================== +_SIMPLE_TYPES = ['NUMERIC', 'REAL', 'INTEGER', 'STRING'] + +_TK_DESCRIPTION = '%' +_TK_COMMENT = '%' +_TK_RELATION = '@RELATION' +_TK_ATTRIBUTE = '@ATTRIBUTE' +_TK_DATA = '@DATA' + +_RE_RELATION = re.compile(r'^([^\{\}%,\s]*|\".*\"|\'.*\')$', re.UNICODE) +_RE_ATTRIBUTE = re.compile(r'^(\".*\"|\'.*\'|[^\{\}%,\s]*)\s+(.+)$', re.UNICODE) +_RE_QUOTE_CHARS = re.compile(r'["\'\\\s%,\000-\031]', re.UNICODE) +_RE_ESCAPE_CHARS = re.compile(r'(?=["\'\\%])|[\n\r\t\000-\031]') +_RE_SPARSE_LINE = re.compile(r'^\s*\{.*\}\s*$', re.UNICODE) +_RE_NONTRIVIAL_DATA = re.compile('["\'{}\\s]', re.UNICODE) + +ArffDenseDataType = Iterator[List] +ArffSparseDataType = Tuple[List, ...] + + +if TYPE_CHECKING: + # typing_extensions is available when mypy is installed + from typing_extensions import TypedDict + + class ArffContainerType(TypedDict): + description: str + relation: str + attributes: List + data: Union[ArffDenseDataType, ArffSparseDataType] + +else: + ArffContainerType = Dict[str, Any] + + +def _build_re_values(): + quoted_re = r''' + " # open quote followed by zero or more of: + (?: + (?= len(conversors): + raise BadDataFormat(row) + # XXX: int 0 is used for implicit values, not '0' + values = [values[i] if i in values else 0 for i in + range(len(conversors))] + else: + if len(values) != len(conversors): + raise BadDataFormat(row) + + yield self._decode_values(values, conversors) + + @staticmethod + def _decode_values(values, conversors): + try: + values = [None if value is None else conversor(value) + for conversor, value + in zip(conversors, values)] + except ValueError as exc: + if 'float: ' in str(exc): + raise BadNumericalValue() + return values + + def encode_data(self, data, attributes): + '''(INTERNAL) Encodes a line of data. + + Data instances follow the csv format, i.e, attribute values are + delimited by commas. After converted from csv. + + :param data: a list of values. + :param attributes: a list of attributes. Used to check if data is valid. + :return: a string with the encoded data line. 
+ ''' + current_row = 0 + + for inst in data: + if len(inst) != len(attributes): + raise BadObject( + 'Instance %d has %d attributes, expected %d' % + (current_row, len(inst), len(attributes)) + ) + + new_data = [] + for value in inst: + if value is None or value == '' or value != value: + s = '?' + else: + s = encode_string(str(value)) + new_data.append(s) + + current_row += 1 + yield ','.join(new_data) + + +class _DataListMixin: + """Mixin to return a list from decode_rows instead of a generator""" + def decode_rows(self, stream, conversors): + return list(super().decode_rows(stream, conversors)) + + +class Data(_DataListMixin, DenseGeneratorData): + pass + + +class COOData: + def decode_rows(self, stream, conversors): + data, rows, cols = [], [], [] + for i, row in enumerate(stream): + values = _parse_values(row) + if not isinstance(values, dict): + raise BadLayout() + if not values: + continue + row_cols, values = zip(*sorted(values.items())) + try: + values = [value if value is None else conversors[key](value) + for key, value in zip(row_cols, values)] + except ValueError as exc: + if 'float: ' in str(exc): + raise BadNumericalValue() + raise + except IndexError: + # conversor out of range + raise BadDataFormat(row) + + data.extend(values) + rows.extend([i] * len(values)) + cols.extend(row_cols) + + return data, rows, cols + + def encode_data(self, data, attributes): + num_attributes = len(attributes) + new_data = [] + current_row = 0 + + row = data.row + col = data.col + data = data.data + + # Check if the rows are sorted + if not all(row[i] <= row[i + 1] for i in range(len(row) - 1)): + raise ValueError("liac-arff can only output COO matrices with " + "sorted rows.") + + for v, col, row in zip(data, col, row): + if row > current_row: + # Add empty rows if necessary + while current_row < row: + yield " ".join(["{", ','.join(new_data), "}"]) + new_data = [] + current_row += 1 + + if col >= num_attributes: + raise BadObject( + 'Instance %d has at least %d attributes, expected %d' % + (current_row, col + 1, num_attributes) + ) + + if v is None or v == '' or v != v: + s = '?' + else: + s = encode_string(str(v)) + new_data.append("%d %s" % (col, s)) + + yield " ".join(["{", ','.join(new_data), "}"]) + +class LODGeneratorData: + def decode_rows(self, stream, conversors): + for row in stream: + values = _parse_values(row) + + if not isinstance(values, dict): + raise BadLayout() + try: + yield {key: None if value is None else conversors[key](value) + for key, value in values.items()} + except ValueError as exc: + if 'float: ' in str(exc): + raise BadNumericalValue() + raise + except IndexError: + # conversor out of range + raise BadDataFormat(row) + + def encode_data(self, data, attributes): + current_row = 0 + + num_attributes = len(attributes) + for row in data: + new_data = [] + + if len(row) > 0 and max(row) >= num_attributes: + raise BadObject( + 'Instance %d has %d attributes, expected %d' % + (current_row, max(row) + 1, num_attributes) + ) + + for col in sorted(row): + v = row[col] + if v is None or v == '' or v != v: + s = '?' 
+ else: + s = encode_string(str(v)) + new_data.append("%d %s" % (col, s)) + + current_row += 1 + yield " ".join(["{", ','.join(new_data), "}"]) + +class LODData(_DataListMixin, LODGeneratorData): + pass + + +def _get_data_object_for_decoding(matrix_type): + if matrix_type == DENSE: + return Data() + elif matrix_type == COO: + return COOData() + elif matrix_type == LOD: + return LODData() + elif matrix_type == DENSE_GEN: + return DenseGeneratorData() + elif matrix_type == LOD_GEN: + return LODGeneratorData() + else: + raise ValueError("Matrix type %s not supported." % str(matrix_type)) + +def _get_data_object_for_encoding(matrix): + # Probably a scipy.sparse + if hasattr(matrix, 'format'): + if matrix.format == 'coo': + return COOData() + else: + raise ValueError('Cannot guess matrix format!') + elif isinstance(matrix[0], dict): + return LODData() + else: + return Data() + +# ============================================================================= + +# ADVANCED INTERFACE ========================================================== +class ArffDecoder: + '''An ARFF decoder.''' + + def __init__(self): + '''Constructor.''' + self._conversors = [] + self._current_line = 0 + + def _decode_comment(self, s): + '''(INTERNAL) Decodes a comment line. + + Comments are single line strings starting, obligatorily, with the ``%`` + character, and can have any symbol, including whitespaces or special + characters. + + This method must receive a normalized string, i.e., a string without + padding, including the "\r\n" characters. + + :param s: a normalized string. + :return: a string with the decoded comment. + ''' + res = re.sub(r'^\%( )?', '', s) + return res + + def _decode_relation(self, s): + '''(INTERNAL) Decodes a relation line. + + The relation declaration is a line with the format ``@RELATION + ``, where ``relation-name`` is a string. The string must + start with alphabetic character and must be quoted if the name includes + spaces, otherwise this method will raise a `BadRelationFormat` exception. + + This method must receive a normalized string, i.e., a string without + padding, including the "\r\n" characters. + + :param s: a normalized string. + :return: a string with the decoded relation name. + ''' + _, v = s.split(' ', 1) + v = v.strip() + + if not _RE_RELATION.match(v): + raise BadRelationFormat() + + res = str(v.strip('"\'')) + return res + + def _decode_attribute(self, s): + '''(INTERNAL) Decodes an attribute line. + + The attribute is the most complex declaration in an arff file. All + attributes must follow the template:: + + @attribute + + where ``attribute-name`` is a string, quoted if the name contains any + whitespace, and ``datatype`` can be: + + - Numerical attributes as ``NUMERIC``, ``INTEGER`` or ``REAL``. + - Strings as ``STRING``. + - Dates (NOT IMPLEMENTED). + - Nominal attributes with format: + + {, , , ...} + + The nominal names follow the rules for the attribute names, i.e., they + must be quoted if the name contains whitespaces. + + This method must receive a normalized string, i.e., a string without + padding, including the "\r\n" characters. + + :param s: a normalized string. + :return: a tuple (ATTRIBUTE_NAME, TYPE_OR_VALUES). 
+ ''' + _, v = s.split(' ', 1) + v = v.strip() + + # Verify the general structure of declaration + m = _RE_ATTRIBUTE.match(v) + if not m: + raise BadAttributeFormat() + + # Extracts the raw name and type + name, type_ = m.groups() + + # Extracts the final name + name = str(name.strip('"\'')) + + # Extracts the final type + if type_[:1] == "{" and type_[-1:] == "}": + try: + type_ = _parse_values(type_.strip('{} ')) + except Exception: + raise BadAttributeType() + if isinstance(type_, dict): + raise BadAttributeType() + + else: + # If not nominal, verify the type name + type_ = str(type_).upper() + if type_ not in ['NUMERIC', 'REAL', 'INTEGER', 'STRING']: + raise BadAttributeType() + + return (name, type_) + + def _decode(self, s, encode_nominal=False, matrix_type=DENSE): + '''Do the job the ``encode``.''' + + # Make sure this method is idempotent + self._current_line = 0 + + # If string, convert to a list of lines + if isinstance(s, str): + s = s.strip('\r\n ').replace('\r\n', '\n').split('\n') + + # Create the return object + obj: ArffContainerType = { + 'description': '', + 'relation': '', + 'attributes': [], + 'data': [] + } + attribute_names = {} + + # Create the data helper object + data = _get_data_object_for_decoding(matrix_type) + + # Read all lines + STATE = _TK_DESCRIPTION + s = iter(s) + for row in s: + self._current_line += 1 + # Ignore empty lines + row = row.strip(' \r\n') + if not row: continue + + u_row = row.upper() + + # DESCRIPTION ----------------------------------------------------- + if u_row.startswith(_TK_DESCRIPTION) and STATE == _TK_DESCRIPTION: + obj['description'] += self._decode_comment(row) + '\n' + # ----------------------------------------------------------------- + + # RELATION -------------------------------------------------------- + elif u_row.startswith(_TK_RELATION): + if STATE != _TK_DESCRIPTION: + raise BadLayout() + + STATE = _TK_RELATION + obj['relation'] = self._decode_relation(row) + # ----------------------------------------------------------------- + + # ATTRIBUTE ------------------------------------------------------- + elif u_row.startswith(_TK_ATTRIBUTE): + if STATE != _TK_RELATION and STATE != _TK_ATTRIBUTE: + raise BadLayout() + + STATE = _TK_ATTRIBUTE + + attr = self._decode_attribute(row) + if attr[0] in attribute_names: + raise BadAttributeName(attr[0], attribute_names[attr[0]]) + else: + attribute_names[attr[0]] = self._current_line + obj['attributes'].append(attr) + + if isinstance(attr[1], (list, tuple)): + if encode_nominal: + conversor = EncodedNominalConversor(attr[1]) + else: + conversor = NominalConversor(attr[1]) + else: + CONVERSOR_MAP = {'STRING': str, + 'INTEGER': lambda x: int(float(x)), + 'NUMERIC': float, + 'REAL': float} + conversor = CONVERSOR_MAP[attr[1]] + + self._conversors.append(conversor) + # ----------------------------------------------------------------- + + # DATA ------------------------------------------------------------ + elif u_row.startswith(_TK_DATA): + if STATE != _TK_ATTRIBUTE: + raise BadLayout() + + break + # ----------------------------------------------------------------- + + # COMMENT --------------------------------------------------------- + elif u_row.startswith(_TK_COMMENT): + pass + # ----------------------------------------------------------------- + else: + # Never found @DATA + raise BadLayout() + + def stream(): + for row in s: + self._current_line += 1 + row = row.strip() + # Ignore empty lines and comment lines. 
+ if row and not row.startswith(_TK_COMMENT): + yield row + + # Alter the data object + obj['data'] = data.decode_rows(stream(), self._conversors) + if obj['description'].endswith('\n'): + obj['description'] = obj['description'][:-1] + + return obj + + def decode(self, s, encode_nominal=False, return_type=DENSE): + '''Returns the Python representation of a given ARFF file. + + When a file object is passed as an argument, this method reads lines + iteratively, avoiding to load unnecessary information to the memory. + + :param s: a string or file object with the ARFF file. + :param encode_nominal: boolean, if True perform a label encoding + while reading the .arff file. + :param return_type: determines the data structure used to store the + dataset. Can be one of `arff.DENSE`, `arff.COO`, `arff.LOD`, + `arff.DENSE_GEN` or `arff.LOD_GEN`. + Consult the sections on `working with sparse data`_ and `loading + progressively`_. + ''' + try: + return self._decode(s, encode_nominal=encode_nominal, + matrix_type=return_type) + except ArffException as e: + e.line = self._current_line + raise e + + +class ArffEncoder: + '''An ARFF encoder.''' + + def _encode_comment(self, s=''): + '''(INTERNAL) Encodes a comment line. + + Comments are single line strings starting, obligatorily, with the ``%`` + character, and can have any symbol, including whitespaces or special + characters. + + If ``s`` is None, this method will simply return an empty comment. + + :param s: (OPTIONAL) string. + :return: a string with the encoded comment line. + ''' + if s: + return '%s %s'%(_TK_COMMENT, s) + else: + return '%s' % _TK_COMMENT + + def _encode_relation(self, name): + '''(INTERNAL) Decodes a relation line. + + The relation declaration is a line with the format ``@RELATION + ``, where ``relation-name`` is a string. + + :param name: a string. + :return: a string with the encoded relation declaration. + ''' + for char in ' %{},': + if char in name: + name = '"%s"'%name + break + + return '%s %s'%(_TK_RELATION, name) + + def _encode_attribute(self, name, type_): + '''(INTERNAL) Encodes an attribute line. + + The attribute follow the template:: + + @attribute + + where ``attribute-name`` is a string, and ``datatype`` can be: + + - Numerical attributes as ``NUMERIC``, ``INTEGER`` or ``REAL``. + - Strings as ``STRING``. + - Dates (NOT IMPLEMENTED). + - Nominal attributes with format: + + {, , , ...} + + This method must receive a the name of the attribute and its type, if + the attribute type is nominal, ``type`` must be a list of values. + + :param name: a string. + :param type_: a string or a list of string. + :return: a string with the encoded attribute declaration. + ''' + for char in ' %{},': + if char in name: + name = '"%s"'%name + break + + if isinstance(type_, (tuple, list)): + type_tmp = ['%s' % encode_string(type_k) for type_k in type_] + type_ = '{%s}'%(', '.join(type_tmp)) + + return '%s %s %s'%(_TK_ATTRIBUTE, name, type_) + + def encode(self, obj): + '''Encodes a given object to an ARFF file. + + :param obj: the object containing the ARFF information. + :return: the ARFF file as an string. + ''' + data = [row for row in self.iter_encode(obj)] + + return '\n'.join(data) + + def iter_encode(self, obj): + '''The iterative version of `arff.ArffEncoder.encode`. + + This encodes iteratively a given object and return, one-by-one, the + lines of the ARFF file. + + :param obj: the object containing the ARFF information. + :return: (yields) the ARFF file as strings. 
+ ''' + # DESCRIPTION + if obj.get('description', None): + for row in obj['description'].split('\n'): + yield self._encode_comment(row) + + # RELATION + if not obj.get('relation'): + raise BadObject('Relation name not found or with invalid value.') + + yield self._encode_relation(obj['relation']) + yield '' + + # ATTRIBUTES + if not obj.get('attributes'): + raise BadObject('Attributes not found.') + + attribute_names = set() + for attr in obj['attributes']: + # Verify for bad object format + if not isinstance(attr, (tuple, list)) or \ + len(attr) != 2 or \ + not isinstance(attr[0], str): + raise BadObject('Invalid attribute declaration "%s"'%str(attr)) + + if isinstance(attr[1], str): + # Verify for invalid types + if attr[1] not in _SIMPLE_TYPES: + raise BadObject('Invalid attribute type "%s"'%str(attr)) + + # Verify for bad object format + elif not isinstance(attr[1], (tuple, list)): + raise BadObject('Invalid attribute type "%s"'%str(attr)) + + # Verify attribute name is not used twice + if attr[0] in attribute_names: + raise BadObject('Trying to use attribute name "%s" for the ' + 'second time.' % str(attr[0])) + else: + attribute_names.add(attr[0]) + + yield self._encode_attribute(attr[0], attr[1]) + yield '' + attributes = obj['attributes'] + + # DATA + yield _TK_DATA + if 'data' in obj: + data = _get_data_object_for_encoding(obj.get('data')) + yield from data.encode_data(obj.get('data'), attributes) + + yield '' + +# ============================================================================= + +# BASIC INTERFACE ============================================================= +def load(fp, encode_nominal=False, return_type=DENSE): + '''Load a file-like object containing the ARFF document and convert it into + a Python object. + + :param fp: a file-like object. + :param encode_nominal: boolean, if True perform a label encoding + while reading the .arff file. + :param return_type: determines the data structure used to store the + dataset. Can be one of `arff.DENSE`, `arff.COO`, `arff.LOD`, + `arff.DENSE_GEN` or `arff.LOD_GEN`. + Consult the sections on `working with sparse data`_ and `loading + progressively`_. + :return: a dictionary. + ''' + decoder = ArffDecoder() + return decoder.decode(fp, encode_nominal=encode_nominal, + return_type=return_type) + +def loads(s, encode_nominal=False, return_type=DENSE): + '''Convert a string instance containing the ARFF document into a Python + object. + + :param s: a string object. + :param encode_nominal: boolean, if True perform a label encoding + while reading the .arff file. + :param return_type: determines the data structure used to store the + dataset. Can be one of `arff.DENSE`, `arff.COO`, `arff.LOD`, + `arff.DENSE_GEN` or `arff.LOD_GEN`. + Consult the sections on `working with sparse data`_ and `loading + progressively`_. + :return: a dictionary. + ''' + decoder = ArffDecoder() + return decoder.decode(s, encode_nominal=encode_nominal, + return_type=return_type) + +def dump(obj, fp): + '''Serialize an object representing the ARFF document to a given file-like + object. + + :param obj: a dictionary. + :param fp: a file-like object. + ''' + encoder = ArffEncoder() + generator = encoder.iter_encode(obj) + + last_row = next(generator) + for row in generator: + fp.write(last_row + '\n') + last_row = row + fp.write(last_row) + + return fp + +def dumps(obj): + '''Serialize an object representing the ARFF document, returning a string. + + :param obj: a dictionary. + :return: a string with the ARFF document. 
+ ''' + encoder = ArffEncoder() + return encoder.encode(obj) +# ============================================================================= diff --git a/venv/lib/python3.10/site-packages/sklearn/externals/_packaging/__init__.py b/venv/lib/python3.10/site-packages/sklearn/externals/_packaging/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/sklearn/externals/_packaging/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/externals/_packaging/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ec8b0bf69d699ae9a5341502fb03b14df1e99cab Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/externals/_packaging/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/externals/_packaging/__pycache__/_structures.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/externals/_packaging/__pycache__/_structures.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..94b9e712bb0bdee785e6e9112fe66fefa5f00e86 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/externals/_packaging/__pycache__/_structures.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/externals/_packaging/__pycache__/version.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/externals/_packaging/__pycache__/version.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..440dac3de179a642703211f82393c7fa5cf968a7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/externals/_packaging/__pycache__/version.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/externals/_packaging/_structures.py b/venv/lib/python3.10/site-packages/sklearn/externals/_packaging/_structures.py new file mode 100644 index 0000000000000000000000000000000000000000..837e3a7946d70355b46606d20a4b6c0f038b0815 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/externals/_packaging/_structures.py @@ -0,0 +1,90 @@ +"""Vendoered from +https://github.com/pypa/packaging/blob/main/packaging/_structures.py +""" +# Copyright (c) Donald Stufft and individual contributors. +# All rights reserved. + +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: + +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. + +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. + +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +class InfinityType: + def __repr__(self) -> str: + return "Infinity" + + def __hash__(self) -> int: + return hash(repr(self)) + + def __lt__(self, other: object) -> bool: + return False + + def __le__(self, other: object) -> bool: + return False + + def __eq__(self, other: object) -> bool: + return isinstance(other, self.__class__) + + def __ne__(self, other: object) -> bool: + return not isinstance(other, self.__class__) + + def __gt__(self, other: object) -> bool: + return True + + def __ge__(self, other: object) -> bool: + return True + + def __neg__(self: object) -> "NegativeInfinityType": + return NegativeInfinity + + +Infinity = InfinityType() + + +class NegativeInfinityType: + def __repr__(self) -> str: + return "-Infinity" + + def __hash__(self) -> int: + return hash(repr(self)) + + def __lt__(self, other: object) -> bool: + return True + + def __le__(self, other: object) -> bool: + return True + + def __eq__(self, other: object) -> bool: + return isinstance(other, self.__class__) + + def __ne__(self, other: object) -> bool: + return not isinstance(other, self.__class__) + + def __gt__(self, other: object) -> bool: + return False + + def __ge__(self, other: object) -> bool: + return False + + def __neg__(self: object) -> InfinityType: + return Infinity + + +NegativeInfinity = NegativeInfinityType() diff --git a/venv/lib/python3.10/site-packages/sklearn/externals/_packaging/version.py b/venv/lib/python3.10/site-packages/sklearn/externals/_packaging/version.py new file mode 100644 index 0000000000000000000000000000000000000000..0f1e5b833699c38690679515bb788820de4168b0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/externals/_packaging/version.py @@ -0,0 +1,535 @@ +"""Vendoered from +https://github.com/pypa/packaging/blob/main/packaging/version.py +""" +# Copyright (c) Donald Stufft and individual contributors. +# All rights reserved. + +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: + +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. + +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. + +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import collections +import itertools +import re +import warnings +from typing import Callable, Iterator, List, Optional, SupportsInt, Tuple, Union + +from ._structures import Infinity, InfinityType, NegativeInfinity, NegativeInfinityType + +__all__ = ["parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"] + +InfiniteTypes = Union[InfinityType, NegativeInfinityType] +PrePostDevType = Union[InfiniteTypes, Tuple[str, int]] +SubLocalType = Union[InfiniteTypes, int, str] +LocalType = Union[ + NegativeInfinityType, + Tuple[ + Union[ + SubLocalType, + Tuple[SubLocalType, str], + Tuple[NegativeInfinityType, SubLocalType], + ], + ..., + ], +] +CmpKey = Tuple[ + int, Tuple[int, ...], PrePostDevType, PrePostDevType, PrePostDevType, LocalType +] +LegacyCmpKey = Tuple[int, Tuple[str, ...]] +VersionComparisonMethod = Callable[ + [Union[CmpKey, LegacyCmpKey], Union[CmpKey, LegacyCmpKey]], bool +] + +_Version = collections.namedtuple( + "_Version", ["epoch", "release", "dev", "pre", "post", "local"] +) + + +def parse(version: str) -> Union["LegacyVersion", "Version"]: + """Parse the given version from a string to an appropriate class. + + Parameters + ---------- + version : str + Version in a string format, eg. "0.9.1" or "1.2.dev0". + + Returns + ------- + version : :class:`Version` object or a :class:`LegacyVersion` object + Returned class depends on the given version: if is a valid + PEP 440 version or a legacy version. + """ + try: + return Version(version) + except InvalidVersion: + return LegacyVersion(version) + + +class InvalidVersion(ValueError): + """ + An invalid version was found, users should refer to PEP 440. + """ + + +class _BaseVersion: + _key: Union[CmpKey, LegacyCmpKey] + + def __hash__(self) -> int: + return hash(self._key) + + # Please keep the duplicated `isinstance` check + # in the six comparisons hereunder + # unless you find a way to avoid adding overhead function calls. 
+ def __lt__(self, other: "_BaseVersion") -> bool: + if not isinstance(other, _BaseVersion): + return NotImplemented + + return self._key < other._key + + def __le__(self, other: "_BaseVersion") -> bool: + if not isinstance(other, _BaseVersion): + return NotImplemented + + return self._key <= other._key + + def __eq__(self, other: object) -> bool: + if not isinstance(other, _BaseVersion): + return NotImplemented + + return self._key == other._key + + def __ge__(self, other: "_BaseVersion") -> bool: + if not isinstance(other, _BaseVersion): + return NotImplemented + + return self._key >= other._key + + def __gt__(self, other: "_BaseVersion") -> bool: + if not isinstance(other, _BaseVersion): + return NotImplemented + + return self._key > other._key + + def __ne__(self, other: object) -> bool: + if not isinstance(other, _BaseVersion): + return NotImplemented + + return self._key != other._key + + +class LegacyVersion(_BaseVersion): + def __init__(self, version: str) -> None: + self._version = str(version) + self._key = _legacy_cmpkey(self._version) + + warnings.warn( + "Creating a LegacyVersion has been deprecated and will be " + "removed in the next major release", + DeprecationWarning, + ) + + def __str__(self) -> str: + return self._version + + def __repr__(self) -> str: + return f"" + + @property + def public(self) -> str: + return self._version + + @property + def base_version(self) -> str: + return self._version + + @property + def epoch(self) -> int: + return -1 + + @property + def release(self) -> None: + return None + + @property + def pre(self) -> None: + return None + + @property + def post(self) -> None: + return None + + @property + def dev(self) -> None: + return None + + @property + def local(self) -> None: + return None + + @property + def is_prerelease(self) -> bool: + return False + + @property + def is_postrelease(self) -> bool: + return False + + @property + def is_devrelease(self) -> bool: + return False + + +_legacy_version_component_re = re.compile(r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE) + +_legacy_version_replacement_map = { + "pre": "c", + "preview": "c", + "-": "final-", + "rc": "c", + "dev": "@", +} + + +def _parse_version_parts(s: str) -> Iterator[str]: + for part in _legacy_version_component_re.split(s): + part = _legacy_version_replacement_map.get(part, part) + + if not part or part == ".": + continue + + if part[:1] in "0123456789": + # pad for numeric comparison + yield part.zfill(8) + else: + yield "*" + part + + # ensure that alpha/beta/candidate are before final + yield "*final" + + +def _legacy_cmpkey(version: str) -> LegacyCmpKey: + + # We hardcode an epoch of -1 here. A PEP 440 version can only have a epoch + # greater than or equal to 0. This will effectively put the LegacyVersion, + # which uses the defacto standard originally implemented by setuptools, + # as before all PEP 440 versions. + epoch = -1 + + # This scheme is taken from pkg_resources.parse_version setuptools prior to + # it's adoption of the packaging library. + parts: List[str] = [] + for part in _parse_version_parts(version.lower()): + if part.startswith("*"): + # remove "-" before a prerelease tag + if part < "*final": + while parts and parts[-1] == "*final-": + parts.pop() + + # remove trailing zeros from each series of numeric parts + while parts and parts[-1] == "00000000": + parts.pop() + + parts.append(part) + + return epoch, tuple(parts) + + +# Deliberately not anchored to the start and end of the string, to make it +# easier for 3rd party code to reuse +VERSION_PATTERN = r""" + v? 
+    (?:
+        (?:(?P<epoch>[0-9]+)!)?                           # epoch
+        (?P<release>[0-9]+(?:\.[0-9]+)*)                  # release segment
+        (?P<pre>                                          # pre-release
+            [-_\.]?
+            (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
+            [-_\.]?
+            (?P<pre_n>[0-9]+)?
+        )?
+        (?P<post>                                         # post release
+            (?:-(?P<post_n1>[0-9]+))
+            |
+            (?:
+                [-_\.]?
+                (?P<post_l>post|rev|r)
+                [-_\.]?
+                (?P<post_n2>[0-9]+)?
+            )
+        )?
+        (?P<dev>                                          # dev release
+            [-_\.]?
+            (?P<dev_l>dev)
+            [-_\.]?
+            (?P<dev_n>[0-9]+)?
+        )?
+    )
+    (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
+"""
+
+
+class Version(_BaseVersion):
+
+    _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
+
+    def __init__(self, version: str) -> None:
+
+        # Validate the version and parse it into pieces
+        match = self._regex.search(version)
+        if not match:
+            raise InvalidVersion(f"Invalid version: '{version}'")
+
+        # Store the parsed out pieces of the version
+        self._version = _Version(
+            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
+            release=tuple(int(i) for i in match.group("release").split(".")),
+            pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
+            post=_parse_letter_version(
+                match.group("post_l"), match.group("post_n1") or match.group("post_n2")
+            ),
+            dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
+            local=_parse_local_version(match.group("local")),
+        )
+
+        # Generate a key which will be used for sorting
+        self._key = _cmpkey(
+            self._version.epoch,
+            self._version.release,
+            self._version.pre,
+            self._version.post,
+            self._version.dev,
+            self._version.local,
+        )
+
+    def __repr__(self) -> str:
+        return f""
+
+    def __str__(self) -> str:
+        parts = []
+
+        # Epoch
+        if self.epoch != 0:
+            parts.append(f"{self.epoch}!")
+
+        # Release segment
+        parts.append(".".join(str(x) for x in self.release))
+
+        # Pre-release
+        if self.pre is not None:
+            parts.append("".join(str(x) for x in self.pre))
+
+        # Post-release
+        if self.post is not None:
+            parts.append(f".post{self.post}")
+
+        # Development release
+        if self.dev is not None:
+            parts.append(f".dev{self.dev}")
+
+        # Local version segment
+        if self.local is not None:
+            parts.append(f"+{self.local}")
+
+        return "".join(parts)
+
+    @property
+    def epoch(self) -> int:
+        _epoch: int = self._version.epoch
+        return _epoch
+
+    @property
+    def release(self) -> Tuple[int, ...]:
+        _release: Tuple[int, ...] = self._version.release
+        return _release
+
+    @property
+    def pre(self) -> Optional[Tuple[str, int]]:
+        _pre: Optional[Tuple[str, int]] = self._version.pre
+        return _pre
+
+    @property
+    def post(self) -> Optional[int]:
+        return self._version.post[1] if self._version.post else None
+
+    @property
+    def dev(self) -> Optional[int]:
+        return self._version.dev[1] if self._version.dev else None
+
+    @property
+    def local(self) -> Optional[str]:
+        if self._version.local:
+            return ".".join(str(x) for x in self._version.local)
+        else:
+            return None
+
+    @property
+    def public(self) -> str:
+        return str(self).split("+", 1)[0]
+
+    @property
+    def base_version(self) -> str:
+        parts = []
+
+        # Epoch
+        if self.epoch != 0:
+            parts.append(f"{self.epoch}!")
+
+        # Release segment
+        parts.append(".".join(str(x) for x in self.release))
+
+        return "".join(parts)
+
+    @property
+    def is_prerelease(self) -> bool:
+        return self.dev is not None or self.pre is not None
+
+    @property
+    def is_postrelease(self) -> bool:
+        return self.post is not None
+
+    @property
+    def is_devrelease(self) -> bool:
+        return self.dev is not None
+
+    @property
+    def major(self) -> int:
+        return self.release[0] if len(self.release) >= 1 else 0
+
+    @property
+    def minor(self) -> int:
+        return self.release[1] if len(self.release) >= 2 else 0
+
+    @property
+    def micro(self) -> int:
+        return self.release[2] if len(self.release) >= 3 else 0
+
+
+def _parse_letter_version(
+    letter: str, number: Union[str, bytes, SupportsInt]
+) -> Optional[Tuple[str, int]]:
+
+    if letter:
+        # We consider there to be an implicit 0 in a pre-release if there is
+        # not a numeral associated with it.
+        if number is None:
+            number = 0
+
+        # We normalize any letters to their lower case form
+        letter = letter.lower()
+
+        # We consider some words to be alternate spellings of other words and
+        # in those cases we want to normalize the spellings to our preferred
+        # spelling.
+        if letter == "alpha":
+            letter = "a"
+        elif letter == "beta":
+            letter = "b"
+        elif letter in ["c", "pre", "preview"]:
+            letter = "rc"
+        elif letter in ["rev", "r"]:
+            letter = "post"
+
+        return letter, int(number)
+    if not letter and number:
+        # We assume if we are given a number, but we are not given a letter
+        # then this is using the implicit post release syntax (e.g. 1.0-1)
+        letter = "post"
+
+        return letter, int(number)
+
+    return None
+
+
+_local_version_separators = re.compile(r"[\._-]")
+
+
+def _parse_local_version(local: str) -> Optional[LocalType]:
+    """
+    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
+    """
+    if local is not None:
+        return tuple(
+            part.lower() if not part.isdigit() else int(part)
+            for part in _local_version_separators.split(local)
+        )
+    return None
+
+
+def _cmpkey(
+    epoch: int,
+    release: Tuple[int, ...],
+    pre: Optional[Tuple[str, int]],
+    post: Optional[Tuple[str, int]],
+    dev: Optional[Tuple[str, int]],
+    local: Optional[Tuple[SubLocalType]],
+) -> CmpKey:
+
+    # When we compare a release version, we want to compare it with all of the
+    # trailing zeros removed. So we'll use a reverse the list, drop all the now
+    # leading zeros until we come to something non zero, then take the rest
+    # re-reverse it back into the correct order and make it a tuple and use
+    # that for our sorting key.
+    _release = tuple(
+        reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release))))
+    )
+
+    # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
+    # We'll do this by abusing the pre segment, but we _only_ want to do this
+    # if there is not a pre or a post segment. If we have one of those then
+    # the normal sorting rules will handle this case correctly.
+    if pre is None and post is None and dev is not None:
+        _pre: PrePostDevType = NegativeInfinity
+    # Versions without a pre-release (except as noted above) should sort after
+    # those with one.
+    elif pre is None:
+        _pre = Infinity
+    else:
+        _pre = pre
+
+    # Versions without a post segment should sort before those with one.
+    if post is None:
+        _post: PrePostDevType = NegativeInfinity
+
+    else:
+        _post = post
+
+    # Versions without a development segment should sort after those with one.
+    if dev is None:
+        _dev: PrePostDevType = Infinity
+
+    else:
+        _dev = dev
+
+    if local is None:
+        # Versions without a local segment should sort before those with one.
+        _local: LocalType = NegativeInfinity
+    else:
+        # Versions with a local segment need that segment parsed to implement
+        # the sorting rules in PEP440.
+        # - Alpha numeric segments sort before numeric segments
+        # - Alpha numeric segments sort lexicographically
+        # - Numeric segments sort numerically
+        # - Shorter versions sort before longer versions when the prefixes
+        #   match exactly
+        _local = tuple(
+            (i, "") if isinstance(i, int) else (NegativeInfinity, i) for i in local
+        )
+
+    return epoch, _release, _pre, _post, _dev, _local
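For context, a minimal usage sketch of the vendored version parser added above, assuming the module is importable at the vendored path shown in this hunk (the names used are those defined by the ``Version`` class and ``parse`` function above)::

    from sklearn.externals._packaging.version import Version, parse

    v = parse("1.2.0rc1")                  # valid PEP 440 string -> Version instance
    print(v.release)                       # (1, 2, 0)
    print(v.pre)                           # ('rc', 1)
    print(v.is_prerelease)                 # True
    print(Version("1.2.0") > v)            # True: the final release sorts after its rc
    print(Version("1.0+local.1").public)   # '1.0' (local segment stripped)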
diff --git a/venv/lib/python3.10/site-packages/sklearn/externals/conftest.py b/venv/lib/python3.10/site-packages/sklearn/externals/conftest.py
new file mode 100644
index 0000000000000000000000000000000000000000..7f7a4af34987815852c9d76c612fdbd9b85c39a9
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/sklearn/externals/conftest.py
@@ -0,0 +1,6 @@
+# Do not collect any tests in externals. This is more robust than using
+# --ignore because --ignore needs a path and it is not convenient to pass in
+# the externals path (very long install-dependent path in site-packages) when
+# using --pyargs
+def pytest_ignore_collect(path, config):
+    return True
diff --git a/venv/lib/python3.10/site-packages/sklearn/neighbors/__init__.py b/venv/lib/python3.10/site-packages/sklearn/neighbors/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ce697656b4c2e3b1a181d845d898b6447dbdaa72
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/sklearn/neighbors/__init__.py
@@ -0,0 +1,42 @@
+"""
+The :mod:`sklearn.neighbors` module implements the k-nearest neighbors
+algorithm.
+"""
+
+from ._ball_tree import BallTree
+from ._base import VALID_METRICS, VALID_METRICS_SPARSE, sort_graph_by_row_values
+from ._classification import KNeighborsClassifier, RadiusNeighborsClassifier
+from ._graph import (
+    KNeighborsTransformer,
+    RadiusNeighborsTransformer,
+    kneighbors_graph,
+    radius_neighbors_graph,
+)
+from ._kd_tree import KDTree
+from ._kde import KernelDensity
+from ._lof import LocalOutlierFactor
+from ._nca import NeighborhoodComponentsAnalysis
+from ._nearest_centroid import NearestCentroid
+from ._regression import KNeighborsRegressor, RadiusNeighborsRegressor
+from ._unsupervised import NearestNeighbors
+
+__all__ = [
+    "BallTree",
+    "KDTree",
+    "KNeighborsClassifier",
+    "KNeighborsRegressor",
+    "KNeighborsTransformer",
+    "NearestCentroid",
+    "NearestNeighbors",
+    "RadiusNeighborsClassifier",
+    "RadiusNeighborsRegressor",
+    "RadiusNeighborsTransformer",
+    "kneighbors_graph",
+    "radius_neighbors_graph",
+    "KernelDensity",
+    "LocalOutlierFactor",
+    "NeighborhoodComponentsAnalysis",
+    "sort_graph_by_row_values",
+    "VALID_METRICS",
+    "VALID_METRICS_SPARSE",
+]
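For context, a minimal usage sketch of the public API re-exported above; it assumes the upstream scikit-learn estimator interface (``fit``/``kneighbors``, ``n_neighbors``, ``mode``), which is not itself shown in this diff::

    import numpy as np
    from sklearn.neighbors import NearestNeighbors, kneighbors_graph

    X = np.array([[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]])

    nn = NearestNeighbors(n_neighbors=2).fit(X)
    distances, indices = nn.kneighbors(X)   # per row: the 2 nearest points (including itself)

    A = kneighbors_graph(X, n_neighbors=2, mode="connectivity")
    print(A.toarray())                      # adjacency matrix of the 2-NN graph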
diff --git a/venv/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..758d8df6e7bb0740e98c4ff25dd024fbaaaa4da0
Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_base.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_base.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b54495c6848c0bfa1f4300a474d5b617dea37be8
Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_base.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_classification.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_classification.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ba40de0edfb91f31514328a999cfaecceb1713cb
Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_classification.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_graph.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_graph.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..78bc5fd0e62ecd26d6761965ca89b9e99423b73f
Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_graph.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_kde.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_kde.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..01c752447c8816e0eb2d9be90ff38380806af8d1
Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_kde.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_lof.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_lof.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f91e098ad60f8f81b77061a20f003478de2e8ed6
Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_lof.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_nca.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_nca.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7191f8b69e2acd93b2dd1da5cd0b3215ce5dc43d
Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_nca.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_nearest_centroid.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_nearest_centroid.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b6652e97d4e63350654c07b66135f7e1dea476c8
Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_nearest_centroid.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_regression.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_regression.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..86eba4dd7cff21915e2baab200870c1f75b73c32
Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_regression.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_unsupervised.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_unsupervised.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a78ae46b71698546effe60a13f65418326f38c04
Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_unsupervised.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/sklearn/neighbors/_ball_tree.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/sklearn/neighbors/_ball_tree.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..7a80b38a5359a2de68c5ed9ba674c812695d0d02
Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/neighbors/_ball_tree.cpython-310-x86_64-linux-gnu.so differ
diff --git a/venv/lib/python3.10/site-packages/sklearn/neighbors/_kd_tree.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/sklearn/neighbors/_kd_tree.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..ac526d5f5f775cd1401e7196302edea1bf61ac6a
Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/neighbors/_kd_tree.cpython-310-x86_64-linux-gnu.so differ
diff --git a/venv/lib/python3.10/site-packages/sklearn/neighbors/_kde.py b/venv/lib/python3.10/site-packages/sklearn/neighbors/_kde.py
new file mode 100644
index 0000000000000000000000000000000000000000..8885fb4c8c5d0a756d614c8e93e2d27ea242fe82
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/sklearn/neighbors/_kde.py
@@ -0,0 +1,365 @@
+"""
+Kernel Density Estimation
+-------------------------
+"""
+# Author: Jake Vanderplas 
+import itertools
+from numbers import Integral, Real
+
+import numpy as np
+from scipy.special import gammainc
+
+from ..base import BaseEstimator, _fit_context
+from ..neighbors._base import VALID_METRICS
+from ..utils import check_random_state
+from ..utils._param_validation import Interval, StrOptions
+from ..utils.extmath import row_norms
+from ..utils.validation import _check_sample_weight, check_is_fitted
+from ._ball_tree import BallTree
+from ._kd_tree import KDTree
+
+VALID_KERNELS = [
+    "gaussian",
+    "tophat",
+    "epanechnikov",
+    "exponential",
+    "linear",
+    "cosine",
+]
+
+TREE_DICT = {"ball_tree": BallTree, "kd_tree": KDTree}
+
+
+# TODO: implement a brute force version for testing purposes
+# TODO: create a density estimation base class?
+class KernelDensity(BaseEstimator):
+    """Kernel Density Estimation.
+
+    Read more in the :ref:`User Guide `.
+
+    Parameters
+    ----------
+    bandwidth : float or {"scott", "silverman"}, default=1.0
+        The bandwidth of the kernel. If a float is passed, it is used directly
+        as the kernel bandwidth. If a string is passed, the bandwidth is
+        estimated with the corresponding rule ("scott" or "silverman").
+
+    algorithm : {'kd_tree', 'ball_tree', 'auto'}, default='auto'
+        The tree algorithm to use.
+
+    kernel : {'gaussian', 'tophat', 'epanechnikov', 'exponential', 'linear', \
+                 'cosine'}, default='gaussian'
+        The kernel to use.
+
+    metric : str, default='euclidean'
+        Metric to use for distance computation. See the
+        documentation of `scipy.spatial.distance
+        `_ and
+        the metrics listed in
+        :class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric
+        values.
+
+        Not all metrics are valid with all algorithms: refer to the
+        documentation of :class:`BallTree` and :class:`KDTree`. Note that the
+        normalization of the density output is correct only for the Euclidean
+        distance metric.
+
+    atol : float, default=0
+        The desired absolute tolerance of the result.  A larger tolerance will
+        generally lead to faster execution.
+
+    rtol : float, default=0
+        The desired relative tolerance of the result.  A larger tolerance will
+        generally lead to faster execution.
+
+    breadth_first : bool, default=True
+        If true (default), use a breadth-first approach to the problem.
+        Otherwise use a depth-first approach.
+
+    leaf_size : int, default=40
+        Specify the leaf size of the underlying tree.  See :class:`BallTree`
+        or :class:`KDTree` for details.
+
+    metric_params : dict, default=None
+        Additional parameters to be passed to the tree for use with the
+        metric.  For more information, see the documentation of
+        :class:`BallTree` or :class:`KDTree`.
+
+    Attributes
+    ----------
+    n_features_in_ : int
+        Number of features seen during :term:`fit`.
+
+        .. versionadded:: 0.24
+
+    tree_ : ``BinaryTree`` instance
+        The tree algorithm for fast generalized N-point problems.
+
+    feature_names_in_ : ndarray of shape (`n_features_in_`,)
+        Names of features seen during :term:`fit`. Defined only when `X`
+        has feature names that are all strings.
+
+    bandwidth_ : float
+        Value of the bandwidth, given directly by the bandwidth parameter or
+        estimated using the 'scott' or 'silverman' method.
+
+        .. versionadded:: 1.0
+
+    See Also
+    --------
+    sklearn.neighbors.KDTree : K-dimensional tree for fast generalized N-point
+        problems.
+    sklearn.neighbors.BallTree : Ball tree for fast generalized N-point
+        problems.
+
+    Examples
+    --------
+    Compute a gaussian kernel density estimate with a fixed bandwidth.
+
+    >>> from sklearn.neighbors import KernelDensity
+    >>> import numpy as np
+    >>> rng = np.random.RandomState(42)
+    >>> X = rng.random_sample((100, 3))
+    >>> kde = KernelDensity(kernel='gaussian', bandwidth=0.5).fit(X)
+    >>> log_density = kde.score_samples(X[:3])
+    >>> log_density
+    array([-1.52955942, -1.51462041, -1.60244657])
+    """
+
+    _parameter_constraints: dict = {
+        "bandwidth": [
+            Interval(Real, 0, None, closed="neither"),
+            StrOptions({"scott", "silverman"}),
+        ],
+        "algorithm": [StrOptions(set(TREE_DICT.keys()) | {"auto"})],
+        "kernel": [StrOptions(set(VALID_KERNELS))],
+        "metric": [
+            StrOptions(
+                set(itertools.chain(*[VALID_METRICS[alg] for alg in TREE_DICT.keys()]))
+            )
+        ],
+        "atol": [Interval(Real, 0, None, closed="left")],
+        "rtol": [Interval(Real, 0, None, closed="left")],
+        "breadth_first": ["boolean"],
+        "leaf_size": [Interval(Integral, 1, None, closed="left")],
+        "metric_params": [None, dict],
+    }
+
+    def __init__(
+        self,
+        *,
+        bandwidth=1.0,
+        algorithm="auto",
+        kernel="gaussian",
+        metric="euclidean",
+        atol=0,
+        rtol=0,
+        breadth_first=True,
+        leaf_size=40,
+        metric_params=None,
+    ):
+        self.algorithm = algorithm
+        self.bandwidth = bandwidth
+        self.kernel = kernel
+        self.metric = metric
+        self.atol = atol
+        self.rtol = rtol
+        self.breadth_first = breadth_first
+        self.leaf_size = leaf_size
+        self.metric_params = metric_params
+
+    def _choose_algorithm(self, algorithm, metric):
+        # given the algorithm string + metric string, choose the optimal
+        # algorithm to compute the result.
+        if algorithm == "auto":
+            # use KD Tree if possible
+            if metric in KDTree.valid_metrics:
+                return "kd_tree"
+            elif metric in BallTree.valid_metrics:
+                return "ball_tree"
+        else:  # kd_tree or ball_tree
+            if metric not in TREE_DICT[algorithm].valid_metrics:
+                raise ValueError(
+                    "invalid metric for {0}: '{1}'".format(TREE_DICT[algorithm], metric)
+                )
+            return algorithm
+
+    @_fit_context(
+        # KernelDensity.metric is not validated yet
+        prefer_skip_nested_validation=False
+    )
+    def fit(self, X, y=None, sample_weight=None):
+        """Fit the Kernel Density model on the data.
+
+        Parameters
+        ----------
+        X : array-like of shape (n_samples, n_features)
+            List of n_features-dimensional data points.  Each row
+            corresponds to a single data point.
+
+        y : None
+            Ignored. This parameter exists only for compatibility with
+            :class:`~sklearn.pipeline.Pipeline`.
+
+        sample_weight : array-like of shape (n_samples,), default=None
+            List of sample weights attached to the data X.
+
+            .. versionadded:: 0.20
+
+        Returns
+        -------
+        self : object
+            Returns the instance itself.
+        """
+        algorithm = self._choose_algorithm(self.algorithm, self.metric)
+
+        if isinstance(self.bandwidth, str):
+            if self.bandwidth == "scott":
+                self.bandwidth_ = X.shape[0] ** (-1 / (X.shape[1] + 4))
+            elif self.bandwidth == "silverman":
+                self.bandwidth_ = (X.shape[0] * (X.shape[1] + 2) / 4) ** (
+                    -1 / (X.shape[1] + 4)
+                )
+        else:
+            self.bandwidth_ = self.bandwidth
+
+        X = self._validate_data(X, order="C", dtype=np.float64)
+
+        if sample_weight is not None:
+            sample_weight = _check_sample_weight(
+                sample_weight, X, dtype=np.float64, only_non_negative=True
+            )
+
+        kwargs = self.metric_params
+        if kwargs is None:
+            kwargs = {}
+        self.tree_ = TREE_DICT[algorithm](
+            X,
+            metric=self.metric,
+            leaf_size=self.leaf_size,
+            sample_weight=sample_weight,
+            **kwargs,
+        )
+        return self
+
+    def score_samples(self, X):
+        """Compute the log-likelihood of each sample under the model.
+
+        Parameters
+        ----------
+        X : array-like of shape (n_samples, n_features)
+            An array of points to query.  Last dimension should match dimension
+            of training data (n_features).
+
+        Returns
+        -------
+        density : ndarray of shape (n_samples,)
+            Log-likelihood of each sample in `X`. These are normalized to be
+            probability densities, so values will be low for high-dimensional
+            data.
+        """
+        check_is_fitted(self)
+        # The returned density is normalized to the number of points.
+        # For it to be a probability, we must scale it.  For this reason
+        # we'll also scale atol.
+        X = self._validate_data(X, order="C", dtype=np.float64, reset=False)
+        if self.tree_.sample_weight is None:
+            N = self.tree_.data.shape[0]
+        else:
+            N = self.tree_.sum_weight
+        atol_N = self.atol * N
+        log_density = self.tree_.kernel_density(
+            X,
+            h=self.bandwidth_,
+            kernel=self.kernel,
+            atol=atol_N,
+            rtol=self.rtol,
+            breadth_first=self.breadth_first,
+            return_log=True,
+        )
+        log_density -= np.log(N)
+        return log_density
+
+    def score(self, X, y=None):
+        """Compute the total log-likelihood under the model.
+
+        Parameters
+        ----------
+        X : array-like of shape (n_samples, n_features)
+            List of n_features-dimensional data points.  Each row
+            corresponds to a single data point.
+
+        y : None
+            Ignored. This parameter exists only for compatibility with
+            :class:`~sklearn.pipeline.Pipeline`.
+
+        Returns
+        -------
+        logprob : float
+            Total log-likelihood of the data in X. This is normalized to be a
+            probability density, so the value will be low for high-dimensional
+            data.
+        """
+        return np.sum(self.score_samples(X))
+
+    def sample(self, n_samples=1, random_state=None):
+        """Generate random samples from the model.
+
+        Currently, this is implemented only for gaussian and tophat kernels.
+
+        Parameters
+        ----------
+        n_samples : int, default=1
+            Number of samples to generate.
+
+        random_state : int, RandomState instance or None, default=None
+            Determines random number generation used to generate
+            random samples. Pass an int for reproducible results
+            across multiple function calls.
+            See :term:`Glossary `.
+
+        Returns
+        -------
+        X : array-like of shape (n_samples, n_features)
+            List of samples.
+        """
+        check_is_fitted(self)
+        # TODO: implement sampling for other valid kernel shapes
+        if self.kernel not in ["gaussian", "tophat"]:
+            raise NotImplementedError()
+
+        data = np.asarray(self.tree_.data)
+
+        rng = check_random_state(random_state)
+        u = rng.uniform(0, 1, size=n_samples)
+        if self.tree_.sample_weight is None:
+            i = (u * data.shape[0]).astype(np.int64)
+        else:
+            cumsum_weight = np.cumsum(np.asarray(self.tree_.sample_weight))
+            sum_weight = cumsum_weight[-1]
+            i = np.searchsorted(cumsum_weight, u * sum_weight)
+        if self.kernel == "gaussian":
+            return np.atleast_2d(rng.normal(data[i], self.bandwidth_))
+
+        elif self.kernel == "tophat":
+            # we first draw points from a d-dimensional normal distribution,
+            # then use an incomplete gamma function to map them to a uniform
+            # d-dimensional tophat distribution.
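+            # (Editorial note, not in the upstream file: for a standard normal
+            # draw X, u = gammainc(dim / 2, |X|^2 / 2) is the chi-square CDF of
+            # |X|^2 and hence uniform on [0, 1]; r = bandwidth * u**(1/dim)
+            # then has the radial CDF (r / bandwidth)**dim of a uniform ball,
+            # so (X / |X|) * r is uniform inside the tophat kernel support.)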
+            dim = data.shape[1]
+            X = rng.normal(size=(n_samples, dim))
+            s_sq = row_norms(X, squared=True)
+            correction = (
+                gammainc(0.5 * dim, 0.5 * s_sq) ** (1.0 / dim)
+                * self.bandwidth_
+                / np.sqrt(s_sq)
+            )
+            return data[i] + X * correction[:, np.newaxis]
+
+    def _more_tags(self):
+        return {
+            "_xfail_checks": {
+                "check_sample_weights_invariance": (
+                    "sample_weight must have positive values"
+                ),
+            }
+        }
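+
+
+if __name__ == "__main__":
+    # Illustrative sketch, not part of the upstream scikit-learn file: it
+    # checks that the "scott" and "silverman" rules applied in `fit` above
+    # match the fitted `bandwidth_` attribute on made-up demo data, and prints
+    # a few log-densities. Copy it into a standalone script if the relative
+    # imports at the top of this module prevent running it directly.
+    import numpy as np
+    from sklearn.neighbors import KernelDensity
+
+    rng = np.random.RandomState(0)
+    X_demo = rng.normal(size=(200, 2))
+    n, d = X_demo.shape
+
+    scott = n ** (-1 / (d + 4))                      # Scott's rule
+    silverman = (n * (d + 2) / 4) ** (-1 / (d + 4))  # Silverman's rule
+
+    kde = KernelDensity(bandwidth="scott").fit(X_demo)
+    assert np.isclose(kde.bandwidth_, scott)
+    assert np.isclose(
+        KernelDensity(bandwidth="silverman").fit(X_demo).bandwidth_, silverman
+    )
+    # score_samples returns log-densities normalized by the (weighted) number
+    # of training points, so exp(...) behaves like a probability density.
+    print(kde.score_samples(X_demo[:3]))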
diff --git a/venv/lib/python3.10/site-packages/sklearn/neighbors/_lof.py b/venv/lib/python3.10/site-packages/sklearn/neighbors/_lof.py
new file mode 100644
index 0000000000000000000000000000000000000000..05dfdb13a1cbe2a6bab62e909b8796e2a91d581e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/sklearn/neighbors/_lof.py
@@ -0,0 +1,516 @@
+# Authors: Nicolas Goix 
+#          Alexandre Gramfort 
+# License: BSD 3 clause
+
+import warnings
+from numbers import Real
+
+import numpy as np
+
+from ..base import OutlierMixin, _fit_context
+from ..utils import check_array
+from ..utils._param_validation import Interval, StrOptions
+from ..utils.metaestimators import available_if
+from ..utils.validation import check_is_fitted
+from ._base import KNeighborsMixin, NeighborsBase
+
+__all__ = ["LocalOutlierFactor"]
+
+
+class LocalOutlierFactor(KNeighborsMixin, OutlierMixin, NeighborsBase):
+    """Unsupervised Outlier Detection using the Local Outlier Factor (LOF).
+
+    The anomaly score of each sample is called the Local Outlier Factor.
+    It measures the local deviation of the density of a given sample with respect
+    to its neighbors.
+    It is local in that the anomaly score depends on how isolated the object
+    is with respect to the surrounding neighborhood.
+    More precisely, locality is given by k-nearest neighbors, whose distance
+    is used to estimate the local density.
+    By comparing the local density of a sample to the local densities of its
+    neighbors, one can identify samples that have a substantially lower density
+    than their neighbors. These are considered outliers.
+
+    .. versionadded:: 0.19
+
+    Parameters
+    ----------
+    n_neighbors : int, default=20
+        Number of neighbors to use by default for :meth:`kneighbors` queries.
+        If n_neighbors is larger than the number of samples provided,
+        all samples will be used.
+
+    algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
+        Algorithm used to compute the nearest neighbors:
+
+        - 'ball_tree' will use :class:`BallTree`
+        - 'kd_tree' will use :class:`KDTree`
+        - 'brute' will use a brute-force search.
+        - 'auto' will attempt to decide the most appropriate algorithm
+          based on the values passed to :meth:`fit` method.
+
+        Note: fitting on sparse input will override the setting of
+        this parameter, using brute force.
+
+    leaf_size : int, default=30
+        Leaf size passed to :class:`BallTree` or :class:`KDTree`. This can
+        affect the speed of the construction and query, as well as the memory
+        required to store the tree. The optimal value depends on the
+        nature of the problem.
+
+    metric : str or callable, default='minkowski'
+        Metric to use for distance computation. Default is "minkowski", which
+        results in the standard Euclidean distance when p = 2. See the
+        documentation of `scipy.spatial.distance
+        `_ and
+        the metrics listed in
+        :class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric
+        values.
+
+        If metric is "precomputed", X is assumed to be a distance matrix and
+        must be square during fit. X may be a :term:`sparse graph`, in which
+        case only "nonzero" elements may be considered neighbors.
+
+        If metric is a callable function, it takes two arrays representing 1D
+        vectors as inputs and must return one value indicating the distance
+        between those vectors. This works for Scipy's metrics, but is less
+        efficient than passing the metric name as a string.
+
+    p : float, default=2
+        Parameter for the Minkowski metric from
+        :func:`sklearn.metrics.pairwise_distances`. When p = 1, this
+        is equivalent to using manhattan_distance (l1), and euclidean_distance
+        (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
+
+    metric_params : dict, default=None
+        Additional keyword arguments for the metric function.
+
+    contamination : 'auto' or float, default='auto'
+        The amount of contamination of the data set, i.e. the proportion
+        of outliers in the data set. When fitting this is used to define the
+        threshold on the scores of the samples.
+
+        - if 'auto', the threshold is determined as in the
+          original paper,
+        - if a float, the contamination should be in the range (0, 0.5].
+
+        .. versionchanged:: 0.22
+           The default value of ``contamination`` changed from 0.1
+           to ``'auto'``.
+
+    novelty : bool, default=False
+        By default, LocalOutlierFactor is only meant to be used for outlier
+        detection (novelty=False). Set novelty to True if you want to use
+        LocalOutlierFactor for novelty detection. In this case be aware that
+        you should only use predict, decision_function and score_samples
+        on new unseen data and not on the training set; and note that the
+        results obtained this way may differ from the standard LOF results.
+
+        .. versionadded:: 0.20
+
+    n_jobs : int, default=None
+        The number of parallel jobs to run for neighbors search.
+        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
+        ``-1`` means using all processors. See :term:`Glossary `
+        for more details.
+
+    Attributes
+    ----------
+    negative_outlier_factor_ : ndarray of shape (n_samples,)
+        The opposite LOF of the training samples. The higher, the more normal.
+        Inliers tend to have a LOF score close to 1
+        (``negative_outlier_factor_`` close to -1), while outliers tend to have
+        a larger LOF score.
+
+        The local outlier factor (LOF) of a sample captures its
+        supposed 'degree of abnormality'.
+        It is the average of the ratio of the local reachability density of
+        a sample and those of its k-nearest neighbors.
+
+    n_neighbors_ : int
+        The actual number of neighbors used for :meth:`kneighbors` queries.
+
+    offset_ : float
+        Offset used to obtain binary labels from the raw scores.
+        Observations having a negative_outlier_factor smaller than `offset_`
+        are detected as abnormal.
+        The offset is set to -1.5 (inliers score around -1), except when a
+        contamination parameter different from "auto" is provided. In that
+        case, the offset is defined in such a way that we obtain the expected
+        number of outliers in training.
+
+        .. versionadded:: 0.20
+
+    effective_metric_ : str
+        The effective metric used for the distance computation.
+
+    effective_metric_params_ : dict
+        The effective additional keyword arguments for the metric function.
+
+    n_features_in_ : int
+        Number of features seen during :term:`fit`.
+
+        .. versionadded:: 0.24
+
+    feature_names_in_ : ndarray of shape (`n_features_in_`,)
+        Names of features seen during :term:`fit`. Defined only when `X`
+        has feature names that are all strings.
+
+        .. versionadded:: 1.0
+
+    n_samples_fit_ : int
+        It is the number of samples in the fitted data.
+
+    See Also
+    --------
+    sklearn.svm.OneClassSVM: Unsupervised Outlier Detection using
+        Support Vector Machine.
+
+    References
+    ----------
+    .. [1] Breunig, M. M., Kriegel, H. P., Ng, R. T., & Sander, J. (2000, May).
+           LOF: identifying density-based local outliers. In ACM SIGMOD Record.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from sklearn.neighbors import LocalOutlierFactor
+    >>> X = [[-1.1], [0.2], [101.1], [0.3]]
+    >>> clf = LocalOutlierFactor(n_neighbors=2)
+    >>> clf.fit_predict(X)
+    array([ 1,  1, -1,  1])
+    >>> clf.negative_outlier_factor_
+    array([ -0.9821...,  -1.0370..., -73.3697...,  -0.9821...])
+    """
+
+    _parameter_constraints: dict = {
+        **NeighborsBase._parameter_constraints,
+        "contamination": [
+            StrOptions({"auto"}),
+            Interval(Real, 0, 0.5, closed="right"),
+        ],
+        "novelty": ["boolean"],
+    }
+    _parameter_constraints.pop("radius")
+
+    def __init__(
+        self,
+        n_neighbors=20,
+        *,
+        algorithm="auto",
+        leaf_size=30,
+        metric="minkowski",
+        p=2,
+        metric_params=None,
+        contamination="auto",
+        novelty=False,
+        n_jobs=None,
+    ):
+        super().__init__(
+            n_neighbors=n_neighbors,
+            algorithm=algorithm,
+            leaf_size=leaf_size,
+            metric=metric,
+            p=p,
+            metric_params=metric_params,
+            n_jobs=n_jobs,
+        )
+        self.contamination = contamination
+        self.novelty = novelty
+
+    def _check_novelty_fit_predict(self):
+        if self.novelty:
+            msg = (
+                "fit_predict is not available when novelty=True. Use "
+                "novelty=False if you want to predict on the training set."
+            )
+            raise AttributeError(msg)
+        return True
+
+    @available_if(_check_novelty_fit_predict)
+    def fit_predict(self, X, y=None):
+        """Fit the model to the training set X and return the labels.
+
+        **Not available for novelty detection (when novelty is set to True).**
+        Label is 1 for an inlier and -1 for an outlier according to the LOF
+        score and the contamination parameter.
+
+        Parameters
+        ----------
+        X : {array-like, sparse matrix} of shape (n_samples, n_features)
+            The query sample or samples to compute the Local Outlier Factor
+            w.r.t. the training samples.
+
+        y : Ignored
+            Not used, present for API consistency by convention.
+
+        Returns
+        -------
+        is_inlier : ndarray of shape (n_samples,)
+            Returns -1 for anomalies/outliers and 1 for inliers.
+        """
+
+        # As fit_predict would be different from fit.predict, fit_predict is
+        # only available for outlier detection (novelty=False)
+
+        return self.fit(X)._predict()
+
+    @_fit_context(
+        # LocalOutlierFactor.metric is not validated yet
+        prefer_skip_nested_validation=False
+    )
+    def fit(self, X, y=None):
+        """Fit the local outlier factor detector from the training dataset.
+
+        Parameters
+        ----------
+        X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
+                (n_samples, n_samples) if metric='precomputed'
+            Training data.
+
+        y : Ignored
+            Not used, present for API consistency by convention.
+
+        Returns
+        -------
+        self : LocalOutlierFactor
+            The fitted local outlier factor detector.
+        """
+        self._fit(X)
+
+        n_samples = self.n_samples_fit_
+        if self.n_neighbors > n_samples:
+            warnings.warn(
+                "n_neighbors (%s) is greater than the "
+                "total number of samples (%s). n_neighbors "
+                "will be set to (n_samples - 1) for estimation."
+                % (self.n_neighbors, n_samples)
+            )
+        self.n_neighbors_ = max(1, min(self.n_neighbors, n_samples - 1))
+
+        self._distances_fit_X_, _neighbors_indices_fit_X_ = self.kneighbors(
+            n_neighbors=self.n_neighbors_
+        )
+
+        if self._fit_X.dtype == np.float32:
+            self._distances_fit_X_ = self._distances_fit_X_.astype(
+                self._fit_X.dtype,
+                copy=False,
+            )
+
+        self._lrd = self._local_reachability_density(
+            self._distances_fit_X_, _neighbors_indices_fit_X_
+        )
+
+        # Compute lof score over training samples to define offset_:
+        lrd_ratios_array = (
+            self._lrd[_neighbors_indices_fit_X_] / self._lrd[:, np.newaxis]
+        )
+
+        self.negative_outlier_factor_ = -np.mean(lrd_ratios_array, axis=1)
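+        # (Editorial note, not in the upstream file: LOF(x) is the mean of
+        # lrd(neighbor) / lrd(x) over the k nearest neighbors; values well
+        # above 1 mean x sits in a sparser region than its neighbors, i.e. is
+        # more abnormal. The sign is flipped so that higher means more normal.)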
+
+        if self.contamination == "auto":
+            # inliers score around -1 (the higher, the less abnormal).
+            self.offset_ = -1.5
+        else:
+            self.offset_ = np.percentile(
+                self.negative_outlier_factor_, 100.0 * self.contamination
+            )
+
+        return self
+
+    def _check_novelty_predict(self):
+        if not self.novelty:
+            msg = (
+                "predict is not available when novelty=False, use "
+                "fit_predict if you want to predict on training data. Use "
+                "novelty=True if you want to use LOF for novelty detection "
+                "and predict on new unseen data."
+            )
+            raise AttributeError(msg)
+        return True
+
+    @available_if(_check_novelty_predict)
+    def predict(self, X=None):
+        """Predict the labels (1 inlier, -1 outlier) of X according to LOF.
+
+        **Only available for novelty detection (when novelty is set to True).**
+        This method allows generalizing prediction to *new observations* (not
+        in the training set). Note that the result of ``clf.fit(X)`` then
+        ``clf.predict(X)`` with ``novelty=True`` may differ from the result
+        obtained by ``clf.fit_predict(X)`` with ``novelty=False``.
+
+        Parameters
+        ----------
+        X : {array-like, sparse matrix} of shape (n_samples, n_features)
+            The query sample or samples to compute the Local Outlier Factor
+            w.r.t. the training samples.
+
+        Returns
+        -------
+        is_inlier : ndarray of shape (n_samples,)
+            Returns -1 for anomalies/outliers and +1 for inliers.
+        """
+        return self._predict(X)
+
+    def _predict(self, X=None):
+        """Predict the labels (1 inlier, -1 outlier) of X according to LOF.
+
+        If X is None, returns the same as fit_predict(X_train).
+
+        Parameters
+        ----------
+        X : {array-like, sparse matrix} of shape (n_samples, n_features), default=None
+            The query sample or samples to compute the Local Outlier Factor
+            w.r.t. the training samples. If None, makes prediction on the
+            training data without considering them as their own neighbors.
+
+        Returns
+        -------
+        is_inlier : ndarray of shape (n_samples,)
+            Returns -1 for anomalies/outliers and +1 for inliers.
+        """
+        check_is_fitted(self)
+
+        if X is not None:
+            X = check_array(X, accept_sparse="csr")
+            is_inlier = np.ones(X.shape[0], dtype=int)
+            is_inlier[self.decision_function(X) < 0] = -1
+        else:
+            is_inlier = np.ones(self.n_samples_fit_, dtype=int)
+            is_inlier[self.negative_outlier_factor_ < self.offset_] = -1
+
+        return is_inlier
+
+    def _check_novelty_decision_function(self):
+        if not self.novelty:
+            msg = (
+                "decision_function is not available when novelty=False. "
+                "Use novelty=True if you want to use LOF for novelty "
+                "detection and compute decision_function for new unseen "
+                "data. Note that the opposite LOF of the training samples "
+                "is always available by considering the "
+                "negative_outlier_factor_ attribute."
+            )
+            raise AttributeError(msg)
+        return True
+
+    @available_if(_check_novelty_decision_function)
+    def decision_function(self, X):
+        """Shifted opposite of the Local Outlier Factor of X.
+
+        Bigger is better, i.e. large values correspond to inliers.
+
+        **Only available for novelty detection (when novelty is set to True).**
+        The shift offset allows a zero threshold for being an outlier.
+        The argument X is supposed to contain *new data*: if X contains a
+        point from training, it considers the latter in its own neighborhood.
+        Also, the samples in X are not considered in the neighborhood of any
+        point.
+
+        Parameters
+        ----------
+        X : {array-like, sparse matrix} of shape (n_samples, n_features)
+            The query sample or samples to compute the Local Outlier Factor
+            w.r.t. the training samples.
+
+        Returns
+        -------
+        shifted_opposite_lof_scores : ndarray of shape (n_samples,)
+            The shifted opposite of the Local Outlier Factor of each input
+            sample. The lower, the more abnormal. Negative scores represent
+            outliers, positive scores represent inliers.
+        """
+        return self.score_samples(X) - self.offset_
+
+    def _check_novelty_score_samples(self):
+        if not self.novelty:
+            msg = (
+                "score_samples is not available when novelty=False. The "
+                "scores of the training samples are always available "
+                "through the negative_outlier_factor_ attribute. Use "
+                "novelty=True if you want to use LOF for novelty detection "
+                "and compute score_samples for new unseen data."
+            )
+            raise AttributeError(msg)
+        return True
+
+    @available_if(_check_novelty_score_samples)
+    def score_samples(self, X):
+        """Opposite of the Local Outlier Factor of X.
+
+        The opposite is taken so that bigger is better, i.e. large values correspond
+        to inliers.
+
+        **Only available for novelty detection (when novelty is set to True).**
+        The argument X is supposed to contain *new data*: if X contains a
+        point from training, it considers the latter in its own neighborhood.
+        Also, the samples in X are not considered in the neighborhood of any
+        point. Because of this, the scores obtained via ``score_samples`` may
+        differ from the standard LOF scores.
+        The standard LOF scores for the training data are available via the
+        ``negative_outlier_factor_`` attribute.
+
+        Parameters
+        ----------
+        X : {array-like, sparse matrix} of shape (n_samples, n_features)
+            The query sample or samples to compute the Local Outlier Factor
+            w.r.t. the training samples.
+
+        Returns
+        -------
+        opposite_lof_scores : ndarray of shape (n_samples,)
+            The opposite of the Local Outlier Factor of each input sample.
+            The lower, the more abnormal.
+        """
+        check_is_fitted(self)
+        X = check_array(X, accept_sparse="csr")
+
+        distances_X, neighbors_indices_X = self.kneighbors(
+            X, n_neighbors=self.n_neighbors_
+        )
+
+        if X.dtype == np.float32:
+            distances_X = distances_X.astype(X.dtype, copy=False)
+
+        X_lrd = self._local_reachability_density(
+            distances_X,
+            neighbors_indices_X,
+        )
+
+        lrd_ratios_array = self._lrd[neighbors_indices_X] / X_lrd[:, np.newaxis]
+
+        # as bigger is better:
+        return -np.mean(lrd_ratios_array, axis=1)
+
+    def _local_reachability_density(self, distances_X, neighbors_indices):
+        """The local reachability density (LRD)
+
+        The LRD of a sample is the inverse of the average reachability
+        distance of its k-nearest neighbors.
+
+        Parameters
+        ----------
+        distances_X : ndarray of shape (n_queries, self.n_neighbors)
+            Distances to the neighbors (in the training samples `self._fit_X`)
+            of each query point to compute the LRD.
+
+        neighbors_indices : ndarray of shape (n_queries, self.n_neighbors)
+            Neighbors indices (of each query point) among training samples
+            self._fit_X.
+
+        Returns
+        -------
+        local_reachability_density : ndarray of shape (n_queries,)
+            The local reachability density of each sample.
+        """
+        dist_k = self._distances_fit_X_[neighbors_indices, self.n_neighbors_ - 1]
+        reach_dist_array = np.maximum(distances_X, dist_k)
+
+        # 1e-10 to avoid `nan` when the number of duplicates > n_neighbors_:
+        return 1.0 / (np.mean(reach_dist_array, axis=1) + 1e-10)
+
+    def _more_tags(self):
+        return {
+            "preserves_dtype": [np.float64, np.float32],
+        }
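+
+
+if __name__ == "__main__":
+    # Illustrative sketch, not part of the upstream scikit-learn file: it
+    # contrasts the two modes documented above on made-up data. With
+    # novelty=False only fit_predict is available (outlier detection on the
+    # training set itself); with novelty=True, predict / decision_function /
+    # score_samples are intended for new, unseen samples only.
+    import numpy as np
+    from sklearn.neighbors import LocalOutlierFactor
+
+    rng = np.random.RandomState(0)
+    X_train = np.r_[rng.normal(size=(100, 2)), [[8.0, 8.0]]]  # one obvious outlier
+
+    # Outlier detection on the training data.
+    lof = LocalOutlierFactor(n_neighbors=20)
+    labels = lof.fit_predict(X_train)  # -1 for outliers, 1 for inliers
+    print(labels[-1], lof.negative_outlier_factor_[-1])
+
+    # Novelty detection: fit on the clean part, then score new observations.
+    novelty_lof = LocalOutlierFactor(n_neighbors=20, novelty=True).fit(X_train[:-1])
+    X_new = np.array([[0.0, 0.0], [10.0, 10.0]])
+    print(novelty_lof.predict(X_new))            # typically [ 1, -1]
+    print(novelty_lof.decision_function(X_new))  # > 0 inlier-ish, < 0 outlier-ish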
diff --git a/venv/lib/python3.10/site-packages/sklearn/neighbors/_nearest_centroid.py b/venv/lib/python3.10/site-packages/sklearn/neighbors/_nearest_centroid.py
new file mode 100644
index 0000000000000000000000000000000000000000..75086ee25448e909b9dfe7dee6c1fe4c88de543b
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/sklearn/neighbors/_nearest_centroid.py
@@ -0,0 +1,261 @@
+"""
+Nearest Centroid Classification
+"""
+
+# Author: Robert Layton 
+#         Olivier Grisel 
+#
+# License: BSD 3 clause
+
+import warnings
+from numbers import Real
+
+import numpy as np
+from scipy import sparse as sp
+
+from sklearn.metrics.pairwise import _VALID_METRICS
+
+from ..base import BaseEstimator, ClassifierMixin, _fit_context
+from ..metrics.pairwise import pairwise_distances_argmin
+from ..preprocessing import LabelEncoder
+from ..utils._param_validation import Interval, StrOptions
+from ..utils.multiclass import check_classification_targets
+from ..utils.sparsefuncs import csc_median_axis_0
+from ..utils.validation import check_is_fitted
+
+
+class NearestCentroid(ClassifierMixin, BaseEstimator):
+    """Nearest centroid classifier.
+
+    Each class is represented by its centroid, with test samples classified to
+    the class with the nearest centroid.
+
+    Read more in the :ref:`User Guide `.
+
+    Parameters
+    ----------
+    metric : str or callable, default="euclidean"
+        Metric to use for distance computation. See the documentation of
+        `scipy.spatial.distance
+        `_ and
+        the metrics listed in
+        :class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric
+        values. Note that "wminkowski", "seuclidean" and "mahalanobis" are not
+        supported.
+
+        The centroid for the samples corresponding to each class is
+        the point from which the sum of the distances (according to the metric)
+        of all samples that belong to that particular class is minimized.
+        If the `"manhattan"` metric is provided, this centroid is the median;
+        for all other metrics, the centroid is now set to be the mean.
+
+        .. deprecated:: 1.3
+            Support for metrics other than `euclidean` and `manhattan` and for
+            callables was deprecated in version 1.3 and will be removed in
+            version 1.5.
+
+        .. versionchanged:: 0.19
+            `metric='precomputed'` was deprecated and now raises an error
+
+    shrink_threshold : float, default=None
+        Threshold for shrinking centroids to remove features.
+
+    Attributes
+    ----------
+    centroids_ : array-like of shape (n_classes, n_features)
+        Centroid of each class.
+
+    classes_ : array of shape (n_classes,)
+        The unique classes labels.
+
+    n_features_in_ : int
+        Number of features seen during :term:`fit`.
+
+        .. versionadded:: 0.24
+
+    feature_names_in_ : ndarray of shape (`n_features_in_`,)
+        Names of features seen during :term:`fit`. Defined only when `X`
+        has feature names that are all strings.
+
+        .. versionadded:: 1.0
+
+    See Also
+    --------
+    KNeighborsClassifier : Nearest neighbors classifier.
+
+    Notes
+    -----
+    When used for text classification with tf-idf vectors, this classifier is
+    also known as the Rocchio classifier.
+
+    References
+    ----------
+    Tibshirani, R., Hastie, T., Narasimhan, B., & Chu, G. (2002). Diagnosis of
+    multiple cancer types by shrunken centroids of gene expression. Proceedings
+    of the National Academy of Sciences of the United States of America,
+    99(10), 6567-6572. The National Academy of Sciences.
+
+    Examples
+    --------
+    >>> from sklearn.neighbors import NearestCentroid
+    >>> import numpy as np
+    >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
+    >>> y = np.array([1, 1, 1, 2, 2, 2])
+    >>> clf = NearestCentroid()
+    >>> clf.fit(X, y)
+    NearestCentroid()
+    >>> print(clf.predict([[-0.8, -1]]))
+    [1]
+    """
+
+    _valid_metrics = set(_VALID_METRICS) - {"mahalanobis", "seuclidean", "wminkowski"}
+
+    _parameter_constraints: dict = {
+        "metric": [
+            StrOptions(
+                _valid_metrics, deprecated=_valid_metrics - {"manhattan", "euclidean"}
+            ),
+            callable,
+        ],
+        "shrink_threshold": [Interval(Real, 0, None, closed="neither"), None],
+    }
+
+    def __init__(self, metric="euclidean", *, shrink_threshold=None):
+        self.metric = metric
+        self.shrink_threshold = shrink_threshold
+
+    @_fit_context(prefer_skip_nested_validation=True)
+    def fit(self, X, y):
+        """
+        Fit the NearestCentroid model according to the given training data.
+
+        Parameters
+        ----------
+        X : {array-like, sparse matrix} of shape (n_samples, n_features)
+            Training vector, where `n_samples` is the number of samples and
+            `n_features` is the number of features.
+            Note that centroid shrinking cannot be used with sparse matrices.
+        y : array-like of shape (n_samples,)
+            Target values.
+
+        Returns
+        -------
+        self : object
+            Fitted estimator.
+        """
+        if isinstance(self.metric, str) and self.metric not in (
+            "manhattan",
+            "euclidean",
+        ):
+            warnings.warn(
+                (
+                    "Support for distance metrics other than euclidean and "
+                    "manhattan and for callables was deprecated in version "
+                    "1.3 and will be removed in version 1.5."
+                ),
+                FutureWarning,
+            )
+
+        # If X is sparse and the metric is "manhattan", store it in CSC
+        # format to make it easier to calculate the median.
+        if self.metric == "manhattan":
+            X, y = self._validate_data(X, y, accept_sparse=["csc"])
+        else:
+            X, y = self._validate_data(X, y, accept_sparse=["csr", "csc"])
+        is_X_sparse = sp.issparse(X)
+        if is_X_sparse and self.shrink_threshold:
+            raise ValueError("threshold shrinking not supported for sparse input")
+        check_classification_targets(y)
+
+        n_samples, n_features = X.shape
+        le = LabelEncoder()
+        y_ind = le.fit_transform(y)
+        self.classes_ = classes = le.classes_
+        n_classes = classes.size
+        if n_classes < 2:
+            raise ValueError(
+                "The number of classes has to be greater than one; got %d class"
+                % (n_classes)
+            )
+
+        # Centroids of each class, filled in below.
+        self.centroids_ = np.empty((n_classes, n_features), dtype=np.float64)
+        # Number of samples in each class.
+        nk = np.zeros(n_classes)
+
+        for cur_class in range(n_classes):
+            center_mask = y_ind == cur_class
+            nk[cur_class] = np.sum(center_mask)
+            if is_X_sparse:
+                center_mask = np.where(center_mask)[0]
+
+            if self.metric == "manhattan":
+                # NumPy does not calculate median of sparse matrices.
+                if not is_X_sparse:
+                    self.centroids_[cur_class] = np.median(X[center_mask], axis=0)
+                else:
+                    self.centroids_[cur_class] = csc_median_axis_0(X[center_mask])
+            else:
+                # TODO(1.5) remove warning when metric is only manhattan or euclidean
+                if self.metric != "euclidean":
+                    warnings.warn(
+                        "Averaging for metrics other than "
+                        "euclidean and manhattan not supported. "
+                        "The average is set to be the mean."
+                    )
+                self.centroids_[cur_class] = X[center_mask].mean(axis=0)
+
+        if self.shrink_threshold:
+            if np.all(np.ptp(X, axis=0) == 0):
+                raise ValueError("All features have zero variance. Division by zero.")
+            dataset_centroid_ = np.mean(X, axis=0)
+
+            # m parameter for determining deviation
+            m = np.sqrt((1.0 / nk) - (1.0 / n_samples))
+            # Calculate deviation using the standard deviation of centroids.
+            variance = (X - self.centroids_[y_ind]) ** 2
+            variance = variance.sum(axis=0)
+            s = np.sqrt(variance / (n_samples - n_classes))
+            s += np.median(s)  # To deter outliers from affecting the results.
+            mm = m.reshape(len(m), 1)  # Reshape to allow broadcasting.
+            ms = mm * s
+            deviation = (self.centroids_ - dataset_centroid_) / ms
+            # Soft thresholding: if the deviation crosses 0 during shrinking,
+            # it becomes zero.
+            signs = np.sign(deviation)
+            deviation = np.abs(deviation) - self.shrink_threshold
+            np.clip(deviation, 0, None, out=deviation)
+            deviation *= signs
+            # Now adjust the centroids using the deviation
+            msd = ms * deviation
+            self.centroids_ = dataset_centroid_[np.newaxis, :] + msd
+        return self
+
+    # TODO(1.5) remove note about precomputed metric
+    def predict(self, X):
+        """Perform classification on an array of test vectors `X`.
+
+        The predicted class `C` for each sample in `X` is returned.
+
+        Parameters
+        ----------
+        X : {array-like, sparse matrix} of shape (n_samples, n_features)
+            Test samples.
+
+        Returns
+        -------
+        C : ndarray of shape (n_samples,)
+            The predicted classes.
+
+        Notes
+        -----
+        If the metric constructor parameter is `"precomputed"`, `X` is assumed
+        to be the distance matrix between the data to be predicted and
+        `self.centroids_`.
+        """
+        check_is_fitted(self)
+
+        X = self._validate_data(X, accept_sparse="csr", reset=False)
+        return self.classes_[
+            pairwise_distances_argmin(X, self.centroids_, metric=self.metric)
+        ]
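+
+
+if __name__ == "__main__":
+    # Illustrative sketch, not part of the upstream scikit-learn file: the
+    # soft-thresholding step used for `shrink_threshold` in `fit` above,
+    # isolated on a made-up deviation array. Components whose absolute
+    # deviation is below the threshold are shrunk exactly to zero, which
+    # effectively removes those features from the affected class centroids.
+    import numpy as np
+
+    deviation = np.array([[0.9, -0.1], [-0.4, 0.05]])
+    shrink_threshold = 0.3
+
+    signs = np.sign(deviation)
+    shrunk = np.abs(deviation) - shrink_threshold
+    np.clip(shrunk, 0, None, out=shrunk)
+    shrunk *= signs
+    print(shrunk)  # approximately [[0.6, -0.0], [-0.1, 0.0]]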
diff --git a/venv/lib/python3.10/site-packages/sklearn/neighbors/_partition_nodes.pxd b/venv/lib/python3.10/site-packages/sklearn/neighbors/_partition_nodes.pxd
new file mode 100644
index 0000000000000000000000000000000000000000..bd2160cc3b26f4eaf0821735aeb278fd3a16eb15
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/sklearn/neighbors/_partition_nodes.pxd
@@ -0,0 +1,10 @@
+from cython cimport floating
+from ..utils._typedefs cimport float64_t, intp_t
+
+cdef int partition_node_indices(
+        const floating *data,
+        intp_t *node_indices,
+        intp_t split_dim,
+        intp_t split_index,
+        intp_t n_features,
+        intp_t n_points) except -1
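+
+# Editorial note, not part of the upstream declaration: `partition_node_indices`
+# reorders `node_indices` in place so that, along `split_dim`, the entry at
+# position `split_index` has all smaller coordinate values on its left and all
+# larger ones on its right (the same effect as NumPy's
+# `np.argpartition(data[node_indices, split_dim], split_index)`). This is how a
+# ball tree or kd-tree node is split into two children during construction.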
diff --git a/venv/lib/python3.10/site-packages/sklearn/neighbors/_quad_tree.pxd b/venv/lib/python3.10/site-packages/sklearn/neighbors/_quad_tree.pxd
new file mode 100644
index 0000000000000000000000000000000000000000..9ed033e747314ef4b2f7599c99da85be6dbce73e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/sklearn/neighbors/_quad_tree.pxd
@@ -0,0 +1,92 @@
+# Author: Thomas Moreau 
+# Author: Olivier Grisel 
+
+# See quad_tree.pyx for details.
+
+cimport numpy as cnp
+from ..utils._typedefs cimport float32_t, intp_t
+
+# This is effectively an ifdef statement in Cython
+# It allows us to write printf debugging lines
+# and remove them at compile time
+cdef enum:
+    DEBUGFLAG = 0
+
+cdef float EPSILON = 1e-6
+
+# XXX: Careful to not change the order of the arguments. It is important to
+# have is_leaf and max_width consecutive as it permits to avoid padding by
+# the compiler and keep the size coherent for both C and numpy data structures.
+cdef struct Cell:
+    # Base storage structure for cells in a QuadTree object
+
+    # Tree structure
+    intp_t parent                # Parent cell of this cell
+    intp_t[8] children           # Array pointing to children of this cell
+
+    # Cell description
+    intp_t cell_id               # Id of the cell in the cells array in the Tree
+    intp_t point_index           # Index of the point at this cell (only defined
+    #                            # in a non-empty leaf)
+    bint is_leaf                 # Does this cell have children?
+    float32_t squared_max_width  # Squared value of the maximum width w
+    intp_t depth                 # Depth of the cell in the tree
+    intp_t cumulative_size       # Number of points included in the subtree with
+    #                            # this cell as a root.
+
+    # Internal constants
+    float32_t[3] center          # Store the center for quick split of cells
+    float32_t[3] barycenter      # Keep track of the center of mass of the cell
+
+    # Cell boundaries
+    float32_t[3] min_bounds      # Inferior boundaries of this cell (inclusive)
+    float32_t[3] max_bounds      # Superior boundaries of this cell (exclusive)
+
+
+cdef class _QuadTree:
+    # The QuadTree object is a quad tree structure constructed by inserting
+    # recursively points in the tree and splitting cells in 4 so that each
+    # leaf cell contains at most one point.
+    # This structure also handles 3D data, inserted in trees with 8 children
+    # for each node.
+
+    # Parameters of the tree
+    cdef public int n_dimensions         # Number of dimensions in X
+    cdef public int verbose              # Verbosity of the output
+    cdef intp_t n_cells_per_cell         # Number of children per node. (2 ** n_dimension)
+
+    # Tree inner structure
+    cdef public intp_t max_depth         # Max depth of the tree
+    cdef public intp_t cell_count        # Counter for node IDs
+    cdef public intp_t capacity          # Capacity of tree, in terms of nodes
+    cdef public intp_t n_points          # Total number of points
+    cdef Cell* cells                     # Array of nodes
+
+    # Point insertion methods
+    cdef int insert_point(self, float32_t[3] point, intp_t point_index,
+                          intp_t cell_id=*) except -1 nogil
+    cdef intp_t _insert_point_in_new_child(self, float32_t[3] point, Cell* cell,
+                                           intp_t point_index, intp_t size=*
+                                           ) noexcept nogil
+    cdef intp_t _select_child(self, float32_t[3] point, Cell* cell) noexcept nogil
+    cdef bint _is_duplicate(self, float32_t[3] point1, float32_t[3] point2) noexcept nogil
+
+    # Create a summary of the Tree compared to a query point
+    cdef long summarize(self, float32_t[3] point, float32_t* results,
+                        float squared_theta=*, intp_t cell_id=*, long idx=*
+                        ) noexcept nogil
+
+    # Internal cell initialization methods
+    cdef void _init_cell(self, Cell* cell, intp_t parent, intp_t depth) noexcept nogil
+    cdef void _init_root(self, float32_t[3] min_bounds, float32_t[3] max_bounds
+                         ) noexcept nogil
+
+    # Private methods
+    cdef int _check_point_in_cell(self, float32_t[3] point, Cell* cell
+                                  ) except -1 nogil
+
+    # Private array manipulation to manage the ``cells`` array
+    cdef int _resize(self, intp_t capacity) except -1 nogil
+    cdef int _resize_c(self, intp_t capacity=*) except -1 nogil
+    cdef int _get_cell(self, float32_t[3] point, intp_t cell_id=*) except -1 nogil
+    cdef Cell[:] _get_cell_ndarray(self)
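+
+# Editorial note, not part of the upstream declaration: despite the name, the
+# structure generalizes to an octree for 3D inputs, since each cell is split
+# into `n_cells_per_cell = 2 ** n_dimensions` children and a point is routed to
+# a child by comparing its coordinates with the cell `center`. The `summarize`
+# method is what Barnes-Hut style consumers (notably t-SNE) use to approximate
+# long-range interactions by a single cell barycenter whenever the cell is far
+# enough from the query point (controlled by `squared_theta`).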
diff --git a/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/__init__.py b/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..04ea7da95b14b7c9cb29f51a3d85b1869aaede97
Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_ball_tree.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_ball_tree.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b78f8a91d4113af3b61433897aae3bd3f3846827
Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_ball_tree.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_graph.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_graph.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5d04922195664daedabf43d2ef27b81849d0f08b
Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_graph.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_kd_tree.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_kd_tree.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f7a986369b697bce2e5dcef7a56b2052f4104bc1
Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_kd_tree.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_kde.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_kde.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..54f3ba782e7be05ca6d038d1385cddda29bed1f8
Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_kde.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_lof.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_lof.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8906355a33be94ca84d8f9eb34d8808be053d9f5
Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_lof.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_nca.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_nca.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..26fefd862b52b7efa20216fd172a4eece442a00c
Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_nca.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_nearest_centroid.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_nearest_centroid.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d290c63f2eb501d6dcd449cb10e48ab44ed0f6ca
Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_nearest_centroid.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_neighbors.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_neighbors.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8a12e4ff0409bbb404ed1f71d3fabbc38dd04bd7
Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_neighbors.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_neighbors_pipeline.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_neighbors_pipeline.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ba9a7fec5ee090a98303718b613ad368d73323c6
Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_neighbors_pipeline.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_neighbors_tree.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_neighbors_tree.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d80da3d43ac9bd0a98b67c55a4ca6e1f23f18cb5
Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_neighbors_tree.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_quad_tree.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_quad_tree.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f1423a7fc348a31ea595cf186d69ce97f66fadd9
Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_quad_tree.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/test_ball_tree.py b/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/test_ball_tree.py
new file mode 100644
index 0000000000000000000000000000000000000000..5263f201f320b17ced98fb223e7aaaf624d9271d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/test_ball_tree.py
@@ -0,0 +1,200 @@
+import itertools
+
+import numpy as np
+import pytest
+from numpy.testing import assert_allclose, assert_array_almost_equal, assert_equal
+
+from sklearn.neighbors._ball_tree import BallTree, BallTree32, BallTree64
+from sklearn.utils import check_random_state
+from sklearn.utils._testing import _convert_container
+from sklearn.utils.validation import check_array
+
+rng = np.random.RandomState(10)
+V_mahalanobis = rng.rand(3, 3)
+V_mahalanobis = np.dot(V_mahalanobis, V_mahalanobis.T)
+
+DIMENSION = 3
+
+METRICS = {
+    "euclidean": {},
+    "manhattan": {},
+    "minkowski": dict(p=3),
+    "chebyshev": {},
+}
+
+DISCRETE_METRICS = ["hamming", "canberra", "braycurtis"]
+
+BOOLEAN_METRICS = [
+    "jaccard",
+    "dice",
+    "rogerstanimoto",
+    "russellrao",
+    "sokalmichener",
+    "sokalsneath",
+]
+
+BALL_TREE_CLASSES = [
+    BallTree64,
+    BallTree32,
+]
+
+
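+# Reference implementation: compute the full pairwise distance matrix with
+# DistanceMetric and keep the k smallest entries per query row, e.g.
+# brute_force_neighbors(X, Y, k=5, metric="euclidean") returns (dist, ind),
+# each of shape (n_queries, 5).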
+def brute_force_neighbors(X, Y, k, metric, **kwargs):
+    from sklearn.metrics import DistanceMetric
+
+    X, Y = check_array(X), check_array(Y)
+    D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
+    ind = np.argsort(D, axis=1)[:, :k]
+    dist = D[np.arange(Y.shape[0])[:, None], ind]
+    return dist, ind
+
+
+def test_BallTree_is_BallTree64_subclass():
+    assert issubclass(BallTree, BallTree64)
+
+
+@pytest.mark.parametrize("metric", itertools.chain(BOOLEAN_METRICS, DISCRETE_METRICS))
+@pytest.mark.parametrize("array_type", ["list", "array"])
+@pytest.mark.parametrize("BallTreeImplementation", BALL_TREE_CLASSES)
+def test_ball_tree_query_metrics(metric, array_type, BallTreeImplementation):
+    rng = check_random_state(0)
+    if metric in BOOLEAN_METRICS:
+        X = rng.random_sample((40, 10)).round(0)
+        Y = rng.random_sample((10, 10)).round(0)
+    elif metric in DISCRETE_METRICS:
+        X = (4 * rng.random_sample((40, 10))).round(0)
+        Y = (4 * rng.random_sample((10, 10))).round(0)
+    X = _convert_container(X, array_type)
+    Y = _convert_container(Y, array_type)
+
+    k = 5
+
+    bt = BallTreeImplementation(X, leaf_size=1, metric=metric)
+    dist1, ind1 = bt.query(Y, k)
+    dist2, ind2 = brute_force_neighbors(X, Y, k, metric)
+    assert_array_almost_equal(dist1, dist2)
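+    # Indices are not compared here: boolean and discrete metrics on this data
+    # produce many tied distances, so the neighbor ordering is not unique.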
+
+
+@pytest.mark.parametrize(
+    "BallTreeImplementation, decimal_tol", zip(BALL_TREE_CLASSES, [6, 5])
+)
+def test_query_haversine(BallTreeImplementation, decimal_tol):
+    rng = check_random_state(0)
+    X = 2 * np.pi * rng.random_sample((40, 2))
+    bt = BallTreeImplementation(X, leaf_size=1, metric="haversine")
+    dist1, ind1 = bt.query(X, k=5)
+    dist2, ind2 = brute_force_neighbors(X, X, k=5, metric="haversine")
+
+    assert_array_almost_equal(dist1, dist2, decimal=decimal_tol)
+    assert_array_almost_equal(ind1, ind2)
+
+
+@pytest.mark.parametrize("BallTreeImplementation", BALL_TREE_CLASSES)
+def test_array_object_type(BallTreeImplementation):
+    """Check that we do not accept object dtype array."""
+    X = np.array([(1, 2, 3), (2, 5), (5, 5, 1, 2)], dtype=object)
+    with pytest.raises(ValueError, match="setting an array element with a sequence"):
+        BallTreeImplementation(X)
+
+
+@pytest.mark.parametrize("BallTreeImplementation", BALL_TREE_CLASSES)
+def test_bad_pyfunc_metric(BallTreeImplementation):
+    def wrong_returned_value(x, y):
+        return "1"
+
+    def one_arg_func(x):
+        return 1.0  # pragma: no cover
+
+    X = np.ones((5, 2))
+    msg = "Custom distance function must accept two vectors and return a float."
+    with pytest.raises(TypeError, match=msg):
+        BallTreeImplementation(X, metric=wrong_returned_value)
+
+    msg = "takes 1 positional argument but 2 were given"
+    with pytest.raises(TypeError, match=msg):
+        BallTreeImplementation(X, metric=one_arg_func)
+
+
+@pytest.mark.parametrize("metric", itertools.chain(METRICS, BOOLEAN_METRICS))
+def test_ball_tree_numerical_consistency(global_random_seed, metric):
+    # Results on float64 and float32 versions of a dataset must be
+    # numerically close.
+    X_64, X_32, Y_64, Y_32 = get_dataset_for_binary_tree(
+        random_seed=global_random_seed, features=50
+    )
+
+    metric_params = METRICS.get(metric, {})
+    bt_64 = BallTree64(X_64, leaf_size=1, metric=metric, **metric_params)
+    bt_32 = BallTree32(X_32, leaf_size=1, metric=metric, **metric_params)
+
+    # Test consistency with respect to the `query` method
+    k = 5
+    dist_64, ind_64 = bt_64.query(Y_64, k=k)
+    dist_32, ind_32 = bt_32.query(Y_32, k=k)
+    assert_allclose(dist_64, dist_32, rtol=1e-5)
+    assert_equal(ind_64, ind_32)
+    assert dist_64.dtype == np.float64
+    assert dist_32.dtype == np.float32
+
+    # Test consistency with respect to the `query_radius` method
+    r = 2.38
+    ind_64 = bt_64.query_radius(Y_64, r=r)
+    ind_32 = bt_32.query_radius(Y_32, r=r)
+    for _ind64, _ind32 in zip(ind_64, ind_32):
+        assert_equal(_ind64, _ind32)
+
+    # Test consistency with respect to the `query_radius` method
+    # with return distances being true
+    ind_64, dist_64 = bt_64.query_radius(Y_64, r=r, return_distance=True)
+    ind_32, dist_32 = bt_32.query_radius(Y_32, r=r, return_distance=True)
+    for _ind64, _ind32, _dist_64, _dist_32 in zip(ind_64, ind_32, dist_64, dist_32):
+        assert_equal(_ind64, _ind32)
+        assert_allclose(_dist_64, _dist_32, rtol=1e-5)
+        assert _dist_64.dtype == np.float64
+        assert _dist_32.dtype == np.float32
+
+
+@pytest.mark.parametrize("metric", itertools.chain(METRICS, BOOLEAN_METRICS))
+def test_kernel_density_numerical_consistency(global_random_seed, metric):
+    # Test consistency with respect to the `kernel_density` method
+    X_64, X_32, Y_64, Y_32 = get_dataset_for_binary_tree(random_seed=global_random_seed)
+
+    metric_params = METRICS.get(metric, {})
+    bt_64 = BallTree64(X_64, leaf_size=1, metric=metric, **metric_params)
+    bt_32 = BallTree32(X_32, leaf_size=1, metric=metric, **metric_params)
+
+    kernel = "gaussian"
+    h = 0.1
+    density64 = bt_64.kernel_density(Y_64, h=h, kernel=kernel, breadth_first=True)
+    density32 = bt_32.kernel_density(Y_32, h=h, kernel=kernel, breadth_first=True)
+    assert_allclose(density64, density32, rtol=1e-5)
+    assert density64.dtype == np.float64
+    assert density32.dtype == np.float32
+
+
+def test_two_point_correlation_numerical_consistency(global_random_seed):
+    # Test consistency with respect to the `two_point_correlation` method
+    X_64, X_32, Y_64, Y_32 = get_dataset_for_binary_tree(random_seed=global_random_seed)
+
+    bt_64 = BallTree64(X_64, leaf_size=10)
+    bt_32 = BallTree32(X_32, leaf_size=10)
+
+    r = np.linspace(0, 1, 10)
+
+    counts_64 = bt_64.two_point_correlation(Y_64, r=r, dualtree=True)
+    counts_32 = bt_32.two_point_correlation(Y_32, r=r, dualtree=True)
+    assert_allclose(counts_64, counts_32)
+
+
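+# Shared helper for the 32/64-bit consistency tests above (also imported by
+# test_kd_tree): the same random data exposed as float64 and float32 arrays.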
+def get_dataset_for_binary_tree(random_seed, features=3):
+    rng = np.random.RandomState(random_seed)
+    _X = rng.rand(100, features)
+    _Y = rng.rand(5, features)
+
+    X_64 = _X.astype(dtype=np.float64, copy=False)
+    Y_64 = _Y.astype(dtype=np.float64, copy=False)
+
+    X_32 = _X.astype(dtype=np.float32, copy=False)
+    Y_32 = _Y.astype(dtype=np.float32, copy=False)
+
+    return X_64, X_32, Y_64, Y_32
diff --git a/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/test_graph.py b/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/test_graph.py
new file mode 100644
index 0000000000000000000000000000000000000000..fb593485d17a8155f784ef881b3868338348e1a8
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/test_graph.py
@@ -0,0 +1,101 @@
+import numpy as np
+import pytest
+
+from sklearn.metrics import euclidean_distances
+from sklearn.neighbors import KNeighborsTransformer, RadiusNeighborsTransformer
+from sklearn.neighbors._base import _is_sorted_by_data
+from sklearn.utils._testing import assert_array_equal
+
+
+def test_transformer_result():
+    # Test the number of neighbors returned
+    n_neighbors = 5
+    n_samples_fit = 20
+    n_queries = 18
+    n_features = 10
+
+    rng = np.random.RandomState(42)
+    X = rng.randn(n_samples_fit, n_features)
+    X2 = rng.randn(n_queries, n_features)
+    radius = np.percentile(euclidean_distances(X), 10)
+
+    # with n_neighbors
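+    # (with mode="distance" each sample is also stored as its own neighbor,
+    # so every row holds n_neighbors + 1 values, hence `add_one` below;
+    # connectivity mode stores exactly n_neighbors entries per row)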
+    for mode in ["distance", "connectivity"]:
+        add_one = mode == "distance"
+        nnt = KNeighborsTransformer(n_neighbors=n_neighbors, mode=mode)
+        Xt = nnt.fit_transform(X)
+        assert Xt.shape == (n_samples_fit, n_samples_fit)
+        assert Xt.data.shape == (n_samples_fit * (n_neighbors + add_one),)
+        assert Xt.format == "csr"
+        assert _is_sorted_by_data(Xt)
+
+        X2t = nnt.transform(X2)
+        assert X2t.shape == (n_queries, n_samples_fit)
+        assert X2t.data.shape == (n_queries * (n_neighbors + add_one),)
+        assert X2t.format == "csr"
+        assert _is_sorted_by_data(X2t)
+
+    # with radius
+    for mode in ["distance", "connectivity"]:
+        add_one = mode == "distance"
+        nnt = RadiusNeighborsTransformer(radius=radius, mode=mode)
+        Xt = nnt.fit_transform(X)
+        assert Xt.shape == (n_samples_fit, n_samples_fit)
+        assert Xt.data.shape != (n_samples_fit * (n_neighbors + add_one),)
+        assert Xt.format == "csr"
+        assert _is_sorted_by_data(Xt)
+
+        X2t = nnt.transform(X2)
+        assert X2t.shape == (n_queries, n_samples_fit)
+        assert X2t.data.shape != (n_queries * (n_neighbors + add_one),)
+        assert X2t.format == "csr"
+        assert _is_sorted_by_data(X2t)
+
+
+def _has_explicit_diagonal(X):
+    """Return True if the diagonal is explicitly stored"""
+    X = X.tocoo()
+    explicit = X.row[X.row == X.col]
+    return len(explicit) == X.shape[0]
+
+
+def test_explicit_diagonal():
+    # Test that the diagonal is explicitly stored in the sparse graph
+    n_neighbors = 5
+    n_samples_fit, n_samples_transform, n_features = 20, 18, 10
+    rng = np.random.RandomState(42)
+    X = rng.randn(n_samples_fit, n_features)
+    X2 = rng.randn(n_samples_transform, n_features)
+
+    nnt = KNeighborsTransformer(n_neighbors=n_neighbors)
+    Xt = nnt.fit_transform(X)
+    assert _has_explicit_diagonal(Xt)
+    assert np.all(Xt.data.reshape(n_samples_fit, n_neighbors + 1)[:, 0] == 0)
+
+    Xt = nnt.transform(X)
+    assert _has_explicit_diagonal(Xt)
+    assert np.all(Xt.data.reshape(n_samples_fit, n_neighbors + 1)[:, 0] == 0)
+
+    # Using transform on new data should not always have zero diagonal
+    X2t = nnt.transform(X2)
+    assert not _has_explicit_diagonal(X2t)
+
+
+@pytest.mark.parametrize("Klass", [KNeighborsTransformer, RadiusNeighborsTransformer])
+def test_graph_feature_names_out(Klass):
+    """Check `get_feature_names_out` for transformers defined in `_graph.py`."""
+
+    n_samples_fit = 20
+    n_features = 10
+    rng = np.random.RandomState(42)
+    X = rng.randn(n_samples_fit, n_features)
+
+    est = Klass().fit(X)
+    names_out = est.get_feature_names_out()
+
+    class_name_lower = Klass.__name__.lower()
+    expected_names_out = np.array(
+        [f"{class_name_lower}{i}" for i in range(est.n_samples_fit_)],
+        dtype=object,
+    )
+    assert_array_equal(names_out, expected_names_out)
diff --git a/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/test_kd_tree.py b/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/test_kd_tree.py
new file mode 100644
index 0000000000000000000000000000000000000000..749601baaf66fdbf96e8396ca1df45c5bdab4a1e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/test_kd_tree.py
@@ -0,0 +1,100 @@
+import numpy as np
+import pytest
+from numpy.testing import assert_allclose, assert_equal
+
+from sklearn.neighbors._kd_tree import KDTree, KDTree32, KDTree64
+from sklearn.neighbors.tests.test_ball_tree import get_dataset_for_binary_tree
+from sklearn.utils.parallel import Parallel, delayed
+
+DIMENSION = 3
+
+METRICS = {"euclidean": {}, "manhattan": {}, "chebyshev": {}, "minkowski": dict(p=3)}
+
+KD_TREE_CLASSES = [
+    KDTree64,
+    KDTree32,
+]
+
+
+def test_KDTree_is_KDTree64_subclass():
+    assert issubclass(KDTree, KDTree64)
+
+
+@pytest.mark.parametrize("BinarySearchTree", KD_TREE_CLASSES)
+def test_array_object_type(BinarySearchTree):
+    """Check that we do not accept object dtype array."""
+    X = np.array([(1, 2, 3), (2, 5), (5, 5, 1, 2)], dtype=object)
+    with pytest.raises(ValueError, match="setting an array element with a sequence"):
+        BinarySearchTree(X)
+
+
+@pytest.mark.parametrize("BinarySearchTree", KD_TREE_CLASSES)
+def test_kdtree_picklable_with_joblib(BinarySearchTree):
+    """Make sure that KDTree queries work when joblib memmaps.
+
+    Non-regression test for #21685 and #21228."""
+    rng = np.random.RandomState(0)
+    X = rng.random_sample((10, 3))
+    tree = BinarySearchTree(X, leaf_size=2)
+
+    # Call Parallel with max_nbytes=1 to trigger read-only memory mapping,
+    # which used to raise "ValueError: buffer source array is read-only" in a
+    # previous version of the Cython code.
+    Parallel(n_jobs=2, max_nbytes=1)(delayed(tree.query)(data) for data in 2 * [X])
+
+
+@pytest.mark.parametrize("metric", METRICS)
+def test_kd_tree_numerical_consistency(global_random_seed, metric):
+    # Results on float64 and float32 versions of a dataset must be
+    # numerically close.
+    X_64, X_32, Y_64, Y_32 = get_dataset_for_binary_tree(
+        random_seed=global_random_seed, features=50
+    )
+
+    metric_params = METRICS.get(metric, {})
+    kd_64 = KDTree64(X_64, leaf_size=2, metric=metric, **metric_params)
+    kd_32 = KDTree32(X_32, leaf_size=2, metric=metric, **metric_params)
+
+    # Test consistency with respect to the `query` method
+    k = 4
+    dist_64, ind_64 = kd_64.query(Y_64, k=k)
+    dist_32, ind_32 = kd_32.query(Y_32, k=k)
+    assert_allclose(dist_64, dist_32, rtol=1e-5)
+    assert_equal(ind_64, ind_32)
+    assert dist_64.dtype == np.float64
+    assert dist_32.dtype == np.float32
+
+    # Test consistency with respect to the `query_radius` method
+    r = 2.38
+    ind_64 = kd_64.query_radius(Y_64, r=r)
+    ind_32 = kd_32.query_radius(Y_32, r=r)
+    for _ind64, _ind32 in zip(ind_64, ind_32):
+        assert_equal(_ind64, _ind32)
+
+    # Test consistency with respect to the `query_radius` method
+    # with return distances being true
+    ind_64, dist_64 = kd_64.query_radius(Y_64, r=r, return_distance=True)
+    ind_32, dist_32 = kd_32.query_radius(Y_32, r=r, return_distance=True)
+    for _ind64, _ind32, _dist_64, _dist_32 in zip(ind_64, ind_32, dist_64, dist_32):
+        assert_equal(_ind64, _ind32)
+        assert_allclose(_dist_64, _dist_32, rtol=1e-5)
+        assert _dist_64.dtype == np.float64
+        assert _dist_32.dtype == np.float32
+
+
+@pytest.mark.parametrize("metric", METRICS)
+def test_kernel_density_numerical_consistency(global_random_seed, metric):
+    # Test consistency with respect to the `kernel_density` method
+    X_64, X_32, Y_64, Y_32 = get_dataset_for_binary_tree(random_seed=global_random_seed)
+
+    metric_params = METRICS.get(metric, {})
+    kd_64 = KDTree64(X_64, leaf_size=2, metric=metric, **metric_params)
+    kd_32 = KDTree32(X_32, leaf_size=2, metric=metric, **metric_params)
+
+    kernel = "gaussian"
+    h = 0.1
+    density64 = kd_64.kernel_density(Y_64, h=h, kernel=kernel, breadth_first=True)
+    density32 = kd_32.kernel_density(Y_32, h=h, kernel=kernel, breadth_first=True)
+    assert_allclose(density64, density32, rtol=1e-5)
+    assert density64.dtype == np.float64
+    assert density32.dtype == np.float32
diff --git a/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/test_kde.py b/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/test_kde.py
new file mode 100644
index 0000000000000000000000000000000000000000..b6bf09d01b672b7ad5a3abf3506443b0ac620915
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/test_kde.py
@@ -0,0 +1,252 @@
+import joblib
+import numpy as np
+import pytest
+
+from sklearn.datasets import make_blobs
+from sklearn.exceptions import NotFittedError
+from sklearn.model_selection import GridSearchCV
+from sklearn.neighbors import KDTree, KernelDensity, NearestNeighbors
+from sklearn.neighbors._ball_tree import kernel_norm
+from sklearn.pipeline import make_pipeline
+from sklearn.preprocessing import StandardScaler
+from sklearn.utils._testing import assert_allclose
+
+
+# XXX Duplicated in test_neighbors_tree, test_kde
+def compute_kernel_slow(Y, X, kernel, h):
+    if h == "scott":
+        h = X.shape[0] ** (-1 / (X.shape[1] + 4))
+    elif h == "silverman":
+        h = (X.shape[0] * (X.shape[1] + 2) / 4) ** (-1 / (X.shape[1] + 4))
+
+    d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
+    norm = kernel_norm(h, X.shape[1], kernel) / X.shape[0]
+
+    if kernel == "gaussian":
+        return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
+    elif kernel == "tophat":
+        return norm * (d < h).sum(-1)
+    elif kernel == "epanechnikov":
+        return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
+    elif kernel == "exponential":
+        return norm * (np.exp(-d / h)).sum(-1)
+    elif kernel == "linear":
+        return norm * ((1 - d / h) * (d < h)).sum(-1)
+    elif kernel == "cosine":
+        return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
+    else:
+        raise ValueError("kernel not recognized")
+
+
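+# check_results compares KernelDensity (tree-accelerated, returning log
+# densities) against the brute-force reference above, both per sample and for
+# the total log-likelihood via score().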
+def check_results(kernel, bandwidth, atol, rtol, X, Y, dens_true):
+    kde = KernelDensity(kernel=kernel, bandwidth=bandwidth, atol=atol, rtol=rtol)
+    log_dens = kde.fit(X).score_samples(Y)
+    assert_allclose(np.exp(log_dens), dens_true, atol=atol, rtol=max(1e-7, rtol))
+    assert_allclose(
+        np.exp(kde.score(Y)), np.prod(dens_true), atol=atol, rtol=max(1e-7, rtol)
+    )
+
+
+@pytest.mark.parametrize(
+    "kernel", ["gaussian", "tophat", "epanechnikov", "exponential", "linear", "cosine"]
+)
+@pytest.mark.parametrize("bandwidth", [0.01, 0.1, 1, "scott", "silverman"])
+def test_kernel_density(kernel, bandwidth):
+    n_samples, n_features = (100, 3)
+
+    rng = np.random.RandomState(0)
+    X = rng.randn(n_samples, n_features)
+    Y = rng.randn(n_samples, n_features)
+
+    dens_true = compute_kernel_slow(Y, X, kernel, bandwidth)
+
+    for rtol in [0, 1e-5]:
+        for atol in [1e-6, 1e-2]:
+            for breadth_first in (True, False):
+                check_results(kernel, bandwidth, atol, rtol, X, Y, dens_true)
+
+
+def test_kernel_density_sampling(n_samples=100, n_features=3):
+    rng = np.random.RandomState(0)
+    X = rng.randn(n_samples, n_features)
+
+    bandwidth = 0.2
+
+    for kernel in ["gaussian", "tophat"]:
+        # draw a tophat sample
+        kde = KernelDensity(bandwidth=bandwidth, kernel=kernel).fit(X)
+        samp = kde.sample(100)
+        assert X.shape == samp.shape
+
+        # check that samples are in the right range
+        nbrs = NearestNeighbors(n_neighbors=1).fit(X)
+        dist, ind = nbrs.kneighbors(samp, return_distance=True)
+
+        if kernel == "tophat":
+            assert np.all(dist < bandwidth)
+        elif kernel == "gaussian":
+            # 5 standard deviations is safe for 100 samples, but there's a
+            # very small chance this test could fail.
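+            # (A Gaussian-kernel sample is a resampled training point plus
+            # isotropic noise with std = bandwidth, so its nearest training
+            # point lies beyond 5 * bandwidth only with probability on the
+            # order of 1e-5, i.e. roughly 1e-3 over 100 draws.)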
+            assert np.all(dist < 5 * bandwidth)
+
+    # check unsupported kernels
+    for kernel in ["epanechnikov", "exponential", "linear", "cosine"]:
+        kde = KernelDensity(bandwidth=bandwidth, kernel=kernel).fit(X)
+        with pytest.raises(NotImplementedError):
+            kde.sample(100)
+
+    # non-regression test: used to return a scalar
+    X = rng.randn(4, 1)
+    kde = KernelDensity(kernel="gaussian").fit(X)
+    assert kde.sample().shape == (1, 1)
+
+
+@pytest.mark.parametrize("algorithm", ["auto", "ball_tree", "kd_tree"])
+@pytest.mark.parametrize(
+    "metric", ["euclidean", "minkowski", "manhattan", "chebyshev", "haversine"]
+)
+def test_kde_algorithm_metric_choice(algorithm, metric):
+    # Smoke test for various metrics and algorithms
+    rng = np.random.RandomState(0)
+    X = rng.randn(10, 2)  # 2 features required for haversine dist.
+    Y = rng.randn(10, 2)
+
+    kde = KernelDensity(algorithm=algorithm, metric=metric)
+
+    if algorithm == "kd_tree" and metric not in KDTree.valid_metrics:
+        with pytest.raises(ValueError, match="invalid metric"):
+            kde.fit(X)
+    else:
+        kde.fit(X)
+        y_dens = kde.score_samples(Y)
+        assert y_dens.shape == Y.shape[:1]
+
+
+def test_kde_score(n_samples=100, n_features=3):
+    pass
+    # FIXME
+    # rng = np.random.RandomState(0)
+    # X = rng.random_sample((n_samples, n_features))
+    # Y = rng.random_sample((n_samples, n_features))
+
+
+def test_kde_sample_weights_error():
+    kde = KernelDensity()
+    with pytest.raises(ValueError):
+        kde.fit(np.random.random((200, 10)), sample_weight=np.random.random((200, 10)))
+    with pytest.raises(ValueError):
+        kde.fit(np.random.random((200, 10)), sample_weight=-np.random.random(200))
+
+
+def test_kde_pipeline_gridsearch():
+    # test that kde plays nice in pipelines and grid-searches
+    X, _ = make_blobs(cluster_std=0.1, random_state=1, centers=[[0, 1], [1, 0], [0, 0]])
+    pipe1 = make_pipeline(
+        StandardScaler(with_mean=False, with_std=False),
+        KernelDensity(kernel="gaussian"),
+    )
+    params = dict(kerneldensity__bandwidth=[0.001, 0.01, 0.1, 1, 10])
+    search = GridSearchCV(pipe1, param_grid=params)
+    search.fit(X)
+    assert search.best_params_["kerneldensity__bandwidth"] == 0.1
+
+
+def test_kde_sample_weights():
+    n_samples = 400
+    size_test = 20
+    weights_neutral = np.full(n_samples, 3.0)
+    for d in [1, 2, 10]:
+        rng = np.random.RandomState(0)
+        X = rng.rand(n_samples, d)
+        weights = 1 + (10 * X.sum(axis=1)).astype(np.int8)
+        X_repetitions = np.repeat(X, weights, axis=0)
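+        # An integer weight w on a sample should be equivalent to repeating
+        # that sample w times; X_repetitions materializes this for the
+        # equivalence check below.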
+        n_samples_test = size_test // d
+        test_points = rng.rand(n_samples_test, d)
+        for algorithm in ["auto", "ball_tree", "kd_tree"]:
+            for metric in ["euclidean", "minkowski", "manhattan", "chebyshev"]:
+                if algorithm != "kd_tree" or metric in KDTree.valid_metrics:
+                    kde = KernelDensity(algorithm=algorithm, metric=metric)
+
+                    # Test that adding a constant sample weight has no effect
+                    kde.fit(X, sample_weight=weights_neutral)
+                    scores_const_weight = kde.score_samples(test_points)
+                    sample_const_weight = kde.sample(random_state=1234)
+                    kde.fit(X)
+                    scores_no_weight = kde.score_samples(test_points)
+                    sample_no_weight = kde.sample(random_state=1234)
+                    assert_allclose(scores_const_weight, scores_no_weight)
+                    assert_allclose(sample_const_weight, sample_no_weight)
+
+                    # Test equivalence between sampling and (integer) weights
+                    kde.fit(X, sample_weight=weights)
+                    scores_weight = kde.score_samples(test_points)
+                    sample_weight = kde.sample(random_state=1234)
+                    kde.fit(X_repetitions)
+                    scores_ref_sampling = kde.score_samples(test_points)
+                    sample_ref_sampling = kde.sample(random_state=1234)
+                    assert_allclose(scores_weight, scores_ref_sampling)
+                    assert_allclose(sample_weight, sample_ref_sampling)
+
+                    # Test that sample weights has a non-trivial effect
+                    diff = np.max(np.abs(scores_no_weight - scores_weight))
+                    assert diff > 0.001
+
+                    # Test invariance with respect to arbitrary scaling
+                    scale_factor = rng.rand()
+                    kde.fit(X, sample_weight=(scale_factor * weights))
+                    scores_scaled_weight = kde.score_samples(test_points)
+                    assert_allclose(scores_scaled_weight, scores_weight)
+
+
+@pytest.mark.parametrize("sample_weight", [None, [0.1, 0.2, 0.3]])
+def test_pickling(tmpdir, sample_weight):
+    # Make sure that predictions are the same before and after pickling. Used
+    # to be a bug because sample_weights wasn't pickled and the resulting tree
+    # would miss some info.
+
+    kde = KernelDensity()
+    data = np.reshape([1.0, 2.0, 3.0], (-1, 1))
+    kde.fit(data, sample_weight=sample_weight)
+
+    X = np.reshape([1.1, 2.1], (-1, 1))
+    scores = kde.score_samples(X)
+
+    file_path = str(tmpdir.join("dump.pkl"))
+    joblib.dump(kde, file_path)
+    kde = joblib.load(file_path)
+    scores_pickled = kde.score_samples(X)
+
+    assert_allclose(scores, scores_pickled)
+
+
+@pytest.mark.parametrize("method", ["score_samples", "sample"])
+def test_check_is_fitted(method):
+    # Check that predict raises an exception in an unfitted estimator.
+    # Unfitted estimators should raise a NotFittedError.
+    rng = np.random.RandomState(0)
+    X = rng.randn(10, 2)
+    kde = KernelDensity()
+
+    with pytest.raises(NotFittedError):
+        getattr(kde, method)(X)
+
+
+@pytest.mark.parametrize("bandwidth", ["scott", "silverman", 0.1])
+def test_bandwidth(bandwidth):
+    n_samples, n_features = (100, 3)
+    rng = np.random.RandomState(0)
+    X = rng.randn(n_samples, n_features)
+    kde = KernelDensity(bandwidth=bandwidth).fit(X)
+    samp = kde.sample(100)
+    kde_sc = kde.score_samples(X)
+    assert X.shape == samp.shape
+    assert kde_sc.shape == (n_samples,)
+
+    # Test that the attribute self.bandwidth_ has the expected value
+    if bandwidth == "scott":
+        h = X.shape[0] ** (-1 / (X.shape[1] + 4))
+    elif bandwidth == "silverman":
+        h = (X.shape[0] * (X.shape[1] + 2) / 4) ** (-1 / (X.shape[1] + 4))
+    else:
+        h = bandwidth
+    assert kde.bandwidth_ == pytest.approx(h)
diff --git a/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/test_lof.py b/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/test_lof.py
new file mode 100644
index 0000000000000000000000000000000000000000..3f5c1e161b7e88012b3d3334e0dd621797416248
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/test_lof.py
@@ -0,0 +1,361 @@
+# Authors: Nicolas Goix 
+#          Alexandre Gramfort 
+# License: BSD 3 clause
+
+import re
+from math import sqrt
+
+import numpy as np
+import pytest
+
+from sklearn import metrics, neighbors
+from sklearn.datasets import load_iris
+from sklearn.metrics import roc_auc_score
+from sklearn.utils import check_random_state
+from sklearn.utils._testing import assert_allclose, assert_array_equal
+from sklearn.utils.estimator_checks import (
+    check_outlier_corruption,
+    parametrize_with_checks,
+)
+from sklearn.utils.fixes import CSR_CONTAINERS
+
+# load the iris dataset
+# and randomly permute it
+rng = check_random_state(0)
+iris = load_iris()
+perm = rng.permutation(iris.target.size)
+iris.data = iris.data[perm]
+iris.target = iris.target[perm]
+
+
+def test_lof(global_dtype):
+    # Toy sample (the last two samples are outliers):
+    X = np.asarray(
+        [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [5, 3], [-4, 2]],
+        dtype=global_dtype,
+    )
+
+    # Test LocalOutlierFactor:
+    clf = neighbors.LocalOutlierFactor(n_neighbors=5)
+    score = clf.fit(X).negative_outlier_factor_
+    assert_array_equal(clf._fit_X, X)
+
+    # Assert largest outlier score is smaller than smallest inlier score:
+    assert np.min(score[:-2]) > np.max(score[-2:])
+
+    # Assert predict() works:
+    clf = neighbors.LocalOutlierFactor(contamination=0.25, n_neighbors=5).fit(X)
+    expected_predictions = 6 * [1] + 2 * [-1]
+    assert_array_equal(clf._predict(), expected_predictions)
+    assert_array_equal(clf.fit_predict(X), expected_predictions)
+
+
+def test_lof_performance(global_dtype):
+    # Generate train/test data
+    rng = check_random_state(2)
+    X = 0.3 * rng.randn(120, 2).astype(global_dtype, copy=False)
+    X_train = X[:100]
+
+    # Generate some abnormal novel observations
+    X_outliers = rng.uniform(low=-4, high=4, size=(20, 2)).astype(
+        global_dtype, copy=False
+    )
+    X_test = np.r_[X[100:], X_outliers]
+    y_test = np.array([0] * 20 + [1] * 20)
+
+    # fit the model for novelty detection
+    clf = neighbors.LocalOutlierFactor(novelty=True).fit(X_train)
+
+    # predict scores (the lower, the more normal)
+    y_pred = -clf.decision_function(X_test)
+
+    # check that roc_auc is good
+    assert roc_auc_score(y_test, y_pred) > 0.99
+
+
+def test_lof_values(global_dtype):
+    # toy samples:
+    X_train = np.asarray([[1, 1], [1, 2], [2, 1]], dtype=global_dtype)
+    clf1 = neighbors.LocalOutlierFactor(
+        n_neighbors=2, contamination=0.1, novelty=True
+    ).fit(X_train)
+    clf2 = neighbors.LocalOutlierFactor(n_neighbors=2, novelty=True).fit(X_train)
+    s_0 = 2.0 * sqrt(2.0) / (1.0 + sqrt(2.0))
+    s_1 = (1.0 + sqrt(2)) * (1.0 / (4.0 * sqrt(2.0)) + 1.0 / (2.0 + 2.0 * sqrt(2)))
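+    # With k=2 on the triangle [[1, 1], [1, 2], [2, 1]] (pairwise distances 1,
+    # 1 and sqrt(2)), the local reachability densities are 1/sqrt(2) for
+    # [1, 1] and 2/(1 + sqrt(2)) for the other two points; s_0 and s_1 are the
+    # resulting LOF values, i.e. the mean ratio of the neighbors' densities to
+    # the point's own density.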
+    # check predict()
+    assert_allclose(-clf1.negative_outlier_factor_, [s_0, s_1, s_1])
+    assert_allclose(-clf2.negative_outlier_factor_, [s_0, s_1, s_1])
+    # check predict(one sample not in train)
+    assert_allclose(-clf1.score_samples([[2.0, 2.0]]), [s_0])
+    assert_allclose(-clf2.score_samples([[2.0, 2.0]]), [s_0])
+    # check predict(one sample already in train)
+    assert_allclose(-clf1.score_samples([[1.0, 1.0]]), [s_1])
+    assert_allclose(-clf2.score_samples([[1.0, 1.0]]), [s_1])
+
+
+def test_lof_precomputed(global_dtype, random_state=42):
+    """Tests LOF with a distance matrix."""
+    # Note: smaller samples may result in spurious test success
+    rng = np.random.RandomState(random_state)
+    X = rng.random_sample((10, 4)).astype(global_dtype, copy=False)
+    Y = rng.random_sample((3, 4)).astype(global_dtype, copy=False)
+    DXX = metrics.pairwise_distances(X, metric="euclidean")
+    DYX = metrics.pairwise_distances(Y, X, metric="euclidean")
+    # As a feature matrix (n_samples by n_features)
+    lof_X = neighbors.LocalOutlierFactor(n_neighbors=3, novelty=True)
+    lof_X.fit(X)
+    pred_X_X = lof_X._predict()
+    pred_X_Y = lof_X.predict(Y)
+
+    # As a dense distance matrix (n_samples by n_samples)
+    lof_D = neighbors.LocalOutlierFactor(
+        n_neighbors=3, algorithm="brute", metric="precomputed", novelty=True
+    )
+    lof_D.fit(DXX)
+    pred_D_X = lof_D._predict()
+    pred_D_Y = lof_D.predict(DYX)
+
+    assert_allclose(pred_X_X, pred_D_X)
+    assert_allclose(pred_X_Y, pred_D_Y)
+
+
+def test_n_neighbors_attribute():
+    X = iris.data
+    clf = neighbors.LocalOutlierFactor(n_neighbors=500).fit(X)
+    assert clf.n_neighbors_ == X.shape[0] - 1
+
+    clf = neighbors.LocalOutlierFactor(n_neighbors=500)
+    msg = "n_neighbors will be set to (n_samples - 1)"
+    with pytest.warns(UserWarning, match=re.escape(msg)):
+        clf.fit(X)
+    assert clf.n_neighbors_ == X.shape[0] - 1
+
+
+def test_score_samples(global_dtype):
+    X_train = np.asarray([[1, 1], [1, 2], [2, 1]], dtype=global_dtype)
+    X_test = np.asarray([[2.0, 2.0]], dtype=global_dtype)
+    clf1 = neighbors.LocalOutlierFactor(
+        n_neighbors=2, contamination=0.1, novelty=True
+    ).fit(X_train)
+    clf2 = neighbors.LocalOutlierFactor(n_neighbors=2, novelty=True).fit(X_train)
+
+    clf1_scores = clf1.score_samples(X_test)
+    clf1_decisions = clf1.decision_function(X_test)
+
+    clf2_scores = clf2.score_samples(X_test)
+    clf2_decisions = clf2.decision_function(X_test)
+
+    assert_allclose(
+        clf1_scores,
+        clf1_decisions + clf1.offset_,
+    )
+    assert_allclose(
+        clf2_scores,
+        clf2_decisions + clf2.offset_,
+    )
+    assert_allclose(clf1_scores, clf2_scores)
+
+
+def test_novelty_errors():
+    X = iris.data
+
+    # check errors for novelty=False
+    clf = neighbors.LocalOutlierFactor()
+    clf.fit(X)
+    # predict, decision_function and score_samples raise AttributeError
+    for method in ["predict", "decision_function", "score_samples"]:
+        outer_msg = f"'LocalOutlierFactor' has no attribute '{method}'"
+        inner_msg = "{} is not available when novelty=False".format(method)
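+        # The guarded method raises an AttributeError that chains (as
+        # __cause__) the novelty-specific AttributeError, so both the outer
+        # and the inner message are checked.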
+        with pytest.raises(AttributeError, match=outer_msg) as exec_info:
+            getattr(clf, method)
+
+        assert isinstance(exec_info.value.__cause__, AttributeError)
+        assert inner_msg in str(exec_info.value.__cause__)
+
+    # check errors for novelty=True
+    clf = neighbors.LocalOutlierFactor(novelty=True)
+
+    outer_msg = "'LocalOutlierFactor' has no attribute 'fit_predict'"
+    inner_msg = "fit_predict is not available when novelty=True"
+    with pytest.raises(AttributeError, match=outer_msg) as exec_info:
+        getattr(clf, "fit_predict")
+
+    assert isinstance(exec_info.value.__cause__, AttributeError)
+    assert inner_msg in str(exec_info.value.__cause__)
+
+
+def test_novelty_training_scores(global_dtype):
+    # check that the scores of the training samples are still accessible
+    # when novelty=True through the negative_outlier_factor_ attribute
+    X = iris.data.astype(global_dtype)
+
+    # fit with novelty=False
+    clf_1 = neighbors.LocalOutlierFactor()
+    clf_1.fit(X)
+    scores_1 = clf_1.negative_outlier_factor_
+
+    # fit with novelty=True
+    clf_2 = neighbors.LocalOutlierFactor(novelty=True)
+    clf_2.fit(X)
+    scores_2 = clf_2.negative_outlier_factor_
+
+    assert_allclose(scores_1, scores_2)
+
+
+def test_hasattr_prediction():
+    # check availability of prediction methods depending on novelty value.
+    X = [[1, 1], [1, 2], [2, 1]]
+
+    # when novelty=True
+    clf = neighbors.LocalOutlierFactor(novelty=True)
+    clf.fit(X)
+    assert hasattr(clf, "predict")
+    assert hasattr(clf, "decision_function")
+    assert hasattr(clf, "score_samples")
+    assert not hasattr(clf, "fit_predict")
+
+    # when novelty=False
+    clf = neighbors.LocalOutlierFactor(novelty=False)
+    clf.fit(X)
+    assert hasattr(clf, "fit_predict")
+    assert not hasattr(clf, "predict")
+    assert not hasattr(clf, "decision_function")
+    assert not hasattr(clf, "score_samples")
+
+
+@parametrize_with_checks([neighbors.LocalOutlierFactor(novelty=True)])
+def test_novelty_true_common_tests(estimator, check):
+    # the common tests are run for the default LOF (novelty=False).
+    # here we run these common tests for LOF when novelty=True
+    check(estimator)
+
+
+@pytest.mark.parametrize("expected_outliers", [30, 53])
+def test_predicted_outlier_number(expected_outliers):
+    # the number of predicted outliers should be equal to the number of
+    # expected outliers unless there are ties in the abnormality scores.
+    X = iris.data
+    n_samples = X.shape[0]
+    contamination = float(expected_outliers) / n_samples
+
+    clf = neighbors.LocalOutlierFactor(contamination=contamination)
+    y_pred = clf.fit_predict(X)
+
+    num_outliers = np.sum(y_pred != 1)
+    if num_outliers != expected_outliers:
+        y_dec = clf.negative_outlier_factor_
+        check_outlier_corruption(num_outliers, expected_outliers, y_dec)
+
+
+@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
+def test_sparse(csr_container):
+    # LocalOutlierFactor must support CSR inputs
+    # TODO: compare results on dense and sparse data as proposed in:
+    # https://github.com/scikit-learn/scikit-learn/pull/23585#discussion_r968388186
+    X = csr_container(iris.data)
+
+    lof = neighbors.LocalOutlierFactor(novelty=True)
+    lof.fit(X)
+    lof.predict(X)
+    lof.score_samples(X)
+    lof.decision_function(X)
+
+    lof = neighbors.LocalOutlierFactor(novelty=False)
+    lof.fit_predict(X)
+
+
+def test_lof_error_n_neighbors_too_large():
+    """Check that we raise a proper error message when n_neighbors == n_samples.
+
+    Non-regression test for:
+    https://github.com/scikit-learn/scikit-learn/issues/17207
+    """
+    X = np.ones((7, 7))
+
+    msg = (
+        "Expected n_neighbors < n_samples_fit, but n_neighbors = 1, "
+        "n_samples_fit = 1, n_samples = 1"
+    )
+    with pytest.raises(ValueError, match=msg):
+        lof = neighbors.LocalOutlierFactor(n_neighbors=1).fit(X[:1])
+
+    lof = neighbors.LocalOutlierFactor(n_neighbors=2).fit(X[:2])
+    assert lof.n_samples_fit_ == 2
+
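+    # With X=None, kneighbors queries the training set itself and excludes
+    # each point from its own neighbors, so n_neighbors must stay strictly
+    # below n_samples_fit.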
+    msg = (
+        "Expected n_neighbors < n_samples_fit, but n_neighbors = 2, "
+        "n_samples_fit = 2, n_samples = 2"
+    )
+    with pytest.raises(ValueError, match=msg):
+        lof.kneighbors(None, n_neighbors=2)
+
+    distances, indices = lof.kneighbors(None, n_neighbors=1)
+    assert distances.shape == (2, 1)
+    assert indices.shape == (2, 1)
+
+    msg = (
+        "Expected n_neighbors <= n_samples_fit, but n_neighbors = 3, "
+        "n_samples_fit = 2, n_samples = 7"
+    )
+    with pytest.raises(ValueError, match=msg):
+        lof.kneighbors(X, n_neighbors=3)
+
+    (
+        distances,
+        indices,
+    ) = lof.kneighbors(X, n_neighbors=2)
+    assert distances.shape == (7, 2)
+    assert indices.shape == (7, 2)
+
+
+@pytest.mark.parametrize("algorithm", ["auto", "ball_tree", "kd_tree", "brute"])
+@pytest.mark.parametrize("novelty", [True, False])
+@pytest.mark.parametrize("contamination", [0.5, "auto"])
+def test_lof_input_dtype_preservation(global_dtype, algorithm, contamination, novelty):
+    """Check that the fitted attributes are stored using the data type of X."""
+    X = iris.data.astype(global_dtype, copy=False)
+
+    iso = neighbors.LocalOutlierFactor(
+        n_neighbors=5, algorithm=algorithm, contamination=contamination, novelty=novelty
+    )
+    iso.fit(X)
+
+    assert iso.negative_outlier_factor_.dtype == global_dtype
+
+    for method in ("score_samples", "decision_function"):
+        if hasattr(iso, method):
+            y_pred = getattr(iso, method)(X)
+            assert y_pred.dtype == global_dtype
+
+
+@pytest.mark.parametrize("algorithm", ["auto", "ball_tree", "kd_tree", "brute"])
+@pytest.mark.parametrize("novelty", [True, False])
+@pytest.mark.parametrize("contamination", [0.5, "auto"])
+def test_lof_dtype_equivalence(algorithm, novelty, contamination):
+    """Check the equivalence of the results with 32 and 64 bits input."""
+
+    inliers = iris.data[:50]  # setosa iris are really distinct from others
+    outliers = iris.data[-5:]  # virginica will be considered as outliers
+    # lower the precision of the input data to check that we have an equivalence when
+    # making the computation in 32 and 64 bits.
+    X = np.concatenate([inliers, outliers], axis=0).astype(np.float32)
+
+    lof_32 = neighbors.LocalOutlierFactor(
+        algorithm=algorithm, novelty=novelty, contamination=contamination
+    )
+    X_32 = X.astype(np.float32, copy=True)
+    lof_32.fit(X_32)
+
+    lof_64 = neighbors.LocalOutlierFactor(
+        algorithm=algorithm, novelty=novelty, contamination=contamination
+    )
+    X_64 = X.astype(np.float64, copy=True)
+    lof_64.fit(X_64)
+
+    assert_allclose(lof_32.negative_outlier_factor_, lof_64.negative_outlier_factor_)
+
+    for method in ("score_samples", "decision_function", "predict", "fit_predict"):
+        if hasattr(lof_32, method):
+            y_pred_32 = getattr(lof_32, method)(X_32)
+            y_pred_64 = getattr(lof_64, method)(X_64)
+            assert_allclose(y_pred_32, y_pred_64, atol=0.0002)
diff --git a/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/test_nca.py b/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/test_nca.py
new file mode 100644
index 0000000000000000000000000000000000000000..7dedd97ff423b802546e8ee457bc403863ec4d9d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/test_nca.py
@@ -0,0 +1,548 @@
+"""
+Testing for Neighborhood Component Analysis module (sklearn.neighbors.nca)
+"""
+
+# Authors: William de Vazelhes 
+#          John Chiotellis 
+# License: BSD 3 clause
+
+import re
+
+import numpy as np
+import pytest
+from numpy.testing import assert_array_almost_equal, assert_array_equal
+from scipy.optimize import check_grad
+
+from sklearn import clone
+from sklearn.datasets import load_iris, make_blobs, make_classification
+from sklearn.exceptions import ConvergenceWarning
+from sklearn.metrics import pairwise_distances
+from sklearn.neighbors import NeighborhoodComponentsAnalysis
+from sklearn.preprocessing import LabelEncoder
+from sklearn.utils import check_random_state
+
+rng = check_random_state(0)
+# load and shuffle iris dataset
+iris = load_iris()
+perm = rng.permutation(iris.target.size)
+iris_data = iris.data[perm]
+iris_target = iris.target[perm]
+EPS = np.finfo(float).eps
+
+
+def test_simple_example():
+    """Test on a simple example.
+
+    Puts four points in the input space where the opposite labels points are
+    next to each other. After transform the samples from the same class
+    should be next to each other.
+
+    """
+    X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]])
+    y = np.array([1, 0, 1, 0])
+    nca = NeighborhoodComponentsAnalysis(
+        n_components=2, init="identity", random_state=42
+    )
+    nca.fit(X, y)
+    X_t = nca.transform(X)
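+    # argsort()[:, 1] is each sample's nearest other sample; after the learned
+    # transform every point should be closest to the point sharing its label
+    # (0 <-> 2 and 1 <-> 3).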
+    assert_array_equal(pairwise_distances(X_t).argsort()[:, 1], np.array([2, 3, 0, 1]))
+
+
+def test_toy_example_collapse_points():
+    """Test on a toy example of three points that should collapse
+
+    We build a simple example: two points from the same class and a point from
+    a different class in the middle of them. On this simple example, the new
+    (transformed) points should all collapse into one single point. Indeed, the
+    objective is 2/(1 + exp(d/2)), with d the euclidean distance between the
+    two samples from the same class. This is maximized for d=0 (because d>=0),
+    with an objective equal to 1 (loss=-1.).
+
+    """
+    rng = np.random.RandomState(42)
+    input_dim = 5
+    two_points = rng.randn(2, input_dim)
+    X = np.vstack([two_points, two_points.mean(axis=0)[np.newaxis, :]])
+    y = [0, 0, 1]
+
+    class LossStorer:
+        def __init__(self, X, y):
+            self.loss = np.inf  # initialize the loss to very high
+            # Initialize a fake NCA and variables needed to compute the loss:
+            self.fake_nca = NeighborhoodComponentsAnalysis()
+            self.fake_nca.n_iter_ = np.inf
+            self.X, y = self.fake_nca._validate_data(X, y, ensure_min_samples=2)
+            y = LabelEncoder().fit_transform(y)
+            self.same_class_mask = y[:, np.newaxis] == y[np.newaxis, :]
+
+        def callback(self, transformation, n_iter):
+            """Stores the last value of the loss function"""
+            self.loss, _ = self.fake_nca._loss_grad_lbfgs(
+                transformation, self.X, self.same_class_mask, -1.0
+            )
+
+    loss_storer = LossStorer(X, y)
+    nca = NeighborhoodComponentsAnalysis(random_state=42, callback=loss_storer.callback)
+    X_t = nca.fit_transform(X, y)
+    print(X_t)
+    # test that points are collapsed into one point
+    assert_array_almost_equal(X_t - X_t[0], 0.0)
+    assert abs(loss_storer.loss + 1) < 1e-10
+
+
+def test_finite_differences(global_random_seed):
+    """Test gradient of loss function
+
+    Assert that the gradient is almost equal to its finite differences
+    approximation.
+    """
+    # Initialize the transformation `M`, as well as `X` and `y` and `NCA`
+    rng = np.random.RandomState(global_random_seed)
+    X, y = make_classification(random_state=global_random_seed)
+    M = rng.randn(rng.randint(1, X.shape[1] + 1), X.shape[1])
+    nca = NeighborhoodComponentsAnalysis()
+    nca.n_iter_ = 0
+    mask = y[:, np.newaxis] == y[np.newaxis, :]
+
+    def fun(M):
+        return nca._loss_grad_lbfgs(M, X, mask)[0]
+
+    def grad(M):
+        return nca._loss_grad_lbfgs(M, X, mask)[1]
+
+    # compare the gradient to a finite difference approximation
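+    # (check_grad returns the 2-norm of the difference between grad(M) and a
+    # numerical approximation of the gradient of fun)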
+    diff = check_grad(fun, grad, M.ravel())
+    assert diff == pytest.approx(0.0, abs=1e-4)
+
+
+def test_params_validation():
+    # Test that invalid parameters raise value error
+    X = np.arange(12).reshape(4, 3)
+    y = [1, 1, 2, 2]
+    NCA = NeighborhoodComponentsAnalysis
+    rng = np.random.RandomState(42)
+
+    init = rng.rand(5, 3)
+    msg = (
+        f"The output dimensionality ({init.shape[0]}) "
+        "of the given linear transformation `init` cannot be "
+        f"greater than its input dimensionality ({init.shape[1]})."
+    )
+    with pytest.raises(ValueError, match=re.escape(msg)):
+        NCA(init=init).fit(X, y)
+    n_components = 10
+    msg = (
+        "The preferred dimensionality of the projected space "
+        f"`n_components` ({n_components}) cannot be greater "
+        f"than the given data dimensionality ({X.shape[1]})!"
+    )
+    with pytest.raises(ValueError, match=re.escape(msg)):
+        NCA(n_components=n_components).fit(X, y)
+
+
+def test_transformation_dimensions():
+    X = np.arange(12).reshape(4, 3)
+    y = [1, 1, 2, 2]
+
+    # Fail if transformation input dimension does not match inputs dimensions
+    transformation = np.array([[1, 2], [3, 4]])
+    with pytest.raises(ValueError):
+        NeighborhoodComponentsAnalysis(init=transformation).fit(X, y)
+
+    # Fail if transformation output dimension is larger than
+    # transformation input dimension
+    transformation = np.array([[1, 2], [3, 4], [5, 6]])
+    # len(transformation) > len(transformation[0])
+    with pytest.raises(ValueError):
+        NeighborhoodComponentsAnalysis(init=transformation).fit(X, y)
+
+    # Pass otherwise
+    transformation = np.arange(9).reshape(3, 3)
+    NeighborhoodComponentsAnalysis(init=transformation).fit(X, y)
+
+
+def test_n_components():
+    rng = np.random.RandomState(42)
+    X = np.arange(12).reshape(4, 3)
+    y = [1, 1, 2, 2]
+
+    init = rng.rand(X.shape[1] - 1, 3)
+
+    # n_components = X.shape[1] != transformation.shape[0]
+    n_components = X.shape[1]
+    nca = NeighborhoodComponentsAnalysis(init=init, n_components=n_components)
+    msg = (
+        "The preferred dimensionality of the projected space "
+        f"`n_components` ({n_components}) does not match the output "
+        "dimensionality of the given linear transformation "
+        f"`init` ({init.shape[0]})!"
+    )
+    with pytest.raises(ValueError, match=re.escape(msg)):
+        nca.fit(X, y)
+
+    # n_components > X.shape[1]
+    n_components = X.shape[1] + 2
+    nca = NeighborhoodComponentsAnalysis(init=init, n_components=n_components)
+    msg = (
+        "The preferred dimensionality of the projected space "
+        f"`n_components` ({n_components}) cannot be greater than "
+        f"the given data dimensionality ({X.shape[1]})!"
+    )
+    with pytest.raises(ValueError, match=re.escape(msg)):
+        nca.fit(X, y)
+
+    # n_components < X.shape[1]
+    nca = NeighborhoodComponentsAnalysis(n_components=2, init="identity")
+    nca.fit(X, y)
+
+
+def test_init_transformation():
+    rng = np.random.RandomState(42)
+    X, y = make_blobs(n_samples=30, centers=6, n_features=5, random_state=0)
+
+    # Start learning from scratch
+    nca = NeighborhoodComponentsAnalysis(init="identity")
+    nca.fit(X, y)
+
+    # Initialize with random
+    nca_random = NeighborhoodComponentsAnalysis(init="random")
+    nca_random.fit(X, y)
+
+    # Initialize with auto
+    nca_auto = NeighborhoodComponentsAnalysis(init="auto")
+    nca_auto.fit(X, y)
+
+    # Initialize with PCA
+    nca_pca = NeighborhoodComponentsAnalysis(init="pca")
+    nca_pca.fit(X, y)
+
+    # Initialize with LDA
+    nca_lda = NeighborhoodComponentsAnalysis(init="lda")
+    nca_lda.fit(X, y)
+
+    init = rng.rand(X.shape[1], X.shape[1])
+    nca = NeighborhoodComponentsAnalysis(init=init)
+    nca.fit(X, y)
+
+    # init.shape[1] must match X.shape[1]
+    init = rng.rand(X.shape[1], X.shape[1] + 1)
+    nca = NeighborhoodComponentsAnalysis(init=init)
+    msg = (
+        f"The input dimensionality ({init.shape[1]}) of the given "
+        "linear transformation `init` must match the "
+        f"dimensionality of the given inputs `X` ({X.shape[1]})."
+    )
+    with pytest.raises(ValueError, match=re.escape(msg)):
+        nca.fit(X, y)
+
+    # init.shape[0] must be <= init.shape[1]
+    init = rng.rand(X.shape[1] + 1, X.shape[1])
+    nca = NeighborhoodComponentsAnalysis(init=init)
+    msg = (
+        f"The output dimensionality ({init.shape[0]}) of the given "
+        "linear transformation `init` cannot be "
+        f"greater than its input dimensionality ({init.shape[1]})."
+    )
+    with pytest.raises(ValueError, match=re.escape(msg)):
+        nca.fit(X, y)
+
+    # init.shape[0] must match n_components
+    init = rng.rand(X.shape[1], X.shape[1])
+    n_components = X.shape[1] - 2
+    nca = NeighborhoodComponentsAnalysis(init=init, n_components=n_components)
+    msg = (
+        "The preferred dimensionality of the "
+        f"projected space `n_components` ({n_components}) "
+        "does not match the output dimensionality of the given "
+        f"linear transformation `init` ({init.shape[0]})!"
+    )
+    with pytest.raises(ValueError, match=re.escape(msg)):
+        nca.fit(X, y)
+
+
+@pytest.mark.parametrize("n_samples", [3, 5, 7, 11])
+@pytest.mark.parametrize("n_features", [3, 5, 7, 11])
+@pytest.mark.parametrize("n_classes", [5, 7, 11])
+@pytest.mark.parametrize("n_components", [3, 5, 7, 11])
+def test_auto_init(n_samples, n_features, n_classes, n_components):
+    # Test that auto choose the init as expected with every configuration
+    # of order of n_samples, n_features, n_classes and n_components.
+    rng = np.random.RandomState(42)
+    nca_base = NeighborhoodComponentsAnalysis(
+        init="auto", n_components=n_components, max_iter=1, random_state=rng
+    )
+    if n_classes >= n_samples:
+        pass
+        # n_classes > n_samples is impossible, and n_classes == n_samples
+        # throws an error from lda but is an absurd case
+    else:
+        X = rng.randn(n_samples, n_features)
+        y = np.tile(range(n_classes), n_samples // n_classes + 1)[:n_samples]
+        if n_components > n_features:
+            # this would return a ValueError, which is already tested in
+            # test_params_validation
+            pass
+        else:
+            nca = clone(nca_base)
+            nca.fit(X, y)
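+                # Mirror NCA's "auto" heuristic: LDA when n_components fits in
+                # min(n_classes - 1, n_features), else PCA when it is below
+                # min(n_features, n_samples), otherwise identity.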
+            if n_components <= min(n_classes - 1, n_features):
+                nca_other = clone(nca_base).set_params(init="lda")
+            elif n_components < min(n_features, n_samples):
+                nca_other = clone(nca_base).set_params(init="pca")
+            else:
+                nca_other = clone(nca_base).set_params(init="identity")
+            nca_other.fit(X, y)
+            assert_array_almost_equal(nca.components_, nca_other.components_)
+
+
+def test_warm_start_validation():
+    X, y = make_classification(
+        n_samples=30,
+        n_features=5,
+        n_classes=4,
+        n_redundant=0,
+        n_informative=5,
+        random_state=0,
+    )
+
+    nca = NeighborhoodComponentsAnalysis(warm_start=True, max_iter=5)
+    nca.fit(X, y)
+
+    X_less_features, y = make_classification(
+        n_samples=30,
+        n_features=4,
+        n_classes=4,
+        n_redundant=0,
+        n_informative=4,
+        random_state=0,
+    )
+    msg = (
+        f"The new inputs dimensionality ({X_less_features.shape[1]}) "
+        "does not match the input dimensionality of the previously learned "
+        f"transformation ({nca.components_.shape[1]})."
+    )
+    with pytest.raises(ValueError, match=re.escape(msg)):
+        nca.fit(X_less_features, y)
+
+
+def test_warm_start_effectiveness():
+    # A 1-iteration second fit on same data should give almost same result
+    # with warm starting, and quite different result without warm starting.
+
+    nca_warm = NeighborhoodComponentsAnalysis(warm_start=True, random_state=0)
+    nca_warm.fit(iris_data, iris_target)
+    transformation_warm = nca_warm.components_
+    nca_warm.max_iter = 1
+    nca_warm.fit(iris_data, iris_target)
+    transformation_warm_plus_one = nca_warm.components_
+
+    nca_cold = NeighborhoodComponentsAnalysis(warm_start=False, random_state=0)
+    nca_cold.fit(iris_data, iris_target)
+    transformation_cold = nca_cold.components_
+    nca_cold.max_iter = 1
+    nca_cold.fit(iris_data, iris_target)
+    transformation_cold_plus_one = nca_cold.components_
+
+    diff_warm = np.sum(np.abs(transformation_warm_plus_one - transformation_warm))
+    diff_cold = np.sum(np.abs(transformation_cold_plus_one - transformation_cold))
+    assert diff_warm < 3.0, (
+        "Transformer changed significantly after one "
+        "iteration even though it was warm-started."
+    )
+
+    assert diff_cold > diff_warm, (
+        "Cold-started transformer changed less "
+        "significantly than warm-started "
+        "transformer after one iteration."
+    )
+
+
+@pytest.mark.parametrize(
+    "init_name", ["pca", "lda", "identity", "random", "precomputed"]
+)
+def test_verbose(init_name, capsys):
+    # assert there is proper output when verbose = 1, for every initialization
+    # except auto because auto will call one of the others
+    rng = np.random.RandomState(42)
+    X, y = make_blobs(n_samples=30, centers=6, n_features=5, random_state=0)
+    regexp_init = r"... done in \ *\d+\.\d{2}s"
+    msgs = {
+        "pca": "Finding principal components" + regexp_init,
+        "lda": "Finding most discriminative components" + regexp_init,
+    }
+    if init_name == "precomputed":
+        init = rng.randn(X.shape[1], X.shape[1])
+    else:
+        init = init_name
+    nca = NeighborhoodComponentsAnalysis(verbose=1, init=init)
+    nca.fit(X, y)
+    out, _ = capsys.readouterr()
+
+    # check output
+    lines = re.split("\n+", out)
+    # if pca or lda init, an additional line is printed, so we test
+    # it and remove it to test the rest equally among initializations
+    if init_name in ["pca", "lda"]:
+        assert re.match(msgs[init_name], lines[0])
+        lines = lines[1:]
+    assert lines[0] == "[NeighborhoodComponentsAnalysis]"
+    header = "{:>10} {:>20} {:>10}".format("Iteration", "Objective Value", "Time(s)")
+    assert lines[1] == "[NeighborhoodComponentsAnalysis] {}".format(header)
+    assert lines[2] == "[NeighborhoodComponentsAnalysis] {}".format("-" * len(header))
+    for line in lines[3:-2]:
+        # The following regex will match for instance:
+        # '[NeighborhoodComponentsAnalysis]  0    6.988936e+01   0.01'
+        assert re.match(
+            r"\[NeighborhoodComponentsAnalysis\] *\d+ *\d\.\d{6}e"
+            r"[+|-]\d+\ *\d+\.\d{2}",
+            line,
+        )
+    assert re.match(
+        r"\[NeighborhoodComponentsAnalysis\] Training took\ *" r"\d+\.\d{2}s\.",
+        lines[-2],
+    )
+    assert lines[-1] == ""
+
+
+def test_no_verbose(capsys):
+    # assert by default there is no output (verbose=0)
+    nca = NeighborhoodComponentsAnalysis()
+    nca.fit(iris_data, iris_target)
+    out, _ = capsys.readouterr()
+    # check output
+    assert out == ""
+
+
+def test_singleton_class():
+    X = iris_data
+    y = iris_target
+
+    # one singleton class
+    singleton_class = 1
+    (ind_singleton,) = np.where(y == singleton_class)
+    y[ind_singleton] = 2
+    y[ind_singleton[0]] = singleton_class
+
+    nca = NeighborhoodComponentsAnalysis(max_iter=30)
+    nca.fit(X, y)
+
+    # One non-singleton class
+    (ind_1,) = np.where(y == 1)
+    (ind_2,) = np.where(y == 2)
+    y[ind_1] = 0
+    y[ind_1[0]] = 1
+    y[ind_2] = 0
+    y[ind_2[0]] = 2
+
+    nca = NeighborhoodComponentsAnalysis(max_iter=30)
+    nca.fit(X, y)
+
+    # Only singleton classes
+    (ind_0,) = np.where(y == 0)
+    (ind_1,) = np.where(y == 1)
+    (ind_2,) = np.where(y == 2)
+    X = X[[ind_0[0], ind_1[0], ind_2[0]]]
+    y = y[[ind_0[0], ind_1[0], ind_2[0]]]
+
+    nca = NeighborhoodComponentsAnalysis(init="identity", max_iter=30)
+    nca.fit(X, y)
+    assert_array_equal(X, nca.transform(X))
+
+
+def test_one_class():
+    X = iris_data[iris_target == 0]
+    y = iris_target[iris_target == 0]
+
+    nca = NeighborhoodComponentsAnalysis(
+        max_iter=30, n_components=X.shape[1], init="identity"
+    )
+    nca.fit(X, y)
+    assert_array_equal(X, nca.transform(X))
+
+
+def test_callback(capsys):
+    max_iter = 10
+
+    def my_cb(transformation, n_iter):
+        assert transformation.shape == (iris_data.shape[1] ** 2,)
+        rem_iter = max_iter - n_iter
+        print("{} iterations remaining...".format(rem_iter))
+
+    # assert that my_cb is called
+    nca = NeighborhoodComponentsAnalysis(max_iter=max_iter, callback=my_cb, verbose=1)
+    nca.fit(iris_data, iris_target)
+    out, _ = capsys.readouterr()
+
+    # check output
+    assert "{} iterations remaining...".format(max_iter - 1) in out
+
+
+def test_expected_transformation_shape():
+    """Test that the transformation has the expected shape."""
+    X = iris_data
+    y = iris_target
+
+    class TransformationStorer:
+        def __init__(self, X, y):
+            # Initialize a fake NCA and variables needed to call the loss
+            # function:
+            self.fake_nca = NeighborhoodComponentsAnalysis()
+            self.fake_nca.n_iter_ = np.inf
+            self.X, y = self.fake_nca._validate_data(X, y, ensure_min_samples=2)
+            y = LabelEncoder().fit_transform(y)
+            self.same_class_mask = y[:, np.newaxis] == y[np.newaxis, :]
+
+        def callback(self, transformation, n_iter):
+            """Stores the last value of the transformation taken as input by
+            the optimizer"""
+            self.transformation = transformation
+
+    transformation_storer = TransformationStorer(X, y)
+    cb = transformation_storer.callback
+    nca = NeighborhoodComponentsAnalysis(max_iter=5, callback=cb)
+    nca.fit(X, y)
+    assert transformation_storer.transformation.size == X.shape[1] ** 2
+
+
+def test_convergence_warning():
+    nca = NeighborhoodComponentsAnalysis(max_iter=2, verbose=1)
+    cls_name = nca.__class__.__name__
+    msg = "[{}] NCA did not converge".format(cls_name)
+    with pytest.warns(ConvergenceWarning, match=re.escape(msg)):
+        nca.fit(iris_data, iris_target)
+
+
+@pytest.mark.parametrize(
+    "param, value",
+    [
+        ("n_components", np.int32(3)),
+        ("max_iter", np.int32(100)),
+        ("tol", np.float32(0.0001)),
+    ],
+)
+def test_parameters_valid_types(param, value):
+    # check that no error is raised when parameters have numpy integer or
+    # floating types.
+    nca = NeighborhoodComponentsAnalysis(**{param: value})
+
+    X = iris_data
+    y = iris_target
+
+    nca.fit(X, y)
+
+
+def test_nca_feature_names_out():
+    """Check `get_feature_names_out` for `NeighborhoodComponentsAnalysis`."""
+
+    X = iris_data
+    y = iris_target
+
+    est = NeighborhoodComponentsAnalysis().fit(X, y)
+    names_out = est.get_feature_names_out()
+
+    class_name_lower = est.__class__.__name__.lower()
+    expected_names_out = np.array(
+        [f"{class_name_lower}{i}" for i in range(est.components_.shape[1])],
+        dtype=object,
+    )
+    assert_array_equal(names_out, expected_names_out)
diff --git a/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/test_nearest_centroid.py b/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/test_nearest_centroid.py
new file mode 100644
index 0000000000000000000000000000000000000000..ee548d801781000cbbebb7da36e56e7b5e2724f3
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/test_nearest_centroid.py
@@ -0,0 +1,178 @@
+"""
+Testing for the nearest centroid module.
+"""
+import numpy as np
+import pytest
+from numpy.testing import assert_array_equal
+
+from sklearn import datasets
+from sklearn.neighbors import NearestCentroid
+from sklearn.utils.fixes import CSR_CONTAINERS
+
+# toy sample
+X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
+y = [-1, -1, -1, 1, 1, 1]
+T = [[-1, -1], [2, 2], [3, 2]]
+true_result = [-1, 1, 1]
+
+# also load the iris dataset
+# and randomly permute it
+iris = datasets.load_iris()
+rng = np.random.RandomState(1)
+perm = rng.permutation(iris.target.size)
+iris.data = iris.data[perm]
+iris.target = iris.target[perm]
+
+
+@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
+def test_classification_toy(csr_container):
+    # Check classification on a toy dataset, including sparse versions.
+    X_csr = csr_container(X)
+    T_csr = csr_container(T)
+
+    clf = NearestCentroid()
+    clf.fit(X, y)
+    assert_array_equal(clf.predict(T), true_result)
+
+    # Same test, but with a sparse matrix to fit and test.
+    clf = NearestCentroid()
+    clf.fit(X_csr, y)
+    assert_array_equal(clf.predict(T_csr), true_result)
+
+    # Fit with sparse, test with non-sparse
+    clf = NearestCentroid()
+    clf.fit(X_csr, y)
+    assert_array_equal(clf.predict(T), true_result)
+
+    # Fit with non-sparse, test with sparse
+    clf = NearestCentroid()
+    clf.fit(X, y)
+    assert_array_equal(clf.predict(T_csr), true_result)
+
+    # Fit and predict with non-CSR sparse matrices
+    clf = NearestCentroid()
+    clf.fit(X_csr.tocoo(), y)
+    assert_array_equal(clf.predict(T_csr.tolil()), true_result)
+
+
+# TODO(1.5): Remove filterwarnings when support for some metrics is removed
+@pytest.mark.filterwarnings("ignore:Support for distance metrics:FutureWarning:sklearn")
+def test_iris():
+    # Check consistency on dataset iris.
+    for metric in ("euclidean", "cosine"):
+        clf = NearestCentroid(metric=metric).fit(iris.data, iris.target)
+        score = np.mean(clf.predict(iris.data) == iris.target)
+        assert score > 0.9, "Failed with score = " + str(score)
+
+
+# TODO(1.5): Remove filterwarnings when support for some metrics is removed
+@pytest.mark.filterwarnings("ignore:Support for distance metrics:FutureWarning:sklearn")
+def test_iris_shrinkage():
+    # Check consistency on dataset iris, when using shrinkage.
+    for metric in ("euclidean", "cosine"):
+        for shrink_threshold in [None, 0.1, 0.5]:
+            clf = NearestCentroid(metric=metric, shrink_threshold=shrink_threshold)
+            clf = clf.fit(iris.data, iris.target)
+            score = np.mean(clf.predict(iris.data) == iris.target)
+            assert score > 0.8, "Failed with score = " + str(score)
+
+
+def test_pickle():
+    import pickle
+
+    # classification
+    obj = NearestCentroid()
+    obj.fit(iris.data, iris.target)
+    score = obj.score(iris.data, iris.target)
+    s = pickle.dumps(obj)
+
+    obj2 = pickle.loads(s)
+    assert type(obj2) == obj.__class__
+    score2 = obj2.score(iris.data, iris.target)
+    assert_array_equal(
+        score,
+        score2,
+        "Failed to generate same score after pickling (classification).",
+    )
+
+
+def test_shrinkage_correct():
+    # Ensure that the shrinking is correct.
+    # The expected result is calculated by R (pamr),
+    # which is implemented by the author of the original paper.
+    # (One needs to modify the code to output the new centroid in pamr.predict)
+
+    X = np.array([[0, 1], [1, 0], [1, 1], [2, 0], [6, 8]])
+    y = np.array([1, 1, 2, 2, 2])
+    clf = NearestCentroid(shrink_threshold=0.1)
+    clf.fit(X, y)
+    expected_result = np.array([[0.7787310, 0.8545292], [2.814179, 2.763647]])
+    np.testing.assert_array_almost_equal(clf.centroids_, expected_result)
+
+
+def test_shrinkage_threshold_decoded_y():
+    clf = NearestCentroid(shrink_threshold=0.01)
+    y_ind = np.asarray(y)
+    y_ind[y_ind == -1] = 0
+    clf.fit(X, y_ind)
+    centroid_encoded = clf.centroids_
+    clf.fit(X, y)
+    assert_array_equal(centroid_encoded, clf.centroids_)
+
+
+def test_predict_translated_data():
+    # Test that NearestCentroid gives same results on translated data
+
+    rng = np.random.RandomState(0)
+    X = rng.rand(50, 50)
+    y = rng.randint(0, 3, 50)
+    noise = rng.rand(50)
+    clf = NearestCentroid(shrink_threshold=0.1)
+    clf.fit(X, y)
+    y_init = clf.predict(X)
+    clf = NearestCentroid(shrink_threshold=0.1)
+    X_noise = X + noise
+    clf.fit(X_noise, y)
+    y_translate = clf.predict(X_noise)
+    assert_array_equal(y_init, y_translate)
+
+
+@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
+def test_manhattan_metric(csr_container):
+    # Test the manhattan metric.
+    X_csr = csr_container(X)
+
+    clf = NearestCentroid(metric="manhattan")
+    clf.fit(X, y)
+    dense_centroid = clf.centroids_
+    clf.fit(X_csr, y)
+    assert_array_equal(clf.centroids_, dense_centroid)
+    assert_array_equal(dense_centroid, [[-1, -1], [1, 1]])
+
+
+# TODO(1.5): remove this test
+@pytest.mark.parametrize(
+    "metric", sorted(list(NearestCentroid._valid_metrics - {"manhattan", "euclidean"}))
+)
+def test_deprecated_distance_metric_supports(metric):
+    # Check that a warning is raised for all deprecated distance metric supports
+    clf = NearestCentroid(metric=metric)
+    with pytest.warns(
+        FutureWarning,
+        match="Support for distance metrics other than euclidean and manhattan",
+    ):
+        clf.fit(X, y)
+
+
+def test_features_zero_var():
+    # Test that features with zero variance raise an error
+
+    X = np.empty((10, 2))
+    X[:, 0] = -0.13725701
+    X[:, 1] = -0.9853293
+    y = np.zeros((10))
+    y[0] = 1
+
+    clf = NearestCentroid(shrink_threshold=0.1)
+    with pytest.raises(ValueError):
+        clf.fit(X, y)
diff --git a/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/test_neighbors.py b/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/test_neighbors.py
new file mode 100644
index 0000000000000000000000000000000000000000..d3fc71478e6f551723c716f63d8219cf0bd421da
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/test_neighbors.py
@@ -0,0 +1,2372 @@
+import re
+import warnings
+from itertools import product
+
+import joblib
+import numpy as np
+import pytest
+from scipy.sparse import issparse
+
+from sklearn import (
+    config_context,
+    datasets,
+    metrics,
+    neighbors,
+)
+from sklearn.base import clone
+from sklearn.exceptions import DataConversionWarning, EfficiencyWarning, NotFittedError
+from sklearn.metrics._dist_metrics import (
+    DistanceMetric,
+)
+from sklearn.metrics.pairwise import PAIRWISE_BOOLEAN_FUNCTIONS, pairwise_distances
+from sklearn.metrics.tests.test_dist_metrics import BOOL_METRICS
+from sklearn.metrics.tests.test_pairwise_distances_reduction import (
+    assert_compatible_argkmin_results,
+    assert_compatible_radius_results,
+)
+from sklearn.model_selection import cross_val_score, train_test_split
+from sklearn.neighbors import (
+    VALID_METRICS_SPARSE,
+    KNeighborsRegressor,
+)
+from sklearn.neighbors._base import (
+    KNeighborsMixin,
+    _check_precomputed,
+    _is_sorted_by_data,
+    sort_graph_by_row_values,
+)
+from sklearn.pipeline import make_pipeline
+from sklearn.utils._testing import (
+    assert_allclose,
+    assert_array_equal,
+    ignore_warnings,
+)
+from sklearn.utils.fixes import (
+    BSR_CONTAINERS,
+    COO_CONTAINERS,
+    CSC_CONTAINERS,
+    CSR_CONTAINERS,
+    DIA_CONTAINERS,
+    DOK_CONTAINERS,
+    LIL_CONTAINERS,
+    parse_version,
+    sp_version,
+)
+from sklearn.utils.validation import check_random_state
+
+rng = np.random.RandomState(0)
+# load and shuffle iris dataset
+iris = datasets.load_iris()
+perm = rng.permutation(iris.target.size)
+iris.data = iris.data[perm]
+iris.target = iris.target[perm]
+
+# load and shuffle digits
+digits = datasets.load_digits()
+perm = rng.permutation(digits.target.size)
+digits.data = digits.data[perm]
+digits.target = digits.target[perm]
+
+SPARSE_TYPES = tuple(
+    BSR_CONTAINERS
+    + COO_CONTAINERS
+    + CSC_CONTAINERS
+    + CSR_CONTAINERS
+    + DOK_CONTAINERS
+    + LIL_CONTAINERS
+)
+SPARSE_OR_DENSE = SPARSE_TYPES + (np.asarray,)
+
+ALGORITHMS = ("ball_tree", "brute", "kd_tree", "auto")
+COMMON_VALID_METRICS = sorted(
+    set.intersection(*map(set, neighbors.VALID_METRICS.values()))
+)  # type: ignore
+
+P = (1, 2, 3, 4, np.inf)
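+# Minkowski p values exercised below; p=np.inf corresponds to the Chebyshev distance.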
+
+# Filter deprecation warnings.
+neighbors.kneighbors_graph = ignore_warnings(neighbors.kneighbors_graph)
+neighbors.radius_neighbors_graph = ignore_warnings(neighbors.radius_neighbors_graph)
+
+# A list containing metrics where the string specifies the use of the
+# DistanceMetric object directly (as resolved in _parse_metric)
+DISTANCE_METRIC_OBJS = ["DM_euclidean"]
+
+
+def _parse_metric(metric: str, dtype=None):
+    """
+    Helper function for properly building a type-specialized DistanceMetric instance.
+
+    Constructs a type-specialized DistanceMetric instance from a string
+    beginning with "DM_" while allowing a pass-through for other metric-specifying
+    strings. This is necessary since we wish to parameterize dtype independently of
+    the metric, yet DistanceMetric requires it for construction.
+
+    """
+    if metric[:3] == "DM_":
+        return DistanceMetric.get_metric(metric[3:], dtype=dtype)
+    return metric
+
+
+def _generate_test_params_for(metric: str, n_features: int):
+    """Return list of DistanceMetric kwargs for tests."""
+
+    # Distinguish between cases so that unneeded data structures are not computed.
+    rng = np.random.RandomState(1)
+
+    if metric == "minkowski":
+        minkowski_kwargs = [dict(p=1.5), dict(p=2), dict(p=3), dict(p=np.inf)]
+        if sp_version >= parse_version("1.8.0.dev0"):
+            # TODO: remove the test once we no longer support scipy < 1.8.0.
+            # Recent scipy versions accept weights in the Minkowski metric directly:
+            # type: ignore
+            minkowski_kwargs.append(dict(p=3, w=rng.rand(n_features)))
+        return minkowski_kwargs
+
+    if metric == "seuclidean":
+        return [dict(V=rng.rand(n_features))]
+
+    if metric == "mahalanobis":
+        A = rng.rand(n_features, n_features)
+        # Make the matrix symmetric positive definite
+        VI = A + A.T + 3 * np.eye(n_features)
+        return [dict(VI=VI)]
+
+    # Case of: "euclidean", "manhattan", "chebyshev", "haversine" or any other metric.
+    # In those cases, no kwargs are needed.
+    return [{}]
+
+
+def _weight_func(dist):
+    """Weight function to replace lambda d: d ** -2.
+    The lambda function is not valid because
+    0 ** -2 is undefined when d == 0."""
+
+    # dist may be multidimensional; invert it element-wise while silencing
+    # divide-by-zero warnings raised for null distances
+    with np.errstate(divide="ignore"):
+        retval = 1.0 / dist
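+    # Squaring the (warning-silenced) inverse yields d ** -2; zero distances map to
+    # +inf weights.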
+    return retval**2
+
+
+WEIGHTS = ["uniform", "distance", _weight_func]
+
+
+@pytest.mark.parametrize(
+    "n_samples, n_features, n_query_pts, n_neighbors",
+    [
+        (100, 100, 10, 100),
+        (1000, 5, 100, 1),
+    ],
+)
+@pytest.mark.parametrize("query_is_train", [False, True])
+@pytest.mark.parametrize("metric", COMMON_VALID_METRICS + DISTANCE_METRIC_OBJS)  # type: ignore # noqa
+def test_unsupervised_kneighbors(
+    global_dtype,
+    n_samples,
+    n_features,
+    n_query_pts,
+    n_neighbors,
+    query_is_train,
+    metric,
+):
+    # The different algorithms must return identical results
+    # on their common metrics, with and without returning
+    # distances
+
+    metric = _parse_metric(metric, global_dtype)
+
+    # Redefining the rng locally to use the same generated X
+    local_rng = np.random.RandomState(0)
+    X = local_rng.rand(n_samples, n_features).astype(global_dtype, copy=False)
+
+    query = (
+        X
+        if query_is_train
+        else local_rng.rand(n_query_pts, n_features).astype(global_dtype, copy=False)
+    )
+
+    results_nodist = []
+    results = []
+
+    for algorithm in ALGORITHMS:
+        if isinstance(metric, DistanceMetric) and global_dtype == np.float32:
+            if "tree" in algorithm:  # pragma: nocover
+                pytest.skip(
+                    "Neither KDTree nor BallTree support 32-bit distance metric"
+                    " objects."
+                )
+        neigh = neighbors.NearestNeighbors(
+            n_neighbors=n_neighbors, algorithm=algorithm, metric=metric
+        )
+        neigh.fit(X)
+
+        results_nodist.append(neigh.kneighbors(query, return_distance=False))
+        results.append(neigh.kneighbors(query, return_distance=True))
+
+    for i in range(len(results) - 1):
+        algorithm = ALGORITHMS[i]
+        next_algorithm = ALGORITHMS[i + 1]
+
+        indices_no_dist = results_nodist[i]
+        distances, next_distances = results[i][0], results[i + 1][0]
+        indices, next_indices = results[i][1], results[i + 1][1]
+        assert_array_equal(
+            indices_no_dist,
+            indices,
+            err_msg=(
+                f"The '{algorithm}' algorithm returns different"
+                "indices depending on 'return_distances'."
+            ),
+        )
+        assert_array_equal(
+            indices,
+            next_indices,
+            err_msg=(
+                f"The '{algorithm}' and '{next_algorithm}' "
+                "algorithms return different indices."
+            ),
+        )
+        assert_allclose(
+            distances,
+            next_distances,
+            err_msg=(
+                f"The '{algorithm}' and '{next_algorithm}' "
+                "algorithms return different distances."
+            ),
+            atol=1e-6,
+        )
+
+
+@pytest.mark.parametrize(
+    "n_samples, n_features, n_query_pts",
+    [
+        (100, 100, 10),
+        (1000, 5, 100),
+    ],
+)
+@pytest.mark.parametrize("metric", COMMON_VALID_METRICS + DISTANCE_METRIC_OBJS)  # type: ignore # noqa
+@pytest.mark.parametrize("n_neighbors, radius", [(1, 100), (50, 500), (100, 1000)])
+@pytest.mark.parametrize(
+    "NeighborsMixinSubclass",
+    [
+        neighbors.KNeighborsClassifier,
+        neighbors.KNeighborsRegressor,
+        neighbors.RadiusNeighborsClassifier,
+        neighbors.RadiusNeighborsRegressor,
+    ],
+)
+def test_neigh_predictions_algorithm_agnosticity(
+    global_dtype,
+    n_samples,
+    n_features,
+    n_query_pts,
+    metric,
+    n_neighbors,
+    radius,
+    NeighborsMixinSubclass,
+):
+    # The different algorithms must return identical predictions results
+    # on their common metrics.
+
+    metric = _parse_metric(metric, global_dtype)
+    if isinstance(metric, DistanceMetric):
+        if "Classifier" in NeighborsMixinSubclass.__name__:
+            pytest.skip(
+                "Metrics of type `DistanceMetric` are not yet supported for"
+                " classifiers."
+            )
+        if "Radius" in NeighborsMixinSubclass.__name__:
+            pytest.skip(
+                "Metrics of type `DistanceMetric` are not yet supported for"
+                " radius-neighbor estimators."
+            )
+
+    # Redefining the rng locally to use the same generated X
+    local_rng = np.random.RandomState(0)
+    X = local_rng.rand(n_samples, n_features).astype(global_dtype, copy=False)
+    y = local_rng.randint(3, size=n_samples)
+
+    query = local_rng.rand(n_query_pts, n_features).astype(global_dtype, copy=False)
+
+    predict_results = []
+
+    parameter = (
+        n_neighbors if issubclass(NeighborsMixinSubclass, KNeighborsMixin) else radius
+    )
+
+    for algorithm in ALGORITHMS:
+        if isinstance(metric, DistanceMetric) and global_dtype == np.float32:
+            if "tree" in algorithm:  # pragma: nocover
+                pytest.skip(
+                    "Neither KDTree nor BallTree support 32-bit distance metric"
+                    " objects."
+                )
+        neigh = NeighborsMixinSubclass(parameter, algorithm=algorithm, metric=metric)
+        neigh.fit(X, y)
+
+        predict_results.append(neigh.predict(query))
+
+    for i in range(len(predict_results) - 1):
+        algorithm = ALGORITHMS[i]
+        next_algorithm = ALGORITHMS[i + 1]
+
+        predictions, next_predictions = predict_results[i], predict_results[i + 1]
+
+        assert_allclose(
+            predictions,
+            next_predictions,
+            err_msg=(
+                f"The '{algorithm}' and '{next_algorithm}' "
+                "algorithms return different predictions."
+            ),
+        )
+
+
+@pytest.mark.parametrize(
+    "KNeighborsMixinSubclass",
+    [
+        neighbors.KNeighborsClassifier,
+        neighbors.KNeighborsRegressor,
+        neighbors.NearestNeighbors,
+    ],
+)
+def test_unsupervised_inputs(global_dtype, KNeighborsMixinSubclass):
+    # Test unsupervised inputs for neighbors estimators
+
+    X = rng.random_sample((10, 3)).astype(global_dtype, copy=False)
+    y = rng.randint(3, size=10)
+    nbrs_fid = neighbors.NearestNeighbors(n_neighbors=1)
+    nbrs_fid.fit(X)
+
+    dist1, ind1 = nbrs_fid.kneighbors(X)
+
+    nbrs = KNeighborsMixinSubclass(n_neighbors=1)
+
+    for data in (nbrs_fid, neighbors.BallTree(X), neighbors.KDTree(X)):
+        nbrs.fit(data, y)
+
+        dist2, ind2 = nbrs.kneighbors(X)
+
+        assert_allclose(dist1, dist2)
+        assert_array_equal(ind1, ind2)
+
+
+def test_not_fitted_error_gets_raised():
+    X = [[1]]
+    neighbors_ = neighbors.NearestNeighbors()
+    with pytest.raises(NotFittedError):
+        neighbors_.kneighbors_graph(X)
+    with pytest.raises(NotFittedError):
+        neighbors_.radius_neighbors_graph(X)
+
+
+@pytest.mark.filterwarnings("ignore:EfficiencyWarning")
+def check_precomputed(make_train_test, estimators):
+    """Tests unsupervised NearestNeighbors with a distance matrix."""
+    # Note: smaller samples may result in spurious test success
+    rng = np.random.RandomState(42)
+    X = rng.random_sample((10, 4))
+    Y = rng.random_sample((3, 4))
+    DXX, DYX = make_train_test(X, Y)
+    for method in [
+        "kneighbors",
+    ]:
+        # TODO: also test radius_neighbors, but requires different assertion
+
+        # As a feature matrix (n_samples by n_features)
+        nbrs_X = neighbors.NearestNeighbors(n_neighbors=3)
+        nbrs_X.fit(X)
+        dist_X, ind_X = getattr(nbrs_X, method)(Y)
+
+        # As a dense distance matrix (n_samples by n_samples)
+        nbrs_D = neighbors.NearestNeighbors(
+            n_neighbors=3, algorithm="brute", metric="precomputed"
+        )
+        nbrs_D.fit(DXX)
+        dist_D, ind_D = getattr(nbrs_D, method)(DYX)
+        assert_allclose(dist_X, dist_D)
+        assert_array_equal(ind_X, ind_D)
+
+        # Check auto works too
+        nbrs_D = neighbors.NearestNeighbors(
+            n_neighbors=3, algorithm="auto", metric="precomputed"
+        )
+        nbrs_D.fit(DXX)
+        dist_D, ind_D = getattr(nbrs_D, method)(DYX)
+        assert_allclose(dist_X, dist_D)
+        assert_array_equal(ind_X, ind_D)
+
+        # Check X=None in prediction
+        dist_X, ind_X = getattr(nbrs_X, method)(None)
+        dist_D, ind_D = getattr(nbrs_D, method)(None)
+        assert_allclose(dist_X, dist_D)
+        assert_array_equal(ind_X, ind_D)
+
+        # Must raise a ValueError if the matrix is not of correct shape
+        with pytest.raises(ValueError):
+            getattr(nbrs_D, method)(X)
+
+    target = np.arange(X.shape[0])
+    for Est in estimators:
+        est = Est(metric="euclidean")
+        est.radius = est.n_neighbors = 1
+        pred_X = est.fit(X, target).predict(Y)
+        est.metric = "precomputed"
+        pred_D = est.fit(DXX, target).predict(DYX)
+        assert_allclose(pred_X, pred_D)
+
+
+def test_precomputed_dense():
+    def make_train_test(X_train, X_test):
+        return (
+            metrics.pairwise_distances(X_train),
+            metrics.pairwise_distances(X_test, X_train),
+        )
+
+    estimators = [
+        neighbors.KNeighborsClassifier,
+        neighbors.KNeighborsRegressor,
+        neighbors.RadiusNeighborsClassifier,
+        neighbors.RadiusNeighborsRegressor,
+    ]
+    check_precomputed(make_train_test, estimators)
+
+
+@pytest.mark.parametrize("fmt", ["csr", "lil"])
+def test_precomputed_sparse_knn(fmt):
+    def make_train_test(X_train, X_test):
+        nn = neighbors.NearestNeighbors(n_neighbors=3 + 1).fit(X_train)
+        return (
+            nn.kneighbors_graph(X_train, mode="distance").asformat(fmt),
+            nn.kneighbors_graph(X_test, mode="distance").asformat(fmt),
+        )
+
+    # We do not test RadiusNeighborsClassifier and RadiusNeighborsRegressor
+    # since the precomputed neighbors graph is built with k neighbors only.
+    estimators = [
+        neighbors.KNeighborsClassifier,
+        neighbors.KNeighborsRegressor,
+    ]
+    check_precomputed(make_train_test, estimators)
+
+
+@pytest.mark.parametrize("fmt", ["csr", "lil"])
+def test_precomputed_sparse_radius(fmt):
+    def make_train_test(X_train, X_test):
+        nn = neighbors.NearestNeighbors(radius=1).fit(X_train)
+        return (
+            nn.radius_neighbors_graph(X_train, mode="distance").asformat(fmt),
+            nn.radius_neighbors_graph(X_test, mode="distance").asformat(fmt),
+        )
+
+    # We do not test KNeighborsClassifier and KNeighborsRegressor
+    # since the precomputed neighbors graph is built with a radius.
+    estimators = [
+        neighbors.RadiusNeighborsClassifier,
+        neighbors.RadiusNeighborsRegressor,
+    ]
+    check_precomputed(make_train_test, estimators)
+
+
+@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
+def test_is_sorted_by_data(csr_container):
+    # Test that _is_sorted_by_data works as expected. In a CSR sparse matrix,
+    # entries in each row can be sorted by indices, by data, or unsorted.
+    # _is_sorted_by_data should return True when entries are sorted by data,
+    # and False in all other cases.
+
+    # Test with sorted single row sparse array
+    X = csr_container(np.arange(10).reshape(1, 10))
+    assert _is_sorted_by_data(X)
+    # Test with unsorted 1D array
+    X[0, 2] = 5
+    assert not _is_sorted_by_data(X)
+
+    # Test when the data is sorted in each sample, but not necessarily
+    # between samples
+    X = csr_container([[0, 1, 2], [3, 0, 0], [3, 4, 0], [1, 0, 2]])
+    assert _is_sorted_by_data(X)
+
+    # Test with duplicates entries in X.indptr
+    data, indices, indptr = [0, 4, 2, 2], [0, 1, 1, 1], [0, 2, 2, 4]
+    X = csr_container((data, indices, indptr), shape=(3, 3))
+    assert _is_sorted_by_data(X)
+
+
+@pytest.mark.filterwarnings("ignore:EfficiencyWarning")
+@pytest.mark.parametrize("function", [sort_graph_by_row_values, _check_precomputed])
+@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
+def test_sort_graph_by_row_values(function, csr_container):
+    # Test that sort_graph_by_row_values returns a graph sorted by row values
+    X = csr_container(np.abs(np.random.RandomState(42).randn(10, 10)))
+    assert not _is_sorted_by_data(X)
+    Xt = function(X)
+    assert _is_sorted_by_data(Xt)
+
+    # test with a different number of nonzero entries for each sample
+    mask = np.random.RandomState(42).randint(2, size=(10, 10))
+    X = X.toarray()
+    X[mask == 1] = 0
+    X = csr_container(X)
+    assert not _is_sorted_by_data(X)
+    Xt = function(X)
+    assert _is_sorted_by_data(Xt)
+
+
+@pytest.mark.filterwarnings("ignore:EfficiencyWarning")
+@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
+def test_sort_graph_by_row_values_copy(csr_container):
+    # Test that the sorting is done in place when X is CSR, so that Xt is X.
+    X_ = csr_container(np.abs(np.random.RandomState(42).randn(10, 10)))
+    assert not _is_sorted_by_data(X_)
+
+    # sort_graph_by_row_values is done inplace if copy=False
+    X = X_.copy()
+    assert sort_graph_by_row_values(X).data is X.data
+
+    X = X_.copy()
+    assert sort_graph_by_row_values(X, copy=False).data is X.data
+
+    X = X_.copy()
+    assert sort_graph_by_row_values(X, copy=True).data is not X.data
+
+    # _check_precomputed is never done inplace
+    X = X_.copy()
+    assert _check_precomputed(X).data is not X.data
+
+    # do not raise if X is not CSR and copy=True
+    sort_graph_by_row_values(X.tocsc(), copy=True)
+
+    # raise if X is not CSR and copy=False
+    with pytest.raises(ValueError, match="Use copy=True to allow the conversion"):
+        sort_graph_by_row_values(X.tocsc(), copy=False)
+
+
+@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
+def test_sort_graph_by_row_values_warning(csr_container):
+    # Test that the parameter warn_when_not_sorted works as expected.
+    X = csr_container(np.abs(np.random.RandomState(42).randn(10, 10)))
+    assert not _is_sorted_by_data(X)
+
+    # warning
+    with pytest.warns(EfficiencyWarning, match="was not sorted by row values"):
+        sort_graph_by_row_values(X, copy=True)
+    with pytest.warns(EfficiencyWarning, match="was not sorted by row values"):
+        sort_graph_by_row_values(X, copy=True, warn_when_not_sorted=True)
+    with pytest.warns(EfficiencyWarning, match="was not sorted by row values"):
+        _check_precomputed(X)
+
+    # no warning
+    with warnings.catch_warnings():
+        warnings.simplefilter("error")
+        sort_graph_by_row_values(X, copy=True, warn_when_not_sorted=False)
+
+
+@pytest.mark.parametrize(
+    "sparse_container", DOK_CONTAINERS + BSR_CONTAINERS + DIA_CONTAINERS
+)
+def test_sort_graph_by_row_values_bad_sparse_format(sparse_container):
+    # Test that sort_graph_by_row_values and _check_precomputed error on bad formats
+    X = sparse_container(np.abs(np.random.RandomState(42).randn(10, 10)))
+    with pytest.raises(TypeError, match="format is not supported"):
+        sort_graph_by_row_values(X)
+    with pytest.raises(TypeError, match="format is not supported"):
+        _check_precomputed(X)
+
+
+@pytest.mark.filterwarnings("ignore:EfficiencyWarning")
+@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
+def test_precomputed_sparse_invalid(csr_container):
+    dist = np.array([[0.0, 2.0, 1.0], [2.0, 0.0, 3.0], [1.0, 3.0, 0.0]])
+    dist_csr = csr_container(dist)
+    neigh = neighbors.NearestNeighbors(n_neighbors=1, metric="precomputed")
+    neigh.fit(dist_csr)
+    neigh.kneighbors(None, n_neighbors=1)
+    neigh.kneighbors(np.array([[0.0, 0.0, 0.0]]), n_neighbors=2)
+
+    # Check that an error is raised when the precomputed sparse matrix does not
+    # store enough neighbors per sample
+    dist = np.array([[0.0, 2.0, 0.0], [2.0, 0.0, 3.0], [0.0, 3.0, 0.0]])
+    dist_csr = csr_container(dist)
+    neigh.fit(dist_csr)
+    msg = "2 neighbors per samples are required, but some samples have only 1"
+    with pytest.raises(ValueError, match=msg):
+        neigh.kneighbors(None, n_neighbors=1)
+
+    # Checks error with inconsistent distance matrix
+    dist = np.array([[5.0, 2.0, 1.0], [-2.0, 0.0, 3.0], [1.0, 3.0, 0.0]])
+    dist_csr = csr_container(dist)
+    msg = "Negative values in data passed to precomputed distance matrix."
+    with pytest.raises(ValueError, match=msg):
+        neigh.kneighbors(dist_csr, n_neighbors=1)
+
+
+def test_precomputed_cross_validation():
+    # Ensure array is split correctly
+    rng = np.random.RandomState(0)
+    X = rng.rand(20, 2)
+    D = pairwise_distances(X, metric="euclidean")
+    y = rng.randint(3, size=20)
+    for Est in (
+        neighbors.KNeighborsClassifier,
+        neighbors.RadiusNeighborsClassifier,
+        neighbors.KNeighborsRegressor,
+        neighbors.RadiusNeighborsRegressor,
+    ):
+        metric_score = cross_val_score(Est(), X, y)
+        precomp_score = cross_val_score(Est(metric="precomputed"), D, y)
+        assert_array_equal(metric_score, precomp_score)
+
+
+def test_unsupervised_radius_neighbors(
+    global_dtype, n_samples=20, n_features=5, n_query_pts=2, radius=0.5, random_state=0
+):
+    # Test unsupervised radius-based query
+    rng = np.random.RandomState(random_state)
+
+    X = rng.rand(n_samples, n_features).astype(global_dtype, copy=False)
+
+    test = rng.rand(n_query_pts, n_features).astype(global_dtype, copy=False)
+
+    for p in P:
+        results = []
+
+        for algorithm in ALGORITHMS:
+            neigh = neighbors.NearestNeighbors(radius=radius, algorithm=algorithm, p=p)
+            neigh.fit(X)
+
+            ind1 = neigh.radius_neighbors(test, return_distance=False)
+
+            # sort the results: this is not done automatically for
+            # radius searches
+            dist, ind = neigh.radius_neighbors(test, return_distance=True)
+            for d, i, i1 in zip(dist, ind, ind1):
+                j = d.argsort()
+                d[:] = d[j]
+                i[:] = i[j]
+                i1[:] = i1[j]
+            results.append((dist, ind))
+
+            assert_allclose(np.concatenate(list(ind)), np.concatenate(list(ind1)))
+
+        for i in range(len(results) - 1):
+            assert_allclose(
+                np.concatenate(list(results[i][0])),
+                np.concatenate(list(results[i + 1][0])),
+            )
+            assert_allclose(
+                np.concatenate(list(results[i][1])),
+                np.concatenate(list(results[i + 1][1])),
+            )
+
+
+@pytest.mark.parametrize("algorithm", ALGORITHMS)
+@pytest.mark.parametrize("weights", WEIGHTS)
+def test_kneighbors_classifier(
+    global_dtype,
+    algorithm,
+    weights,
+    n_samples=40,
+    n_features=5,
+    n_test_pts=10,
+    n_neighbors=5,
+    random_state=0,
+):
+    # Test k-neighbors classification
+    rng = np.random.RandomState(random_state)
+    X = 2 * rng.rand(n_samples, n_features).astype(global_dtype, copy=False) - 1
+    y = ((X**2).sum(axis=1) < 0.5).astype(int)
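+    # Label 1 marks samples inside the ball of squared radius 0.5 around the origin.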
+    y_str = y.astype(str)
+
+    knn = neighbors.KNeighborsClassifier(
+        n_neighbors=n_neighbors, weights=weights, algorithm=algorithm
+    )
+    knn.fit(X, y)
+    epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
+    y_pred = knn.predict(X[:n_test_pts] + epsilon)
+    assert_array_equal(y_pred, y[:n_test_pts])
+    # Test prediction with y_str
+    knn.fit(X, y_str)
+    y_pred = knn.predict(X[:n_test_pts] + epsilon)
+    assert_array_equal(y_pred, y_str[:n_test_pts])
+
+
+def test_kneighbors_classifier_float_labels(
+    global_dtype,
+    n_samples=40,
+    n_features=5,
+    n_test_pts=10,
+    n_neighbors=5,
+    random_state=0,
+):
+    # Test k-neighbors classification
+    rng = np.random.RandomState(random_state)
+    X = 2 * rng.rand(n_samples, n_features).astype(global_dtype, copy=False) - 1
+    y = ((X**2).sum(axis=1) < 0.5).astype(int)
+
+    knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors)
+    knn.fit(X, y.astype(float))
+    epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
+    y_pred = knn.predict(X[:n_test_pts] + epsilon)
+    assert_array_equal(y_pred, y[:n_test_pts])
+
+
+def test_kneighbors_classifier_predict_proba(global_dtype):
+    # Test KNeighborsClassifier.predict_proba() method
+    X = np.array(
+        [[0, 2, 0], [0, 2, 1], [2, 0, 0], [2, 2, 0], [0, 0, 2], [0, 0, 1]]
+    ).astype(global_dtype, copy=False)
+    y = np.array([4, 4, 5, 5, 1, 1])
+    cls = neighbors.KNeighborsClassifier(n_neighbors=3, p=1)  # cityblock dist
+    cls.fit(X, y)
+    y_prob = cls.predict_proba(X)
+    real_prob = (
+        np.array(
+            [
+                [0, 2, 1],
+                [1, 2, 0],
+                [1, 0, 2],
+                [0, 1, 2],
+                [2, 1, 0],
+                [2, 1, 0],
+            ]
+        )
+        / 3.0
+    )
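+    # Each row holds the class counts among the 3 nearest neighbors divided by 3,
+    # with columns ordered by the sorted class labels [1, 4, 5].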
+    assert_array_equal(real_prob, y_prob)
+    # Check that it also works with non integer labels
+    cls.fit(X, y.astype(str))
+    y_prob = cls.predict_proba(X)
+    assert_array_equal(real_prob, y_prob)
+    # Check that it works with weights='distance'
+    cls = neighbors.KNeighborsClassifier(n_neighbors=2, p=1, weights="distance")
+    cls.fit(X, y)
+    y_prob = cls.predict_proba(np.array([[0, 2, 0], [2, 2, 2]]))
+    real_prob = np.array([[0, 1, 0], [0, 0.4, 0.6]])
+    assert_allclose(real_prob, y_prob)
+
+
+@pytest.mark.parametrize("algorithm", ALGORITHMS)
+@pytest.mark.parametrize("weights", WEIGHTS)
+def test_radius_neighbors_classifier(
+    global_dtype,
+    algorithm,
+    weights,
+    n_samples=40,
+    n_features=5,
+    n_test_pts=10,
+    radius=0.5,
+    random_state=0,
+):
+    # Test radius-based classification
+    rng = np.random.RandomState(random_state)
+    X = 2 * rng.rand(n_samples, n_features).astype(global_dtype, copy=False) - 1
+    y = ((X**2).sum(axis=1) < radius).astype(int)
+    y_str = y.astype(str)
+
+    neigh = neighbors.RadiusNeighborsClassifier(
+        radius=radius, weights=weights, algorithm=algorithm
+    )
+    neigh.fit(X, y)
+    epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
+    y_pred = neigh.predict(X[:n_test_pts] + epsilon)
+    assert_array_equal(y_pred, y[:n_test_pts])
+    neigh.fit(X, y_str)
+    y_pred = neigh.predict(X[:n_test_pts] + epsilon)
+    assert_array_equal(y_pred, y_str[:n_test_pts])
+
+
+@pytest.mark.parametrize("algorithm", ALGORITHMS)
+@pytest.mark.parametrize("weights", WEIGHTS)
+@pytest.mark.parametrize("outlier_label", [0, -1, None])
+def test_radius_neighbors_classifier_when_no_neighbors(
+    global_dtype, algorithm, weights, outlier_label
+):
+    # Test radius-based classifier when no neighbors found.
+    # In this case it should raise an informative exception
+
+    X = np.array([[1.0, 1.0], [2.0, 2.0]], dtype=global_dtype)
+    y = np.array([1, 2])
+    radius = 0.1
+
+    # no outliers
+    z1 = np.array([[1.01, 1.01], [2.01, 2.01]], dtype=global_dtype)
+
+    # one outlier
+    z2 = np.array([[1.01, 1.01], [1.4, 1.4]], dtype=global_dtype)
+
+    rnc = neighbors.RadiusNeighborsClassifier
+    clf = rnc(
+        radius=radius,
+        weights=weights,
+        algorithm=algorithm,
+        outlier_label=outlier_label,
+    )
+    clf.fit(X, y)
+    assert_array_equal(np.array([1, 2]), clf.predict(z1))
+    if outlier_label is None:
+        with pytest.raises(ValueError):
+            clf.predict(z2)
+
+
+@pytest.mark.parametrize("algorithm", ALGORITHMS)
+@pytest.mark.parametrize("weights", WEIGHTS)
+def test_radius_neighbors_classifier_outlier_labeling(global_dtype, algorithm, weights):
+    # Test radius-based classifier when no neighbors found and outliers
+    # are labeled.
+
+    X = np.array(
+        [[1.0, 1.0], [2.0, 2.0], [0.99, 0.99], [0.98, 0.98], [2.01, 2.01]],
+        dtype=global_dtype,
+    )
+    y = np.array([1, 2, 1, 1, 2])
+    radius = 0.1
+
+    # no outliers
+    z1 = np.array([[1.01, 1.01], [2.01, 2.01]], dtype=global_dtype)
+
+    # one outlier
+    z2 = np.array([[1.4, 1.4], [1.01, 1.01], [2.01, 2.01]], dtype=global_dtype)
+
+    correct_labels1 = np.array([1, 2])
+    correct_labels2 = np.array([-1, 1, 2])
+    outlier_proba = np.array([0, 0])
+
+    clf = neighbors.RadiusNeighborsClassifier(
+        radius=radius, weights=weights, algorithm=algorithm, outlier_label=-1
+    )
+    clf.fit(X, y)
+    assert_array_equal(correct_labels1, clf.predict(z1))
+    with pytest.warns(UserWarning, match="Outlier label -1 is not in training classes"):
+        assert_array_equal(correct_labels2, clf.predict(z2))
+    with pytest.warns(UserWarning, match="Outlier label -1 is not in training classes"):
+        assert_allclose(outlier_proba, clf.predict_proba(z2)[0])
+
+    # test outlier labeling when using predict_proba()
+    RNC = neighbors.RadiusNeighborsClassifier
+    X = np.array([[0], [1], [2], [3], [4], [5], [6], [7], [8], [9]], dtype=global_dtype)
+    y = np.array([0, 2, 2, 1, 1, 1, 3, 3, 3, 3])
+
+    # test outlier_label scalar verification
+    def check_array_exception():
+        clf = RNC(radius=1, outlier_label=[[5]])
+        clf.fit(X, y)
+
+    with pytest.raises(TypeError):
+        check_array_exception()
+
+    # test invalid outlier_label dtype
+    def check_dtype_exception():
+        clf = RNC(radius=1, outlier_label="a")
+        clf.fit(X, y)
+
+    with pytest.raises(TypeError):
+        check_dtype_exception()
+
+    # test most frequent
+    clf = RNC(radius=1, outlier_label="most_frequent")
+    clf.fit(X, y)
+    proba = clf.predict_proba([[1], [15]])
+    assert_array_equal(proba[1, :], [0, 0, 0, 1])
+
+    # test manual label in y
+    clf = RNC(radius=1, outlier_label=1)
+    clf.fit(X, y)
+    proba = clf.predict_proba([[1], [15]])
+    assert_array_equal(proba[1, :], [0, 1, 0, 0])
+    pred = clf.predict([[1], [15]])
+    assert_array_equal(pred, [2, 1])
+
+    # test manual label out of y warning
+    def check_warning():
+        clf = RNC(radius=1, outlier_label=4)
+        clf.fit(X, y)
+        clf.predict_proba([[1], [15]])
+
+    with pytest.warns(UserWarning):
+        check_warning()
+
+    # test multi output same outlier label
+    y_multi = [
+        [0, 1],
+        [2, 1],
+        [2, 2],
+        [1, 2],
+        [1, 2],
+        [1, 3],
+        [3, 3],
+        [3, 3],
+        [3, 0],
+        [3, 0],
+    ]
+    clf = RNC(radius=1, outlier_label=1)
+    clf.fit(X, y_multi)
+    proba = clf.predict_proba([[7], [15]])
+    assert_array_equal(proba[1][1, :], [0, 1, 0, 0])
+    pred = clf.predict([[7], [15]])
+    assert_array_equal(pred[1, :], [1, 1])
+
+    # test multi output different outlier label
+    y_multi = [
+        [0, 0],
+        [2, 2],
+        [2, 2],
+        [1, 1],
+        [1, 1],
+        [1, 1],
+        [3, 3],
+        [3, 3],
+        [3, 3],
+        [3, 3],
+    ]
+    clf = RNC(radius=1, outlier_label=[0, 1])
+    clf.fit(X, y_multi)
+    proba = clf.predict_proba([[7], [15]])
+    assert_array_equal(proba[0][1, :], [1, 0, 0, 0])
+    assert_array_equal(proba[1][1, :], [0, 1, 0, 0])
+    pred = clf.predict([[7], [15]])
+    assert_array_equal(pred[1, :], [0, 1])
+
+    # test inconsistent outlier label list length
+    def check_exception():
+        clf = RNC(radius=1, outlier_label=[0, 1, 2])
+        clf.fit(X, y_multi)
+
+    with pytest.raises(ValueError):
+        check_exception()
+
+
+def test_radius_neighbors_classifier_zero_distance():
+    # Test radius-based classifier, when distance to a sample is zero.
+
+    X = np.array([[1.0, 1.0], [2.0, 2.0]])
+    y = np.array([1, 2])
+    radius = 0.1
+
+    z1 = np.array([[1.01, 1.01], [2.0, 2.0]])
+    correct_labels1 = np.array([1, 2])
+
+    weight_func = _weight_func
+
+    for algorithm in ALGORITHMS:
+        for weights in ["uniform", "distance", weight_func]:
+            clf = neighbors.RadiusNeighborsClassifier(
+                radius=radius, weights=weights, algorithm=algorithm
+            )
+            clf.fit(X, y)
+            with np.errstate(invalid="ignore"):
+                # Ignore the warning raised in _weight_func when making
+                # predictions with null distances resulting in np.inf values.
+                assert_array_equal(correct_labels1, clf.predict(z1))
+
+
+def test_neighbors_regressors_zero_distance():
+    # Test radius-based regressor, when distance to a sample is zero.
+
+    X = np.array([[1.0, 1.0], [1.0, 1.0], [2.0, 2.0], [2.5, 2.5]])
+    y = np.array([1.0, 1.5, 2.0, 0.0])
+    radius = 0.2
+    z = np.array([[1.1, 1.1], [2.0, 2.0]])
+
+    rnn_correct_labels = np.array([1.25, 2.0])
+
+    knn_correct_unif = np.array([1.25, 1.0])
+    knn_correct_dist = np.array([1.25, 2.0])
+
+    for algorithm in ALGORITHMS:
+        # we don't test weights=_weight_func since the user is expected
+        # to handle zero distances themselves in the function.
+        for weights in ["uniform", "distance"]:
+            rnn = neighbors.RadiusNeighborsRegressor(
+                radius=radius, weights=weights, algorithm=algorithm
+            )
+            rnn.fit(X, y)
+            assert_allclose(rnn_correct_labels, rnn.predict(z))
+
+        for weights, corr_labels in zip(
+            ["uniform", "distance"], [knn_correct_unif, knn_correct_dist]
+        ):
+            knn = neighbors.KNeighborsRegressor(
+                n_neighbors=2, weights=weights, algorithm=algorithm
+            )
+            knn.fit(X, y)
+            assert_allclose(corr_labels, knn.predict(z))
+
+
+def test_radius_neighbors_boundary_handling():
+    """Test whether points lying on boundary are handled consistently
+
+    Also ensures that even with only one query point, an object array
+    is returned rather than a 2d array.
+    """
+
+    X = np.array([[1.5], [3.0], [3.01]])
+    radius = 3.0
+
+    for algorithm in ALGORITHMS:
+        nbrs = neighbors.NearestNeighbors(radius=radius, algorithm=algorithm).fit(X)
+        results = nbrs.radius_neighbors([[0.0]], return_distance=False)
+        assert results.shape == (1,)
+        assert results.dtype == object
+        assert_array_equal(results[0], [0, 1])
+
+
+@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
+def test_radius_neighbors_returns_array_of_objects(csr_container):
+    # check that we can pass precomputed distances to
+    # NearestNeighbors.radius_neighbors()
+    # non-regression test for
+    # https://github.com/scikit-learn/scikit-learn/issues/16036
+    X = csr_container(np.ones((4, 4)))
+    X.setdiag([0, 0, 0, 0])
+
+    nbrs = neighbors.NearestNeighbors(
+        radius=0.5, algorithm="auto", leaf_size=30, metric="precomputed"
+    ).fit(X)
+    neigh_dist, neigh_ind = nbrs.radius_neighbors(X, return_distance=True)
+
+    expected_dist = np.empty(X.shape[0], dtype=object)
+    expected_dist[:] = [np.array([0]), np.array([0]), np.array([0]), np.array([0])]
+    expected_ind = np.empty(X.shape[0], dtype=object)
+    expected_ind[:] = [np.array([0]), np.array([1]), np.array([2]), np.array([3])]
+
+    assert_array_equal(neigh_dist, expected_dist)
+    assert_array_equal(neigh_ind, expected_ind)
+
+
+@pytest.mark.parametrize("algorithm", ["ball_tree", "kd_tree", "brute"])
+def test_query_equidistant_kth_nn(algorithm):
+    # For several candidates for the k-th nearest neighbor position,
+    # the first candidate should be chosen
+    query_point = np.array([[0, 0]])
+    equidistant_points = np.array([[1, 0], [0, 1], [-1, 0], [0, -1]])
+    # The 3rd and 4th points should not replace the 2nd point
+    # for the 2nd nearest neighbor position
+    k = 2
+    knn_indices = np.array([[0, 1]])
+    nn = neighbors.NearestNeighbors(algorithm=algorithm).fit(equidistant_points)
+    indices = np.sort(nn.kneighbors(query_point, n_neighbors=k, return_distance=False))
+    assert_array_equal(indices, knn_indices)
+
+
+@pytest.mark.parametrize(
+    ["algorithm", "metric"],
+    list(
+        product(
+            ("kd_tree", "ball_tree", "brute"),
+            ("euclidean", *DISTANCE_METRIC_OBJS),
+        )
+    )
+    + [
+        ("brute", "euclidean"),
+        ("brute", "precomputed"),
+    ],
+)
+def test_radius_neighbors_sort_results(algorithm, metric):
+    # Test radius_neighbors[_graph] output when sort_result is True
+
+    metric = _parse_metric(metric, np.float64)
+    if isinstance(metric, DistanceMetric):
+        pytest.skip(
+            "Metrics of type `DistanceMetric` are not yet supported for radius-neighbor"
+            " estimators."
+        )
+    n_samples = 10
+    rng = np.random.RandomState(42)
+    X = rng.random_sample((n_samples, 4))
+
+    if metric == "precomputed":
+        X = neighbors.radius_neighbors_graph(X, radius=np.inf, mode="distance")
+    model = neighbors.NearestNeighbors(algorithm=algorithm, metric=metric)
+    model.fit(X)
+
+    # self.radius_neighbors
+    distances, indices = model.radius_neighbors(X=X, radius=np.inf, sort_results=True)
+    for ii in range(n_samples):
+        assert_array_equal(distances[ii], np.sort(distances[ii]))
+
+    # sort_results=True and return_distance=False
+    if metric != "precomputed":  # no need to raise with precomputed graph
+        with pytest.raises(ValueError, match="return_distance must be True"):
+            model.radius_neighbors(
+                X=X, radius=np.inf, sort_results=True, return_distance=False
+            )
+
+    # self.radius_neighbors_graph
+    graph = model.radius_neighbors_graph(
+        X=X, radius=np.inf, mode="distance", sort_results=True
+    )
+    assert _is_sorted_by_data(graph)
+
+
+def test_RadiusNeighborsClassifier_multioutput():
+    # Test k-NN classifier on multioutput data
+    rng = check_random_state(0)
+    n_features = 2
+    n_samples = 40
+    n_output = 3
+
+    X = rng.rand(n_samples, n_features)
+    y = rng.randint(0, 3, (n_samples, n_output))
+
+    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
+
+    weights = [None, "uniform", "distance", _weight_func]
+
+    for algorithm, weights in product(ALGORITHMS, weights):
+        # Stack single output prediction
+        y_pred_so = []
+        for o in range(n_output):
+            rnn = neighbors.RadiusNeighborsClassifier(
+                weights=weights, algorithm=algorithm
+            )
+            rnn.fit(X_train, y_train[:, o])
+            y_pred_so.append(rnn.predict(X_test))
+
+        y_pred_so = np.vstack(y_pred_so).T
+        assert y_pred_so.shape == y_test.shape
+
+        # Multioutput prediction
+        rnn_mo = neighbors.RadiusNeighborsClassifier(
+            weights=weights, algorithm=algorithm
+        )
+        rnn_mo.fit(X_train, y_train)
+        y_pred_mo = rnn_mo.predict(X_test)
+
+        assert y_pred_mo.shape == y_test.shape
+        assert_array_equal(y_pred_mo, y_pred_so)
+
+
+def test_kneighbors_classifier_sparse(
+    n_samples=40, n_features=5, n_test_pts=10, n_neighbors=5, random_state=0
+):
+    # Test k-NN classifier on sparse matrices
+    # Like the above, but with various types of sparse matrices
+    rng = np.random.RandomState(random_state)
+    X = 2 * rng.rand(n_samples, n_features) - 1
+    X *= X > 0.2
+    y = ((X**2).sum(axis=1) < 0.5).astype(int)
+
+    for sparsemat in SPARSE_TYPES:
+        knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors, algorithm="auto")
+        knn.fit(sparsemat(X), y)
+        epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
+        for sparsev in SPARSE_TYPES + (np.asarray,):
+            X_eps = sparsev(X[:n_test_pts] + epsilon)
+            y_pred = knn.predict(X_eps)
+            assert_array_equal(y_pred, y[:n_test_pts])
+
+
+def test_KNeighborsClassifier_multioutput():
+    # Test k-NN classifier on multioutput data
+    rng = check_random_state(0)
+    n_features = 5
+    n_samples = 50
+    n_output = 3
+
+    X = rng.rand(n_samples, n_features)
+    y = rng.randint(0, 3, (n_samples, n_output))
+
+    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
+
+    weights = [None, "uniform", "distance", _weight_func]
+
+    for algorithm, weights in product(ALGORITHMS, weights):
+        # Stack single output prediction
+        y_pred_so = []
+        y_pred_proba_so = []
+        for o in range(n_output):
+            knn = neighbors.KNeighborsClassifier(weights=weights, algorithm=algorithm)
+            knn.fit(X_train, y_train[:, o])
+            y_pred_so.append(knn.predict(X_test))
+            y_pred_proba_so.append(knn.predict_proba(X_test))
+
+        y_pred_so = np.vstack(y_pred_so).T
+        assert y_pred_so.shape == y_test.shape
+        assert len(y_pred_proba_so) == n_output
+
+        # Multioutput prediction
+        knn_mo = neighbors.KNeighborsClassifier(weights=weights, algorithm=algorithm)
+        knn_mo.fit(X_train, y_train)
+        y_pred_mo = knn_mo.predict(X_test)
+
+        assert y_pred_mo.shape == y_test.shape
+        assert_array_equal(y_pred_mo, y_pred_so)
+
+        # Check proba
+        y_pred_proba_mo = knn_mo.predict_proba(X_test)
+        assert len(y_pred_proba_mo) == n_output
+
+        for proba_mo, proba_so in zip(y_pred_proba_mo, y_pred_proba_so):
+            assert_array_equal(proba_mo, proba_so)
+
+
+def test_kneighbors_regressor(
+    n_samples=40, n_features=5, n_test_pts=10, n_neighbors=3, random_state=0
+):
+    # Test k-neighbors regression
+    rng = np.random.RandomState(random_state)
+    X = 2 * rng.rand(n_samples, n_features) - 1
+    y = np.sqrt((X**2).sum(1))
+    y /= y.max()
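+    # The regression target is the Euclidean norm of each sample, rescaled to [0, 1].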
+
+    y_target = y[:n_test_pts]
+
+    weight_func = _weight_func
+
+    for algorithm in ALGORITHMS:
+        for weights in ["uniform", "distance", weight_func]:
+            knn = neighbors.KNeighborsRegressor(
+                n_neighbors=n_neighbors, weights=weights, algorithm=algorithm
+            )
+            knn.fit(X, y)
+            epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
+            y_pred = knn.predict(X[:n_test_pts] + epsilon)
+            assert np.all(abs(y_pred - y_target) < 0.3)
+
+
+def test_KNeighborsRegressor_multioutput_uniform_weight():
+    # Test k-neighbors in multi-output regression with uniform weight
+    rng = check_random_state(0)
+    n_features = 5
+    n_samples = 40
+    n_output = 4
+
+    X = rng.rand(n_samples, n_features)
+    y = rng.rand(n_samples, n_output)
+
+    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
+    for algorithm, weights in product(ALGORITHMS, [None, "uniform"]):
+        knn = neighbors.KNeighborsRegressor(weights=weights, algorithm=algorithm)
+        knn.fit(X_train, y_train)
+
+        neigh_idx = knn.kneighbors(X_test, return_distance=False)
+        y_pred_idx = np.array([np.mean(y_train[idx], axis=0) for idx in neigh_idx])
+
+        y_pred = knn.predict(X_test)
+
+        assert y_pred.shape == y_test.shape
+        assert y_pred_idx.shape == y_test.shape
+        assert_allclose(y_pred, y_pred_idx)
+
+
+def test_kneighbors_regressor_multioutput(
+    n_samples=40, n_features=5, n_test_pts=10, n_neighbors=3, random_state=0
+):
+    # Test k-neighbors in multi-output regression
+    rng = np.random.RandomState(random_state)
+    X = 2 * rng.rand(n_samples, n_features) - 1
+    y = np.sqrt((X**2).sum(1))
+    y /= y.max()
+    y = np.vstack([y, y]).T
+
+    y_target = y[:n_test_pts]
+
+    weights = ["uniform", "distance", _weight_func]
+    for algorithm, weights in product(ALGORITHMS, weights):
+        knn = neighbors.KNeighborsRegressor(
+            n_neighbors=n_neighbors, weights=weights, algorithm=algorithm
+        )
+        knn.fit(X, y)
+        epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
+        y_pred = knn.predict(X[:n_test_pts] + epsilon)
+        assert y_pred.shape == y_target.shape
+
+        assert np.all(np.abs(y_pred - y_target) < 0.3)
+
+
+def test_radius_neighbors_regressor(
+    n_samples=40, n_features=3, n_test_pts=10, radius=0.5, random_state=0
+):
+    # Test radius-based neighbors regression
+    rng = np.random.RandomState(random_state)
+    X = 2 * rng.rand(n_samples, n_features) - 1
+    y = np.sqrt((X**2).sum(1))
+    y /= y.max()
+
+    y_target = y[:n_test_pts]
+
+    weight_func = _weight_func
+
+    for algorithm in ALGORITHMS:
+        for weights in ["uniform", "distance", weight_func]:
+            neigh = neighbors.RadiusNeighborsRegressor(
+                radius=radius, weights=weights, algorithm=algorithm
+            )
+            neigh.fit(X, y)
+            epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
+            y_pred = neigh.predict(X[:n_test_pts] + epsilon)
+            assert np.all(abs(y_pred - y_target) < radius / 2)
+
+    # test that NaN is returned when there are no nearby observations
+    for weights in ["uniform", "distance"]:
+        neigh = neighbors.RadiusNeighborsRegressor(
+            radius=radius, weights=weights, algorithm="auto"
+        )
+        neigh.fit(X, y)
+        X_test_nan = np.full((1, n_features), -1.0)
+        empty_warning_msg = (
+            "One or more samples have no neighbors "
+            "within specified radius; predicting NaN."
+        )
+        with pytest.warns(UserWarning, match=re.escape(empty_warning_msg)):
+            pred = neigh.predict(X_test_nan)
+        assert np.all(np.isnan(pred))
+
+
+def test_RadiusNeighborsRegressor_multioutput_with_uniform_weight():
+    # Test radius neighbors in multi-output regression (uniform weight)
+
+    rng = check_random_state(0)
+    n_features = 5
+    n_samples = 40
+    n_output = 4
+
+    X = rng.rand(n_samples, n_features)
+    y = rng.rand(n_samples, n_output)
+    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
+
+    for algorithm, weights in product(ALGORITHMS, [None, "uniform"]):
+        rnn = neighbors.RadiusNeighborsRegressor(weights=weights, algorithm=algorithm)
+        rnn.fit(X_train, y_train)
+
+        neigh_idx = rnn.radius_neighbors(X_test, return_distance=False)
+        y_pred_idx = np.array([np.mean(y_train[idx], axis=0) for idx in neigh_idx])
+
+        y_pred_idx = np.array(y_pred_idx)
+        y_pred = rnn.predict(X_test)
+
+        assert y_pred_idx.shape == y_test.shape
+        assert y_pred.shape == y_test.shape
+        assert_allclose(y_pred, y_pred_idx)
+
+
+def test_RadiusNeighborsRegressor_multioutput(
+    n_samples=40, n_features=5, n_test_pts=10, random_state=0
+):
+    # Test k-neighbors in multi-output regression with various weight
+    rng = np.random.RandomState(random_state)
+    X = 2 * rng.rand(n_samples, n_features) - 1
+    y = np.sqrt((X**2).sum(1))
+    y /= y.max()
+    y = np.vstack([y, y]).T
+
+    y_target = y[:n_test_pts]
+    weights = ["uniform", "distance", _weight_func]
+
+    for algorithm, weights in product(ALGORITHMS, weights):
+        rnn = neighbors.RadiusNeighborsRegressor(weights=weights, algorithm=algorithm)
+        rnn.fit(X, y)
+        epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
+        y_pred = rnn.predict(X[:n_test_pts] + epsilon)
+
+        assert y_pred.shape == y_target.shape
+        assert np.all(np.abs(y_pred - y_target) < 0.3)
+
+
+@pytest.mark.filterwarnings("ignore:EfficiencyWarning")
+def test_kneighbors_regressor_sparse(
+    n_samples=40, n_features=5, n_test_pts=10, n_neighbors=5, random_state=0
+):
+    # Test k-neighbors regression on sparse matrices
+    # Like the above, but with various types of sparse matrices
+    rng = np.random.RandomState(random_state)
+    X = 2 * rng.rand(n_samples, n_features) - 1
+    y = ((X**2).sum(axis=1) < 0.25).astype(int)
+
+    for sparsemat in SPARSE_TYPES:
+        knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors, algorithm="auto")
+        knn.fit(sparsemat(X), y)
+
+        knn_pre = neighbors.KNeighborsRegressor(
+            n_neighbors=n_neighbors, metric="precomputed"
+        )
+        knn_pre.fit(pairwise_distances(X, metric="euclidean"), y)
+
+        for sparsev in SPARSE_OR_DENSE:
+            X2 = sparsev(X)
+            assert np.mean(knn.predict(X2).round() == y) > 0.95
+
+            X2_pre = sparsev(pairwise_distances(X, metric="euclidean"))
+            if sparsev in DOK_CONTAINERS + BSR_CONTAINERS:
+                msg = "not supported due to its handling of explicit zeros"
+                with pytest.raises(TypeError, match=msg):
+                    knn_pre.predict(X2_pre)
+            else:
+                assert np.mean(knn_pre.predict(X2_pre).round() == y) > 0.95
+
+
+def test_neighbors_iris():
+    # Sanity checks on the iris dataset
+    # Fit nearest-neighbor classifiers and a regressor on the full dataset
+    # and check that predictions on the training data are (almost) exact.
+
+    for algorithm in ALGORITHMS:
+        clf = neighbors.KNeighborsClassifier(n_neighbors=1, algorithm=algorithm)
+        clf.fit(iris.data, iris.target)
+        assert_array_equal(clf.predict(iris.data), iris.target)
+
+        clf.set_params(n_neighbors=9, algorithm=algorithm)
+        clf.fit(iris.data, iris.target)
+        assert np.mean(clf.predict(iris.data) == iris.target) > 0.95
+
+        rgs = neighbors.KNeighborsRegressor(n_neighbors=5, algorithm=algorithm)
+        rgs.fit(iris.data, iris.target)
+        assert np.mean(rgs.predict(iris.data).round() == iris.target) > 0.95
+
+
+def test_neighbors_digits():
+    # Sanity check on the digits dataset
+    # the 'brute' algorithm has been observed to fail if the input
+    # dtype is uint8 due to overflow in distance calculations.
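+    # Presumably the brute-force euclidean path goes through the
+    # ||x||^2 - 2 x.y + ||y||^2 expansion, where uint8 intermediates can wrap
+    # around unless the data is first cast to a wider dtype; the assertion
+    # below checks that the uint8 and float code paths agree.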
+
+    X = digits.data.astype("uint8")
+    Y = digits.target
+    (n_samples, n_features) = X.shape
+    train_test_boundary = int(n_samples * 0.8)
+    train = np.arange(0, train_test_boundary)
+    test = np.arange(train_test_boundary, n_samples)
+    (X_train, Y_train, X_test, Y_test) = X[train], Y[train], X[test], Y[test]
+
+    clf = neighbors.KNeighborsClassifier(n_neighbors=1, algorithm="brute")
+    score_uint8 = clf.fit(X_train, Y_train).score(X_test, Y_test)
+    score_float = clf.fit(X_train.astype(float, copy=False), Y_train).score(
+        X_test.astype(float, copy=False), Y_test
+    )
+    assert score_uint8 == score_float
+
+
+def test_kneighbors_graph():
+    # Test kneighbors_graph to build the k-Nearest Neighbor graph.
+    X = np.array([[0, 1], [1.01, 1.0], [2, 0]])
+
+    # n_neighbors = 1
+    A = neighbors.kneighbors_graph(X, 1, mode="connectivity", include_self=True)
+    assert_array_equal(A.toarray(), np.eye(A.shape[0]))
+
+    A = neighbors.kneighbors_graph(X, 1, mode="distance")
+    assert_allclose(
+        A.toarray(), [[0.00, 1.01, 0.0], [1.01, 0.0, 0.0], [0.00, 1.40716026, 0.0]]
+    )
+
+    # n_neighbors = 2
+    A = neighbors.kneighbors_graph(X, 2, mode="connectivity", include_self=True)
+    assert_array_equal(A.toarray(), [[1.0, 1.0, 0.0], [1.0, 1.0, 0.0], [0.0, 1.0, 1.0]])
+
+    A = neighbors.kneighbors_graph(X, 2, mode="distance")
+    assert_allclose(
+        A.toarray(),
+        [
+            [0.0, 1.01, 2.23606798],
+            [1.01, 0.0, 1.40716026],
+            [2.23606798, 1.40716026, 0.0],
+        ],
+    )
+
+    # n_neighbors = 3
+    A = neighbors.kneighbors_graph(X, 3, mode="connectivity", include_self=True)
+    assert_allclose(A.toarray(), [[1, 1, 1], [1, 1, 1], [1, 1, 1]])
+
+
+@pytest.mark.parametrize("n_neighbors", [1, 2, 3])
+@pytest.mark.parametrize("mode", ["connectivity", "distance"])
+@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
+def test_kneighbors_graph_sparse(n_neighbors, mode, csr_container, seed=36):
+    # Test kneighbors_graph to build the k-Nearest Neighbor graph
+    # for sparse input.
+    rng = np.random.RandomState(seed)
+    X = rng.randn(10, 10)
+    Xcsr = csr_container(X)
+
+    assert_allclose(
+        neighbors.kneighbors_graph(X, n_neighbors, mode=mode).toarray(),
+        neighbors.kneighbors_graph(Xcsr, n_neighbors, mode=mode).toarray(),
+    )
+
+
+def test_radius_neighbors_graph():
+    # Test radius_neighbors_graph to build the Nearest Neighbor graph.
+    X = np.array([[0, 1], [1.01, 1.0], [2, 0]])
+
+    A = neighbors.radius_neighbors_graph(X, 1.5, mode="connectivity", include_self=True)
+    assert_array_equal(A.toarray(), [[1.0, 1.0, 0.0], [1.0, 1.0, 1.0], [0.0, 1.0, 1.0]])
+
+    A = neighbors.radius_neighbors_graph(X, 1.5, mode="distance")
+    assert_allclose(
+        A.toarray(), [[0.0, 1.01, 0.0], [1.01, 0.0, 1.40716026], [0.0, 1.40716026, 0.0]]
+    )
+
+
+@pytest.mark.parametrize("n_neighbors", [1, 2, 3])
+@pytest.mark.parametrize("mode", ["connectivity", "distance"])
+@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
+def test_radius_neighbors_graph_sparse(n_neighbors, mode, csr_container, seed=36):
+    # Test radius_neighbors_graph to build the Nearest Neighbor graph
+    # for sparse input.
+    rng = np.random.RandomState(seed)
+    X = rng.randn(10, 10)
+    Xcsr = csr_container(X)
+
+    assert_allclose(
+        neighbors.radius_neighbors_graph(X, n_neighbors, mode=mode).toarray(),
+        neighbors.radius_neighbors_graph(Xcsr, n_neighbors, mode=mode).toarray(),
+    )
+
+
+@pytest.mark.parametrize(
+    "Estimator",
+    [
+        neighbors.KNeighborsClassifier,
+        neighbors.RadiusNeighborsClassifier,
+        neighbors.KNeighborsRegressor,
+        neighbors.RadiusNeighborsRegressor,
+    ],
+)
+@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
+def test_neighbors_validate_parameters(Estimator, csr_container):
+    """Additional parameter validation for *Neighbors* estimators not covered by common
+    validation."""
+    X = rng.random_sample((10, 2))
+    Xsparse = csr_container(X)
+    X3 = rng.random_sample((10, 3))
+    y = np.ones(10)
+
+    nbrs = Estimator(algorithm="ball_tree", metric="haversine")
+    msg = "instance is not fitted yet"
+    with pytest.raises(ValueError, match=msg):
+        nbrs.predict(X)
+    msg = "Metric 'haversine' not valid for sparse input."
+    with pytest.raises(ValueError, match=msg):
+        ignore_warnings(nbrs.fit(Xsparse, y))
+
+    nbrs = Estimator(metric="haversine", algorithm="brute")
+    nbrs.fit(X3, y)
+    msg = "Haversine distance only valid in 2 dimensions"
+    with pytest.raises(ValueError, match=msg):
+        nbrs.predict(X3)
+
+    nbrs = Estimator()
+    msg = re.escape("Found array with 0 sample(s)")
+    with pytest.raises(ValueError, match=msg):
+        nbrs.fit(np.ones((0, 2)), np.ones(0))
+
+    msg = "Found array with dim 3"
+    with pytest.raises(ValueError, match=msg):
+        nbrs.fit(X[:, :, None], y)
+    nbrs.fit(X, y)
+
+    msg = re.escape("Found array with 0 feature(s)")
+    with pytest.raises(ValueError, match=msg):
+        nbrs.predict([[]])
+
+
+@pytest.mark.parametrize(
+    "Estimator",
+    [
+        neighbors.KNeighborsClassifier,
+        neighbors.RadiusNeighborsClassifier,
+        neighbors.KNeighborsRegressor,
+        neighbors.RadiusNeighborsRegressor,
+    ],
+)
+@pytest.mark.parametrize("n_features", [2, 100])
+@pytest.mark.parametrize("algorithm", ["auto", "brute"])
+def test_neighbors_minkowski_semimetric_algo_warn(Estimator, n_features, algorithm):
+    """
+    Check that all classes extending NeighborsBase raise the proper warning
+    when a Minkowski semi-metric (i.e. 0 < p < 1) is used with
+    `algorithm="auto"` or `algorithm="brute"`.
+    """
+    X = rng.random_sample((10, n_features))
+    y = np.ones(10)
+
+    model = Estimator(p=0.1, algorithm=algorithm)
+    msg = (
+        "Mind that for 0 < p < 1, Minkowski metrics are not distance"
+        " metrics. Continuing the execution with `algorithm='brute'`."
+    )
+    with pytest.warns(UserWarning, match=msg):
+        model.fit(X, y)
+
+    assert model._fit_method == "brute"
+
+
+@pytest.mark.parametrize(
+    "Estimator",
+    [
+        neighbors.KNeighborsClassifier,
+        neighbors.RadiusNeighborsClassifier,
+        neighbors.KNeighborsRegressor,
+        neighbors.RadiusNeighborsRegressor,
+    ],
+)
+@pytest.mark.parametrize("n_features", [2, 100])
+@pytest.mark.parametrize("algorithm", ["kd_tree", "ball_tree"])
+def test_neighbors_minkowski_semimetric_algo_error(Estimator, n_features, algorithm):
+    """Check that we raise a proper error if `algorithm!='brute'` and `p<1`."""
+    X = rng.random_sample((10, 2))
+    y = np.ones(10)
+
+    model = Estimator(algorithm=algorithm, p=0.1)
+    msg = (
+        f'algorithm="{algorithm}" does not support 0 < p < 1 for '
+        "the Minkowski metric. To resolve this problem either "
+        'set p >= 1 or algorithm="brute".'
+    )
+    with pytest.raises(ValueError, match=msg):
+        model.fit(X, y)
+
+
+# TODO: remove when NearestNeighbors methods use the parameter validation mechanism
+def test_nearest_neighbors_validate_params():
+    """Validate parameters of NearestNeighbors."""
+    X = rng.random_sample((10, 2))
+
+    nbrs = neighbors.NearestNeighbors().fit(X)
+    msg = (
+        'Unsupported mode, must be one of "connectivity", or "distance" but got "blah"'
+        " instead"
+    )
+    with pytest.raises(ValueError, match=msg):
+        nbrs.kneighbors_graph(X, mode="blah")
+    with pytest.raises(ValueError, match=msg):
+        nbrs.radius_neighbors_graph(X, mode="blah")
+
+
+@pytest.mark.parametrize(
+    "metric",
+    sorted(
+        set(neighbors.VALID_METRICS["ball_tree"]).intersection(
+            neighbors.VALID_METRICS["brute"]
+        )
+        - set(["pyfunc", *BOOL_METRICS])
+    )
+    + DISTANCE_METRIC_OBJS,
+)
+def test_neighbors_metrics(
+    global_dtype, metric, n_samples=20, n_features=3, n_query_pts=2, n_neighbors=5
+):
+    metric = _parse_metric(metric, global_dtype)
+
+    # Test computing the neighbors for various metrics
+    algorithms = ["brute", "ball_tree", "kd_tree"]
+    X_train = rng.rand(n_samples, n_features).astype(global_dtype, copy=False)
+    X_test = rng.rand(n_query_pts, n_features).astype(global_dtype, copy=False)
+
+    metric_params_list = _generate_test_params_for(metric, n_features)
+
+    for metric_params in metric_params_list:
+        # Some metrics (e.g. weighted Minkowski) are not supported by KDTree
+        exclude_kd_tree = (
+            False
+            if isinstance(metric, DistanceMetric)
+            else metric not in neighbors.VALID_METRICS["kd_tree"]
+            or ("minkowski" in metric and "w" in metric_params)
+        )
+        results = {}
+        p = metric_params.pop("p", 2)
+        for algorithm in algorithms:
+            if isinstance(metric, DistanceMetric) and global_dtype == np.float32:
+                if "tree" in algorithm:  # pragma: nocover
+                    pytest.skip(
+                        "Neither KDTree nor BallTree support 32-bit distance metric"
+                        " objects."
+                    )
+            neigh = neighbors.NearestNeighbors(
+                n_neighbors=n_neighbors,
+                algorithm=algorithm,
+                metric=metric,
+                p=p,
+                metric_params=metric_params,
+            )
+
+            if exclude_kd_tree and algorithm == "kd_tree":
+                with pytest.raises(ValueError):
+                    neigh.fit(X_train)
+                continue
+
+            # Haversine distance only accepts 2D data
+            if metric == "haversine":
+                feature_sl = slice(None, 2)
+                X_train = np.ascontiguousarray(X_train[:, feature_sl])
+                X_test = np.ascontiguousarray(X_test[:, feature_sl])
+
+            neigh.fit(X_train)
+            results[algorithm] = neigh.kneighbors(X_test, return_distance=True)
+
+        brute_dst, brute_idx = results["brute"]
+        ball_tree_dst, ball_tree_idx = results["ball_tree"]
+
+        assert_allclose(brute_dst, ball_tree_dst)
+        assert_array_equal(brute_idx, ball_tree_idx)
+
+        if not exclude_kd_tree:
+            kd_tree_dst, kd_tree_idx = results["kd_tree"]
+            assert_allclose(brute_dst, kd_tree_dst)
+            assert_array_equal(brute_idx, kd_tree_idx)
+
+            assert_allclose(ball_tree_dst, kd_tree_dst)
+            assert_array_equal(ball_tree_idx, kd_tree_idx)
+
+
+@pytest.mark.parametrize(
+    "metric", sorted(set(neighbors.VALID_METRICS["brute"]) - set(["precomputed"]))
+)
+def test_kneighbors_brute_backend(
+    metric,
+    global_dtype,
+    global_random_seed,
+    n_samples=2000,
+    n_features=30,
+    n_query_pts=5,
+    n_neighbors=5,
+):
+    rng = np.random.RandomState(global_random_seed)
+    # Both backends for the 'brute' algorithm of kneighbors must give identical results.
+    X_train = rng.rand(n_samples, n_features).astype(global_dtype, copy=False)
+    X_test = rng.rand(n_query_pts, n_features).astype(global_dtype, copy=False)
+
+    # Haversine distance only accepts 2D data
+    if metric == "haversine":
+        feature_sl = slice(None, 2)
+        X_train = np.ascontiguousarray(X_train[:, feature_sl])
+        X_test = np.ascontiguousarray(X_test[:, feature_sl])
+
+    if metric in PAIRWISE_BOOLEAN_FUNCTIONS:
+        X_train = X_train > 0.5
+        X_test = X_test > 0.5
+
+    metric_params_list = _generate_test_params_for(metric, n_features)
+
+    for metric_params in metric_params_list:
+        p = metric_params.pop("p", 2)
+
+        neigh = neighbors.NearestNeighbors(
+            n_neighbors=n_neighbors,
+            algorithm="brute",
+            metric=metric,
+            p=p,
+            metric_params=metric_params,
+        )
+
+        neigh.fit(X_train)
+
+        with config_context(enable_cython_pairwise_dist=False):
+            # Use the legacy backend for brute
+            legacy_brute_dst, legacy_brute_idx = neigh.kneighbors(
+                X_test, return_distance=True
+            )
+        with config_context(enable_cython_pairwise_dist=True):
+            # Use the pairwise-distances reduction backend for brute
+            pdr_brute_dst, pdr_brute_idx = neigh.kneighbors(
+                X_test, return_distance=True
+            )
+
+        assert_compatible_argkmin_results(
+            legacy_brute_dst, pdr_brute_dst, legacy_brute_idx, pdr_brute_idx
+        )
+
+
+def test_callable_metric():
+    def custom_metric(x1, x2):
+        return np.sqrt(np.sum(x1**2 + x2**2))
+
+    X = np.random.RandomState(42).rand(20, 2)
+    nbrs1 = neighbors.NearestNeighbors(
+        n_neighbors=3, algorithm="auto", metric=custom_metric
+    )
+    nbrs2 = neighbors.NearestNeighbors(
+        n_neighbors=3, algorithm="brute", metric=custom_metric
+    )
+
+    nbrs1.fit(X)
+    nbrs2.fit(X)
+
+    dist1, ind1 = nbrs1.kneighbors(X)
+    dist2, ind2 = nbrs2.kneighbors(X)
+
+    assert_allclose(dist1, dist2)
+
+
+@pytest.mark.parametrize(
+    "metric", neighbors.VALID_METRICS["brute"] + DISTANCE_METRIC_OBJS
+)
+@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
+def test_valid_brute_metric_for_auto_algorithm(
+    global_dtype, metric, csr_container, n_samples=20, n_features=12
+):
+    metric = _parse_metric(metric, global_dtype)
+
+    X = rng.rand(n_samples, n_features).astype(global_dtype, copy=False)
+    Xcsr = csr_container(X)
+
+    metric_params_list = _generate_test_params_for(metric, n_features)
+
+    if metric == "precomputed":
+        X_precomputed = rng.random_sample((10, 4))
+        Y_precomputed = rng.random_sample((3, 4))
+        DXX = metrics.pairwise_distances(X_precomputed, metric="euclidean")
+        DYX = metrics.pairwise_distances(
+            Y_precomputed, X_precomputed, metric="euclidean"
+        )
+        nb_p = neighbors.NearestNeighbors(n_neighbors=3, metric="precomputed")
+        nb_p.fit(DXX)
+        nb_p.kneighbors(DYX)
+
+    else:
+        for metric_params in metric_params_list:
+            nn = neighbors.NearestNeighbors(
+                n_neighbors=3,
+                algorithm="auto",
+                metric=metric,
+                metric_params=metric_params,
+            )
+            # Haversine distance only accepts 2D data
+            if metric == "haversine":
+                feature_sl = slice(None, 2)
+                X = np.ascontiguousarray(X[:, feature_sl])
+
+            nn.fit(X)
+            nn.kneighbors(X)
+
+            if metric in VALID_METRICS_SPARSE["brute"]:
+                nn = neighbors.NearestNeighbors(
+                    n_neighbors=3, algorithm="auto", metric=metric
+                ).fit(Xcsr)
+                nn.kneighbors(Xcsr)
+
+
+def test_metric_params_interface():
+    X = rng.rand(5, 5)
+    y = rng.randint(0, 2, 5)
+    est = neighbors.KNeighborsClassifier(metric_params={"p": 3})
+    with pytest.warns(SyntaxWarning):
+        est.fit(X, y)
+
+
+@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
+def test_predict_sparse_ball_kd_tree(csr_container):
+    rng = np.random.RandomState(0)
+    X = rng.rand(5, 5)
+    y = rng.randint(0, 2, 5)
+    nbrs1 = neighbors.KNeighborsClassifier(1, algorithm="kd_tree")
+    nbrs2 = neighbors.KNeighborsRegressor(1, algorithm="ball_tree")
+    for model in [nbrs1, nbrs2]:
+        model.fit(X, y)
+        with pytest.raises(ValueError):
+            model.predict(csr_container(X))
+
+
+def test_non_euclidean_kneighbors():
+    rng = np.random.RandomState(0)
+    X = rng.rand(5, 5)
+
+    # Find a reasonable radius.
+    dist_array = pairwise_distances(X).flatten()
+    dist_array = np.sort(dist_array)
+    radius = dist_array[15]
+
+    # Test kneighbors_graph
+    for metric in ["manhattan", "chebyshev"]:
+        nbrs_graph = neighbors.kneighbors_graph(
+            X, 3, metric=metric, mode="connectivity", include_self=True
+        ).toarray()
+        nbrs1 = neighbors.NearestNeighbors(n_neighbors=3, metric=metric).fit(X)
+        assert_array_equal(nbrs_graph, nbrs1.kneighbors_graph(X).toarray())
+
+    # Test radiusneighbors_graph
+    for metric in ["manhattan", "chebyshev"]:
+        nbrs_graph = neighbors.radius_neighbors_graph(
+            X, radius, metric=metric, mode="connectivity", include_self=True
+        ).toarray()
+        nbrs1 = neighbors.NearestNeighbors(metric=metric, radius=radius).fit(X)
+        assert_array_equal(nbrs_graph, nbrs1.radius_neighbors_graph(X).toarray())
+
+    # Raise an error when wrong parameters are supplied.
+    X_nbrs = neighbors.NearestNeighbors(n_neighbors=3, metric="manhattan")
+    X_nbrs.fit(X)
+    with pytest.raises(ValueError):
+        neighbors.kneighbors_graph(X_nbrs, 3, metric="euclidean")
+    X_nbrs = neighbors.NearestNeighbors(radius=radius, metric="manhattan")
+    X_nbrs.fit(X)
+    with pytest.raises(ValueError):
+        neighbors.radius_neighbors_graph(X_nbrs, radius, metric="euclidean")
+
+
+def check_object_arrays(nparray, list_check):
+    for ind, ele in enumerate(nparray):
+        assert_array_equal(ele, list_check[ind])
+
+
+def test_k_and_radius_neighbors_train_is_not_query():
+    # Test kneighbors et al. when the query is not the training data
+
+    for algorithm in ALGORITHMS:
+        nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
+
+        X = [[0], [1]]
+        nn.fit(X)
+        test_data = [[2], [1]]
+
+        # Test neighbors.
+        dist, ind = nn.kneighbors(test_data)
+        assert_array_equal(dist, [[1], [0]])
+        assert_array_equal(ind, [[1], [1]])
+        dist, ind = nn.radius_neighbors([[2], [1]], radius=1.5)
+        check_object_arrays(dist, [[1], [1, 0]])
+        check_object_arrays(ind, [[1], [0, 1]])
+
+        # Test the graph variants.
+        assert_array_equal(
+            nn.kneighbors_graph(test_data).toarray(), [[0.0, 1.0], [0.0, 1.0]]
+        )
+        assert_array_equal(
+            nn.kneighbors_graph([[2], [1]], mode="distance").toarray(),
+            np.array([[0.0, 1.0], [0.0, 0.0]]),
+        )
+        rng = nn.radius_neighbors_graph([[2], [1]], radius=1.5)
+        assert_array_equal(rng.toarray(), [[0, 1], [1, 1]])
+
+
+@pytest.mark.parametrize("algorithm", ALGORITHMS)
+def test_k_and_radius_neighbors_X_None(algorithm):
+    # Test kneighbors et al. when the query is None
+    nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
+
+    X = [[0], [1]]
+    nn.fit(X)
+
+    dist, ind = nn.kneighbors()
+    assert_array_equal(dist, [[1], [1]])
+    assert_array_equal(ind, [[1], [0]])
+    dist, ind = nn.radius_neighbors(None, radius=1.5)
+    check_object_arrays(dist, [[1], [1]])
+    check_object_arrays(ind, [[1], [0]])
+
+    # Test the graph variants.
+    rng = nn.radius_neighbors_graph(None, radius=1.5)
+    kng = nn.kneighbors_graph(None)
+    for graph in [rng, kng]:
+        assert_array_equal(graph.toarray(), [[0, 1], [1, 0]])
+        assert_array_equal(graph.data, [1, 1])
+        assert_array_equal(graph.indices, [1, 0])
+
+    X = [[0, 1], [0, 1], [1, 1]]
+    nn = neighbors.NearestNeighbors(n_neighbors=2, algorithm=algorithm)
+    nn.fit(X)
+    assert_array_equal(
+        nn.kneighbors_graph().toarray(),
+        np.array([[0.0, 1.0, 1.0], [1.0, 0.0, 1.0], [1.0, 1.0, 0]]),
+    )
+
+
+@pytest.mark.parametrize("algorithm", ALGORITHMS)
+def test_k_and_radius_neighbors_duplicates(algorithm):
+    # Test behavior of kneighbors when duplicates are present in query
+    nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
+    duplicates = [[0], [1], [3]]
+
+    nn.fit(duplicates)
+
+        # Duplicates in the query are not treated specially.
+    kng = nn.kneighbors_graph(duplicates, mode="distance")
+    assert_allclose(
+        kng.toarray(), np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])
+    )
+    assert_allclose(kng.data, [0.0, 0.0, 0.0])
+    assert_allclose(kng.indices, [0, 1, 2])
+
+    dist, ind = nn.radius_neighbors([[0], [1]], radius=1.5)
+    check_object_arrays(dist, [[0, 1], [1, 0]])
+    check_object_arrays(ind, [[0, 1], [0, 1]])
+
+    rng = nn.radius_neighbors_graph(duplicates, radius=1.5)
+    assert_allclose(
+        rng.toarray(), np.array([[1.0, 1.0, 0.0], [1.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
+    )
+
+    rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5, mode="distance")
+    rng.sort_indices()
+    assert_allclose(rng.toarray(), [[0, 1, 0], [1, 0, 0]])
+    assert_allclose(rng.indices, [0, 1, 0, 1])
+    assert_allclose(rng.data, [0, 1, 1, 0])
+
+    # Mask the first duplicates when n_duplicates > n_neighbors.
+    X = np.ones((3, 1))
+    nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm="brute")
+    nn.fit(X)
+    dist, ind = nn.kneighbors()
+    assert_allclose(dist, np.zeros((3, 1)))
+    assert_allclose(ind, [[1], [0], [1]])
+
+    # Test that zeros are explicitly marked in kneighbors_graph.
+    kng = nn.kneighbors_graph(mode="distance")
+    assert_allclose(kng.toarray(), np.zeros((3, 3)))
+    assert_allclose(kng.data, np.zeros(3))
+    assert_allclose(kng.indices, [1, 0, 1])
+    assert_allclose(
+        nn.kneighbors_graph().toarray(),
+        np.array([[0.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]),
+    )
+
+
+def test_include_self_neighbors_graph():
+    # Test include_self parameter in neighbors_graph
+    X = [[2, 3], [4, 5]]
+    kng = neighbors.kneighbors_graph(X, 1, include_self=True).toarray()
+    kng_not_self = neighbors.kneighbors_graph(X, 1, include_self=False).toarray()
+    assert_array_equal(kng, [[1.0, 0.0], [0.0, 1.0]])
+    assert_array_equal(kng_not_self, [[0.0, 1.0], [1.0, 0.0]])
+
+    rng = neighbors.radius_neighbors_graph(X, 5.0, include_self=True).toarray()
+    rng_not_self = neighbors.radius_neighbors_graph(
+        X, 5.0, include_self=False
+    ).toarray()
+    assert_array_equal(rng, [[1.0, 1.0], [1.0, 1.0]])
+    assert_array_equal(rng_not_self, [[0.0, 1.0], [1.0, 0.0]])
+
+
+@pytest.mark.parametrize("algorithm", ALGORITHMS)
+def test_same_knn_parallel(algorithm):
+    X, y = datasets.make_classification(
+        n_samples=30, n_features=5, n_redundant=0, random_state=0
+    )
+    X_train, X_test, y_train, y_test = train_test_split(X, y)
+
+    clf = neighbors.KNeighborsClassifier(n_neighbors=3, algorithm=algorithm)
+    clf.fit(X_train, y_train)
+    y = clf.predict(X_test)
+    dist, ind = clf.kneighbors(X_test)
+    graph = clf.kneighbors_graph(X_test, mode="distance").toarray()
+
+    clf.set_params(n_jobs=3)
+    clf.fit(X_train, y_train)
+    y_parallel = clf.predict(X_test)
+    dist_parallel, ind_parallel = clf.kneighbors(X_test)
+    graph_parallel = clf.kneighbors_graph(X_test, mode="distance").toarray()
+
+    assert_array_equal(y, y_parallel)
+    assert_allclose(dist, dist_parallel)
+    assert_array_equal(ind, ind_parallel)
+    assert_allclose(graph, graph_parallel)
+
+
+@pytest.mark.parametrize("algorithm", ALGORITHMS)
+def test_same_radius_neighbors_parallel(algorithm):
+    X, y = datasets.make_classification(
+        n_samples=30, n_features=5, n_redundant=0, random_state=0
+    )
+    X_train, X_test, y_train, y_test = train_test_split(X, y)
+
+    clf = neighbors.RadiusNeighborsClassifier(radius=10, algorithm=algorithm)
+    clf.fit(X_train, y_train)
+    y = clf.predict(X_test)
+    dist, ind = clf.radius_neighbors(X_test)
+    graph = clf.radius_neighbors_graph(X_test, mode="distance").toarray()
+
+    clf.set_params(n_jobs=3)
+    clf.fit(X_train, y_train)
+    y_parallel = clf.predict(X_test)
+    dist_parallel, ind_parallel = clf.radius_neighbors(X_test)
+    graph_parallel = clf.radius_neighbors_graph(X_test, mode="distance").toarray()
+
+    assert_array_equal(y, y_parallel)
+    for i in range(len(dist)):
+        assert_allclose(dist[i], dist_parallel[i])
+        assert_array_equal(ind[i], ind_parallel[i])
+    assert_allclose(graph, graph_parallel)
+
+
+@pytest.mark.parametrize("backend", ["threading", "loky"])
+@pytest.mark.parametrize("algorithm", ALGORITHMS)
+def test_knn_forcing_backend(backend, algorithm):
+    # Non-regression test ensuring that the knn methods work properly
+    # even when the global joblib backend is forced.
+    with joblib.parallel_backend(backend):
+        X, y = datasets.make_classification(
+            n_samples=30, n_features=5, n_redundant=0, random_state=0
+        )
+        X_train, X_test, y_train, y_test = train_test_split(X, y)
+
+        clf = neighbors.KNeighborsClassifier(
+            n_neighbors=3, algorithm=algorithm, n_jobs=2
+        )
+        clf.fit(X_train, y_train)
+        clf.predict(X_test)
+        clf.kneighbors(X_test)
+        clf.kneighbors_graph(X_test, mode="distance")
+
+
+def test_dtype_convert():
+    classifier = neighbors.KNeighborsClassifier(n_neighbors=1)
+    CLASSES = 15
+    X = np.eye(CLASSES)
+    y = [ch for ch in "ABCDEFGHIJKLMNOPQRSTU"[:CLASSES]]
+
+    result = classifier.fit(X, y).predict(X)
+    assert_array_equal(result, y)
+
+
+@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
+def test_sparse_metric_callable(csr_container):
+    def sparse_metric(x, y):  # Metric accepting sparse matrix input (only)
+        assert issparse(x) and issparse(y)
+        return x.dot(y.T).toarray().item()
+
+    X = csr_container(
+        [[1, 1, 1, 1, 1], [1, 0, 1, 0, 1], [0, 0, 1, 0, 0]]  # Population matrix
+    )
+
+    Y = csr_container([[1, 1, 0, 1, 1], [1, 0, 0, 1, 1]])  # Query matrix
+
+    nn = neighbors.NearestNeighbors(
+        algorithm="brute", n_neighbors=2, metric=sparse_metric
+    ).fit(X)
+    N = nn.kneighbors(Y, return_distance=False)
+
+    # Gold-standard indices of nearest neighbours in `X` for `sparse_metric`
+    gold_standard_nn = np.array([[2, 1], [2, 1]])
+
+    assert_array_equal(N, gold_standard_nn)
+
+
+# ignore conversion to boolean in pairwise_distances
+@ignore_warnings(category=DataConversionWarning)
+def test_pairwise_boolean_distance():
+    # Non-regression test for #4523
+    # 'brute': uses scipy.spatial.distance through pairwise_distances
+    # 'ball_tree': uses sklearn.neighbors._dist_metrics
+    rng = np.random.RandomState(0)
+    X = rng.uniform(size=(6, 5))
+    NN = neighbors.NearestNeighbors
+
+    nn1 = NN(metric="jaccard", algorithm="brute").fit(X)
+    nn2 = NN(metric="jaccard", algorithm="ball_tree").fit(X)
+    assert_array_equal(nn1.kneighbors(X)[0], nn2.kneighbors(X)[0])
+
+
+def test_radius_neighbors_predict_proba():
+    for seed in range(5):
+        X, y = datasets.make_classification(
+            n_samples=50,
+            n_features=5,
+            n_informative=3,
+            n_redundant=0,
+            n_classes=3,
+            random_state=seed,
+        )
+        X_tr, X_te, y_tr, y_te = train_test_split(X, y, random_state=0)
+        outlier_label = int(2 - seed)
+        clf = neighbors.RadiusNeighborsClassifier(radius=2, outlier_label=outlier_label)
+        clf.fit(X_tr, y_tr)
+        pred = clf.predict(X_te)
+        proba = clf.predict_proba(X_te)
+        proba_label = proba.argmax(axis=1)
+        proba_label = np.where(proba.sum(axis=1) == 0, outlier_label, proba_label)
+        assert_array_equal(pred, proba_label)
+
+
+def test_pipeline_with_nearest_neighbors_transformer():
+    # Test chaining k-neighbors/radius-neighbors transformers and regressors
+    rng = np.random.RandomState(0)
+    X = 2 * rng.rand(40, 5) - 1
+    X2 = 2 * rng.rand(40, 5) - 1
+    y = rng.rand(40, 1)
+
+    n_neighbors = 12
+    radius = 1.5
+    # We precompute more neighbors than necessary so that a k-neighbors
+    # estimator chained after a radius-neighbors transformer (and vice versa)
+    # stays equivalent to the compact estimator.
+    factor = 2
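+    # (Presumably: the radius-neighbors transformer built with radius * factor
+    # retains at least the n_neighbors closest points each query needs for the
+    # downstream k-neighbors regressor, while the k-neighbors transformer built
+    # with n_neighbors * factor covers every point within `radius` for the
+    # downstream radius-neighbors regressor.)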
+
+    k_trans = neighbors.KNeighborsTransformer(n_neighbors=n_neighbors, mode="distance")
+    k_trans_factor = neighbors.KNeighborsTransformer(
+        n_neighbors=int(n_neighbors * factor), mode="distance"
+    )
+
+    r_trans = neighbors.RadiusNeighborsTransformer(radius=radius, mode="distance")
+    r_trans_factor = neighbors.RadiusNeighborsTransformer(
+        radius=int(radius * factor), mode="distance"
+    )
+
+    k_reg = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors)
+    r_reg = neighbors.RadiusNeighborsRegressor(radius=radius)
+
+    test_list = [
+        (k_trans, k_reg),
+        (k_trans_factor, r_reg),
+        (r_trans, r_reg),
+        (r_trans_factor, k_reg),
+    ]
+
+    for trans, reg in test_list:
+        # compare the chained version and the compact version
+        reg_compact = clone(reg)
+        reg_precomp = clone(reg)
+        reg_precomp.set_params(metric="precomputed")
+
+        reg_chain = make_pipeline(clone(trans), reg_precomp)
+
+        y_pred_chain = reg_chain.fit(X, y).predict(X2)
+        y_pred_compact = reg_compact.fit(X, y).predict(X2)
+        assert_allclose(y_pred_chain, y_pred_compact)
+
+
+@pytest.mark.parametrize(
+    "X, metric, metric_params, expected_algo",
+    [
+        (np.random.randint(10, size=(10, 10)), "precomputed", None, "brute"),
+        (np.random.randn(10, 20), "euclidean", None, "brute"),
+        (np.random.randn(8, 5), "euclidean", None, "brute"),
+        (np.random.randn(10, 5), "euclidean", None, "kd_tree"),
+        (np.random.randn(10, 5), "seuclidean", {"V": [2] * 5}, "ball_tree"),
+        (np.random.randn(10, 5), "correlation", None, "brute"),
+    ],
+)
+def test_auto_algorithm(X, metric, metric_params, expected_algo):
+    model = neighbors.NearestNeighbors(
+        n_neighbors=4, algorithm="auto", metric=metric, metric_params=metric_params
+    )
+    model.fit(X)
+    assert model._fit_method == expected_algo
+
+
+@pytest.mark.parametrize(
+    "metric", sorted(set(neighbors.VALID_METRICS["brute"]) - set(["precomputed"]))
+)
+def test_radius_neighbors_brute_backend(
+    metric,
+    global_random_seed,
+    global_dtype,
+    n_samples=2000,
+    n_features=30,
+    n_query_pts=5,
+    radius=1.0,
+):
+    rng = np.random.RandomState(global_random_seed)
+    # Both backends for the 'brute' algorithm of radius_neighbors
+    # must give identical results.
+    X_train = rng.rand(n_samples, n_features).astype(global_dtype, copy=False)
+    X_test = rng.rand(n_query_pts, n_features).astype(global_dtype, copy=False)
+
+    # Haversine distance only accepts 2D data
+    if metric == "haversine":
+        feature_sl = slice(None, 2)
+        X_train = np.ascontiguousarray(X_train[:, feature_sl])
+        X_test = np.ascontiguousarray(X_test[:, feature_sl])
+
+    metric_params_list = _generate_test_params_for(metric, n_features)
+
+    for metric_params in metric_params_list:
+        p = metric_params.pop("p", 2)
+
+        neigh = neighbors.NearestNeighbors(
+            radius=radius,
+            algorithm="brute",
+            metric=metric,
+            p=p,
+            metric_params=metric_params,
+        )
+
+        neigh.fit(X_train)
+
+        with config_context(enable_cython_pairwise_dist=False):
+            # Use the legacy backend for brute
+            legacy_brute_dst, legacy_brute_idx = neigh.radius_neighbors(
+                X_test, return_distance=True
+            )
+        with config_context(enable_cython_pairwise_dist=True):
+            # Use the pairwise-distances reduction backend for brute
+            pdr_brute_dst, pdr_brute_idx = neigh.radius_neighbors(
+                X_test, return_distance=True
+            )
+
+        assert_compatible_radius_results(
+            legacy_brute_dst,
+            pdr_brute_dst,
+            legacy_brute_idx,
+            pdr_brute_idx,
+            radius=radius,
+            check_sorted=False,
+        )
+
+
+def test_valid_metrics_has_no_duplicate():
+    for val in neighbors.VALID_METRICS.values():
+        assert len(val) == len(set(val))
+
+
+def test_regressor_predict_on_arraylikes():
+    """Ensures that `predict` works for array-likes when `weights` is a callable.
+
+    Non-regression test for #22687.
+    """
+    X = [[5, 1], [3, 1], [4, 3], [0, 3]]
+    y = [2, 3, 5, 6]
+
+    def _weights(dist):
+        return np.ones_like(dist)
+
+    est = KNeighborsRegressor(n_neighbors=1, algorithm="brute", weights=_weights)
+    est.fit(X, y)
+    assert_allclose(est.predict([[0, 2.5]]), [6])
+
+
+def test_predict_dataframe():
+    """Check that KNN predict works with dataframes
+
+    non-regression test for issue #26768
+    """
+    pd = pytest.importorskip("pandas")
+
+    X = pd.DataFrame(np.array([[1, 2], [3, 4], [5, 6], [7, 8]]), columns=["a", "b"])
+    y = np.array([1, 2, 3, 4])
+
+    knn = neighbors.KNeighborsClassifier(n_neighbors=2).fit(X, y)
+    knn.predict(X)
+
+
+def test_nearest_neighbours_works_with_p_less_than_1():
+    """Check that NearestNeighbors works with :math:`p \\in (0,1)` when `algorithm`
+    is `"auto"` or `"brute"` regardless of the dtype of X.
+
+    Non-regression test for issue #26548
+    """
+    X = np.array([[1.0, 0.0], [0.0, 0.0], [0.0, 1.0]])
+    neigh = neighbors.NearestNeighbors(
+        n_neighbors=3, algorithm="brute", metric_params={"p": 0.5}
+    )
+    neigh.fit(X)
+
+    y = neigh.radius_neighbors(X[0].reshape(1, -1), radius=4, return_distance=False)
+    assert_allclose(y[0], [0, 1, 2])
+
+    y = neigh.kneighbors(X[0].reshape(1, -1), return_distance=False)
+    assert_allclose(y[0], [0, 1, 2])
+
+
+def test_KNeighborsClassifier_raise_on_all_zero_weights():
+    """Check that `predict` and `predict_proba` raises on sample of all zeros weights.
+
+    Related to Issue #25854.
+    """
+    X = [[0, 1], [1, 2], [2, 3], [3, 4]]
+    y = [0, 0, 1, 1]
+
+    def _weights(dist):
+        return np.vectorize(lambda x: 0 if x > 0.5 else 1)(dist)
+
+    est = neighbors.KNeighborsClassifier(n_neighbors=3, weights=_weights)
+    est.fit(X, y)
+
+    msg = (
+        "All neighbors of some sample is getting zero weights. "
+        "Please modify 'weights' to avoid this case if you are "
+        "using a user-defined function."
+    )
+
+    with pytest.raises(ValueError, match=msg):
+        est.predict([[1.1, 1.1]])
+
+    with pytest.raises(ValueError, match=msg):
+        est.predict_proba([[1.1, 1.1]])
diff --git a/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/test_neighbors_pipeline.py b/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/test_neighbors_pipeline.py
new file mode 100644
index 0000000000000000000000000000000000000000..1d01a0d0a60a80d4cc84cfac28a179a002508dbf
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/test_neighbors_pipeline.py
@@ -0,0 +1,256 @@
+"""
+These tests check the equivalence between estimators that compute nearest
+neighbors internally and the corresponding pipeline versions that precompute
+the neighbors with KNeighborsTransformer or RadiusNeighborsTransformer.
+"""
+
+import numpy as np
+
+from sklearn.base import clone
+from sklearn.cluster import DBSCAN, SpectralClustering
+from sklearn.cluster.tests.common import generate_clustered_data
+from sklearn.datasets import make_blobs
+from sklearn.manifold import TSNE, Isomap, SpectralEmbedding
+from sklearn.neighbors import (
+    KNeighborsRegressor,
+    KNeighborsTransformer,
+    LocalOutlierFactor,
+    RadiusNeighborsRegressor,
+    RadiusNeighborsTransformer,
+)
+from sklearn.pipeline import make_pipeline
+from sklearn.utils._testing import assert_array_almost_equal
+
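+
+def _precomputed_pipeline_sketch():
+    # A minimal sketch (added for illustration, not part of the original
+    # suite) of the pattern exercised below: a neighbors transformer
+    # precomputes a sparse distance graph which a downstream estimator
+    # consumes via metric="precomputed" / affinity="precomputed".
+    # All names and parameter values here are illustrative only.
+    X, y = make_blobs(random_state=0)
+    X_query, _ = make_blobs(random_state=1)
+
+    chained = make_pipeline(
+        KNeighborsTransformer(n_neighbors=5, mode="distance"),
+        KNeighborsRegressor(n_neighbors=5, metric="precomputed"),
+    )
+    compact = KNeighborsRegressor(n_neighbors=5)
+
+    # Both versions are expected to give (nearly) identical predictions.
+    return chained.fit(X, y).predict(X_query), compact.fit(X, y).predict(X_query)
+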
+
+def test_spectral_clustering():
+    # Test chaining KNeighborsTransformer and SpectralClustering
+    n_neighbors = 5
+    X, _ = make_blobs(random_state=0)
+
+    # compare the chained version and the compact version
+    est_chain = make_pipeline(
+        KNeighborsTransformer(n_neighbors=n_neighbors, mode="connectivity"),
+        SpectralClustering(
+            n_neighbors=n_neighbors, affinity="precomputed", random_state=42
+        ),
+    )
+    est_compact = SpectralClustering(
+        n_neighbors=n_neighbors, affinity="nearest_neighbors", random_state=42
+    )
+    labels_compact = est_compact.fit_predict(X)
+    labels_chain = est_chain.fit_predict(X)
+    assert_array_almost_equal(labels_chain, labels_compact)
+
+
+def test_spectral_embedding():
+    # Test chaining KNeighborsTransformer and SpectralEmbedding
+    n_neighbors = 5
+
+    n_samples = 1000
+    centers = np.array(
+        [
+            [0.0, 5.0, 0.0, 0.0, 0.0],
+            [0.0, 0.0, 4.0, 0.0, 0.0],
+            [1.0, 0.0, 0.0, 5.0, 1.0],
+        ]
+    )
+    S, true_labels = make_blobs(
+        n_samples=n_samples, centers=centers, cluster_std=1.0, random_state=42
+    )
+
+    # compare the chained version and the compact version
+    est_chain = make_pipeline(
+        KNeighborsTransformer(n_neighbors=n_neighbors, mode="connectivity"),
+        SpectralEmbedding(
+            n_neighbors=n_neighbors, affinity="precomputed", random_state=42
+        ),
+    )
+    est_compact = SpectralEmbedding(
+        n_neighbors=n_neighbors, affinity="nearest_neighbors", random_state=42
+    )
+    St_compact = est_compact.fit_transform(S)
+    St_chain = est_chain.fit_transform(S)
+    assert_array_almost_equal(St_chain, St_compact)
+
+
+def test_dbscan():
+    # Test chaining RadiusNeighborsTransformer and DBSCAN
+    radius = 0.3
+    n_clusters = 3
+    X = generate_clustered_data(n_clusters=n_clusters)
+
+    # compare the chained version and the compact version
+    est_chain = make_pipeline(
+        RadiusNeighborsTransformer(radius=radius, mode="distance"),
+        DBSCAN(metric="precomputed", eps=radius),
+    )
+    est_compact = DBSCAN(eps=radius)
+
+    labels_chain = est_chain.fit_predict(X)
+    labels_compact = est_compact.fit_predict(X)
+    assert_array_almost_equal(labels_chain, labels_compact)
+
+
+def test_isomap():
+    # Test chaining KNeighborsTransformer and Isomap with
+    # neighbors_algorithm='precomputed'
+    algorithm = "auto"
+    n_neighbors = 10
+
+    X, _ = make_blobs(random_state=0)
+    X2, _ = make_blobs(random_state=1)
+
+    # compare the chained version and the compact version
+    est_chain = make_pipeline(
+        KNeighborsTransformer(
+            n_neighbors=n_neighbors, algorithm=algorithm, mode="distance"
+        ),
+        Isomap(n_neighbors=n_neighbors, metric="precomputed"),
+    )
+    est_compact = Isomap(n_neighbors=n_neighbors, neighbors_algorithm=algorithm)
+
+    Xt_chain = est_chain.fit_transform(X)
+    Xt_compact = est_compact.fit_transform(X)
+    assert_array_almost_equal(Xt_chain, Xt_compact)
+
+    Xt_chain = est_chain.transform(X2)
+    Xt_compact = est_compact.transform(X2)
+    assert_array_almost_equal(Xt_chain, Xt_compact)
+
+
+def test_tsne():
+    # Test chaining KNeighborsTransformer and TSNE
+    n_iter = 250
+    perplexity = 5
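+    # TSNE's Barnes-Hut solver looks at roughly the 3 * perplexity nearest
+    # neighbors of each point, so the transformer presumably needs to
+    # precompute at least that many for the precomputed path to match.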
+    n_neighbors = int(3.0 * perplexity + 1)
+
+    rng = np.random.RandomState(0)
+    X = rng.randn(20, 2)
+
+    for metric in ["minkowski", "sqeuclidean"]:
+        # compare the chained version and the compact version
+        est_chain = make_pipeline(
+            KNeighborsTransformer(
+                n_neighbors=n_neighbors, mode="distance", metric=metric
+            ),
+            TSNE(
+                init="random",
+                metric="precomputed",
+                perplexity=perplexity,
+                method="barnes_hut",
+                random_state=42,
+                n_iter=n_iter,
+            ),
+        )
+        est_compact = TSNE(
+            init="random",
+            metric=metric,
+            perplexity=perplexity,
+            n_iter=n_iter,
+            method="barnes_hut",
+            random_state=42,
+        )
+
+        Xt_chain = est_chain.fit_transform(X)
+        Xt_compact = est_compact.fit_transform(X)
+        assert_array_almost_equal(Xt_chain, Xt_compact)
+
+
+def test_lof_novelty_false():
+    # Test chaining KNeighborsTransformer and LocalOutlierFactor
+    n_neighbors = 4
+
+    rng = np.random.RandomState(0)
+    X = rng.randn(40, 2)
+
+    # compare the chained version and the compact version
+    est_chain = make_pipeline(
+        KNeighborsTransformer(n_neighbors=n_neighbors, mode="distance"),
+        LocalOutlierFactor(
+            metric="precomputed",
+            n_neighbors=n_neighbors,
+            novelty=False,
+            contamination="auto",
+        ),
+    )
+    est_compact = LocalOutlierFactor(
+        n_neighbors=n_neighbors, novelty=False, contamination="auto"
+    )
+
+    pred_chain = est_chain.fit_predict(X)
+    pred_compact = est_compact.fit_predict(X)
+    assert_array_almost_equal(pred_chain, pred_compact)
+
+
+def test_lof_novelty_true():
+    # Test chaining KNeighborsTransformer and LocalOutlierFactor
+    n_neighbors = 4
+
+    rng = np.random.RandomState(0)
+    X1 = rng.randn(40, 2)
+    X2 = rng.randn(40, 2)
+
+    # compare the chained version and the compact version
+    est_chain = make_pipeline(
+        KNeighborsTransformer(n_neighbors=n_neighbors, mode="distance"),
+        LocalOutlierFactor(
+            metric="precomputed",
+            n_neighbors=n_neighbors,
+            novelty=True,
+            contamination="auto",
+        ),
+    )
+    est_compact = LocalOutlierFactor(
+        n_neighbors=n_neighbors, novelty=True, contamination="auto"
+    )
+
+    pred_chain = est_chain.fit(X1).predict(X2)
+    pred_compact = est_compact.fit(X1).predict(X2)
+    assert_array_almost_equal(pred_chain, pred_compact)
+
+
+def test_kneighbors_regressor():
+    # Test chaining k-neighbors/radius-neighbors transformers and regressors
+    rng = np.random.RandomState(0)
+    X = 2 * rng.rand(40, 5) - 1
+    X2 = 2 * rng.rand(40, 5) - 1
+    y = rng.rand(40, 1)
+
+    n_neighbors = 12
+    radius = 1.5
+    # We precompute more neighbors than necessary so that a k-neighbors
+    # estimator chained after a radius-neighbors transformer (and vice versa)
+    # stays equivalent to the compact estimator.
+    factor = 2
+
+    k_trans = KNeighborsTransformer(n_neighbors=n_neighbors, mode="distance")
+    k_trans_factor = KNeighborsTransformer(
+        n_neighbors=int(n_neighbors * factor), mode="distance"
+    )
+
+    r_trans = RadiusNeighborsTransformer(radius=radius, mode="distance")
+    r_trans_factor = RadiusNeighborsTransformer(
+        radius=int(radius * factor), mode="distance"
+    )
+
+    k_reg = KNeighborsRegressor(n_neighbors=n_neighbors)
+    r_reg = RadiusNeighborsRegressor(radius=radius)
+
+    test_list = [
+        (k_trans, k_reg),
+        (k_trans_factor, r_reg),
+        (r_trans, r_reg),
+        (r_trans_factor, k_reg),
+    ]
+
+    for trans, reg in test_list:
+        # compare the chained version and the compact version
+        reg_compact = clone(reg)
+        reg_precomp = clone(reg)
+        reg_precomp.set_params(metric="precomputed")
+
+        reg_chain = make_pipeline(clone(trans), reg_precomp)
+
+        y_pred_chain = reg_chain.fit(X, y).predict(X2)
+        y_pred_compact = reg_compact.fit(X, y).predict(X2)
+        assert_array_almost_equal(y_pred_chain, y_pred_compact)
diff --git a/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/test_neighbors_tree.py b/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/test_neighbors_tree.py
new file mode 100644
index 0000000000000000000000000000000000000000..4d8bac12f7423caadf1a392b5d42313f6a3f32f8
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/test_neighbors_tree.py
@@ -0,0 +1,296 @@
+# License: BSD 3 clause
+
+import itertools
+import pickle
+
+import numpy as np
+import pytest
+from numpy.testing import assert_allclose, assert_array_almost_equal
+
+from sklearn.metrics import DistanceMetric
+from sklearn.neighbors._ball_tree import (
+    BallTree,
+    kernel_norm,
+)
+from sklearn.neighbors._ball_tree import (
+    NeighborsHeap64 as NeighborsHeapBT,
+)
+from sklearn.neighbors._ball_tree import (
+    nodeheap_sort as nodeheap_sort_bt,
+)
+from sklearn.neighbors._ball_tree import (
+    simultaneous_sort as simultaneous_sort_bt,
+)
+from sklearn.neighbors._kd_tree import (
+    KDTree,
+)
+from sklearn.neighbors._kd_tree import (
+    NeighborsHeap64 as NeighborsHeapKDT,
+)
+from sklearn.neighbors._kd_tree import (
+    nodeheap_sort as nodeheap_sort_kdt,
+)
+from sklearn.neighbors._kd_tree import (
+    simultaneous_sort as simultaneous_sort_kdt,
+)
+from sklearn.utils import check_random_state
+
+rng = np.random.RandomState(42)
+V_mahalanobis = rng.rand(3, 3)
+V_mahalanobis = np.dot(V_mahalanobis, V_mahalanobis.T)
+
+DIMENSION = 3
+
+METRICS = {
+    "euclidean": {},
+    "manhattan": {},
+    "minkowski": dict(p=3),
+    "chebyshev": {},
+    "seuclidean": dict(V=rng.random_sample(DIMENSION)),
+    "mahalanobis": dict(V=V_mahalanobis),
+}
+
+KD_TREE_METRICS = ["euclidean", "manhattan", "chebyshev", "minkowski"]
+BALL_TREE_METRICS = list(METRICS)
+
+
+def dist_func(x1, x2, p):
+    return np.sum(np.abs(x1 - x2) ** p) ** (1.0 / p)
+
+
+def compute_kernel_slow(Y, X, kernel, h):
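+    # Brute-force reference: the density estimate at each query point y is
+    # kernel_norm(...) * sum_i K(||y - x_i||, h), which is what
+    # ``tree.kernel_density`` is expected to approximate in the tests below.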
+    d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
+    norm = kernel_norm(h, X.shape[1], kernel)
+
+    if kernel == "gaussian":
+        return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
+    elif kernel == "tophat":
+        return norm * (d < h).sum(-1)
+    elif kernel == "epanechnikov":
+        return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
+    elif kernel == "exponential":
+        return norm * (np.exp(-d / h)).sum(-1)
+    elif kernel == "linear":
+        return norm * ((1 - d / h) * (d < h)).sum(-1)
+    elif kernel == "cosine":
+        return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
+    else:
+        raise ValueError("kernel not recognized")
+
+
+def brute_force_neighbors(X, Y, k, metric, **kwargs):
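+    # Reference implementation: compute the full pairwise distance matrix,
+    # then argsort each row and keep the k smallest distances and their indices.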
+    D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
+    ind = np.argsort(D, axis=1)[:, :k]
+    dist = D[np.arange(Y.shape[0])[:, None], ind]
+    return dist, ind
+
+
+@pytest.mark.parametrize("Cls", [KDTree, BallTree])
+@pytest.mark.parametrize(
+    "kernel", ["gaussian", "tophat", "epanechnikov", "exponential", "linear", "cosine"]
+)
+@pytest.mark.parametrize("h", [0.01, 0.1, 1])
+@pytest.mark.parametrize("rtol", [0, 1e-5])
+@pytest.mark.parametrize("atol", [1e-6, 1e-2])
+@pytest.mark.parametrize("breadth_first", [True, False])
+def test_kernel_density(
+    Cls, kernel, h, rtol, atol, breadth_first, n_samples=100, n_features=3
+):
+    rng = check_random_state(1)
+    X = rng.random_sample((n_samples, n_features))
+    Y = rng.random_sample((n_samples, n_features))
+    dens_true = compute_kernel_slow(Y, X, kernel, h)
+
+    tree = Cls(X, leaf_size=10)
+    dens = tree.kernel_density(
+        Y, h, atol=atol, rtol=rtol, kernel=kernel, breadth_first=breadth_first
+    )
+    assert_allclose(dens, dens_true, atol=atol, rtol=max(rtol, 1e-7))
+
+
+@pytest.mark.parametrize("Cls", [KDTree, BallTree])
+def test_neighbor_tree_query_radius(Cls, n_samples=100, n_features=10):
+    rng = check_random_state(0)
+    X = 2 * rng.random_sample(size=(n_samples, n_features)) - 1
+    query_pt = np.zeros(n_features, dtype=float)
+
+    eps = 1e-15  # roundoff error can cause test to fail
+    tree = Cls(X, leaf_size=5)
+    rad = np.sqrt(((X - query_pt) ** 2).sum(1))
+
+    for r in np.linspace(rad[0], rad[-1], 100):
+        ind = tree.query_radius([query_pt], r + eps)[0]
+        i = np.where(rad <= r + eps)[0]
+
+        ind.sort()
+        i.sort()
+
+        assert_array_almost_equal(i, ind)
+
+
+@pytest.mark.parametrize("Cls", [KDTree, BallTree])
+def test_neighbor_tree_query_radius_distance(Cls, n_samples=100, n_features=10):
+    rng = check_random_state(0)
+    X = 2 * rng.random_sample(size=(n_samples, n_features)) - 1
+    query_pt = np.zeros(n_features, dtype=float)
+
+    eps = 1e-15  # roundoff error can cause test to fail
+    tree = Cls(X, leaf_size=5)
+    rad = np.sqrt(((X - query_pt) ** 2).sum(1))
+
+    for r in np.linspace(rad[0], rad[-1], 100):
+        ind, dist = tree.query_radius([query_pt], r + eps, return_distance=True)
+
+        ind = ind[0]
+        dist = dist[0]
+
+        d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1))
+
+        assert_array_almost_equal(d, dist)
+
+
+@pytest.mark.parametrize("Cls", [KDTree, BallTree])
+@pytest.mark.parametrize("dualtree", (True, False))
+def test_neighbor_tree_two_point(Cls, dualtree, n_samples=100, n_features=3):
+    rng = check_random_state(0)
+    X = rng.random_sample((n_samples, n_features))
+    Y = rng.random_sample((n_samples, n_features))
+    r = np.linspace(0, 1, 10)
+    tree = Cls(X, leaf_size=10)
+
+    D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
+    counts_true = [(D <= ri).sum() for ri in r]
+
+    counts = tree.two_point_correlation(Y, r=r, dualtree=dualtree)
+    assert_array_almost_equal(counts, counts_true)
+
+
+@pytest.mark.parametrize("NeighborsHeap", [NeighborsHeapBT, NeighborsHeapKDT])
+def test_neighbors_heap(NeighborsHeap, n_pts=5, n_nbrs=10):
+    heap = NeighborsHeap(n_pts, n_nbrs)
+    rng = check_random_state(0)
+
+    for row in range(n_pts):
+        d_in = rng.random_sample(2 * n_nbrs).astype(np.float64, copy=False)
+        i_in = np.arange(2 * n_nbrs, dtype=np.intp)
+        for d, i in zip(d_in, i_in):
+            heap.push(row, d, i)
+
+        ind = np.argsort(d_in)
+        d_in = d_in[ind]
+        i_in = i_in[ind]
+
+        d_heap, i_heap = heap.get_arrays(sort=True)
+
+        assert_array_almost_equal(d_in[:n_nbrs], d_heap[row])
+        assert_array_almost_equal(i_in[:n_nbrs], i_heap[row])
+
+
+@pytest.mark.parametrize("nodeheap_sort", [nodeheap_sort_bt, nodeheap_sort_kdt])
+def test_node_heap(nodeheap_sort, n_nodes=50):
+    rng = check_random_state(0)
+    vals = rng.random_sample(n_nodes).astype(np.float64, copy=False)
+
+    i1 = np.argsort(vals)
+    vals2, i2 = nodeheap_sort(vals)
+
+    assert_array_almost_equal(i1, i2)
+    assert_array_almost_equal(vals[i1], vals2)
+
+
+@pytest.mark.parametrize(
+    "simultaneous_sort", [simultaneous_sort_bt, simultaneous_sort_kdt]
+)
+def test_simultaneous_sort(simultaneous_sort, n_rows=10, n_pts=201):
+    rng = check_random_state(0)
+    dist = rng.random_sample((n_rows, n_pts)).astype(np.float64, copy=False)
+    ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(np.intp, copy=False)
+
+    dist2 = dist.copy()
+    ind2 = ind.copy()
+
+    # simultaneous sort rows using function
+    simultaneous_sort(dist, ind)
+
+    # simultaneous sort rows using numpy
+    i = np.argsort(dist2, axis=1)
+    row_ind = np.arange(n_rows)[:, None]
+    dist2 = dist2[row_ind, i]
+    ind2 = ind2[row_ind, i]
+
+    assert_array_almost_equal(dist, dist2)
+    assert_array_almost_equal(ind, ind2)
+
+
+@pytest.mark.parametrize("Cls", [KDTree, BallTree])
+def test_gaussian_kde(Cls, n_samples=1000):
+    # Compare gaussian KDE results to scipy.stats.gaussian_kde
+    from scipy.stats import gaussian_kde
+
+    rng = check_random_state(0)
+    x_in = rng.normal(0, 1, n_samples)
+    x_out = np.linspace(-5, 5, 30)
+
+    for h in [0.01, 0.1, 1]:
+        tree = Cls(x_in[:, None])
+        gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in))
+
+        dens_tree = tree.kernel_density(x_out[:, None], h) / n_samples
+        dens_gkde = gkde.evaluate(x_out)
+
+        assert_array_almost_equal(dens_tree, dens_gkde, decimal=3)
+
+
+@pytest.mark.parametrize(
+    "Cls, metric",
+    itertools.chain(
+        [(KDTree, metric) for metric in KD_TREE_METRICS],
+        [(BallTree, metric) for metric in BALL_TREE_METRICS],
+    ),
+)
+@pytest.mark.parametrize("k", (1, 3, 5))
+@pytest.mark.parametrize("dualtree", (True, False))
+@pytest.mark.parametrize("breadth_first", (True, False))
+def test_nn_tree_query(Cls, metric, k, dualtree, breadth_first):
+    rng = check_random_state(0)
+    X = rng.random_sample((40, DIMENSION))
+    Y = rng.random_sample((10, DIMENSION))
+
+    kwargs = METRICS[metric]
+
+    kdt = Cls(X, leaf_size=1, metric=metric, **kwargs)
+    dist1, ind1 = kdt.query(Y, k, dualtree=dualtree, breadth_first=breadth_first)
+    dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)
+
+    # don't check indices here: if there are any duplicate distances,
+    # the indices may not match.  Distances should not have this problem.
+    assert_array_almost_equal(dist1, dist2)
+
+
+@pytest.mark.parametrize(
+    "Cls, metric",
+    [(KDTree, "euclidean"), (BallTree, "euclidean"), (BallTree, dist_func)],
+)
+@pytest.mark.parametrize("protocol", (0, 1, 2))
+def test_pickle(Cls, metric, protocol):
+    rng = check_random_state(0)
+    X = rng.random_sample((10, 3))
+
+    if callable(metric):
+        kwargs = {"p": 2}
+    else:
+        kwargs = {}
+
+    tree1 = Cls(X, leaf_size=1, metric=metric, **kwargs)
+
+    dist1, ind1 = tree1.query(X)
+
+    s = pickle.dumps(tree1, protocol=protocol)
+    tree2 = pickle.loads(s)
+
+    dist2, ind2 = tree2.query(X)
+
+    assert_array_almost_equal(ind1, ind2)
+    assert_array_almost_equal(dist1, dist2)
+
+    assert isinstance(tree2, Cls)
diff --git a/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/test_quad_tree.py b/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/test_quad_tree.py
new file mode 100644
index 0000000000000000000000000000000000000000..be9a4c5fe549d32a130f9c6a55f6675fa0e42f20
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/sklearn/neighbors/tests/test_quad_tree.py
@@ -0,0 +1,144 @@
+import pickle
+
+import numpy as np
+import pytest
+
+from sklearn.neighbors._quad_tree import _QuadTree
+from sklearn.utils import check_random_state
+
+
+def test_quadtree_boundary_computation():
+    # Insert points into a quad tree whose boundaries are not easy to compute.
+    Xs = []
+
+    # check a random case
+    Xs.append(np.array([[-1, 1], [-4, -1]], dtype=np.float32))
+    # check the case where only 0 are inserted
+    Xs.append(np.array([[0, 0], [0, 0]], dtype=np.float32))
+    # check the case where only negative are inserted
+    Xs.append(np.array([[-1, -2], [-4, 0]], dtype=np.float32))
+    # check the case where only small numbers are inserted
+    Xs.append(np.array([[-1e-6, 1e-6], [-4e-6, -1e-6]], dtype=np.float32))
+
+    for X in Xs:
+        tree = _QuadTree(n_dimensions=2, verbose=0)
+        tree.build_tree(X)
+        tree._check_coherence()
+
+
+def test_quadtree_similar_point():
+    # Insert a point into a quad tree where a similar point already exists.
+    # The tree build would hang on such inputs if they were not handled.
+    Xs = []
+
+    # check the case where points are actually different
+    Xs.append(np.array([[1, 2], [3, 4]], dtype=np.float32))
+    # check the case where points are the same on X axis
+    Xs.append(np.array([[1.0, 2.0], [1.0, 3.0]], dtype=np.float32))
+    # check the case where points are arbitrarily close on X axis
+    Xs.append(np.array([[1.00001, 2.0], [1.00002, 3.0]], dtype=np.float32))
+    # check the case where points are the same on Y axis
+    Xs.append(np.array([[1.0, 2.0], [3.0, 2.0]], dtype=np.float32))
+    # check the case where points are arbitrarily close on Y axis
+    Xs.append(np.array([[1.0, 2.00001], [3.0, 2.00002]], dtype=np.float32))
+    # check the case where points are arbitrarily close on both axes
+    Xs.append(np.array([[1.00001, 2.00001], [1.00002, 2.00002]], dtype=np.float32))
+
+    # check the case where points are arbitrarily close on both axes
+    # close to machine epsilon - x axis
+    Xs.append(np.array([[1, 0.0003817754041], [2, 0.0003817753750]], dtype=np.float32))
+
+    # check the case where points are arbitrarily close on both axes
+    # close to machine epsilon - y axis
+    Xs.append(
+        np.array([[0.0003817754041, 1.0], [0.0003817753750, 2.0]], dtype=np.float32)
+    )
+
+    for X in Xs:
+        tree = _QuadTree(n_dimensions=2, verbose=0)
+        tree.build_tree(X)
+        tree._check_coherence()
+
+
+@pytest.mark.parametrize("n_dimensions", (2, 3))
+@pytest.mark.parametrize("protocol", (0, 1, 2))
+def test_quad_tree_pickle(n_dimensions, protocol):
+    rng = check_random_state(0)
+
+    X = rng.random_sample((10, n_dimensions))
+
+    tree = _QuadTree(n_dimensions=n_dimensions, verbose=0)
+    tree.build_tree(X)
+
+    s = pickle.dumps(tree, protocol=protocol)
+    bt2 = pickle.loads(s)
+
+    for x in X:
+        cell_x_tree = tree.get_cell(x)
+        cell_x_bt2 = bt2.get_cell(x)
+        assert cell_x_tree == cell_x_bt2
+
+
+@pytest.mark.parametrize("n_dimensions", (2, 3))
+def test_qt_insert_duplicate(n_dimensions):
+    rng = check_random_state(0)
+
+    X = rng.random_sample((10, n_dimensions))
+    Xd = np.r_[X, X[:5]]
+    tree = _QuadTree(n_dimensions=n_dimensions, verbose=0)
+    tree.build_tree(Xd)
+
+    cumulative_size = tree.cumulative_size
+    leafs = tree.leafs
+
+    # Assert that the first 5 points are indeed duplicated and that the
+    # remaining ones are single-point leaves
+    for i, x in enumerate(X):
+        cell_id = tree.get_cell(x)
+        assert leafs[cell_id]
+        assert cumulative_size[cell_id] == 1 + (i < 5)
+
+
+def test_summarize():
+    # Simple check for quad tree's summarize
+
+    angle = 0.9
+    X = np.array(
+        [[-10.0, -10.0], [9.0, 10.0], [10.0, 9.0], [10.0, 10.0]], dtype=np.float32
+    )
+    query_pt = X[0, :]
+    n_dimensions = X.shape[1]
+    offset = n_dimensions + 2
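+    # Each summarized node occupies ``offset`` consecutive entries in the
+    # returned summary: as checked below, entry ``n_dimensions`` holds the
+    # squared distance from the query point to the node barycenter and
+    # entry ``n_dimensions + 1`` holds the node size.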
+
+    qt = _QuadTree(n_dimensions, verbose=0)
+    qt.build_tree(X)
+
+    idx, summary = qt._py_summarize(query_pt, X, angle)
+
+    node_dist = summary[n_dimensions]
+    node_size = summary[n_dimensions + 1]
+
+    # Summary should contain only 1 node with size 3 and distance to
+    # X[1:] barycenter
+    barycenter = X[1:].mean(axis=0)
+    ds2c = ((X[0] - barycenter) ** 2).sum()
+
+    assert idx == offset
+    assert node_size == 3, "summary size = {}".format(node_size)
+    assert np.isclose(node_dist, ds2c)
+
+    # Summary should contain all 3 nodes with size 1 and distance to
+    # each point in X[1:] for ``angle=0``
+    idx, summary = qt._py_summarize(query_pt, X, 0.0)
+    barycenter = X[1:].mean(axis=0)
+    ds2c = ((X[0] - barycenter) ** 2).sum()
+
+    assert idx == 3 * (offset)
+    for i in range(3):
+        node_dist = summary[i * offset + n_dimensions]
+        node_size = summary[i * offset + n_dimensions + 1]
+
+        ds2c = ((X[0] - X[i + 1]) ** 2).sum()
+
+        assert node_size == 1, "summary size = {}".format(node_size)
+        assert np.isclose(node_dist, ds2c)
diff --git a/venv/lib/python3.10/site-packages/sklearn/neural_network/__init__.py b/venv/lib/python3.10/site-packages/sklearn/neural_network/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..0b321b605de0ba09605496a96dbfa6746183e232
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/sklearn/neural_network/__init__.py
@@ -0,0 +1,11 @@
+"""
+The :mod:`sklearn.neural_network` module includes models based on neural
+networks.
+"""
+
+# License: BSD 3 clause
+
+from ._multilayer_perceptron import MLPClassifier, MLPRegressor
+from ._rbm import BernoulliRBM
+
+__all__ = ["BernoulliRBM", "MLPClassifier", "MLPRegressor"]
diff --git a/venv/lib/python3.10/site-packages/sklearn/neural_network/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/neural_network/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5be1c97c73bbcddf36b38b853c662d8effa9bb90
Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/neural_network/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/sklearn/neural_network/__pycache__/_base.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/neural_network/__pycache__/_base.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..89b60469d9b79b0183a0ea152438b318f08a910e
Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/neural_network/__pycache__/_base.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/sklearn/neural_network/__pycache__/_multilayer_perceptron.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/neural_network/__pycache__/_multilayer_perceptron.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b1533821ec927e0c10c1c17482d42cebd7836e1c
Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/neural_network/__pycache__/_multilayer_perceptron.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/sklearn/neural_network/__pycache__/_rbm.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/neural_network/__pycache__/_rbm.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..59ce76ecc6e6052a907f6cf43b951545010d0710
Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/neural_network/__pycache__/_rbm.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/sklearn/neural_network/__pycache__/_stochastic_optimizers.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/neural_network/__pycache__/_stochastic_optimizers.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c907c721df68826da2906b5ee89001f463a2e4ac
Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/neural_network/__pycache__/_stochastic_optimizers.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/sklearn/neural_network/_base.py b/venv/lib/python3.10/site-packages/sklearn/neural_network/_base.py
new file mode 100644
index 0000000000000000000000000000000000000000..73d62f9543e983d3cdfbeaa95a8194c0811c7728
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/sklearn/neural_network/_base.py
@@ -0,0 +1,236 @@
+"""Utilities for the neural network modules
+"""
+
+# Author: Issam H. Laradji 
+# License: BSD 3 clause
+
+import numpy as np
+from scipy.special import expit as logistic_sigmoid
+from scipy.special import xlogy
+
+
+def inplace_identity(X):
+    """Simply leave the input array unchanged.
+
+    Parameters
+    ----------
+    X : {array-like, sparse matrix}, shape (n_samples, n_features)
+        Data, where `n_samples` is the number of samples
+        and `n_features` is the number of features.
+    """
+    # Nothing to do
+
+
+def inplace_logistic(X):
+    """Compute the logistic function inplace.
+
+    Parameters
+    ----------
+    X : {array-like, sparse matrix}, shape (n_samples, n_features)
+        The input data.
+    """
+    logistic_sigmoid(X, out=X)
+
+
+def inplace_tanh(X):
+    """Compute the hyperbolic tan function inplace.
+
+    Parameters
+    ----------
+    X : {array-like, sparse matrix}, shape (n_samples, n_features)
+        The input data.
+    """
+    np.tanh(X, out=X)
+
+
+def inplace_relu(X):
+    """Compute the rectified linear unit function inplace.
+
+    Parameters
+    ----------
+    X : {array-like, sparse matrix}, shape (n_samples, n_features)
+        The input data.
+    """
+    np.maximum(X, 0, out=X)
+
+
+def inplace_softmax(X):
+    """Compute the K-way softmax function inplace.
+
+    Parameters
+    ----------
+    X : {array-like, sparse matrix}, shape (n_samples, n_features)
+        The input data.
+    """
+    tmp = X - X.max(axis=1)[:, np.newaxis]
+    np.exp(tmp, out=X)
+    X /= X.sum(axis=1)[:, np.newaxis]
+
+
+ACTIVATIONS = {
+    "identity": inplace_identity,
+    "tanh": inplace_tanh,
+    "logistic": inplace_logistic,
+    "relu": inplace_relu,
+    "softmax": inplace_softmax,
+}
+
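+# A minimal usage sketch: each activation above modifies its argument in
+# place and returns None, e.g.
+#
+#   >>> import numpy as np
+#   >>> Z = np.array([[-1.0, 0.0, 2.0]])
+#   >>> ACTIVATIONS["relu"](Z)
+#   >>> Z
+#   array([[0., 0., 2.]])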
+
+def inplace_identity_derivative(Z, delta):
+    """Apply the derivative of the identity function: do nothing.
+
+    Parameters
+    ----------
+    Z : {array-like, sparse matrix}, shape (n_samples, n_features)
+        The data which was output from the identity activation function during
+        the forward pass.
+
+    delta : {array-like}, shape (n_samples, n_features)
+         The backpropagated error signal to be modified inplace.
+    """
+    # Nothing to do
+
+
+def inplace_logistic_derivative(Z, delta):
+    """Apply the derivative of the logistic sigmoid function.
+
+    It exploits the fact that the derivative is a simple function of the output
+    value from the logistic function.
+
+    Parameters
+    ----------
+    Z : {array-like, sparse matrix}, shape (n_samples, n_features)
+        The data which was output from the logistic activation function during
+        the forward pass.
+
+    delta : {array-like}, shape (n_samples, n_features)
+         The backpropagated error signal to be modified inplace.
+    """
+    delta *= Z
+    delta *= 1 - Z
+
+
+def inplace_tanh_derivative(Z, delta):
+    """Apply the derivative of the hyperbolic tanh function.
+
+    It exploits the fact that the derivative is a simple function of the output
+    value from the hyperbolic tangent.
+
+    Parameters
+    ----------
+    Z : {array-like, sparse matrix}, shape (n_samples, n_features)
+        The data which was output from the hyperbolic tangent activation
+        function during the forward pass.
+
+    delta : {array-like}, shape (n_samples, n_features)
+         The backpropagated error signal to be modified inplace.
+    """
+    delta *= 1 - Z**2
+
+
+def inplace_relu_derivative(Z, delta):
+    """Apply the derivative of the relu function.
+
+    It exploits the fact that the derivative is a simple function of the output
+    value from the rectified linear unit activation function.
+
+    Parameters
+    ----------
+    Z : {array-like, sparse matrix}, shape (n_samples, n_features)
+        The data which was output from the rectified linear units activation
+        function during the forward pass.
+
+    delta : {array-like}, shape (n_samples, n_features)
+         The backpropagated error signal to be modified inplace.
+    """
+    delta[Z == 0] = 0
+
+
+DERIVATIVES = {
+    "identity": inplace_identity_derivative,
+    "tanh": inplace_tanh_derivative,
+    "logistic": inplace_logistic_derivative,
+    "relu": inplace_relu_derivative,
+}
+
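+# A minimal usage sketch: each derivative helper above expects Z to be the
+# *activated* output of a layer and scales ``delta`` in place by the
+# activation's derivative evaluated at that output, e.g.
+#
+#   >>> import numpy as np
+#   >>> Z = np.tanh(np.array([[0.5, -1.0]]))
+#   >>> delta = np.ones_like(Z)
+#   >>> DERIVATIVES["tanh"](Z, delta)
+#   >>> bool(np.allclose(delta, 1 - Z ** 2))
+#   True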
+
+def squared_loss(y_true, y_pred):
+    """Compute the squared loss for regression.
+
+    Parameters
+    ----------
+    y_true : array-like or label indicator matrix
+        Ground truth (correct) values.
+
+    y_pred : array-like or label indicator matrix
+        Predicted values, as returned by a regression estimator.
+
+    Returns
+    -------
+    loss : float
+        Half the mean squared error of the predictions (lower is better).
+    """
+    return ((y_true - y_pred) ** 2).mean() / 2
+
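+# A quick hand check of ``squared_loss``: for targets [1, 3] and predictions
+# [0, 3] the squared errors are [1, 0], whose mean halved is 0.25,
+#
+#   >>> import numpy as np
+#   >>> float(squared_loss(np.array([[1.0], [3.0]]), np.array([[0.0], [3.0]])))
+#   0.25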
+
+def log_loss(y_true, y_prob):
+    """Compute Logistic loss for classification.
+
+    Parameters
+    ----------
+    y_true : array-like or label indicator matrix
+        Ground truth (correct) labels.
+
+    y_prob : array-like of float, shape = (n_samples, n_classes)
+        Predicted probabilities, as returned by a classifier's
+        predict_proba method.
+
+    Returns
+    -------
+    loss : float
+        The average cross-entropy loss of the predicted probabilities
+        (lower is better).
+    """
+    eps = np.finfo(y_prob.dtype).eps
+    y_prob = np.clip(y_prob, eps, 1 - eps)
+    if y_prob.shape[1] == 1:
+        y_prob = np.append(1 - y_prob, y_prob, axis=1)
+
+    if y_true.shape[1] == 1:
+        y_true = np.append(1 - y_true, y_true, axis=1)
+
+    return -xlogy(y_true, y_prob).sum() / y_prob.shape[0]
+
+
+def binary_log_loss(y_true, y_prob):
+    """Compute binary logistic loss for classification.
+
+    This is identical to log_loss in the binary classification case,
+    but is kept for its use in the multilabel case.
+
+    Parameters
+    ----------
+    y_true : array-like or label indicator matrix
+        Ground truth (correct) labels.
+
+    y_prob : array-like of float, shape = (n_samples, 1)
+        Predicted probabilities, as returned by a classifier's
+        predict_proba method.
+
+    Returns
+    -------
+    loss : float
+        The average binary cross-entropy loss of the predicted
+        probabilities (lower is better).
+    """
+    eps = np.finfo(y_prob.dtype).eps
+    y_prob = np.clip(y_prob, eps, 1 - eps)
+    return (
+        -(xlogy(y_true, y_prob).sum() + xlogy(1 - y_true, 1 - y_prob).sum())
+        / y_prob.shape[0]
+    )
+
+
+LOSS_FUNCTIONS = {
+    "squared_error": squared_loss,
+    "log_loss": log_loss,
+    "binary_log_loss": binary_log_loss,
+}
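+
+# A small hand-checked example for ``log_loss``: with one-hot targets it
+# reduces to the mean negative log-probability assigned to the true class,
+# e.g.
+#
+#   >>> import numpy as np
+#   >>> y_true = np.array([[1.0, 0.0], [0.0, 1.0]])
+#   >>> y_prob = np.array([[0.9, 0.1], [0.2, 0.8]])
+#   >>> bool(np.isclose(log_loss(y_true, y_prob),
+#   ...                 -(np.log(0.9) + np.log(0.8)) / 2))
+#   True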
diff --git a/venv/lib/python3.10/site-packages/sklearn/neural_network/_multilayer_perceptron.py b/venv/lib/python3.10/site-packages/sklearn/neural_network/_multilayer_perceptron.py
new file mode 100644
index 0000000000000000000000000000000000000000..5175247204fb8e94d0a9a95c74fc7feb6f0cea03
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/sklearn/neural_network/_multilayer_perceptron.py
@@ -0,0 +1,1645 @@
+"""Multi-layer Perceptron
+"""
+
+# Authors: Issam H. Laradji 
+#          Andreas Mueller
+#          Jiyuan Qian
+# License: BSD 3 clause
+
+import warnings
+from abc import ABCMeta, abstractmethod
+from itertools import chain
+from numbers import Integral, Real
+
+import numpy as np
+import scipy.optimize
+
+from ..base import (
+    BaseEstimator,
+    ClassifierMixin,
+    RegressorMixin,
+    _fit_context,
+    is_classifier,
+)
+from ..exceptions import ConvergenceWarning
+from ..metrics import accuracy_score, r2_score
+from ..model_selection import train_test_split
+from ..preprocessing import LabelBinarizer
+from ..utils import (
+    _safe_indexing,
+    check_random_state,
+    column_or_1d,
+    gen_batches,
+    shuffle,
+)
+from ..utils._param_validation import Interval, Options, StrOptions
+from ..utils.extmath import safe_sparse_dot
+from ..utils.metaestimators import available_if
+from ..utils.multiclass import (
+    _check_partial_fit_first_call,
+    type_of_target,
+    unique_labels,
+)
+from ..utils.optimize import _check_optimize_result
+from ..utils.validation import check_is_fitted
+from ._base import ACTIVATIONS, DERIVATIVES, LOSS_FUNCTIONS
+from ._stochastic_optimizers import AdamOptimizer, SGDOptimizer
+
+_STOCHASTIC_SOLVERS = ["sgd", "adam"]
+
+
+def _pack(coefs_, intercepts_):
+    """Pack the parameters into a single vector."""
+    return np.hstack([l.ravel() for l in coefs_ + intercepts_])
+
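+# For example, with layer sizes [2, 3, 1] the raveled 2x3 and 3x1 weight
+# matrices are followed by the length-3 and length-1 bias vectors, giving a
+# flat vector of 6 + 3 + 3 + 1 = 13 entries; BaseMultilayerPerceptron._unpack
+# reverses this layout.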
+
+class BaseMultilayerPerceptron(BaseEstimator, metaclass=ABCMeta):
+    """Base class for MLP classification and regression.
+
+    Warning: This class should not be used directly.
+    Use derived classes instead.
+
+    .. versionadded:: 0.18
+    """
+
+    _parameter_constraints: dict = {
+        "hidden_layer_sizes": [
+            "array-like",
+            Interval(Integral, 1, None, closed="left"),
+        ],
+        "activation": [StrOptions({"identity", "logistic", "tanh", "relu"})],
+        "solver": [StrOptions({"lbfgs", "sgd", "adam"})],
+        "alpha": [Interval(Real, 0, None, closed="left")],
+        "batch_size": [
+            StrOptions({"auto"}),
+            Interval(Integral, 1, None, closed="left"),
+        ],
+        "learning_rate": [StrOptions({"constant", "invscaling", "adaptive"})],
+        "learning_rate_init": [Interval(Real, 0, None, closed="neither")],
+        "power_t": [Interval(Real, 0, None, closed="left")],
+        "max_iter": [Interval(Integral, 1, None, closed="left")],
+        "shuffle": ["boolean"],
+        "random_state": ["random_state"],
+        "tol": [Interval(Real, 0, None, closed="left")],
+        "verbose": ["verbose"],
+        "warm_start": ["boolean"],
+        "momentum": [Interval(Real, 0, 1, closed="both")],
+        "nesterovs_momentum": ["boolean"],
+        "early_stopping": ["boolean"],
+        "validation_fraction": [Interval(Real, 0, 1, closed="left")],
+        "beta_1": [Interval(Real, 0, 1, closed="left")],
+        "beta_2": [Interval(Real, 0, 1, closed="left")],
+        "epsilon": [Interval(Real, 0, None, closed="neither")],
+        "n_iter_no_change": [
+            Interval(Integral, 1, None, closed="left"),
+            Options(Real, {np.inf}),
+        ],
+        "max_fun": [Interval(Integral, 1, None, closed="left")],
+    }
+
+    @abstractmethod
+    def __init__(
+        self,
+        hidden_layer_sizes,
+        activation,
+        solver,
+        alpha,
+        batch_size,
+        learning_rate,
+        learning_rate_init,
+        power_t,
+        max_iter,
+        loss,
+        shuffle,
+        random_state,
+        tol,
+        verbose,
+        warm_start,
+        momentum,
+        nesterovs_momentum,
+        early_stopping,
+        validation_fraction,
+        beta_1,
+        beta_2,
+        epsilon,
+        n_iter_no_change,
+        max_fun,
+    ):
+        self.activation = activation
+        self.solver = solver
+        self.alpha = alpha
+        self.batch_size = batch_size
+        self.learning_rate = learning_rate
+        self.learning_rate_init = learning_rate_init
+        self.power_t = power_t
+        self.max_iter = max_iter
+        self.loss = loss
+        self.hidden_layer_sizes = hidden_layer_sizes
+        self.shuffle = shuffle
+        self.random_state = random_state
+        self.tol = tol
+        self.verbose = verbose
+        self.warm_start = warm_start
+        self.momentum = momentum
+        self.nesterovs_momentum = nesterovs_momentum
+        self.early_stopping = early_stopping
+        self.validation_fraction = validation_fraction
+        self.beta_1 = beta_1
+        self.beta_2 = beta_2
+        self.epsilon = epsilon
+        self.n_iter_no_change = n_iter_no_change
+        self.max_fun = max_fun
+
+    def _unpack(self, packed_parameters):
+        """Extract the coefficients and intercepts from packed_parameters."""
+        for i in range(self.n_layers_ - 1):
+            start, end, shape = self._coef_indptr[i]
+            self.coefs_[i] = np.reshape(packed_parameters[start:end], shape)
+
+            start, end = self._intercept_indptr[i]
+            self.intercepts_[i] = packed_parameters[start:end]
+
+    def _forward_pass(self, activations):
+        """Perform a forward pass on the network by computing the values
+        of the neurons in the hidden layers and the output layer.
+
+        Parameters
+        ----------
+        activations : list, length = n_layers - 1
+            The ith element of the list holds the values of the ith layer.
+        """
+        hidden_activation = ACTIVATIONS[self.activation]
+        # Iterate over the hidden layers
+        for i in range(self.n_layers_ - 1):
+            activations[i + 1] = safe_sparse_dot(activations[i], self.coefs_[i])
+            activations[i + 1] += self.intercepts_[i]
+
+            # For the hidden layers
+            if (i + 1) != (self.n_layers_ - 1):
+                hidden_activation(activations[i + 1])
+
+        # For the last layer
+        output_activation = ACTIVATIONS[self.out_activation_]
+        output_activation(activations[i + 1])
+
+        return activations
+
+    def _forward_pass_fast(self, X, check_input=True):
+        """Predict using the trained model
+
+        This is the same as _forward_pass but does not record the activations
+        of all layers and only returns the last layer's activation.
+
+        Parameters
+        ----------
+        X : {array-like, sparse matrix} of shape (n_samples, n_features)
+            The input data.
+
+        check_input : bool, default=True
+            Perform input data validation or not.
+
+        Returns
+        -------
+        y_pred : ndarray of shape (n_samples,) or (n_samples, n_outputs)
+            The decision function of the samples for each class in the model.
+        """
+        if check_input:
+            X = self._validate_data(X, accept_sparse=["csr", "csc"], reset=False)
+
+        # Initialize first layer
+        activation = X
+
+        # Forward propagate
+        hidden_activation = ACTIVATIONS[self.activation]
+        for i in range(self.n_layers_ - 1):
+            activation = safe_sparse_dot(activation, self.coefs_[i])
+            activation += self.intercepts_[i]
+            if i != self.n_layers_ - 2:
+                hidden_activation(activation)
+        output_activation = ACTIVATIONS[self.out_activation_]
+        output_activation(activation)
+
+        return activation
+
+    def _compute_loss_grad(
+        self, layer, n_samples, activations, deltas, coef_grads, intercept_grads
+    ):
+        """Compute the gradient of loss with respect to coefs and intercept for
+        specified layer.
+
+        This function does backpropagation for the specified layer only.
+        """
+        coef_grads[layer] = safe_sparse_dot(activations[layer].T, deltas[layer])
+        coef_grads[layer] += self.alpha * self.coefs_[layer]
+        coef_grads[layer] /= n_samples
+
+        intercept_grads[layer] = np.mean(deltas[layer], 0)
+
+    def _loss_grad_lbfgs(
+        self, packed_coef_inter, X, y, activations, deltas, coef_grads, intercept_grads
+    ):
+        """Compute the MLP loss function and its corresponding derivatives
+        with respect to the different parameters given in the initialization.
+
+        Returned gradients are packed in a single vector so they can be
+        used by lbfgs.
+
+        Parameters
+        ----------
+        packed_coef_inter : ndarray
+            A vector comprising the flattened coefficients and intercepts.
+
+        X : {array-like, sparse matrix} of shape (n_samples, n_features)
+            The input data.
+
+        y : ndarray of shape (n_samples,)
+            The target values.
+
+        activations : list, length = n_layers - 1
+            The ith element of the list holds the values of the ith layer.
+
+        deltas : list, length = n_layers - 1
+            The ith element of the list holds the difference between the
+            activations of the i + 1 layer and the backpropagated error.
+            More specifically, deltas are gradients of loss with respect to z
+            in each layer, where z = wx + b is the value of a particular layer
+            before passing through the activation function
+
+        coef_grads : list, length = n_layers - 1
+            The ith element contains the amount of change used to update the
+            coefficient parameters of the ith layer in an iteration.
+
+        intercept_grads : list, length = n_layers - 1
+            The ith element contains the amount of change used to update the
+            intercept parameters of the ith layer in an iteration.
+
+        Returns
+        -------
+        loss : float
+        grad : array-like, shape (number of nodes of all layers,)
+        """
+        self._unpack(packed_coef_inter)
+        loss, coef_grads, intercept_grads = self._backprop(
+            X, y, activations, deltas, coef_grads, intercept_grads
+        )
+        grad = _pack(coef_grads, intercept_grads)
+        return loss, grad
+
+    def _backprop(self, X, y, activations, deltas, coef_grads, intercept_grads):
+        """Compute the MLP loss function and its corresponding derivatives
+        with respect to each parameter: weights and bias vectors.
+
+        Parameters
+        ----------
+        X : {array-like, sparse matrix} of shape (n_samples, n_features)
+            The input data.
+
+        y : ndarray of shape (n_samples,)
+            The target values.
+
+        activations : list, length = n_layers - 1
+             The ith element of the list holds the values of the ith layer.
+
+        deltas : list, length = n_layers - 1
+            The ith element of the list holds the difference between the
+            activations of the i + 1 layer and the backpropagated error.
+            More specifically, deltas are gradients of loss with respect to z
+            in each layer, where z = wx + b is the value of a particular layer
+            before passing through the activation function
+
+        coef_grads : list, length = n_layers - 1
+            The ith element contains the amount of change used to update the
+            coefficient parameters of the ith layer in an iteration.
+
+        intercept_grads : list, length = n_layers - 1
+            The ith element contains the amount of change used to update the
+            intercept parameters of the ith layer in an iteration.
+
+        Returns
+        -------
+        loss : float
+        coef_grads : list, length = n_layers - 1
+        intercept_grads : list, length = n_layers - 1
+        """
+        n_samples = X.shape[0]
+
+        # Forward propagate
+        activations = self._forward_pass(activations)
+
+        # Get loss
+        loss_func_name = self.loss
+        if loss_func_name == "log_loss" and self.out_activation_ == "logistic":
+            loss_func_name = "binary_log_loss"
+        loss = LOSS_FUNCTIONS[loss_func_name](y, activations[-1])
+        # Add L2 regularization term to loss
+        values = 0
+        for s in self.coefs_:
+            s = s.ravel()
+            values += np.dot(s, s)
+        loss += (0.5 * self.alpha) * values / n_samples
+
+        # Backward propagate
+        last = self.n_layers_ - 2
+
+        # The calculation of delta[last] here works with following
+        # combinations of output activation and loss function:
+        # sigmoid and binary cross entropy, softmax and categorical cross
+        # entropy, and identity with squared loss
+        deltas[last] = activations[-1] - y
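+        # For example, with softmax outputs and categorical cross-entropy,
+        # d/dz_k of -sum_j y_j * log(softmax(z)_j) simplifies to
+        # softmax(z)_k - y_k, i.e. exactly activations[-1] - y.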
+
+        # Compute gradient for the last layer
+        self._compute_loss_grad(
+            last, n_samples, activations, deltas, coef_grads, intercept_grads
+        )
+
+        inplace_derivative = DERIVATIVES[self.activation]
+        # Iterate over the hidden layers
+        for i in range(self.n_layers_ - 2, 0, -1):
+            deltas[i - 1] = safe_sparse_dot(deltas[i], self.coefs_[i].T)
+            inplace_derivative(activations[i], deltas[i - 1])
+
+            self._compute_loss_grad(
+                i - 1, n_samples, activations, deltas, coef_grads, intercept_grads
+            )
+
+        return loss, coef_grads, intercept_grads
+
+    def _initialize(self, y, layer_units, dtype):
+        # set all attributes, allocate weights etc. for first call
+        # Initialize parameters
+        self.n_iter_ = 0
+        self.t_ = 0
+        self.n_outputs_ = y.shape[1]
+
+        # Compute the number of layers
+        self.n_layers_ = len(layer_units)
+
+        # Output for regression
+        if not is_classifier(self):
+            self.out_activation_ = "identity"
+        # Output for multi class
+        elif self._label_binarizer.y_type_ == "multiclass":
+            self.out_activation_ = "softmax"
+        # Output for binary class and multi-label
+        else:
+            self.out_activation_ = "logistic"
+
+        # Initialize coefficient and intercept layers
+        self.coefs_ = []
+        self.intercepts_ = []
+
+        for i in range(self.n_layers_ - 1):
+            coef_init, intercept_init = self._init_coef(
+                layer_units[i], layer_units[i + 1], dtype
+            )
+            self.coefs_.append(coef_init)
+            self.intercepts_.append(intercept_init)
+
+        if self.solver in _STOCHASTIC_SOLVERS:
+            self.loss_curve_ = []
+            self._no_improvement_count = 0
+            if self.early_stopping:
+                self.validation_scores_ = []
+                self.best_validation_score_ = -np.inf
+                self.best_loss_ = None
+            else:
+                self.best_loss_ = np.inf
+                self.validation_scores_ = None
+                self.best_validation_score_ = None
+
+    def _init_coef(self, fan_in, fan_out, dtype):
+        # Use the initialization method recommended by
+        # Glorot et al.
+        factor = 6.0
+        if self.activation == "logistic":
+            factor = 2.0
+        init_bound = np.sqrt(factor / (fan_in + fan_out))
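+        # Weights and biases are then drawn uniformly from
+        # [-init_bound, init_bound] (a Glorot-style uniform scheme, with a
+        # smaller factor when the logistic activation is used).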
+
+        # Generate weights and bias:
+        coef_init = self._random_state.uniform(
+            -init_bound, init_bound, (fan_in, fan_out)
+        )
+        intercept_init = self._random_state.uniform(-init_bound, init_bound, fan_out)
+        coef_init = coef_init.astype(dtype, copy=False)
+        intercept_init = intercept_init.astype(dtype, copy=False)
+        return coef_init, intercept_init
+
+    def _fit(self, X, y, incremental=False):
+        # Make sure self.hidden_layer_sizes is a list
+        hidden_layer_sizes = self.hidden_layer_sizes
+        if not hasattr(hidden_layer_sizes, "__iter__"):
+            hidden_layer_sizes = [hidden_layer_sizes]
+        hidden_layer_sizes = list(hidden_layer_sizes)
+
+        if np.any(np.array(hidden_layer_sizes) <= 0):
+            raise ValueError(
+                "hidden_layer_sizes must be > 0, got %s." % hidden_layer_sizes
+            )
+        first_pass = not hasattr(self, "coefs_") or (
+            not self.warm_start and not incremental
+        )
+
+        X, y = self._validate_input(X, y, incremental, reset=first_pass)
+
+        n_samples, n_features = X.shape
+
+        # Ensure y is 2D
+        if y.ndim == 1:
+            y = y.reshape((-1, 1))
+
+        self.n_outputs_ = y.shape[1]
+
+        layer_units = [n_features] + hidden_layer_sizes + [self.n_outputs_]
+
+        # check random state
+        self._random_state = check_random_state(self.random_state)
+
+        if first_pass:
+            # First time training the model
+            self._initialize(y, layer_units, X.dtype)
+
+        # Initialize lists
+        activations = [X] + [None] * (len(layer_units) - 1)
+        deltas = [None] * (len(activations) - 1)
+
+        coef_grads = [
+            np.empty((n_fan_in_, n_fan_out_), dtype=X.dtype)
+            for n_fan_in_, n_fan_out_ in zip(layer_units[:-1], layer_units[1:])
+        ]
+
+        intercept_grads = [
+            np.empty(n_fan_out_, dtype=X.dtype) for n_fan_out_ in layer_units[1:]
+        ]
+
+        # Run the Stochastic optimization solver
+        if self.solver in _STOCHASTIC_SOLVERS:
+            self._fit_stochastic(
+                X,
+                y,
+                activations,
+                deltas,
+                coef_grads,
+                intercept_grads,
+                layer_units,
+                incremental,
+            )
+
+        # Run the LBFGS solver
+        elif self.solver == "lbfgs":
+            self._fit_lbfgs(
+                X, y, activations, deltas, coef_grads, intercept_grads, layer_units
+            )
+
+        # validate parameter weights
+        weights = chain(self.coefs_, self.intercepts_)
+        if not all(np.isfinite(w).all() for w in weights):
+            raise ValueError(
+                "Solver produced non-finite parameter weights. The input data may"
+                " contain large values and need to be preprocessed."
+            )
+
+        return self
+
+    def _fit_lbfgs(
+        self, X, y, activations, deltas, coef_grads, intercept_grads, layer_units
+    ):
+        # Store meta information for the parameters
+        self._coef_indptr = []
+        self._intercept_indptr = []
+        start = 0
+
+        # Save sizes and indices of coefficients for faster unpacking
+        for i in range(self.n_layers_ - 1):
+            n_fan_in, n_fan_out = layer_units[i], layer_units[i + 1]
+
+            end = start + (n_fan_in * n_fan_out)
+            self._coef_indptr.append((start, end, (n_fan_in, n_fan_out)))
+            start = end
+
+        # Save sizes and indices of intercepts for faster unpacking
+        for i in range(self.n_layers_ - 1):
+            end = start + layer_units[i + 1]
+            self._intercept_indptr.append((start, end))
+            start = end
+
+        # Run LBFGS
+        packed_coef_inter = _pack(self.coefs_, self.intercepts_)
+
+        if self.verbose is True or self.verbose >= 1:
+            iprint = 1
+        else:
+            iprint = -1
+
+        opt_res = scipy.optimize.minimize(
+            self._loss_grad_lbfgs,
+            packed_coef_inter,
+            method="L-BFGS-B",
+            jac=True,
+            options={
+                "maxfun": self.max_fun,
+                "maxiter": self.max_iter,
+                "iprint": iprint,
+                "gtol": self.tol,
+            },
+            args=(X, y, activations, deltas, coef_grads, intercept_grads),
+        )
+        self.n_iter_ = _check_optimize_result("lbfgs", opt_res, self.max_iter)
+        self.loss_ = opt_res.fun
+        self._unpack(opt_res.x)
+
+    def _fit_stochastic(
+        self,
+        X,
+        y,
+        activations,
+        deltas,
+        coef_grads,
+        intercept_grads,
+        layer_units,
+        incremental,
+    ):
+        params = self.coefs_ + self.intercepts_
+        if not incremental or not hasattr(self, "_optimizer"):
+            if self.solver == "sgd":
+                self._optimizer = SGDOptimizer(
+                    params,
+                    self.learning_rate_init,
+                    self.learning_rate,
+                    self.momentum,
+                    self.nesterovs_momentum,
+                    self.power_t,
+                )
+            elif self.solver == "adam":
+                self._optimizer = AdamOptimizer(
+                    params,
+                    self.learning_rate_init,
+                    self.beta_1,
+                    self.beta_2,
+                    self.epsilon,
+                )
+
+        # early_stopping in partial_fit doesn't make sense
+        if self.early_stopping and incremental:
+            raise ValueError("partial_fit does not support early_stopping=True")
+        early_stopping = self.early_stopping
+        if early_stopping:
+            # don't stratify in multilabel classification
+            should_stratify = is_classifier(self) and self.n_outputs_ == 1
+            stratify = y if should_stratify else None
+            X, X_val, y, y_val = train_test_split(
+                X,
+                y,
+                random_state=self._random_state,
+                test_size=self.validation_fraction,
+                stratify=stratify,
+            )
+            if is_classifier(self):
+                y_val = self._label_binarizer.inverse_transform(y_val)
+        else:
+            X_val = None
+            y_val = None
+
+        n_samples = X.shape[0]
+        sample_idx = np.arange(n_samples, dtype=int)
+
+        if self.batch_size == "auto":
+            batch_size = min(200, n_samples)
+        else:
+            if self.batch_size > n_samples:
+                warnings.warn(
+                    "Got `batch_size` less than 1 or larger than "
+                    "sample size. It is going to be clipped"
+                )
+            batch_size = np.clip(self.batch_size, 1, n_samples)
+
+        try:
+            self.n_iter_ = 0
+            for it in range(self.max_iter):
+                if self.shuffle:
+                    # Only shuffle the sample indices instead of X and y to
+                    # reduce the memory footprint. These indices will be used
+                    # to slice the X and y.
+                    sample_idx = shuffle(sample_idx, random_state=self._random_state)
+
+                accumulated_loss = 0.0
+                for batch_slice in gen_batches(n_samples, batch_size):
+                    if self.shuffle:
+                        X_batch = _safe_indexing(X, sample_idx[batch_slice])
+                        y_batch = y[sample_idx[batch_slice]]
+                    else:
+                        X_batch = X[batch_slice]
+                        y_batch = y[batch_slice]
+
+                    activations[0] = X_batch
+                    batch_loss, coef_grads, intercept_grads = self._backprop(
+                        X_batch,
+                        y_batch,
+                        activations,
+                        deltas,
+                        coef_grads,
+                        intercept_grads,
+                    )
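+                    # batch_loss is a per-sample average, so weight it by
+                    # the batch length; the total is divided by the full
+                    # sample count after the inner loop.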
+                    accumulated_loss += batch_loss * (
+                        batch_slice.stop - batch_slice.start
+                    )
+
+                    # update weights
+                    grads = coef_grads + intercept_grads
+                    self._optimizer.update_params(params, grads)
+
+                self.n_iter_ += 1
+                self.loss_ = accumulated_loss / X.shape[0]
+
+                self.t_ += n_samples
+                self.loss_curve_.append(self.loss_)
+                if self.verbose:
+                    print("Iteration %d, loss = %.8f" % (self.n_iter_, self.loss_))
+
+                # update no_improvement_count based on training loss or
+                # validation score according to early_stopping
+                self._update_no_improvement_count(early_stopping, X_val, y_val)
+
+                # for learning rate that needs to be updated at iteration end
+                self._optimizer.iteration_ends(self.t_)
+
+                if self._no_improvement_count > self.n_iter_no_change:
+                    # not better than last `n_iter_no_change` iterations by tol
+                    # stop or decrease learning rate
+                    if early_stopping:
+                        msg = (
+                            "Validation score did not improve more than "
+                            "tol=%f for %d consecutive epochs."
+                            % (self.tol, self.n_iter_no_change)
+                        )
+                    else:
+                        msg = (
+                            "Training loss did not improve more than tol=%f"
+                            " for %d consecutive epochs."
+                            % (self.tol, self.n_iter_no_change)
+                        )
+
+                    is_stopping = self._optimizer.trigger_stopping(msg, self.verbose)
+                    if is_stopping:
+                        break
+                    else:
+                        self._no_improvement_count = 0
+
+                if incremental:
+                    break
+
+                if self.n_iter_ == self.max_iter:
+                    warnings.warn(
+                        "Stochastic Optimizer: Maximum iterations (%d) "
+                        "reached and the optimization hasn't converged yet."
+                        % self.max_iter,
+                        ConvergenceWarning,
+                    )
+        except KeyboardInterrupt:
+            warnings.warn("Training interrupted by user.")
+
+        if early_stopping:
+            # restore best weights
+            self.coefs_ = self._best_coefs
+            self.intercepts_ = self._best_intercepts
+
+    def _update_no_improvement_count(self, early_stopping, X_val, y_val):
+        if early_stopping:
+            # compute validation score, use that for stopping
+            self.validation_scores_.append(self._score(X_val, y_val))
+
+            if self.verbose:
+                print("Validation score: %f" % self.validation_scores_[-1])
+            # update best parameters
+            # use validation_scores_, not loss_curve_
+            # let's hope no-one overloads .score with mse
+            last_valid_score = self.validation_scores_[-1]
+
+            if last_valid_score < (self.best_validation_score_ + self.tol):
+                self._no_improvement_count += 1
+            else:
+                self._no_improvement_count = 0
+
+            if last_valid_score > self.best_validation_score_:
+                self.best_validation_score_ = last_valid_score
+                self._best_coefs = [c.copy() for c in self.coefs_]
+                self._best_intercepts = [i.copy() for i in self.intercepts_]
+        else:
+            if self.loss_curve_[-1] > self.best_loss_ - self.tol:
+                self._no_improvement_count += 1
+            else:
+                self._no_improvement_count = 0
+            if self.loss_curve_[-1] < self.best_loss_:
+                self.best_loss_ = self.loss_curve_[-1]
+
+    @_fit_context(prefer_skip_nested_validation=True)
+    def fit(self, X, y):
+        """Fit the model to data matrix X and target(s) y.
+
+        Parameters
+        ----------
+        X : ndarray or sparse matrix of shape (n_samples, n_features)
+            The input data.
+
+        y : ndarray of shape (n_samples,) or (n_samples, n_outputs)
+            The target values (class labels in classification, real numbers in
+            regression).
+
+        Returns
+        -------
+        self : object
+            Returns a trained MLP model.
+        """
+        return self._fit(X, y, incremental=False)
+
+    def _check_solver(self):
+        if self.solver not in _STOCHASTIC_SOLVERS:
+            raise AttributeError(
+                "partial_fit is only available for stochastic"
+                " optimizers. %s is not stochastic."
+                % self.solver
+            )
+        return True
+
+
+class MLPClassifier(ClassifierMixin, BaseMultilayerPerceptron):
+    """Multi-layer Perceptron classifier.
+
+    This model optimizes the log-loss function using LBFGS or stochastic
+    gradient descent.
+
+    .. versionadded:: 0.18
+
+    Parameters
+    ----------
+    hidden_layer_sizes : array-like of shape (n_layers - 2,), default=(100,)
+        The ith element represents the number of neurons in the ith
+        hidden layer.
+
+    activation : {'identity', 'logistic', 'tanh', 'relu'}, default='relu'
+        Activation function for the hidden layer.
+
+        - 'identity', no-op activation, useful to implement linear bottleneck,
+          returns f(x) = x
+
+        - 'logistic', the logistic sigmoid function,
+          returns f(x) = 1 / (1 + exp(-x)).
+
+        - 'tanh', the hyperbolic tan function,
+          returns f(x) = tanh(x).
+
+        - 'relu', the rectified linear unit function,
+          returns f(x) = max(0, x)
+
+    solver : {'lbfgs', 'sgd', 'adam'}, default='adam'
+        The solver for weight optimization.
+
+        - 'lbfgs' is an optimizer in the family of quasi-Newton methods.
+
+        - 'sgd' refers to stochastic gradient descent.
+
+        - 'adam' refers to a stochastic gradient-based optimizer proposed
+          by Kingma, Diederik, and Jimmy Ba
+
+        Note: The default solver 'adam' works pretty well on relatively
+        large datasets (with thousands of training samples or more) in terms of
+        both training time and validation score.
+        For small datasets, however, 'lbfgs' can converge faster and perform
+        better.
+
+    alpha : float, default=0.0001
+        Strength of the L2 regularization term. The L2 regularization term
+        is divided by the sample size when added to the loss.
+
+    batch_size : int, default='auto'
+        Size of minibatches for stochastic optimizers.
+        If the solver is 'lbfgs', the classifier will not use minibatch.
+        When set to "auto", `batch_size=min(200, n_samples)`.
+
+    learning_rate : {'constant', 'invscaling', 'adaptive'}, default='constant'
+        Learning rate schedule for weight updates.
+
+        - 'constant' is a constant learning rate given by
+          'learning_rate_init'.
+
+        - 'invscaling' gradually decreases the learning rate at each
+          time step 't' using an inverse scaling exponent of 'power_t'.
+          effective_learning_rate = learning_rate_init / pow(t, power_t)
+
+        - 'adaptive' keeps the learning rate constant to
+          'learning_rate_init' as long as training loss keeps decreasing.
+          Each time two consecutive epochs fail to decrease training loss by at
+          least tol, or fail to increase validation score by at least tol if
+          'early_stopping' is on, the current learning rate is divided by 5.
+
+        Only used when ``solver='sgd'``.
+
+    learning_rate_init : float, default=0.001
+        The initial learning rate used. It controls the step-size
+        in updating the weights. Only used when solver='sgd' or 'adam'.
+
+    power_t : float, default=0.5
+        The exponent for inverse scaling learning rate.
+        It is used in updating effective learning rate when the learning_rate
+        is set to 'invscaling'. Only used when solver='sgd'.
+
+    max_iter : int, default=200
+        Maximum number of iterations. The solver iterates until convergence
+        (determined by 'tol') or this number of iterations. For stochastic
+        solvers ('sgd', 'adam'), note that this determines the number of epochs
+        (how many times each data point will be used), not the number of
+        gradient steps.
+
+    shuffle : bool, default=True
+        Whether to shuffle samples in each iteration. Only used when
+        solver='sgd' or 'adam'.
+
+    random_state : int, RandomState instance, default=None
+        Determines random number generation for weights and bias
+        initialization, train-test split if early stopping is used, and batch
+        sampling when solver='sgd' or 'adam'.
+        Pass an int for reproducible results across multiple function calls.
+        See :term:`Glossary <random_state>`.
+
+    tol : float, default=1e-4
+        Tolerance for the optimization. When the loss or score is not improving
+        by at least ``tol`` for ``n_iter_no_change`` consecutive iterations,
+        unless ``learning_rate`` is set to 'adaptive', convergence is
+        considered to be reached and training stops.
+
+    verbose : bool, default=False
+        Whether to print progress messages to stdout.
+
+    warm_start : bool, default=False
+        When set to True, reuse the solution of the previous
+        call to fit as initialization, otherwise, just erase the
+        previous solution. See :term:`the Glossary <warm_start>`.
+
+    momentum : float, default=0.9
+        Momentum for gradient descent update. Should be between 0 and 1. Only
+        used when solver='sgd'.
+
+    nesterovs_momentum : bool, default=True
+        Whether to use Nesterov's momentum. Only used when solver='sgd' and
+        momentum > 0.
+
+    early_stopping : bool, default=False
+        Whether to use early stopping to terminate training when validation
+        score is not improving. If set to true, it will automatically set
+        aside 10% of training data as validation and terminate training when
+        validation score is not improving by at least ``tol`` for
+        ``n_iter_no_change`` consecutive epochs. The split is stratified,
+        except in a multilabel setting.
+        If early stopping is False, then the training stops when the training
+        loss does not improve by more than tol for n_iter_no_change consecutive
+        passes over the training set.
+        Only effective when solver='sgd' or 'adam'.
+
+    validation_fraction : float, default=0.1
+        The proportion of training data to set aside as validation set for
+        early stopping. Must be between 0 and 1.
+        Only used if early_stopping is True.
+
+    beta_1 : float, default=0.9
+        Exponential decay rate for estimates of first moment vector in adam,
+        should be in [0, 1). Only used when solver='adam'.
+
+    beta_2 : float, default=0.999
+        Exponential decay rate for estimates of second moment vector in adam,
+        should be in [0, 1). Only used when solver='adam'.
+
+    epsilon : float, default=1e-8
+        Value for numerical stability in adam. Only used when solver='adam'.
+
+    n_iter_no_change : int, default=10
+        Maximum number of epochs to not meet ``tol`` improvement.
+        Only effective when solver='sgd' or 'adam'.
+
+        .. versionadded:: 0.20
+
+    max_fun : int, default=15000
+        Only used when solver='lbfgs'. Maximum number of loss function calls.
+        The solver iterates until convergence (determined by 'tol'), number
+        of iterations reaches max_iter, or this number of loss function calls.
+        Note that number of loss function calls will be greater than or equal
+        to the number of iterations for the `MLPClassifier`.
+
+        .. versionadded:: 0.22
+
+    Attributes
+    ----------
+    classes_ : ndarray or list of ndarray of shape (n_classes,)
+        Class labels for each output.
+
+    loss_ : float
+        The current loss computed with the loss function.
+
+    best_loss_ : float or None
+        The minimum loss reached by the solver throughout fitting.
+        If `early_stopping=True`, this attribute is set to `None`. Refer to
+        the `best_validation_score_` fitted attribute instead.
+
+    loss_curve_ : list of shape (`n_iter_`,)
+        The ith element in the list represents the loss at the ith iteration.
+
+    validation_scores_ : list of shape (`n_iter_`,) or None
+        The score at each iteration on a held-out validation set. The score
+        reported is the accuracy score. Only available if `early_stopping=True`,
+        otherwise the attribute is set to `None`.
+
+    best_validation_score_ : float or None
+        The best validation score (i.e. accuracy score) that triggered the
+        early stopping. Only available if `early_stopping=True`, otherwise the
+        attribute is set to `None`.
+
+    t_ : int
+        The number of training samples seen by the solver during fitting.
+
+    coefs_ : list of shape (n_layers - 1,)
+        The ith element in the list represents the weight matrix corresponding
+        to layer i.
+
+    intercepts_ : list of shape (n_layers - 1,)
+        The ith element in the list represents the bias vector corresponding to
+        layer i + 1.
+
+    n_features_in_ : int
+        Number of features seen during :term:`fit`.
+
+        .. versionadded:: 0.24
+
+    feature_names_in_ : ndarray of shape (`n_features_in_`,)
+        Names of features seen during :term:`fit`. Defined only when `X`
+        has feature names that are all strings.
+
+        .. versionadded:: 1.0
+
+    n_iter_ : int
+        The number of iterations the solver has run.
+
+    n_layers_ : int
+        Number of layers.
+
+    n_outputs_ : int
+        Number of outputs.
+
+    out_activation_ : str
+        Name of the output activation function.
+
+    See Also
+    --------
+    MLPRegressor : Multi-layer Perceptron regressor.
+    BernoulliRBM : Bernoulli Restricted Boltzmann Machine (RBM).
+
+    Notes
+    -----
+    MLPClassifier trains iteratively since at each time step
+    the partial derivatives of the loss function with respect to the model
+    parameters are computed to update the parameters.
+
+    It can also have a regularization term added to the loss function
+    that shrinks model parameters to prevent overfitting.
+
+    This implementation works with data represented as dense numpy arrays or
+    sparse scipy arrays of floating point values.
+
+    References
+    ----------
+    Hinton, Geoffrey E. "Connectionist learning procedures."
+    Artificial intelligence 40.1 (1989): 185-234.
+
+    Glorot, Xavier, and Yoshua Bengio.
+    "Understanding the difficulty of training deep feedforward neural networks."
+    International Conference on Artificial Intelligence and Statistics. 2010.
+
+    :arxiv:`He, Kaiming, et al (2015). "Delving deep into rectifiers:
+    Surpassing human-level performance on imagenet classification." <1502.01852>`
+
+    :arxiv:`Kingma, Diederik, and Jimmy Ba (2014)
+    "Adam: A method for stochastic optimization." <1412.6980>`
+
+    Examples
+    --------
+    >>> from sklearn.neural_network import MLPClassifier
+    >>> from sklearn.datasets import make_classification
+    >>> from sklearn.model_selection import train_test_split
+    >>> X, y = make_classification(n_samples=100, random_state=1)
+    >>> X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y,
+    ...                                                     random_state=1)
+    >>> clf = MLPClassifier(random_state=1, max_iter=300).fit(X_train, y_train)
+    >>> clf.predict_proba(X_test[:1])
+    array([[0.038..., 0.961...]])
+    >>> clf.predict(X_test[:5, :])
+    array([1, 0, 1, 0, 1])
+    >>> clf.score(X_test, y_test)
+    0.8...
+    """
+
+    def __init__(
+        self,
+        hidden_layer_sizes=(100,),
+        activation="relu",
+        *,
+        solver="adam",
+        alpha=0.0001,
+        batch_size="auto",
+        learning_rate="constant",
+        learning_rate_init=0.001,
+        power_t=0.5,
+        max_iter=200,
+        shuffle=True,
+        random_state=None,
+        tol=1e-4,
+        verbose=False,
+        warm_start=False,
+        momentum=0.9,
+        nesterovs_momentum=True,
+        early_stopping=False,
+        validation_fraction=0.1,
+        beta_1=0.9,
+        beta_2=0.999,
+        epsilon=1e-8,
+        n_iter_no_change=10,
+        max_fun=15000,
+    ):
+        super().__init__(
+            hidden_layer_sizes=hidden_layer_sizes,
+            activation=activation,
+            solver=solver,
+            alpha=alpha,
+            batch_size=batch_size,
+            learning_rate=learning_rate,
+            learning_rate_init=learning_rate_init,
+            power_t=power_t,
+            max_iter=max_iter,
+            loss="log_loss",
+            shuffle=shuffle,
+            random_state=random_state,
+            tol=tol,
+            verbose=verbose,
+            warm_start=warm_start,
+            momentum=momentum,
+            nesterovs_momentum=nesterovs_momentum,
+            early_stopping=early_stopping,
+            validation_fraction=validation_fraction,
+            beta_1=beta_1,
+            beta_2=beta_2,
+            epsilon=epsilon,
+            n_iter_no_change=n_iter_no_change,
+            max_fun=max_fun,
+        )
+
+    def _validate_input(self, X, y, incremental, reset):
+        X, y = self._validate_data(
+            X,
+            y,
+            accept_sparse=["csr", "csc"],
+            multi_output=True,
+            dtype=(np.float64, np.float32),
+            reset=reset,
+        )
+        if y.ndim == 2 and y.shape[1] == 1:
+            y = column_or_1d(y, warn=True)
+
+        # Matrix of actions to be taken under the possible combinations:
+        # The case that incremental == True and classes_ not defined is
+        # already checked by _check_partial_fit_first_call that is called
+        # in _partial_fit below.
+        # The cases are already grouped into the respective if blocks below.
+        #
+        # incremental warm_start classes_ def  action
+        #    0            0         0        define classes_
+        #    0            1         0        define classes_
+        #    0            0         1        redefine classes_
+        #
+        #    0            1         1        check compat warm_start
+        #    1            1         1        check compat warm_start
+        #
+        #    1            0         1        check compat last fit
+        #
+        # Note the reliance on short-circuiting here, so that the second
+        # or part implies that classes_ is defined.
+        if (not hasattr(self, "classes_")) or (not self.warm_start and not incremental):
+            self._label_binarizer = LabelBinarizer()
+            self._label_binarizer.fit(y)
+            self.classes_ = self._label_binarizer.classes_
+        else:
+            classes = unique_labels(y)
+            if self.warm_start:
+                if set(classes) != set(self.classes_):
+                    raise ValueError(
+                        "warm_start can only be used where `y` has the same "
+                        "classes as in the previous call to fit. Previously "
+                        f"got {self.classes_}, `y` has {classes}"
+                    )
+            elif len(np.setdiff1d(classes, self.classes_, assume_unique=True)):
+                raise ValueError(
+                    "`y` has classes not in `self.classes_`. "
+                    f"`self.classes_` has {self.classes_}. 'y' has {classes}."
+                )
+
+        # This downcast to bool is to prevent upcasting when working with
+        # float32 data
+        y = self._label_binarizer.transform(y).astype(bool)
+        return X, y
+
+    def predict(self, X):
+        """Predict using the multi-layer perceptron classifier.
+
+        Parameters
+        ----------
+        X : {array-like, sparse matrix} of shape (n_samples, n_features)
+            The input data.
+
+        Returns
+        -------
+        y : ndarray, shape (n_samples,) or (n_samples, n_classes)
+            The predicted classes.
+        """
+        check_is_fitted(self)
+        return self._predict(X)
+
+    def _predict(self, X, check_input=True):
+        """Private predict method with optional input validation"""
+        y_pred = self._forward_pass_fast(X, check_input=check_input)
+
+        if self.n_outputs_ == 1:
+            y_pred = y_pred.ravel()
+
+        return self._label_binarizer.inverse_transform(y_pred)
+
+    def _score(self, X, y):
+        """Private score method without input validation"""
+        # Input validation would remove feature names, so we disable it
+        return accuracy_score(y, self._predict(X, check_input=False))
+
+    @available_if(lambda est: est._check_solver())
+    @_fit_context(prefer_skip_nested_validation=True)
+    def partial_fit(self, X, y, classes=None):
+        """Update the model with a single iteration over the given data.
+
+        Parameters
+        ----------
+        X : {array-like, sparse matrix} of shape (n_samples, n_features)
+            The input data.
+
+        y : array-like of shape (n_samples,)
+            The target values.
+
+        classes : array of shape (n_classes,), default=None
+            Classes across all calls to partial_fit.
+            Can be obtained via `np.unique(y_all)`, where y_all is the
+            target vector of the entire dataset.
+            This argument is required for the first call to partial_fit
+            and can be omitted in the subsequent calls.
+            Note that y doesn't need to contain all labels in `classes`.
+
+        Returns
+        -------
+        self : object
+            Trained MLP model.
+        """
+        if _check_partial_fit_first_call(self, classes):
+            self._label_binarizer = LabelBinarizer()
+            if type_of_target(y).startswith("multilabel"):
+                self._label_binarizer.fit(y)
+            else:
+                self._label_binarizer.fit(classes)
+
+        return self._fit(X, y, incremental=True)
+
+    def predict_log_proba(self, X):
+        """Return the log of probability estimates.
+
+        Parameters
+        ----------
+        X : ndarray of shape (n_samples, n_features)
+            The input data.
+
+        Returns
+        -------
+        log_y_prob : ndarray of shape (n_samples, n_classes)
+            The predicted log-probability of the sample for each class
+            in the model, where classes are ordered as they are in
+            `self.classes_`. Equivalent to `log(predict_proba(X))`.
+        """
+        y_prob = self.predict_proba(X)
+        return np.log(y_prob, out=y_prob)
+
+    def predict_proba(self, X):
+        """Probability estimates.
+
+        Parameters
+        ----------
+        X : {array-like, sparse matrix} of shape (n_samples, n_features)
+            The input data.
+
+        Returns
+        -------
+        y_prob : ndarray of shape (n_samples, n_classes)
+            The predicted probability of the sample for each class in the
+            model, where classes are ordered as they are in `self.classes_`.
+        """
+        check_is_fitted(self)
+        y_pred = self._forward_pass_fast(X)
+
+        if self.n_outputs_ == 1:
+            y_pred = y_pred.ravel()
+
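+        # Binary case: the network outputs a single column with P(classes_[1]);
+        # stack it with its complement so columns follow `self.classes_`.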
+        if y_pred.ndim == 1:
+            return np.vstack([1 - y_pred, y_pred]).T
+        else:
+            return y_pred
+
+    def _more_tags(self):
+        return {"multilabel": True}
+
+
+class MLPRegressor(RegressorMixin, BaseMultilayerPerceptron):
+    """Multi-layer Perceptron regressor.
+
+    This model optimizes the squared error using LBFGS or stochastic gradient
+    descent.
+
+    .. versionadded:: 0.18
+
+    Parameters
+    ----------
+    hidden_layer_sizes : array-like of shape (n_layers - 2,), default=(100,)
+        The ith element represents the number of neurons in the ith
+        hidden layer.
+
+    activation : {'identity', 'logistic', 'tanh', 'relu'}, default='relu'
+        Activation function for the hidden layer.
+
+        - 'identity', no-op activation, useful to implement linear bottleneck,
+          returns f(x) = x
+
+        - 'logistic', the logistic sigmoid function,
+          returns f(x) = 1 / (1 + exp(-x)).
+
+        - 'tanh', the hyperbolic tan function,
+          returns f(x) = tanh(x).
+
+        - 'relu', the rectified linear unit function,
+          returns f(x) = max(0, x)
+
+    solver : {'lbfgs', 'sgd', 'adam'}, default='adam'
+        The solver for weight optimization.
+
+        - 'lbfgs' is an optimizer in the family of quasi-Newton methods.
+
+        - 'sgd' refers to stochastic gradient descent.
+
+        - 'adam' refers to a stochastic gradient-based optimizer proposed by
+          Kingma, Diederik, and Jimmy Ba
+
+        Note: The default solver 'adam' works pretty well on relatively
+        large datasets (with thousands of training samples or more) in terms of
+        both training time and validation score.
+        For small datasets, however, 'lbfgs' can converge faster and perform
+        better.
+
+    alpha : float, default=0.0001
+        Strength of the L2 regularization term. The L2 regularization term
+        is divided by the sample size when added to the loss.
+
+    batch_size : int, default='auto'
+        Size of minibatches for stochastic optimizers.
+        If the solver is 'lbfgs', the regressor will not use minibatch.
+        When set to "auto", `batch_size=min(200, n_samples)`.
+
+    learning_rate : {'constant', 'invscaling', 'adaptive'}, default='constant'
+        Learning rate schedule for weight updates.
+
+        - 'constant' is a constant learning rate given by
+          'learning_rate_init'.
+
+        - 'invscaling' gradually decreases the learning rate ``learning_rate_``
+          at each time step 't' using an inverse scaling exponent of 'power_t'.
+          effective_learning_rate = learning_rate_init / pow(t, power_t)
+
+        - 'adaptive' keeps the learning rate constant to
+          'learning_rate_init' as long as training loss keeps decreasing.
+          Each time two consecutive epochs fail to decrease training loss by at
+          least tol, or fail to increase validation score by at least tol if
+          'early_stopping' is on, the current learning rate is divided by 5.
+
+        Only used when solver='sgd'.
+
+    learning_rate_init : float, default=0.001
+        The initial learning rate used. It controls the step-size
+        in updating the weights. Only used when solver='sgd' or 'adam'.
+
+    power_t : float, default=0.5
+        The exponent for inverse scaling learning rate.
+        It is used in updating effective learning rate when the learning_rate
+        is set to 'invscaling'. Only used when solver='sgd'.
+
+    max_iter : int, default=200
+        Maximum number of iterations. The solver iterates until convergence
+        (determined by 'tol') or this number of iterations. For stochastic
+        solvers ('sgd', 'adam'), note that this determines the number of epochs
+        (how many times each data point will be used), not the number of
+        gradient steps.
+
+    shuffle : bool, default=True
+        Whether to shuffle samples in each iteration. Only used when
+        solver='sgd' or 'adam'.
+
+    random_state : int, RandomState instance, default=None
+        Determines random number generation for weights and bias
+        initialization, train-test split if early stopping is used, and batch
+        sampling when solver='sgd' or 'adam'.
+        Pass an int for reproducible results across multiple function calls.
+        See :term:`Glossary <random_state>`.
+
+    tol : float, default=1e-4
+        Tolerance for the optimization. When the loss or score is not improving
+        by at least ``tol`` for ``n_iter_no_change`` consecutive iterations,
+        unless ``learning_rate`` is set to 'adaptive', convergence is
+        considered to be reached and training stops.
+
+    verbose : bool, default=False
+        Whether to print progress messages to stdout.
+
+    warm_start : bool, default=False
+        When set to True, reuse the solution of the previous
+        call to fit as initialization, otherwise, just erase the
+        previous solution. See :term:`the Glossary <warm_start>`.
+
+    momentum : float, default=0.9
+        Momentum for gradient descent update. Should be between 0 and 1. Only
+        used when solver='sgd'.
+
+    nesterovs_momentum : bool, default=True
+        Whether to use Nesterov's momentum. Only used when solver='sgd' and
+        momentum > 0.
+
+    early_stopping : bool, default=False
+        Whether to use early stopping to terminate training when validation
+        score is not improving. If set to True, it will automatically set
+        aside ``validation_fraction`` of training data as validation and
+        terminate training when validation score is not improving by at
+        least ``tol`` for ``n_iter_no_change`` consecutive epochs.
+        Only effective when solver='sgd' or 'adam'.
+
+    validation_fraction : float, default=0.1
+        The proportion of training data to set aside as validation set for
+        early stopping. Must be between 0 and 1.
+        Only used if early_stopping is True.
+
+    beta_1 : float, default=0.9
+        Exponential decay rate for estimates of first moment vector in adam,
+        should be in [0, 1). Only used when solver='adam'.
+
+    beta_2 : float, default=0.999
+        Exponential decay rate for estimates of second moment vector in adam,
+        should be in [0, 1). Only used when solver='adam'.
+
+    epsilon : float, default=1e-8
+        Value for numerical stability in adam. Only used when solver='adam'.
+
+    n_iter_no_change : int, default=10
+        Maximum number of epochs to not meet ``tol`` improvement.
+        Only effective when solver='sgd' or 'adam'.
+
+        .. versionadded:: 0.20
+
+    max_fun : int, default=15000
+        Only used when solver='lbfgs'. Maximum number of function calls.
+        The solver iterates until convergence (determined by ``tol``), number
+        of iterations reaches max_iter, or this number of function calls.
+        Note that number of function calls will be greater than or equal to
+        the number of iterations for the MLPRegressor.
+
+        .. versionadded:: 0.22
+
+    Attributes
+    ----------
+    loss_ : float
+        The current loss computed with the loss function.
+
+    best_loss_ : float
+        The minimum loss reached by the solver throughout fitting.
+        If `early_stopping=True`, this attribute is set to `None`. Refer to
+        the `best_validation_score_` fitted attribute instead.
+        Only accessible when solver='sgd' or 'adam'.
+
+    loss_curve_ : list of shape (`n_iter_`,)
+        Loss value evaluated at the end of each training step.
+        The ith element in the list represents the loss at the ith iteration.
+        Only accessible when solver='sgd' or 'adam'.
+
+    validation_scores_ : list of shape (`n_iter_`,) or None
+        The score at each iteration on a held-out validation set. The score
+        reported is the R2 score. Only available if `early_stopping=True`,
+        otherwise the attribute is set to `None`.
+        Only accessible when solver='sgd' or 'adam'.
+
+    best_validation_score_ : float or None
+        The best validation score (i.e. R2 score) that triggered the
+        early stopping. Only available if `early_stopping=True`, otherwise the
+        attribute is set to `None`.
+        Only accessible when solver='sgd' or 'adam'.
+
+    t_ : int
+        The number of training samples seen by the solver during fitting.
+        Mathematically equals `n_iters * X.shape[0]`; it denotes the
+        `time_step` used by the optimizer's learning rate scheduler.
+
+    coefs_ : list of shape (n_layers - 1,)
+        The ith element in the list represents the weight matrix corresponding
+        to layer i.
+
+    intercepts_ : list of shape (n_layers - 1,)
+        The ith element in the list represents the bias vector corresponding to
+        layer i + 1.
+
+    n_features_in_ : int
+        Number of features seen during :term:`fit`.
+
+        .. versionadded:: 0.24
+
+    feature_names_in_ : ndarray of shape (`n_features_in_`,)
+        Names of features seen during :term:`fit`. Defined only when `X`
+        has feature names that are all strings.
+
+        .. versionadded:: 1.0
+
+    n_iter_ : int
+        The number of iterations the solver has run.
+
+    n_layers_ : int
+        Number of layers.
+
+    n_outputs_ : int
+        Number of outputs.
+
+    out_activation_ : str
+        Name of the output activation function.
+
+    See Also
+    --------
+    BernoulliRBM : Bernoulli Restricted Boltzmann Machine (RBM).
+    MLPClassifier : Multi-layer Perceptron classifier.
+    sklearn.linear_model.SGDRegressor : Linear model fitted by minimizing
+        a regularized empirical loss with SGD.
+
+    Notes
+    -----
+    MLPRegressor trains iteratively since at each time step
+    the partial derivatives of the loss function with respect to the model
+    parameters are computed to update the parameters.
+
+    It can also have a regularization term added to the loss function
+    that shrinks model parameters to prevent overfitting.
+
+    This implementation works with data represented as dense and sparse numpy
+    arrays of floating point values.
+
+    References
+    ----------
+    Hinton, Geoffrey E. "Connectionist learning procedures."
+    Artificial intelligence 40.1 (1989): 185-234.
+
+    Glorot, Xavier, and Yoshua Bengio.
+    "Understanding the difficulty of training deep feedforward neural networks."
+    International Conference on Artificial Intelligence and Statistics. 2010.
+
+    :arxiv:`He, Kaiming, et al (2015). "Delving deep into rectifiers:
+    Surpassing human-level performance on imagenet classification." <1502.01852>`
+
+    :arxiv:`Kingma, Diederik, and Jimmy Ba (2014)
+    "Adam: A method for stochastic optimization." <1412.6980>`
+
+    Examples
+    --------
+    >>> from sklearn.neural_network import MLPRegressor
+    >>> from sklearn.datasets import make_regression
+    >>> from sklearn.model_selection import train_test_split
+    >>> X, y = make_regression(n_samples=200, random_state=1)
+    >>> X_train, X_test, y_train, y_test = train_test_split(X, y,
+    ...                                                     random_state=1)
+    >>> regr = MLPRegressor(random_state=1, max_iter=500).fit(X_train, y_train)
+    >>> regr.predict(X_test[:2])
+    array([-0.9..., -7.1...])
+    >>> regr.score(X_test, y_test)
+    0.4...
+    """
+
+    def __init__(
+        self,
+        hidden_layer_sizes=(100,),
+        activation="relu",
+        *,
+        solver="adam",
+        alpha=0.0001,
+        batch_size="auto",
+        learning_rate="constant",
+        learning_rate_init=0.001,
+        power_t=0.5,
+        max_iter=200,
+        shuffle=True,
+        random_state=None,
+        tol=1e-4,
+        verbose=False,
+        warm_start=False,
+        momentum=0.9,
+        nesterovs_momentum=True,
+        early_stopping=False,
+        validation_fraction=0.1,
+        beta_1=0.9,
+        beta_2=0.999,
+        epsilon=1e-8,
+        n_iter_no_change=10,
+        max_fun=15000,
+    ):
+        super().__init__(
+            hidden_layer_sizes=hidden_layer_sizes,
+            activation=activation,
+            solver=solver,
+            alpha=alpha,
+            batch_size=batch_size,
+            learning_rate=learning_rate,
+            learning_rate_init=learning_rate_init,
+            power_t=power_t,
+            max_iter=max_iter,
+            loss="squared_error",
+            shuffle=shuffle,
+            random_state=random_state,
+            tol=tol,
+            verbose=verbose,
+            warm_start=warm_start,
+            momentum=momentum,
+            nesterovs_momentum=nesterovs_momentum,
+            early_stopping=early_stopping,
+            validation_fraction=validation_fraction,
+            beta_1=beta_1,
+            beta_2=beta_2,
+            epsilon=epsilon,
+            n_iter_no_change=n_iter_no_change,
+            max_fun=max_fun,
+        )
+
+    def predict(self, X):
+        """Predict using the multi-layer perceptron model.
+
+        Parameters
+        ----------
+        X : {array-like, sparse matrix} of shape (n_samples, n_features)
+            The input data.
+
+        Returns
+        -------
+        y : ndarray of shape (n_samples, n_outputs)
+            The predicted values.
+        """
+        check_is_fitted(self)
+        return self._predict(X)
+
+    def _predict(self, X, check_input=True):
+        """Private predict method with optional input validation"""
+        y_pred = self._forward_pass_fast(X, check_input=check_input)
+        if y_pred.shape[1] == 1:
+            return y_pred.ravel()
+        return y_pred
+
+    def _score(self, X, y):
+        """Private score method without input validation"""
+        # Input validation would remove feature names, so we disable it
+        y_pred = self._predict(X, check_input=False)
+        return r2_score(y, y_pred)
+
+    def _validate_input(self, X, y, incremental, reset):
+        X, y = self._validate_data(
+            X,
+            y,
+            accept_sparse=["csr", "csc"],
+            multi_output=True,
+            y_numeric=True,
+            dtype=(np.float64, np.float32),
+            reset=reset,
+        )
+        if y.ndim == 2 and y.shape[1] == 1:
+            y = column_or_1d(y, warn=True)
+        return X, y
+
+    @available_if(lambda est: est._check_solver())
+    @_fit_context(prefer_skip_nested_validation=True)
+    def partial_fit(self, X, y):
+        """Update the model with a single iteration over the given data.
+
+        Parameters
+        ----------
+        X : {array-like, sparse matrix} of shape (n_samples, n_features)
+            The input data.
+
+        y : ndarray of shape (n_samples,)
+            The target values.
+
+        Returns
+        -------
+        self : object
+            Trained MLP model.
+        """
+        return self._fit(X, y, incremental=True)
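+
+
+# --- Editor's illustrative sketch (not part of scikit-learn) -----------------
+# A minimal, hedged example of the incremental-learning API documented in
+# ``MLPClassifier.partial_fit`` above: ``classes`` must list every label on
+# the first call so the internal LabelBinarizer can be fitted before all
+# labels have been seen; later mini-batches may contain only a subset.
+# The tiny arrays below are made up purely for illustration.
+if __name__ == "__main__":
+    X_batch_1 = np.array([[0.0, 1.0], [1.0, 0.0]])
+    y_batch_1 = np.array([0, 1])
+    X_batch_2 = np.array([[1.0, 1.0], [0.0, 0.0]])
+    y_batch_2 = np.array([1, 0])
+
+    clf = MLPClassifier(hidden_layer_sizes=(5,), random_state=0)
+    clf.partial_fit(X_batch_1, y_batch_1, classes=[0, 1])  # first call: full class list
+    clf.partial_fit(X_batch_2, y_batch_2)                  # later calls: classes optional
+    print(clf.predict(X_batch_2), clf.predict_proba(X_batch_2).shape)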
diff --git a/venv/lib/python3.10/site-packages/sklearn/neural_network/_rbm.py b/venv/lib/python3.10/site-packages/sklearn/neural_network/_rbm.py
new file mode 100644
index 0000000000000000000000000000000000000000..ec819790c5f735a8ae090f14febefe25ab229e45
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/sklearn/neural_network/_rbm.py
@@ -0,0 +1,453 @@
+"""Restricted Boltzmann Machine
+"""
+
+# Authors: Yann N. Dauphin 
+#          Vlad Niculae
+#          Gabriel Synnaeve
+#          Lars Buitinck
+# License: BSD 3 clause
+
+import time
+from numbers import Integral, Real
+
+import numpy as np
+import scipy.sparse as sp
+from scipy.special import expit  # logistic function
+
+from ..base import (
+    BaseEstimator,
+    ClassNamePrefixFeaturesOutMixin,
+    TransformerMixin,
+    _fit_context,
+)
+from ..utils import check_random_state, gen_even_slices
+from ..utils._param_validation import Interval
+from ..utils.extmath import safe_sparse_dot
+from ..utils.validation import check_is_fitted
+
+
+class BernoulliRBM(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator):
+    """Bernoulli Restricted Boltzmann Machine (RBM).
+
+    A Restricted Boltzmann Machine with binary visible units and
+    binary hidden units. Parameters are estimated using Stochastic Maximum
+    Likelihood (SML), also known as Persistent Contrastive Divergence (PCD)
+    [2].
+
+    The time complexity of this implementation is ``O(d ** 2)`` assuming
+    d ~ n_features ~ n_components.
+
+    Read more in the :ref:`User Guide <rbm>`.
+
+    Parameters
+    ----------
+    n_components : int, default=256
+        Number of binary hidden units.
+
+    learning_rate : float, default=0.1
+        The learning rate for weight updates. It is *highly* recommended
+        to tune this hyper-parameter. Reasonable values are in the
+        10**[0., -3.] range.
+
+    batch_size : int, default=10
+        Number of examples per minibatch.
+
+    n_iter : int, default=10
+        Number of iterations/sweeps over the training dataset to perform
+        during training.
+
+    verbose : int, default=0
+        The verbosity level. The default, zero, means silent mode. Range
+        of values is [0, inf].
+
+    random_state : int, RandomState instance or None, default=None
+        Determines random number generation for:
+
+        - Gibbs sampling from visible and hidden layers.
+
+        - Initializing components, sampling from layers during fit.
+
+        - Corrupting the data when scoring samples.
+
+        Pass an int for reproducible results across multiple function calls.
+        See :term:`Glossary <random_state>`.
+
+    Attributes
+    ----------
+    intercept_hidden_ : array-like of shape (n_components,)
+        Biases of the hidden units.
+
+    intercept_visible_ : array-like of shape (n_features,)
+        Biases of the visible units.
+
+    components_ : array-like of shape (n_components, n_features)
+        Weight matrix, where `n_features` is the number of
+        visible units and `n_components` is the number of hidden units.
+
+    h_samples_ : array-like of shape (batch_size, n_components)
+        Hidden Activation sampled from the model distribution,
+        where `batch_size` is the number of examples per minibatch and
+        `n_components` is the number of hidden units.
+
+    n_features_in_ : int
+        Number of features seen during :term:`fit`.
+
+        .. versionadded:: 0.24
+
+    feature_names_in_ : ndarray of shape (`n_features_in_`,)
+        Names of features seen during :term:`fit`. Defined only when `X`
+        has feature names that are all strings.
+
+        .. versionadded:: 1.0
+
+    See Also
+    --------
+    sklearn.neural_network.MLPRegressor : Multi-layer Perceptron regressor.
+    sklearn.neural_network.MLPClassifier : Multi-layer Perceptron classifier.
+    sklearn.decomposition.PCA : An unsupervised linear dimensionality
+        reduction model.
+
+    References
+    ----------
+
+    [1] Hinton, G. E., Osindero, S. and Teh, Y. A fast learning algorithm for
+        deep belief nets. Neural Computation 18, pp 1527-1554.
+        https://www.cs.toronto.edu/~hinton/absps/fastnc.pdf
+
+    [2] Tieleman, T. Training Restricted Boltzmann Machines using
+        Approximations to the Likelihood Gradient. International Conference
+        on Machine Learning (ICML) 2008
+
+    Examples
+    --------
+
+    >>> import numpy as np
+    >>> from sklearn.neural_network import BernoulliRBM
+    >>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
+    >>> model = BernoulliRBM(n_components=2)
+    >>> model.fit(X)
+    BernoulliRBM(n_components=2)
+    """
+
+    _parameter_constraints: dict = {
+        "n_components": [Interval(Integral, 1, None, closed="left")],
+        "learning_rate": [Interval(Real, 0, None, closed="neither")],
+        "batch_size": [Interval(Integral, 1, None, closed="left")],
+        "n_iter": [Interval(Integral, 0, None, closed="left")],
+        "verbose": ["verbose"],
+        "random_state": ["random_state"],
+    }
+
+    def __init__(
+        self,
+        n_components=256,
+        *,
+        learning_rate=0.1,
+        batch_size=10,
+        n_iter=10,
+        verbose=0,
+        random_state=None,
+    ):
+        self.n_components = n_components
+        self.learning_rate = learning_rate
+        self.batch_size = batch_size
+        self.n_iter = n_iter
+        self.verbose = verbose
+        self.random_state = random_state
+
+    def transform(self, X):
+        """Compute the hidden layer activation probabilities, P(h=1|v=X).
+
+        Parameters
+        ----------
+        X : {array-like, sparse matrix} of shape (n_samples, n_features)
+            The data to be transformed.
+
+        Returns
+        -------
+        h : ndarray of shape (n_samples, n_components)
+            Latent representations of the data.
+        """
+        check_is_fitted(self)
+
+        X = self._validate_data(
+            X, accept_sparse="csr", reset=False, dtype=(np.float64, np.float32)
+        )
+        return self._mean_hiddens(X)
+
+    def _mean_hiddens(self, v):
+        """Computes the probabilities P(h=1|v).
+
+        Parameters
+        ----------
+        v : ndarray of shape (n_samples, n_features)
+            Values of the visible layer.
+
+        Returns
+        -------
+        h : ndarray of shape (n_samples, n_components)
+            Corresponding mean field values for the hidden layer.
+        """
+        p = safe_sparse_dot(v, self.components_.T)
+        p += self.intercept_hidden_
+        return expit(p, out=p)
+
+    def _sample_hiddens(self, v, rng):
+        """Sample from the distribution P(h|v).
+
+        Parameters
+        ----------
+        v : ndarray of shape (n_samples, n_features)
+            Values of the visible layer to sample from.
+
+        rng : RandomState instance
+            Random number generator to use.
+
+        Returns
+        -------
+        h : ndarray of shape (n_samples, n_components)
+            Values of the hidden layer.
+        """
+        p = self._mean_hiddens(v)
+        return rng.uniform(size=p.shape) < p
+
+    def _sample_visibles(self, h, rng):
+        """Sample from the distribution P(v|h).
+
+        Parameters
+        ----------
+        h : ndarray of shape (n_samples, n_components)
+            Values of the hidden layer to sample from.
+
+        rng : RandomState instance
+            Random number generator to use.
+
+        Returns
+        -------
+        v : ndarray of shape (n_samples, n_features)
+            Values of the visible layer.
+        """
+        p = np.dot(h, self.components_)
+        p += self.intercept_visible_
+        expit(p, out=p)
+        return rng.uniform(size=p.shape) < p
+
+    def _free_energy(self, v):
+        """Computes the free energy F(v) = - log sum_h exp(-E(v,h)).
+
+        Parameters
+        ----------
+        v : ndarray of shape (n_samples, n_features)
+            Values of the visible layer.
+
+        Returns
+        -------
+        free_energy : ndarray of shape (n_samples,)
+            The value of the free energy.
+        """
+        return -safe_sparse_dot(v, self.intercept_visible_) - np.logaddexp(
+            0, safe_sparse_dot(v, self.components_.T) + self.intercept_hidden_
+        ).sum(axis=1)
+
+    def gibbs(self, v):
+        """Perform one Gibbs sampling step.
+
+        Parameters
+        ----------
+        v : ndarray of shape (n_samples, n_features)
+            Values of the visible layer to start from.
+
+        Returns
+        -------
+        v_new : ndarray of shape (n_samples, n_features)
+            Values of the visible layer after one Gibbs step.
+        """
+        check_is_fitted(self)
+        if not hasattr(self, "random_state_"):
+            self.random_state_ = check_random_state(self.random_state)
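+        # One full Gibbs step: sample the hidden units given v, then resample
+        # the visible units given those hidden samples.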
+        h_ = self._sample_hiddens(v, self.random_state_)
+        v_ = self._sample_visibles(h_, self.random_state_)
+
+        return v_
+
+    @_fit_context(prefer_skip_nested_validation=True)
+    def partial_fit(self, X, y=None):
+        """Fit the model to the partial segment of the data X.
+
+        Parameters
+        ----------
+        X : ndarray of shape (n_samples, n_features)
+            Training data.
+
+        y : array-like of shape (n_samples,) or (n_samples, n_outputs), default=None
+            Target values (None for unsupervised transformations).
+
+        Returns
+        -------
+        self : BernoulliRBM
+            The fitted model.
+        """
+        first_pass = not hasattr(self, "components_")
+        X = self._validate_data(
+            X, accept_sparse="csr", dtype=np.float64, reset=first_pass
+        )
+        if not hasattr(self, "random_state_"):
+            self.random_state_ = check_random_state(self.random_state)
+        if not hasattr(self, "components_"):
+            self.components_ = np.asarray(
+                self.random_state_.normal(0, 0.01, (self.n_components, X.shape[1])),
+                order="F",
+            )
+            self._n_features_out = self.components_.shape[0]
+        if not hasattr(self, "intercept_hidden_"):
+            self.intercept_hidden_ = np.zeros(
+                self.n_components,
+            )
+        if not hasattr(self, "intercept_visible_"):
+            self.intercept_visible_ = np.zeros(
+                X.shape[1],
+            )
+        if not hasattr(self, "h_samples_"):
+            self.h_samples_ = np.zeros((self.batch_size, self.n_components))
+
+        self._fit(X, self.random_state_)
+        return self
+
+    def _fit(self, v_pos, rng):
+        """Inner fit for one mini-batch.
+
+        Adjust the parameters to maximize the likelihood of v using
+        Stochastic Maximum Likelihood (SML).
+
+        Parameters
+        ----------
+        v_pos : ndarray of shape (n_samples, n_features)
+            The data to use for training.
+
+        rng : RandomState instance
+            Random number generator to use for sampling.
+        """
+        h_pos = self._mean_hiddens(v_pos)
+        v_neg = self._sample_visibles(self.h_samples_, rng)
+        h_neg = self._mean_hiddens(v_neg)
+
+        lr = float(self.learning_rate) / v_pos.shape[0]
+        update = safe_sparse_dot(v_pos.T, h_pos, dense_output=True).T
+        update -= np.dot(h_neg.T, v_neg)
+        self.components_ += lr * update
+        self.intercept_hidden_ += lr * (h_pos.sum(axis=0) - h_neg.sum(axis=0))
+        self.intercept_visible_ += lr * (
+            np.asarray(v_pos.sum(axis=0)).squeeze() - v_neg.sum(axis=0)
+        )
+
+        h_neg[rng.uniform(size=h_neg.shape) < h_neg] = 1.0  # sample binomial
+        self.h_samples_ = np.floor(h_neg, h_neg)
+
+    def score_samples(self, X):
+        """Compute the pseudo-likelihood of X.
+
+        Parameters
+        ----------
+        X : {array-like, sparse matrix} of shape (n_samples, n_features)
+            Values of the visible layer. Must be all-boolean (not checked).
+
+        Returns
+        -------
+        pseudo_likelihood : ndarray of shape (n_samples,)
+            Value of the pseudo-likelihood (proxy for likelihood).
+
+        Notes
+        -----
+        This method is not deterministic: it computes a quantity called the
+        free energy on X, then on a randomly corrupted version of X, and
+        returns the log of the logistic function of the difference.
+        """
+        check_is_fitted(self)
+
+        v = self._validate_data(X, accept_sparse="csr", reset=False)
+        rng = check_random_state(self.random_state)
+
+        # Randomly corrupt one feature in each sample in v.
+        ind = (np.arange(v.shape[0]), rng.randint(0, v.shape[1], v.shape[0]))
+        if sp.issparse(v):
+            data = -2 * v[ind] + 1
+            if isinstance(data, np.matrix):  # v is a sparse matrix
+                v_ = v + sp.csr_matrix((data.A.ravel(), ind), shape=v.shape)
+            else:  # v is a sparse array
+                v_ = v + sp.csr_array((data.ravel(), ind), shape=v.shape)
+        else:
+            v_ = v.copy()
+            v_[ind] = 1 - v_[ind]
+
+        fe = self._free_energy(v)
+        fe_ = self._free_energy(v_)
+        # log(expit(x)) = log(1 / (1 + exp(-x))) = -np.logaddexp(0, -x)
+        return -v.shape[1] * np.logaddexp(0, -(fe_ - fe))
+
+    @_fit_context(prefer_skip_nested_validation=True)
+    def fit(self, X, y=None):
+        """Fit the model to the data X.
+
+        Parameters
+        ----------
+        X : {array-like, sparse matrix} of shape (n_samples, n_features)
+            Training data.
+
+        y : array-like of shape (n_samples,) or (n_samples, n_outputs), default=None
+            Target values (None for unsupervised transformations).
+
+        Returns
+        -------
+        self : BernoulliRBM
+            The fitted model.
+        """
+        X = self._validate_data(X, accept_sparse="csr", dtype=(np.float64, np.float32))
+        n_samples = X.shape[0]
+        rng = check_random_state(self.random_state)
+
+        self.components_ = np.asarray(
+            rng.normal(0, 0.01, (self.n_components, X.shape[1])),
+            order="F",
+            dtype=X.dtype,
+        )
+        self._n_features_out = self.components_.shape[0]
+        self.intercept_hidden_ = np.zeros(self.n_components, dtype=X.dtype)
+        self.intercept_visible_ = np.zeros(X.shape[1], dtype=X.dtype)
+        self.h_samples_ = np.zeros((self.batch_size, self.n_components), dtype=X.dtype)
+
+        n_batches = int(np.ceil(float(n_samples) / self.batch_size))
+        batch_slices = list(
+            gen_even_slices(n_batches * self.batch_size, n_batches, n_samples=n_samples)
+        )
+        verbose = self.verbose
+        begin = time.time()
+        for iteration in range(1, self.n_iter + 1):
+            for batch_slice in batch_slices:
+                self._fit(X[batch_slice], rng)
+
+            if verbose:
+                end = time.time()
+                print(
+                    "[%s] Iteration %d, pseudo-likelihood = %.2f, time = %.2fs"
+                    % (
+                        type(self).__name__,
+                        iteration,
+                        self.score_samples(X).mean(),
+                        end - begin,
+                    )
+                )
+                begin = end
+
+        return self
+
+    def _more_tags(self):
+        return {
+            "_xfail_checks": {
+                "check_methods_subset_invariance": (
+                    "fails for the decision_function method"
+                ),
+                "check_methods_sample_order_invariance": (
+                    "fails for the score_samples method"
+                ),
+            },
+            "preserves_dtype": [np.float64, np.float32],
+        }
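+
+
+# --- Editor's illustrative sketch (not part of scikit-learn) -----------------
+# A minimal, hedged example of the BernoulliRBM API defined above: fit on
+# binary data, obtain hidden-unit probabilities with ``transform``, and run a
+# single ``gibbs`` step. The toy data is made up for illustration only.
+if __name__ == "__main__":
+    rng = np.random.RandomState(0)
+    X_demo = (rng.uniform(size=(20, 6)) > 0.5).astype(np.float64)
+
+    rbm = BernoulliRBM(n_components=3, learning_rate=0.05, n_iter=5, random_state=0)
+    H = rbm.fit_transform(X_demo)  # P(h=1|v) for each sample, shape (20, 3)
+    V_new = rbm.gibbs(X_demo)      # visible units after one Gibbs step, shape (20, 6)
+    print(H.shape, V_new.shape)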
diff --git a/venv/lib/python3.10/site-packages/sklearn/neural_network/_stochastic_optimizers.py b/venv/lib/python3.10/site-packages/sklearn/neural_network/_stochastic_optimizers.py
new file mode 100644
index 0000000000000000000000000000000000000000..d9fbaec0098d077a4cee85e01255590754364579
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/sklearn/neural_network/_stochastic_optimizers.py
@@ -0,0 +1,288 @@
+"""Stochastic optimization methods for MLP
+"""
+
+# Authors: Jiyuan Qian 
+# License: BSD 3 clause
+
+import numpy as np
+
+
+class BaseOptimizer:
+    """Base (Stochastic) gradient descent optimizer
+
+    Parameters
+    ----------
+    learning_rate_init : float, default=0.1
+        The initial learning rate used. It controls the step-size in updating
+        the weights
+
+    Attributes
+    ----------
+    learning_rate : float
+        the current learning rate
+    """
+
+    def __init__(self, learning_rate_init=0.1):
+        self.learning_rate_init = learning_rate_init
+        self.learning_rate = float(learning_rate_init)
+
+    def update_params(self, params, grads):
+        """Update parameters with given gradients
+
+        Parameters
+        ----------
+        params : list of length = len(coefs_) + len(intercepts_)
+            The concatenated list containing coefs_ and intercepts_ in MLP
+            model. Used for initializing velocities and updating params
+
+        grads : list of length = len(params)
+            Containing gradients with respect to coefs_ and intercepts_ in MLP
+            model. So length should be aligned with params
+        """
+        updates = self._get_updates(grads)
+        for param, update in zip((p for p in params), updates):
+            param += update
+
+    def iteration_ends(self, time_step):
+        """Perform update to learning rate and potentially other states at the
+        end of an iteration
+        """
+        pass
+
+    def trigger_stopping(self, msg, verbose):
+        """Decides whether it is time to stop training
+
+        Parameters
+        ----------
+        msg : str
+            Message passed in for verbose output
+
+        verbose : bool
+            Print message to stdout if True
+
+        Returns
+        -------
+        is_stopping : bool
+            True if training needs to stop
+        """
+        if verbose:
+            print(msg + " Stopping.")
+        return True
+
+
+class SGDOptimizer(BaseOptimizer):
+    """Stochastic gradient descent optimizer with momentum
+
+    Parameters
+    ----------
+    params : list, length = len(coefs_) + len(intercepts_)
+        The concatenated list containing coefs_ and intercepts_ in MLP model.
+        Used for initializing velocities and updating params
+
+    learning_rate_init : float, default=0.1
+        The initial learning rate used. It controls the step-size in updating
+        the weights
+
+    lr_schedule : {'constant', 'adaptive', 'invscaling'}, default='constant'
+        Learning rate schedule for weight updates.
+
+        -'constant', is a constant learning rate given by
+         'learning_rate_init'.
+
+        -'invscaling' gradually decreases the learning rate 'learning_rate_' at
+          each time step 't' using an inverse scaling exponent of 'power_t'.
+          learning_rate_ = learning_rate_init / pow(t, power_t)
+
+        -'adaptive', keeps the learning rate constant to
+         'learning_rate_init' as long as the training loss keeps decreasing.
+         Each time 2 consecutive epochs fail to decrease the training loss by
+         tol, or fail to increase validation score by tol if 'early_stopping'
+         is on, the current learning rate is divided by 5.
+
+    momentum : float, default=0.9
+        Value of momentum used, must be larger than or equal to 0
+
+    nesterov : bool, default=True
+        Whether to use nesterov's momentum or not. Use nesterov's if True
+
+    power_t : float, default=0.5
+        Power of time step 't' in inverse scaling. See `lr_schedule` for
+        more details.
+
+    Attributes
+    ----------
+    learning_rate : float
+        the current learning rate
+
+    velocities : list, length = len(params)
+        velocities that are used to update params
+    """
+
+    def __init__(
+        self,
+        params,
+        learning_rate_init=0.1,
+        lr_schedule="constant",
+        momentum=0.9,
+        nesterov=True,
+        power_t=0.5,
+    ):
+        super().__init__(learning_rate_init)
+
+        self.lr_schedule = lr_schedule
+        self.momentum = momentum
+        self.nesterov = nesterov
+        self.power_t = power_t
+        self.velocities = [np.zeros_like(param) for param in params]
+
+    def iteration_ends(self, time_step):
+        """Perform updates to learning rate and potential other states at the
+        end of an iteration
+
+        Parameters
+        ----------
+        time_step : int
+            number of training samples trained on so far, used to update
+            learning rate for 'invscaling'
+        """
+        if self.lr_schedule == "invscaling":
+            self.learning_rate = (
+                float(self.learning_rate_init) / (time_step + 1) ** self.power_t
+            )
+
+    def trigger_stopping(self, msg, verbose):
+        if self.lr_schedule != "adaptive":
+            if verbose:
+                print(msg + " Stopping.")
+            return True
+
+        if self.learning_rate <= 1e-6:
+            if verbose:
+                print(msg + " Learning rate too small. Stopping.")
+            return True
+
+        self.learning_rate /= 5.0
+        if verbose:
+            print(msg + " Setting learning rate to %f" % self.learning_rate)
+        return False
+
+    def _get_updates(self, grads):
+        """Get the values used to update params with given gradients
+
+        Parameters
+        ----------
+        grads : list, length = len(coefs_) + len(intercepts_)
+            Containing gradients with respect to coefs_ and intercepts_ in MLP
+            model. So length should be aligned with params
+
+        Returns
+        -------
+        updates : list, length = len(grads)
+            The values to add to params
+        """
+        updates = [
+            self.momentum * velocity - self.learning_rate * grad
+            for velocity, grad in zip(self.velocities, grads)
+        ]
+        self.velocities = updates
+
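+        # With Nesterov momentum, the step is recomputed from the freshly
+        # updated velocities (a "look-ahead" step); otherwise the plain
+        # momentum update above is returned as-is.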
+        if self.nesterov:
+            updates = [
+                self.momentum * velocity - self.learning_rate * grad
+                for velocity, grad in zip(self.velocities, grads)
+            ]
+
+        return updates
+
+
+class AdamOptimizer(BaseOptimizer):
+    """Stochastic gradient descent optimizer with Adam
+
+    Note: All default values are from the original Adam paper
+
+    Parameters
+    ----------
+    params : list, length = len(coefs_) + len(intercepts_)
+        The concatenated list containing coefs_ and intercepts_ in MLP model.
+        Used for initializing velocities and updating params
+
+    learning_rate_init : float, default=0.001
+        The initial learning rate used. It controls the step-size in updating
+        the weights
+
+    beta_1 : float, default=0.9
+        Exponential decay rate for estimates of first moment vector, should be
+        in [0, 1)
+
+    beta_2 : float, default=0.999
+        Exponential decay rate for estimates of second moment vector, should be
+        in [0, 1)
+
+    epsilon : float, default=1e-8
+        Value for numerical stability
+
+    Attributes
+    ----------
+    learning_rate : float
+        The current learning rate
+
+    t : int
+        Timestep
+
+    ms : list, length = len(params)
+        First moment vectors
+
+    vs : list, length = len(params)
+        Second moment vectors
+
+    References
+    ----------
+    :arxiv:`Kingma, Diederik, and Jimmy Ba (2014) "Adam: A method for
+        stochastic optimization." <1412.6980>
+    """
+
+    def __init__(
+        self, params, learning_rate_init=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-8
+    ):
+        super().__init__(learning_rate_init)
+
+        self.beta_1 = beta_1
+        self.beta_2 = beta_2
+        self.epsilon = epsilon
+        self.t = 0
+        self.ms = [np.zeros_like(param) for param in params]
+        self.vs = [np.zeros_like(param) for param in params]
+
+    def _get_updates(self, grads):
+        """Get the values used to update params with given gradients
+
+        Parameters
+        ----------
+        grads : list, length = len(coefs_) + len(intercepts_)
+            Containing gradients with respect to coefs_ and intercepts_ in MLP
+            model. So length should be aligned with params
+
+        Returns
+        -------
+        updates : list, length = len(grads)
+            The values to add to params
+        """
+        self.t += 1
+        self.ms = [
+            self.beta_1 * m + (1 - self.beta_1) * grad
+            for m, grad in zip(self.ms, grads)
+        ]
+        self.vs = [
+            self.beta_2 * v + (1 - self.beta_2) * (grad**2)
+            for v, grad in zip(self.vs, grads)
+        ]
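+        # Fold Adam's bias correction of the first and second moment
+        # estimates into an effective step size, as in Algorithm 1 of the
+        # Adam paper.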
+        self.learning_rate = (
+            self.learning_rate_init
+            * np.sqrt(1 - self.beta_2**self.t)
+            / (1 - self.beta_1**self.t)
+        )
+        updates = [
+            -self.learning_rate * m / (np.sqrt(v) + self.epsilon)
+            for m, v in zip(self.ms, self.vs)
+        ]
+        return updates
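+
+
+# --- Editor's illustrative sketch (not part of scikit-learn) -----------------
+# A minimal, hedged example of how the MLP training loop drives these
+# optimizers: construct one from the current parameter list, then repeatedly
+# hand it gradients; ``update_params`` adds the returned updates to the
+# parameters in place. The quadratic objective below is made up.
+if __name__ == "__main__":
+    params = [np.array([5.0, -3.0]), np.array([2.0])]
+
+    optimizer = AdamOptimizer(params, learning_rate_init=0.1)
+    for _ in range(200):
+        grads = [2.0 * p for p in params]  # gradient of sum of squares
+        optimizer.update_params(params, grads)
+    print(params)  # both arrays are driven towards zero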
diff --git a/venv/lib/python3.10/site-packages/sklearn/neural_network/tests/__init__.py b/venv/lib/python3.10/site-packages/sklearn/neural_network/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/venv/lib/python3.10/site-packages/sklearn/neural_network/tests/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/neural_network/tests/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..53f42c32c7fb77953a5a04902b0f00c4bd5f7127
Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/neural_network/tests/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/sklearn/neural_network/tests/__pycache__/test_base.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/neural_network/tests/__pycache__/test_base.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b1644c0eb73d189841b41171a7181f93faef6522
Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/neural_network/tests/__pycache__/test_base.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/sklearn/neural_network/tests/__pycache__/test_mlp.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/neural_network/tests/__pycache__/test_mlp.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0047b4ab7bf85ca765fae9135eae484665bf7039
Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/neural_network/tests/__pycache__/test_mlp.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/sklearn/neural_network/tests/__pycache__/test_rbm.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/neural_network/tests/__pycache__/test_rbm.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9fc6391c3da71f3a3bb231263081b95155fb8031
Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/neural_network/tests/__pycache__/test_rbm.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/sklearn/neural_network/tests/__pycache__/test_stochastic_optimizers.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/neural_network/tests/__pycache__/test_stochastic_optimizers.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..894deadc2f4cd7c6d731a6af50bb355e9bb63442
Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/neural_network/tests/__pycache__/test_stochastic_optimizers.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/sklearn/neural_network/tests/test_base.py b/venv/lib/python3.10/site-packages/sklearn/neural_network/tests/test_base.py
new file mode 100644
index 0000000000000000000000000000000000000000..af7b38e899907bea5b1a3f056a7c755414c0cbd6
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/sklearn/neural_network/tests/test_base.py
@@ -0,0 +1,29 @@
+import numpy as np
+import pytest
+
+from sklearn.neural_network._base import binary_log_loss, log_loss
+
+
+def test_binary_log_loss_1_prob_finite():
+    # y_proba is equal to one should result in a finite logloss
+    y_true = np.array([[0, 0, 1]]).T
+    y_prob = np.array([[0.9, 1.0, 1.0]]).T
+
+    loss = binary_log_loss(y_true, y_prob)
+    assert np.isfinite(loss)
+
+
+@pytest.mark.parametrize(
+    "y_true, y_prob",
+    [
+        (
+            np.array([[1, 0, 0], [0, 1, 0]]),
+            np.array([[0.0, 1.0, 0.0], [0.9, 0.05, 0.05]]),
+        ),
+        (np.array([[0, 0, 1]]).T, np.array([[0.9, 1.0, 1.0]]).T),
+    ],
+)
+def test_log_loss_1_prob_finite(y_true, y_prob):
+    # y_proba is equal to 1 should result in a finite logloss
+    loss = log_loss(y_true, y_prob)
+    assert np.isfinite(loss)
diff --git a/venv/lib/python3.10/site-packages/sklearn/neural_network/tests/test_mlp.py b/venv/lib/python3.10/site-packages/sklearn/neural_network/tests/test_mlp.py
new file mode 100644
index 0000000000000000000000000000000000000000..6b94e2703f7e180b9054f31a34243a64abac1617
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/sklearn/neural_network/tests/test_mlp.py
@@ -0,0 +1,969 @@
+"""
+Testing for Multi-layer Perceptron module (sklearn.neural_network)
+"""
+
+# Author: Issam H. Laradji
+# License: BSD 3 clause
+
+import re
+import sys
+import warnings
+from io import StringIO
+
+import joblib
+import numpy as np
+import pytest
+from numpy.testing import (
+    assert_allclose,
+    assert_almost_equal,
+    assert_array_equal,
+)
+
+from sklearn.datasets import (
+    load_digits,
+    load_iris,
+    make_multilabel_classification,
+    make_regression,
+)
+from sklearn.exceptions import ConvergenceWarning
+from sklearn.metrics import roc_auc_score
+from sklearn.neural_network import MLPClassifier, MLPRegressor
+from sklearn.preprocessing import LabelBinarizer, MinMaxScaler, scale
+from sklearn.utils._testing import ignore_warnings
+from sklearn.utils.fixes import CSR_CONTAINERS
+
+ACTIVATION_TYPES = ["identity", "logistic", "tanh", "relu"]
+
+X_digits, y_digits = load_digits(n_class=3, return_X_y=True)
+
+X_digits_multi = MinMaxScaler().fit_transform(X_digits[:200])
+y_digits_multi = y_digits[:200]
+
+X_digits, y_digits = load_digits(n_class=2, return_X_y=True)
+
+X_digits_binary = MinMaxScaler().fit_transform(X_digits[:200])
+y_digits_binary = y_digits[:200]
+
+classification_datasets = [
+    (X_digits_multi, y_digits_multi),
+    (X_digits_binary, y_digits_binary),
+]
+
+X_reg, y_reg = make_regression(
+    n_samples=200, n_features=10, bias=20.0, noise=100.0, random_state=7
+)
+y_reg = scale(y_reg)
+regression_datasets = [(X_reg, y_reg)]
+
+iris = load_iris()
+
+X_iris = iris.data
+y_iris = iris.target
+
+
+def test_alpha():
+    # Test that larger alpha yields weights closer to zero
+    X = X_digits_binary[:100]
+    y = y_digits_binary[:100]
+
+    alpha_vectors = []
+    alpha_values = np.arange(2)
+    absolute_sum = lambda x: np.sum(np.abs(x))
+
+    for alpha in alpha_values:
+        mlp = MLPClassifier(hidden_layer_sizes=10, alpha=alpha, random_state=1)
+        with ignore_warnings(category=ConvergenceWarning):
+            mlp.fit(X, y)
+        alpha_vectors.append(
+            np.array([absolute_sum(mlp.coefs_[0]), absolute_sum(mlp.coefs_[1])])
+        )
+
+    for i in range(len(alpha_values) - 1):
+        assert (alpha_vectors[i] > alpha_vectors[i + 1]).all()
+
+
+def test_fit():
+    # Test that the algorithm solution is equal to a worked out example.
+    X = np.array([[0.6, 0.8, 0.7]])
+    y = np.array([0])
+    mlp = MLPClassifier(
+        solver="sgd",
+        learning_rate_init=0.1,
+        alpha=0.1,
+        activation="logistic",
+        random_state=1,
+        max_iter=1,
+        hidden_layer_sizes=2,
+        momentum=0,
+    )
+    # set weights
+    mlp.coefs_ = [0] * 2
+    mlp.intercepts_ = [0] * 2
+    mlp.n_outputs_ = 1
+    mlp.coefs_[0] = np.array([[0.1, 0.2], [0.3, 0.1], [0.5, 0]])
+    mlp.coefs_[1] = np.array([[0.1], [0.2]])
+    mlp.intercepts_[0] = np.array([0.1, 0.1])
+    mlp.intercepts_[1] = np.array([1.0])
+    mlp._coef_grads = [] * 2
+    mlp._intercept_grads = [] * 2
+    mlp.n_features_in_ = 3
+
+    # Initialize parameters
+    mlp.n_iter_ = 0
+    mlp.learning_rate_ = 0.1
+
+    # Compute the number of layers
+    mlp.n_layers_ = 3
+
+    # Pre-allocate gradient matrices
+    mlp._coef_grads = [0] * (mlp.n_layers_ - 1)
+    mlp._intercept_grads = [0] * (mlp.n_layers_ - 1)
+
+    mlp.out_activation_ = "logistic"
+    mlp.t_ = 0
+    mlp.best_loss_ = np.inf
+    mlp.loss_curve_ = []
+    mlp._no_improvement_count = 0
+    mlp._intercept_velocity = [
+        np.zeros_like(intercepts) for intercepts in mlp.intercepts_
+    ]
+    mlp._coef_velocity = [np.zeros_like(coefs) for coefs in mlp.coefs_]
+
+    mlp.partial_fit(X, y, classes=[0, 1])
+    # Manually worked out example
+    # h1 = g(X1 * W_i1 + b11) = g(0.6 * 0.1 + 0.8 * 0.3 + 0.7 * 0.5 + 0.1)
+    #       =  0.679178699175393
+    # h2 = g(X2 * W_i2 + b12) = g(0.6 * 0.2 + 0.8 * 0.1 + 0.7 * 0 + 0.1)
+    #         = 0.574442516811659
+    # o1 = g(h * W2 + b21) = g(0.679 * 0.1 + 0.574 * 0.2 + 1)
+    #       = 0.7654329236196236
+    # d21 = -(0 - 0.765) = 0.765
+    # d11 = (1 - 0.679) * 0.679 * 0.765 * 0.1 = 0.01667
+    # d12 = (1 - 0.574) * 0.574 * 0.765 * 0.2 = 0.0374
+    # W1grad11 = X1 * d11 + alpha * W11 = 0.6 * 0.01667 + 0.1 * 0.1 = 0.0200
+    # W1grad12 = X1 * d12 + alpha * W12 = 0.6 * 0.0374 + 0.1 * 0.2 = 0.04244
+    # W1grad21 = X2 * d11 + alpha * W13 = 0.8 * 0.01667 + 0.1 * 0.3 = 0.043336
+    # W1grad22 = X2 * d12 + alpha * W14 = 0.8 * 0.0374 + 0.1 * 0.1 = 0.03992
+    # W1grad31 = X3 * d11 + alpha * W15 = 0.6 * 0.01667 + 0.1 * 0.5 = 0.060002
+    # W1grad32 = X3 * d12 + alpha * W16 = 0.6 * 0.0374 + 0.1 * 0 = 0.02244
+    # W2grad1 = h1 * d21 + alpha * W21 = 0.679 * 0.765 + 0.1 * 0.1 = 0.5294
+    # W2grad2 = h2 * d21 + alpha * W22 = 0.574 * 0.765 + 0.1 * 0.2 = 0.45911
+    # b1grad1 = d11 = 0.01667
+    # b1grad2 = d12 = 0.0374
+    # b2grad = d21 = 0.765
+    # W1 = W1 - eta * [W1grad11, .., W1grad32] = [[0.1, 0.2], [0.3, 0.1],
+    #          [0.5, 0]] - 0.1 * [[0.0200, 0.04244], [0.043336, 0.03992],
+    #          [0.060002, 0.02244]] = [[0.098, 0.195756], [0.2956664,
+    #          0.096008], [0.4939998, -0.002244]]
+    # W2 = W2 - eta * [W2grad1, W2grad2] = [[0.1], [0.2]] - 0.1 *
+    #        [[0.5294], [0.45911]] = [[0.04706], [0.154089]]
+    # b1 = b1 - eta * [b1grad1, b1grad2] = 0.1 - 0.1 * [0.01667, 0.0374]
+    #         = [0.098333, 0.09626]
+    # b2 = b2 - eta * b2grad = 1.0 - 0.1 * 0.765 = 0.9235
+    assert_almost_equal(
+        mlp.coefs_[0],
+        np.array([[0.098, 0.195756], [0.2956664, 0.096008], [0.4939998, -0.002244]]),
+        decimal=3,
+    )
+    assert_almost_equal(mlp.coefs_[1], np.array([[0.04706], [0.154089]]), decimal=3)
+    assert_almost_equal(mlp.intercepts_[0], np.array([0.098333, 0.09626]), decimal=3)
+    assert_almost_equal(mlp.intercepts_[1], np.array(0.9235), decimal=3)
+    # Testing output
+    #  h1 = g(X1 * W_i1 + b11) = g(0.6 * 0.098 + 0.8 * 0.2956664 +
+    #               0.7 * 0.4939998 + 0.098333) = 0.677
+    #  h2 = g(X2 * W_i2 + b12) = g(0.6 * 0.195756 + 0.8 * 0.096008 +
+    #            0.7 * -0.002244 + 0.09626) = 0.572
+    #  o1 = h * W2 + b21 = 0.677 * 0.04706 +
+    #             0.572 * 0.154089 + 0.9235 = 1.043
+    #  prob = sigmoid(o1) = 0.739
+    assert_almost_equal(mlp.predict_proba(X)[0, 1], 0.739, decimal=3)
+
+
+def test_gradient():
+    # Test gradient.
+
+    # This makes sure that the activation functions and their derivatives
+    # are correct. The numerical and analytical computation of the gradient
+    # should be close.
+    for n_labels in [2, 3]:
+        n_samples = 5
+        n_features = 10
+        random_state = np.random.RandomState(seed=42)
+        X = random_state.rand(n_samples, n_features)
+        y = 1 + np.mod(np.arange(n_samples) + 1, n_labels)
+        Y = LabelBinarizer().fit_transform(y)
+
+        for activation in ACTIVATION_TYPES:
+            mlp = MLPClassifier(
+                activation=activation,
+                hidden_layer_sizes=10,
+                solver="lbfgs",
+                alpha=1e-5,
+                learning_rate_init=0.2,
+                max_iter=1,
+                random_state=1,
+            )
+            mlp.fit(X, y)
+
+            theta = np.hstack([l.ravel() for l in mlp.coefs_ + mlp.intercepts_])
+
+            layer_units = [X.shape[1]] + [mlp.hidden_layer_sizes] + [mlp.n_outputs_]
+
+            activations = []
+            deltas = []
+            coef_grads = []
+            intercept_grads = []
+
+            activations.append(X)
+            for i in range(mlp.n_layers_ - 1):
+                activations.append(np.empty((X.shape[0], layer_units[i + 1])))
+                deltas.append(np.empty((X.shape[0], layer_units[i + 1])))
+
+                fan_in = layer_units[i]
+                fan_out = layer_units[i + 1]
+                coef_grads.append(np.empty((fan_in, fan_out)))
+                intercept_grads.append(np.empty(fan_out))
+
+            # analytically compute the gradients
+            def loss_grad_fun(t):
+                return mlp._loss_grad_lbfgs(
+                    t, X, Y, activations, deltas, coef_grads, intercept_grads
+                )
+
+            [value, grad] = loss_grad_fun(theta)
+            numgrad = np.zeros(np.size(theta))
+            n = np.size(theta, 0)
+            E = np.eye(n)
+            epsilon = 1e-5
+            # numerically compute the gradients
+            for i in range(n):
+                dtheta = E[:, i] * epsilon
+                numgrad[i] = (
+                    loss_grad_fun(theta + dtheta)[0] - loss_grad_fun(theta - dtheta)[0]
+                ) / (epsilon * 2.0)
+            assert_almost_equal(numgrad, grad)
+
+
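+# A minimal standalone sketch (illustration only) of the central finite-difference
+# check used in test_gradient: each parameter theta_i is perturbed by +/- epsilon
+# and the symmetric difference quotient approximates the analytical gradient.
+def _numerical_gradient_sketch(loss_fun, theta, epsilon=1e-5):
+    theta = np.asarray(theta, dtype=float)
+    numgrad = np.zeros_like(theta)
+    for i in range(theta.size):
+        step = np.zeros_like(theta)
+        step[i] = epsilon
+        numgrad[i] = (loss_fun(theta + step) - loss_fun(theta - step)) / (2 * epsilon)
+    return numgrad
+
+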
+@pytest.mark.parametrize("X,y", classification_datasets)
+def test_lbfgs_classification(X, y):
+    # Test lbfgs on classification.
+    # It should achieve a score higher than 0.95 for the binary and multi-class
+    # versions of the digits dataset.
+    X_train = X[:150]
+    y_train = y[:150]
+    X_test = X[150:]
+    expected_shape_dtype = (X_test.shape[0], y_train.dtype.kind)
+
+    for activation in ACTIVATION_TYPES:
+        mlp = MLPClassifier(
+            solver="lbfgs",
+            hidden_layer_sizes=50,
+            max_iter=150,
+            shuffle=True,
+            random_state=1,
+            activation=activation,
+        )
+        mlp.fit(X_train, y_train)
+        y_predict = mlp.predict(X_test)
+        assert mlp.score(X_train, y_train) > 0.95
+        assert (y_predict.shape[0], y_predict.dtype.kind) == expected_shape_dtype
+
+
+@pytest.mark.parametrize("X,y", regression_datasets)
+def test_lbfgs_regression(X, y):
+    # Test lbfgs on the regression dataset.
+    for activation in ACTIVATION_TYPES:
+        mlp = MLPRegressor(
+            solver="lbfgs",
+            hidden_layer_sizes=50,
+            max_iter=150,
+            shuffle=True,
+            random_state=1,
+            activation=activation,
+        )
+        mlp.fit(X, y)
+        if activation == "identity":
+            assert mlp.score(X, y) > 0.80
+        else:
+            # Non-linear models perform much better than a linear bottleneck:
+            assert mlp.score(X, y) > 0.98
+
+
+@pytest.mark.parametrize("X,y", classification_datasets)
+def test_lbfgs_classification_maxfun(X, y):
+    # Test lbfgs parameter max_fun.
+    # It should independently limit the number of iterations for lbfgs.
+    max_fun = 10
+    # classification tests
+    for activation in ACTIVATION_TYPES:
+        mlp = MLPClassifier(
+            solver="lbfgs",
+            hidden_layer_sizes=50,
+            max_iter=150,
+            max_fun=max_fun,
+            shuffle=True,
+            random_state=1,
+            activation=activation,
+        )
+        with pytest.warns(ConvergenceWarning):
+            mlp.fit(X, y)
+            assert max_fun >= mlp.n_iter_
+
+
+@pytest.mark.parametrize("X,y", regression_datasets)
+def test_lbfgs_regression_maxfun(X, y):
+    # Test lbfgs parameter max_fun.
+    # It should independently limit the number of iterations for lbfgs.
+    max_fun = 10
+    # regression tests
+    for activation in ACTIVATION_TYPES:
+        mlp = MLPRegressor(
+            solver="lbfgs",
+            hidden_layer_sizes=50,
+            tol=0.0,
+            max_iter=150,
+            max_fun=max_fun,
+            shuffle=True,
+            random_state=1,
+            activation=activation,
+        )
+        with pytest.warns(ConvergenceWarning):
+            mlp.fit(X, y)
+            assert max_fun >= mlp.n_iter_
+
+
+def test_learning_rate_warmstart():
+    # Tests that warm_start reuses past solutions.
+    X = [[3, 2], [1, 6], [5, 6], [-2, -4]]
+    y = [1, 1, 1, 0]
+    for learning_rate in ["invscaling", "constant"]:
+        mlp = MLPClassifier(
+            solver="sgd",
+            hidden_layer_sizes=4,
+            learning_rate=learning_rate,
+            max_iter=1,
+            power_t=0.25,
+            warm_start=True,
+        )
+        with ignore_warnings(category=ConvergenceWarning):
+            mlp.fit(X, y)
+            prev_eta = mlp._optimizer.learning_rate
+            mlp.fit(X, y)
+            post_eta = mlp._optimizer.learning_rate
+
+        if learning_rate == "constant":
+            assert prev_eta == post_eta
+        elif learning_rate == "invscaling":
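+            # "invscaling" decays the rate as learning_rate_init / t**power_t;
+            # the assertion below reflects t = 8 + 1 after the two warm-started
+            # one-epoch fits on this 4-sample dataset.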
+            assert mlp.learning_rate_init / pow(8 + 1, mlp.power_t) == post_eta
+
+
+def test_multilabel_classification():
+    # Test that multi-label classification works as expected.
+    # test fit method
+    X, y = make_multilabel_classification(
+        n_samples=50, random_state=0, return_indicator=True
+    )
+    mlp = MLPClassifier(
+        solver="lbfgs",
+        hidden_layer_sizes=50,
+        alpha=1e-5,
+        max_iter=150,
+        random_state=0,
+        activation="logistic",
+        learning_rate_init=0.2,
+    )
+    mlp.fit(X, y)
+    assert mlp.score(X, y) > 0.97
+
+    # test partial fit method
+    mlp = MLPClassifier(
+        solver="sgd",
+        hidden_layer_sizes=50,
+        max_iter=150,
+        random_state=0,
+        activation="logistic",
+        alpha=1e-5,
+        learning_rate_init=0.2,
+    )
+    for i in range(100):
+        mlp.partial_fit(X, y, classes=[0, 1, 2, 3, 4])
+    assert mlp.score(X, y) > 0.9
+
+    # Make sure early stopping still works now that splitting is stratified by
+    # default (it is disabled for multilabel classification)
+    mlp = MLPClassifier(early_stopping=True)
+    mlp.fit(X, y).predict(X)
+
+
+def test_multioutput_regression():
+    # Test that multi-output regression works as expected
+    X, y = make_regression(n_samples=200, n_targets=5)
+    mlp = MLPRegressor(
+        solver="lbfgs", hidden_layer_sizes=50, max_iter=200, random_state=1
+    )
+    mlp.fit(X, y)
+    assert mlp.score(X, y) > 0.9
+
+
+def test_partial_fit_classes_error():
+    # Tests that passing different classes to partial_fit raises an error
+    X = [[3, 2]]
+    y = [0]
+    clf = MLPClassifier(solver="sgd")
+    clf.partial_fit(X, y, classes=[0, 1])
+    with pytest.raises(ValueError):
+        clf.partial_fit(X, y, classes=[1, 2])
+
+
+def test_partial_fit_classification():
+    # Test partial_fit on classification.
+    # `partial_fit` should yield the same results as 'fit' for binary and
+    # multi-class classification.
+    for X, y in classification_datasets:
+        mlp = MLPClassifier(
+            solver="sgd",
+            max_iter=100,
+            random_state=1,
+            tol=0,
+            alpha=1e-5,
+            learning_rate_init=0.2,
+        )
+
+        with ignore_warnings(category=ConvergenceWarning):
+            mlp.fit(X, y)
+        pred1 = mlp.predict(X)
+        mlp = MLPClassifier(
+            solver="sgd", random_state=1, alpha=1e-5, learning_rate_init=0.2
+        )
+        for i in range(100):
+            mlp.partial_fit(X, y, classes=np.unique(y))
+        pred2 = mlp.predict(X)
+        assert_array_equal(pred1, pred2)
+        assert mlp.score(X, y) > 0.95
+
+
+def test_partial_fit_unseen_classes():
+    # Non-regression test for bug 6994
+    # Tests for labeling errors in partial_fit
+
+    clf = MLPClassifier(random_state=0)
+    clf.partial_fit([[1], [2], [3]], ["a", "b", "c"], classes=["a", "b", "c", "d"])
+    clf.partial_fit([[4]], ["d"])
+    assert clf.score([[1], [2], [3], [4]], ["a", "b", "c", "d"]) > 0
+
+
+def test_partial_fit_regression():
+    # Test partial_fit on regression.
+    # `partial_fit` should yield the same results as 'fit' for regression.
+    X = X_reg
+    y = y_reg
+
+    for momentum in [0, 0.9]:
+        mlp = MLPRegressor(
+            solver="sgd",
+            max_iter=100,
+            activation="relu",
+            random_state=1,
+            learning_rate_init=0.01,
+            batch_size=X.shape[0],
+            momentum=momentum,
+        )
+        with warnings.catch_warnings(record=True):
+            # catch convergence warning
+            mlp.fit(X, y)
+        pred1 = mlp.predict(X)
+        mlp = MLPRegressor(
+            solver="sgd",
+            activation="relu",
+            learning_rate_init=0.01,
+            random_state=1,
+            batch_size=X.shape[0],
+            momentum=momentum,
+        )
+        for i in range(100):
+            mlp.partial_fit(X, y)
+
+        pred2 = mlp.predict(X)
+        assert_allclose(pred1, pred2)
+        score = mlp.score(X, y)
+        assert score > 0.65
+
+
+def test_partial_fit_errors():
+    # Test partial_fit error handling.
+    X = [[3, 2], [1, 6]]
+    y = [1, 0]
+
+    # no classes passed
+    with pytest.raises(ValueError):
+        MLPClassifier(solver="sgd").partial_fit(X, y, classes=[2])
+
+    # lbfgs doesn't support partial_fit
+    assert not hasattr(MLPClassifier(solver="lbfgs"), "partial_fit")
+
+
+def test_nonfinite_params():
+    # Check that MLPRegressor raises a ValueError when the solver produces
+    # non-finite parameter values
+    rng = np.random.RandomState(0)
+    n_samples = 10
+    fmax = np.finfo(np.float64).max
+    X = fmax * rng.uniform(size=(n_samples, 2))
+    y = rng.standard_normal(size=n_samples)
+
+    clf = MLPRegressor()
+    msg = (
+        "Solver produced non-finite parameter weights. The input data may contain large"
+        " values and need to be preprocessed."
+    )
+    with pytest.raises(ValueError, match=msg):
+        clf.fit(X, y)
+
+
+def test_predict_proba_binary():
+    # Test that predict_proba works as expected for binary class.
+    X = X_digits_binary[:50]
+    y = y_digits_binary[:50]
+
+    clf = MLPClassifier(hidden_layer_sizes=5, activation="logistic", random_state=1)
+    with ignore_warnings(category=ConvergenceWarning):
+        clf.fit(X, y)
+    y_proba = clf.predict_proba(X)
+    y_log_proba = clf.predict_log_proba(X)
+
+    (n_samples, n_classes) = y.shape[0], 2
+
+    proba_max = y_proba.argmax(axis=1)
+    proba_log_max = y_log_proba.argmax(axis=1)
+
+    assert y_proba.shape == (n_samples, n_classes)
+    assert_array_equal(proba_max, proba_log_max)
+    assert_allclose(y_log_proba, np.log(y_proba))
+
+    assert roc_auc_score(y, y_proba[:, 1]) == 1.0
+
+
+def test_predict_proba_multiclass():
+    # Test that predict_proba works as expected for multi class.
+    X = X_digits_multi[:10]
+    y = y_digits_multi[:10]
+
+    clf = MLPClassifier(hidden_layer_sizes=5)
+    with ignore_warnings(category=ConvergenceWarning):
+        clf.fit(X, y)
+    y_proba = clf.predict_proba(X)
+    y_log_proba = clf.predict_log_proba(X)
+
+    (n_samples, n_classes) = y.shape[0], np.unique(y).size
+
+    proba_max = y_proba.argmax(axis=1)
+    proba_log_max = y_log_proba.argmax(axis=1)
+
+    assert y_proba.shape == (n_samples, n_classes)
+    assert_array_equal(proba_max, proba_log_max)
+    assert_allclose(y_log_proba, np.log(y_proba))
+
+
+def test_predict_proba_multilabel():
+    # Test that predict_proba works as expected for multilabel.
+    # Multilabel should not use softmax, which makes probabilities sum to 1
+    X, Y = make_multilabel_classification(
+        n_samples=50, random_state=0, return_indicator=True
+    )
+    n_samples, n_classes = Y.shape
+
+    clf = MLPClassifier(solver="lbfgs", hidden_layer_sizes=30, random_state=0)
+    clf.fit(X, Y)
+    y_proba = clf.predict_proba(X)
+
+    assert y_proba.shape == (n_samples, n_classes)
+    assert_array_equal(y_proba > 0.5, Y)
+
+    y_log_proba = clf.predict_log_proba(X)
+    proba_max = y_proba.argmax(axis=1)
+    proba_log_max = y_log_proba.argmax(axis=1)
+
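+    # The squared deviation of the row sums from 1 is non-negligible, i.e. the
+    # per-sample probabilities do not sum to one (no softmax for multilabel).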
+    assert (y_proba.sum(1) - 1).dot(y_proba.sum(1) - 1) > 1e-10
+    assert_array_equal(proba_max, proba_log_max)
+    assert_allclose(y_log_proba, np.log(y_proba))
+
+
+def test_shuffle():
+    # Test that the shuffle parameter affects the training process (it should)
+    X, y = make_regression(n_samples=50, n_features=5, n_targets=1, random_state=0)
+
+    # The coefficients will be identical if both do or do not shuffle
+    for shuffle in [True, False]:
+        mlp1 = MLPRegressor(
+            hidden_layer_sizes=1,
+            max_iter=1,
+            batch_size=1,
+            random_state=0,
+            shuffle=shuffle,
+        )
+        mlp2 = MLPRegressor(
+            hidden_layer_sizes=1,
+            max_iter=1,
+            batch_size=1,
+            random_state=0,
+            shuffle=shuffle,
+        )
+        mlp1.fit(X, y)
+        mlp2.fit(X, y)
+
+        assert np.array_equal(mlp1.coefs_[0], mlp2.coefs_[0])
+
+    # The coefficients will differ when one model shuffles and the other does not
+    mlp1 = MLPRegressor(
+        hidden_layer_sizes=1, max_iter=1, batch_size=1, random_state=0, shuffle=True
+    )
+    mlp2 = MLPRegressor(
+        hidden_layer_sizes=1, max_iter=1, batch_size=1, random_state=0, shuffle=False
+    )
+    mlp1.fit(X, y)
+    mlp2.fit(X, y)
+
+    assert not np.array_equal(mlp1.coefs_[0], mlp2.coefs_[0])
+
+
+@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
+def test_sparse_matrices(csr_container):
+    # Test that sparse and dense input matrices output the same results.
+    X = X_digits_binary[:50]
+    y = y_digits_binary[:50]
+    X_sparse = csr_container(X)
+    mlp = MLPClassifier(solver="lbfgs", hidden_layer_sizes=15, random_state=1)
+    mlp.fit(X, y)
+    pred1 = mlp.predict(X)
+    mlp.fit(X_sparse, y)
+    pred2 = mlp.predict(X_sparse)
+    assert_almost_equal(pred1, pred2)
+    pred1 = mlp.predict(X)
+    pred2 = mlp.predict(X_sparse)
+    assert_array_equal(pred1, pred2)
+
+
+def test_tolerance():
+    # Test tolerance.
+    # It should force the solver to exit the loop when it converges.
+    X = [[3, 2], [1, 6]]
+    y = [1, 0]
+    clf = MLPClassifier(tol=0.5, max_iter=3000, solver="sgd")
+    clf.fit(X, y)
+    assert clf.max_iter > clf.n_iter_
+
+
+def test_verbose_sgd():
+    # Test verbose.
+    X = [[3, 2], [1, 6]]
+    y = [1, 0]
+    clf = MLPClassifier(solver="sgd", max_iter=2, verbose=10, hidden_layer_sizes=2)
+    old_stdout = sys.stdout
+    sys.stdout = output = StringIO()
+
+    with ignore_warnings(category=ConvergenceWarning):
+        clf.fit(X, y)
+    clf.partial_fit(X, y)
+
+    sys.stdout = old_stdout
+    assert "Iteration" in output.getvalue()
+
+
+@pytest.mark.parametrize("MLPEstimator", [MLPClassifier, MLPRegressor])
+def test_early_stopping(MLPEstimator):
+    X = X_digits_binary[:100]
+    y = y_digits_binary[:100]
+    tol = 0.2
+    mlp_estimator = MLPEstimator(
+        tol=tol, max_iter=3000, solver="sgd", early_stopping=True
+    )
+    mlp_estimator.fit(X, y)
+    assert mlp_estimator.max_iter > mlp_estimator.n_iter_
+
+    assert mlp_estimator.best_loss_ is None
+    assert isinstance(mlp_estimator.validation_scores_, list)
+
+    valid_scores = mlp_estimator.validation_scores_
+    best_valid_score = mlp_estimator.best_validation_score_
+    assert max(valid_scores) == best_valid_score
+    assert best_valid_score + tol > valid_scores[-2]
+    assert best_valid_score + tol > valid_scores[-1]
+
+    # check that the attributes `validation_scores_` and `best_validation_score_`
+    # are set to None when `early_stopping=False`
+    mlp_estimator = MLPEstimator(
+        tol=tol, max_iter=3000, solver="sgd", early_stopping=False
+    )
+    mlp_estimator.fit(X, y)
+    assert mlp_estimator.validation_scores_ is None
+    assert mlp_estimator.best_validation_score_ is None
+    assert mlp_estimator.best_loss_ is not None
+
+
+def test_adaptive_learning_rate():
+    X = [[3, 2], [1, 6]]
+    y = [1, 0]
+    clf = MLPClassifier(tol=0.5, max_iter=3000, solver="sgd", learning_rate="adaptive")
+    clf.fit(X, y)
+    assert clf.max_iter > clf.n_iter_
+    assert 1e-6 > clf._optimizer.learning_rate
+
+
+@ignore_warnings(category=RuntimeWarning)
+def test_warm_start():
+    X = X_iris
+    y = y_iris
+
+    y_2classes = np.array([0] * 75 + [1] * 75)
+    y_3classes = np.array([0] * 40 + [1] * 40 + [2] * 70)
+    y_3classes_alt = np.array([0] * 50 + [1] * 50 + [3] * 50)
+    y_4classes = np.array([0] * 37 + [1] * 37 + [2] * 38 + [3] * 38)
+    y_5classes = np.array([0] * 30 + [1] * 30 + [2] * 30 + [3] * 30 + [4] * 30)
+
+    # No error raised
+    clf = MLPClassifier(hidden_layer_sizes=2, solver="lbfgs", warm_start=True).fit(X, y)
+    clf.fit(X, y)
+    clf.fit(X, y_3classes)
+
+    for y_i in (y_2classes, y_3classes_alt, y_4classes, y_5classes):
+        clf = MLPClassifier(hidden_layer_sizes=2, solver="lbfgs", warm_start=True).fit(
+            X, y
+        )
+        message = (
+            "warm_start can only be used where `y` has the same "
+            "classes as in the previous call to fit."
+            " Previously got [0 1 2], `y` has %s"
+            % np.unique(y_i)
+        )
+        with pytest.raises(ValueError, match=re.escape(message)):
+            clf.fit(X, y_i)
+
+
+@pytest.mark.parametrize("MLPEstimator", [MLPClassifier, MLPRegressor])
+def test_warm_start_full_iteration(MLPEstimator):
+    # Non-regression test for:
+    # https://github.com/scikit-learn/scikit-learn/issues/16812
+    # Check that the MLP estimator accomplishes `max_iter` with a
+    # warm-started estimator.
+    X, y = X_iris, y_iris
+    max_iter = 3
+    clf = MLPEstimator(
+        hidden_layer_sizes=2, solver="sgd", warm_start=True, max_iter=max_iter
+    )
+    clf.fit(X, y)
+    assert max_iter == clf.n_iter_
+    clf.fit(X, y)
+    assert max_iter == clf.n_iter_
+
+
+def test_n_iter_no_change():
+    # Test n_iter_no_change using a binary dataset;
+    # the classifier's fitting process is not prone to loss-curve fluctuations
+    X = X_digits_binary[:100]
+    y = y_digits_binary[:100]
+    tol = 0.01
+    max_iter = 3000
+
+    # test multiple n_iter_no_change
+    for n_iter_no_change in [2, 5, 10, 50, 100]:
+        clf = MLPClassifier(
+            tol=tol, max_iter=max_iter, solver="sgd", n_iter_no_change=n_iter_no_change
+        )
+        clf.fit(X, y)
+
+        # validate n_iter_no_change
+        assert clf._no_improvement_count == n_iter_no_change + 1
+        assert max_iter > clf.n_iter_
+
+
+@ignore_warnings(category=ConvergenceWarning)
+def test_n_iter_no_change_inf():
+    # Test n_iter_no_change=inf using a binary dataset;
+    # the fitting process should run for all max_iter iterations
+    X = X_digits_binary[:100]
+    y = y_digits_binary[:100]
+
+    # set a ridiculous tolerance
+    # this should always trigger _update_no_improvement_count()
+    tol = 1e9
+
+    # fit
+    n_iter_no_change = np.inf
+    max_iter = 3000
+    clf = MLPClassifier(
+        tol=tol, max_iter=max_iter, solver="sgd", n_iter_no_change=n_iter_no_change
+    )
+    clf.fit(X, y)
+
+    # validate n_iter_no_change doesn't cause early stopping
+    assert clf.n_iter_ == max_iter
+
+    # validate _update_no_improvement_count() was always triggered
+    assert clf._no_improvement_count == clf.n_iter_ - 1
+
+
+def test_early_stopping_stratified():
+    # Make sure data splitting for early stopping is stratified
+    X = [[1, 2], [2, 3], [3, 4], [4, 5]]
+    y = [0, 0, 0, 1]
+
+    mlp = MLPClassifier(early_stopping=True)
+    with pytest.raises(
+        ValueError, match="The least populated class in y has only 1 member"
+    ):
+        mlp.fit(X, y)
+
+
+def test_mlp_classifier_dtypes_casting():
+    # Compare predictions for different dtypes
+    mlp_64 = MLPClassifier(
+        alpha=1e-5, hidden_layer_sizes=(5, 3), random_state=1, max_iter=50
+    )
+    mlp_64.fit(X_digits[:300], y_digits[:300])
+    pred_64 = mlp_64.predict(X_digits[300:])
+    proba_64 = mlp_64.predict_proba(X_digits[300:])
+
+    mlp_32 = MLPClassifier(
+        alpha=1e-5, hidden_layer_sizes=(5, 3), random_state=1, max_iter=50
+    )
+    mlp_32.fit(X_digits[:300].astype(np.float32), y_digits[:300])
+    pred_32 = mlp_32.predict(X_digits[300:].astype(np.float32))
+    proba_32 = mlp_32.predict_proba(X_digits[300:].astype(np.float32))
+
+    assert_array_equal(pred_64, pred_32)
+    assert_allclose(proba_64, proba_32, rtol=1e-02)
+
+
+def test_mlp_regressor_dtypes_casting():
+    mlp_64 = MLPRegressor(
+        alpha=1e-5, hidden_layer_sizes=(5, 3), random_state=1, max_iter=50
+    )
+    mlp_64.fit(X_digits[:300], y_digits[:300])
+    pred_64 = mlp_64.predict(X_digits[300:])
+
+    mlp_32 = MLPRegressor(
+        alpha=1e-5, hidden_layer_sizes=(5, 3), random_state=1, max_iter=50
+    )
+    mlp_32.fit(X_digits[:300].astype(np.float32), y_digits[:300])
+    pred_32 = mlp_32.predict(X_digits[300:].astype(np.float32))
+
+    assert_allclose(pred_64, pred_32, rtol=1e-04)
+
+
+@pytest.mark.parametrize("dtype", [np.float32, np.float64])
+@pytest.mark.parametrize("Estimator", [MLPClassifier, MLPRegressor])
+def test_mlp_param_dtypes(dtype, Estimator):
+    # Check that the input dtype is used for the network parameters
+    # and the predictions
+    X, y = X_digits.astype(dtype), y_digits
+    mlp = Estimator(alpha=1e-5, hidden_layer_sizes=(5, 3), random_state=1, max_iter=50)
+    mlp.fit(X[:300], y[:300])
+    pred = mlp.predict(X[300:])
+
+    assert all([intercept.dtype == dtype for intercept in mlp.intercepts_])
+
+    assert all([coef.dtype == dtype for coef in mlp.coefs_])
+
+    if Estimator == MLPRegressor:
+        assert pred.dtype == dtype
+
+
+def test_mlp_loading_from_joblib_partial_fit(tmp_path):
+    """Loading from MLP and partial fitting updates weights. Non-regression
+    test for #19626."""
+    pre_trained_estimator = MLPRegressor(
+        hidden_layer_sizes=(42,), random_state=42, learning_rate_init=0.01, max_iter=200
+    )
+    features, target = [[2]], [4]
+
+    # Fit on x=2, y=4
+    pre_trained_estimator.fit(features, target)
+
+    # dump and load model
+    pickled_file = tmp_path / "mlp.pkl"
+    joblib.dump(pre_trained_estimator, pickled_file)
+    load_estimator = joblib.load(pickled_file)
+
+    # Train for more epochs on the point x=2, y=1
+    fine_tune_features, fine_tune_target = [[2]], [1]
+
+    for _ in range(200):
+        load_estimator.partial_fit(fine_tune_features, fine_tune_target)
+
+    # the fine-tuned model learned the new target
+    predicted_value = load_estimator.predict(fine_tune_features)
+    assert_allclose(predicted_value, fine_tune_target, rtol=1e-4)
+
+
+@pytest.mark.parametrize("Estimator", [MLPClassifier, MLPRegressor])
+def test_preserve_feature_names(Estimator):
+    """Check that feature names are preserved when early stopping is enabled.
+
+    Feature names are required for consistency checks during scoring.
+
+    Non-regression test for gh-24846
+    """
+    pd = pytest.importorskip("pandas")
+    rng = np.random.RandomState(0)
+
+    X = pd.DataFrame(data=rng.randn(10, 2), columns=["colname_a", "colname_b"])
+    y = pd.Series(data=np.full(10, 1), name="colname_y")
+
+    model = Estimator(early_stopping=True, validation_fraction=0.2)
+
+    with warnings.catch_warnings():
+        warnings.simplefilter("error", UserWarning)
+        model.fit(X, y)
+
+
+@pytest.mark.parametrize("MLPEstimator", [MLPClassifier, MLPRegressor])
+def test_mlp_warm_start_with_early_stopping(MLPEstimator):
+    """Check that early stopping works with warm start."""
+    mlp = MLPEstimator(
+        max_iter=10, random_state=0, warm_start=True, early_stopping=True
+    )
+    mlp.fit(X_iris, y_iris)
+    n_validation_scores = len(mlp.validation_scores_)
+    mlp.set_params(max_iter=20)
+    mlp.fit(X_iris, y_iris)
+    assert len(mlp.validation_scores_) > n_validation_scores
+
+
+@pytest.mark.parametrize("MLPEstimator", [MLPClassifier, MLPRegressor])
+@pytest.mark.parametrize("solver", ["sgd", "adam", "lbfgs"])
+def test_mlp_warm_start_no_convergence(MLPEstimator, solver):
+    """Check that we stop the number of iteration at `max_iter` when warm starting.
+
+    Non-regression test for:
+    https://github.com/scikit-learn/scikit-learn/issues/24764
+    """
+    model = MLPEstimator(
+        solver=solver,
+        warm_start=True,
+        early_stopping=False,
+        max_iter=10,
+        n_iter_no_change=np.inf,
+        random_state=0,
+    )
+
+    with pytest.warns(ConvergenceWarning):
+        model.fit(X_iris, y_iris)
+    assert model.n_iter_ == 10
+
+    model.set_params(max_iter=20)
+    with pytest.warns(ConvergenceWarning):
+        model.fit(X_iris, y_iris)
+    assert model.n_iter_ == 20
+
+
+@pytest.mark.parametrize("MLPEstimator", [MLPClassifier, MLPRegressor])
+def test_mlp_partial_fit_after_fit(MLPEstimator):
+    """Check partial fit does not fail after fit when early_stopping=True.
+
+    Non-regression test for gh-25693.
+    """
+    mlp = MLPEstimator(early_stopping=True, random_state=0).fit(X_iris, y_iris)
+
+    msg = "partial_fit does not support early_stopping=True"
+    with pytest.raises(ValueError, match=msg):
+        mlp.partial_fit(X_iris, y_iris)
diff --git a/venv/lib/python3.10/site-packages/sklearn/neural_network/tests/test_rbm.py b/venv/lib/python3.10/site-packages/sklearn/neural_network/tests/test_rbm.py
new file mode 100644
index 0000000000000000000000000000000000000000..8211c9735923d650234d4268cb30336ddc3ebbb1
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/sklearn/neural_network/tests/test_rbm.py
@@ -0,0 +1,251 @@
+import re
+import sys
+from io import StringIO
+
+import numpy as np
+import pytest
+
+from sklearn.datasets import load_digits
+from sklearn.neural_network import BernoulliRBM
+from sklearn.utils._testing import (
+    assert_allclose,
+    assert_almost_equal,
+    assert_array_equal,
+)
+from sklearn.utils.fixes import CSC_CONTAINERS, CSR_CONTAINERS, LIL_CONTAINERS
+from sklearn.utils.validation import assert_all_finite
+
+Xdigits, _ = load_digits(return_X_y=True)
+Xdigits -= Xdigits.min()
+Xdigits /= Xdigits.max()
+
+
+def test_fit():
+    X = Xdigits.copy()
+
+    rbm = BernoulliRBM(
+        n_components=64, learning_rate=0.1, batch_size=10, n_iter=7, random_state=9
+    )
+    rbm.fit(X)
+
+    assert_almost_equal(rbm.score_samples(X).mean(), -21.0, decimal=0)
+
+    # in-place tricks shouldn't have modified X
+    assert_array_equal(X, Xdigits)
+
+
+def test_partial_fit():
+    X = Xdigits.copy()
+    rbm = BernoulliRBM(
+        n_components=64, learning_rate=0.1, batch_size=20, random_state=9
+    )
+    n_samples = X.shape[0]
+    n_batches = int(np.ceil(float(n_samples) / rbm.batch_size))
+    batch_slices = np.array_split(X, n_batches)
+
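+    # Seven manual passes over the mini-batches mirror fit() with n_iter=7 in
+    # test_fit above and should reach roughly the same pseudo-likelihood.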
+    for i in range(7):
+        for batch in batch_slices:
+            rbm.partial_fit(batch)
+
+    assert_almost_equal(rbm.score_samples(X).mean(), -21.0, decimal=0)
+    assert_array_equal(X, Xdigits)
+
+
+def test_transform():
+    X = Xdigits[:100]
+    rbm1 = BernoulliRBM(n_components=16, batch_size=5, n_iter=5, random_state=42)
+    rbm1.fit(X)
+
+    Xt1 = rbm1.transform(X)
+    Xt2 = rbm1._mean_hiddens(X)
+
+    assert_array_equal(Xt1, Xt2)
+
+
+@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
+def test_small_sparse(csr_container):
+    # BernoulliRBM should work on small sparse matrices.
+    X = csr_container(Xdigits[:4])
+    BernoulliRBM().fit(X)  # no exception
+
+
+@pytest.mark.parametrize("sparse_container", CSC_CONTAINERS + CSR_CONTAINERS)
+def test_small_sparse_partial_fit(sparse_container):
+    X_sparse = sparse_container(Xdigits[:100])
+    X = Xdigits[:100].copy()
+
+    rbm1 = BernoulliRBM(
+        n_components=64, learning_rate=0.1, batch_size=10, random_state=9
+    )
+    rbm2 = BernoulliRBM(
+        n_components=64, learning_rate=0.1, batch_size=10, random_state=9
+    )
+
+    rbm1.partial_fit(X_sparse)
+    rbm2.partial_fit(X)
+
+    assert_almost_equal(
+        rbm1.score_samples(X).mean(), rbm2.score_samples(X).mean(), decimal=0
+    )
+
+
+def test_sample_hiddens():
+    rng = np.random.RandomState(0)
+    X = Xdigits[:100]
+    rbm1 = BernoulliRBM(n_components=2, batch_size=5, n_iter=5, random_state=42)
+    rbm1.fit(X)
+
+    h = rbm1._mean_hiddens(X[0])
+    hs = np.mean([rbm1._sample_hiddens(X[0], rng) for i in range(100)], 0)
+
+    assert_almost_equal(h, hs, decimal=1)
+
+
+@pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
+def test_fit_gibbs(csc_container):
+    # XXX: this test is very seed-dependent! It probably needs to be rewritten.
+
+    # Gibbs on the RBM hidden layer should be able to recreate [[0], [1]]
+    # from the same input
+    rng = np.random.RandomState(42)
+    X = np.array([[0.0], [1.0]])
+    rbm1 = BernoulliRBM(n_components=2, batch_size=2, n_iter=42, random_state=rng)
+    # this many iterations are needed for convergence
+    rbm1.fit(X)
+    assert_almost_equal(
+        rbm1.components_, np.array([[0.02649814], [0.02009084]]), decimal=4
+    )
+    assert_almost_equal(rbm1.gibbs(X), X)
+
+    # Gibbs on the RBM hidden layer should be able to recreate [[0], [1]] from
+    # the same input even when the input is sparse, and the result should match
+    # the dense case
+    rng = np.random.RandomState(42)
+    X = csc_container([[0.0], [1.0]])
+    rbm2 = BernoulliRBM(n_components=2, batch_size=2, n_iter=42, random_state=rng)
+    rbm2.fit(X)
+    assert_almost_equal(
+        rbm2.components_, np.array([[0.02649814], [0.02009084]]), decimal=4
+    )
+    assert_almost_equal(rbm2.gibbs(X), X.toarray())
+    assert_almost_equal(rbm1.components_, rbm2.components_)
+
+
+def test_gibbs_smoke():
+    # Check that we don't get NaNs when sampling the full digits dataset.
+    # Also check that sampling again will yield different results.
+    X = Xdigits
+    rbm1 = BernoulliRBM(n_components=42, batch_size=40, n_iter=20, random_state=42)
+    rbm1.fit(X)
+    X_sampled = rbm1.gibbs(X)
+    assert_all_finite(X_sampled)
+    X_sampled2 = rbm1.gibbs(X)
+    assert np.all((X_sampled != X_sampled2).max(axis=1))
+
+
+@pytest.mark.parametrize("lil_containers", LIL_CONTAINERS)
+def test_score_samples(lil_containers):
+    # Test score_samples (pseudo-likelihood) method.
+    # Assert that pseudo-likelihood is computed without clipping.
+    # See Fabian's blog, http://bit.ly/1iYefRk
+    rng = np.random.RandomState(42)
+    X = np.vstack([np.zeros(1000), np.ones(1000)])
+    rbm1 = BernoulliRBM(n_components=10, batch_size=2, n_iter=10, random_state=rng)
+    rbm1.fit(X)
+    assert (rbm1.score_samples(X) < -300).all()
+
+    # Sparse vs. dense should not affect the output. Also test sparse input
+    # validation.
+    rbm1.random_state = 42
+    d_score = rbm1.score_samples(X)
+    rbm1.random_state = 42
+    s_score = rbm1.score_samples(lil_containers(X))
+    assert_almost_equal(d_score, s_score)
+
+    # Test numerical stability (#2785): would previously generate infinities
+    # and crash with an exception.
+    with np.errstate(under="ignore"):
+        rbm1.score_samples([np.arange(1000) * 100])
+
+
+def test_rbm_verbose():
+    rbm = BernoulliRBM(n_iter=2, verbose=10)
+    old_stdout = sys.stdout
+    sys.stdout = StringIO()
+    try:
+        rbm.fit(Xdigits)
+    finally:
+        sys.stdout = old_stdout
+
+
+@pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
+def test_sparse_and_verbose(csc_container):
+    # Make sure RBM works with sparse input when verbose=True
+    old_stdout = sys.stdout
+    sys.stdout = StringIO()
+
+    X = csc_container([[0.0], [1.0]])
+    rbm = BernoulliRBM(
+        n_components=2, batch_size=2, n_iter=1, random_state=42, verbose=True
+    )
+    try:
+        rbm.fit(X)
+        s = sys.stdout.getvalue()
+        # make sure output is sound
+        assert re.match(
+            r"\[BernoulliRBM\] Iteration 1,"
+            r" pseudo-likelihood = -?(\d)+(\.\d+)?,"
+            r" time = (\d|\.)+s",
+            s,
+        )
+    finally:
+        sys.stdout = old_stdout
+
+
+@pytest.mark.parametrize(
+    "dtype_in, dtype_out",
+    [(np.float32, np.float32), (np.float64, np.float64), (int, np.float64)],
+)
+def test_transformer_dtypes_casting(dtype_in, dtype_out):
+    X = Xdigits[:100].astype(dtype_in)
+    rbm = BernoulliRBM(n_components=16, batch_size=5, n_iter=5, random_state=42)
+    Xt = rbm.fit_transform(X)
+
+    # dtype_in and dtype_out should be consistent
+    assert Xt.dtype == dtype_out, "transform dtype: {} - original dtype: {}".format(
+        Xt.dtype, X.dtype
+    )
+
+
+def test_convergence_dtype_consistency():
+    # float 64 transformer
+    X_64 = Xdigits[:100].astype(np.float64)
+    rbm_64 = BernoulliRBM(n_components=16, batch_size=5, n_iter=5, random_state=42)
+    Xt_64 = rbm_64.fit_transform(X_64)
+
+    # float 32 transformer
+    X_32 = Xdigits[:100].astype(np.float32)
+    rbm_32 = BernoulliRBM(n_components=16, batch_size=5, n_iter=5, random_state=42)
+    Xt_32 = rbm_32.fit_transform(X_32)
+
+    # results and attributes should be close enough in 32 bit and 64 bit
+    assert_allclose(Xt_64, Xt_32, rtol=1e-06, atol=0)
+    assert_allclose(
+        rbm_64.intercept_hidden_, rbm_32.intercept_hidden_, rtol=1e-06, atol=0
+    )
+    assert_allclose(
+        rbm_64.intercept_visible_, rbm_32.intercept_visible_, rtol=1e-05, atol=0
+    )
+    assert_allclose(rbm_64.components_, rbm_32.components_, rtol=1e-03, atol=0)
+    assert_allclose(rbm_64.h_samples_, rbm_32.h_samples_)
+
+
+@pytest.mark.parametrize("method", ["fit", "partial_fit"])
+def test_feature_names_out(method):
+    """Check `get_feature_names_out` for `BernoulliRBM`."""
+    n_components = 10
+    rbm = BernoulliRBM(n_components=n_components)
+    getattr(rbm, method)(Xdigits)
+
+    names = rbm.get_feature_names_out()
+    expected_names = [f"bernoullirbm{i}" for i in range(n_components)]
+    assert_array_equal(expected_names, names)
diff --git a/venv/lib/python3.10/site-packages/sklearn/neural_network/tests/test_stochastic_optimizers.py b/venv/lib/python3.10/site-packages/sklearn/neural_network/tests/test_stochastic_optimizers.py
new file mode 100644
index 0000000000000000000000000000000000000000..58a9f0c7dda13fd288c1c86f6a52fede485787ad
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/sklearn/neural_network/tests/test_stochastic_optimizers.py
@@ -0,0 +1,112 @@
+import numpy as np
+
+from sklearn.neural_network._stochastic_optimizers import (
+    AdamOptimizer,
+    BaseOptimizer,
+    SGDOptimizer,
+)
+from sklearn.utils._testing import assert_array_equal
+
+shapes = [(4, 6), (6, 8), (7, 8, 9)]
+
+
+def test_base_optimizer():
+    for lr in [10**i for i in range(-3, 4)]:
+        optimizer = BaseOptimizer(lr)
+        assert optimizer.trigger_stopping("", False)
+
+
+def test_sgd_optimizer_no_momentum():
+    params = [np.zeros(shape) for shape in shapes]
+    rng = np.random.RandomState(0)
+
+    for lr in [10**i for i in range(-3, 4)]:
+        optimizer = SGDOptimizer(params, lr, momentum=0, nesterov=False)
+        grads = [rng.random_sample(shape) for shape in shapes]
+        expected = [param - lr * grad for param, grad in zip(params, grads)]
+        optimizer.update_params(params, grads)
+
+        for exp, param in zip(expected, params):
+            assert_array_equal(exp, param)
+
+
+def test_sgd_optimizer_momentum():
+    params = [np.zeros(shape) for shape in shapes]
+    lr = 0.1
+    rng = np.random.RandomState(0)
+
+    for momentum in np.arange(0.5, 0.9, 0.1):
+        optimizer = SGDOptimizer(params, lr, momentum=momentum, nesterov=False)
+        velocities = [rng.random_sample(shape) for shape in shapes]
+        optimizer.velocities = velocities
+        grads = [rng.random_sample(shape) for shape in shapes]
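+        # Classical (heavy-ball) momentum as exercised here: the velocity becomes
+        # momentum * velocity - lr * grad, and the parameter moves by that velocity.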
+        updates = [
+            momentum * velocity - lr * grad for velocity, grad in zip(velocities, grads)
+        ]
+        expected = [param + update for param, update in zip(params, updates)]
+        optimizer.update_params(params, grads)
+
+        for exp, param in zip(expected, params):
+            assert_array_equal(exp, param)
+
+
+def test_sgd_optimizer_trigger_stopping():
+    params = [np.zeros(shape) for shape in shapes]
+    lr = 2e-6
+    optimizer = SGDOptimizer(params, lr, lr_schedule="adaptive")
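+    # With the adaptive schedule, trigger_stopping shrinks the learning rate by a
+    # factor of 5 instead of stopping while the rate is above 1e-6: starting from
+    # 2e-6, the first call drops it to 4e-7 and returns False; the second call
+    # then requests stopping.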
+    assert not optimizer.trigger_stopping("", False)
+    assert lr / 5 == optimizer.learning_rate
+    assert optimizer.trigger_stopping("", False)
+
+
+def test_sgd_optimizer_nesterovs_momentum():
+    params = [np.zeros(shape) for shape in shapes]
+    lr = 0.1
+    rng = np.random.RandomState(0)
+
+    for momentum in np.arange(0.5, 0.9, 0.1):
+        optimizer = SGDOptimizer(params, lr, momentum=momentum, nesterov=True)
+        velocities = [rng.random_sample(shape) for shape in shapes]
+        optimizer.velocities = velocities
+        grads = [rng.random_sample(shape) for shape in shapes]
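+        # Nesterov variant as exercised here: the velocity update
+        # (momentum * velocity - lr * grad) is computed first, and the applied
+        # parameter step re-applies momentum to that new velocity minus another
+        # gradient step (the "look-ahead" correction).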
+        updates = [
+            momentum * velocity - lr * grad for velocity, grad in zip(velocities, grads)
+        ]
+        updates = [
+            momentum * update - lr * grad for update, grad in zip(updates, grads)
+        ]
+        expected = [param + update for param, update in zip(params, updates)]
+        optimizer.update_params(params, grads)
+
+        for exp, param in zip(expected, params):
+            assert_array_equal(exp, param)
+
+
+def test_adam_optimizer():
+    params = [np.zeros(shape) for shape in shapes]
+    lr = 0.001
+    epsilon = 1e-8
+    rng = np.random.RandomState(0)
+
+    for beta_1 in np.arange(0.9, 1.0, 0.05):
+        for beta_2 in np.arange(0.995, 1.0, 0.001):
+            optimizer = AdamOptimizer(params, lr, beta_1, beta_2, epsilon)
+            ms = [rng.random_sample(shape) for shape in shapes]
+            vs = [rng.random_sample(shape) for shape in shapes]
+            t = 10
+            optimizer.ms = ms
+            optimizer.vs = vs
+            optimizer.t = t - 1
+            grads = [rng.random_sample(shape) for shape in shapes]
+
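+            # Expected Adam step, recomputed by hand: exponential moving averages
+            # of the gradient (ms) and squared gradient (vs), a bias-corrected
+            # step size, and the elementwise update -lr_t * m / (sqrt(v) + eps).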
+            ms = [beta_1 * m + (1 - beta_1) * grad for m, grad in zip(ms, grads)]
+            vs = [beta_2 * v + (1 - beta_2) * (grad**2) for v, grad in zip(vs, grads)]
+            learning_rate = lr * np.sqrt(1 - beta_2**t) / (1 - beta_1**t)
+            updates = [
+                -learning_rate * m / (np.sqrt(v) + epsilon) for m, v in zip(ms, vs)
+            ]
+            expected = [param + update for param, update in zip(params, updates)]
+
+            optimizer.update_params(params, grads)
+            for exp, param in zip(expected, params):
+                assert_array_equal(exp, param)