diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/__init__.py b/llmeval-env/lib/python3.10/site-packages/joblib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..88203212fbcff50994effb7337bf013950767c92 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/__init__.py @@ -0,0 +1,148 @@ +"""Joblib is a set of tools to provide **lightweight pipelining in +Python**. In particular: + +1. transparent disk-caching of functions and lazy re-evaluation + (memoize pattern) + +2. easy simple parallel computing + +Joblib is optimized to be **fast** and **robust** on large +data in particular and has specific optimizations for `numpy` arrays. It is +**BSD-licensed**. + + + ==================== =============================================== + **Documentation:** https://joblib.readthedocs.io + + **Download:** https://pypi.python.org/pypi/joblib#downloads + + **Source code:** https://github.com/joblib/joblib + + **Report issues:** https://github.com/joblib/joblib/issues + ==================== =============================================== + + +Vision +-------- + +The vision is to provide tools to easily achieve better performance and +reproducibility when working with long running jobs. + + * **Avoid computing the same thing twice**: code is often rerun again and + again, for instance when prototyping computational-heavy jobs (as in + scientific development), but hand-crafted solutions to alleviate this + issue are error-prone and often lead to unreproducible results. + + * **Persist to disk transparently**: efficiently persisting + arbitrary objects containing large data is hard. Using + joblib's caching mechanism avoids hand-written persistence and + implicitly links the file on disk to the execution context of + the original Python object. As a result, joblib's persistence is + good for resuming an application status or computational job, eg + after a crash. + +Joblib addresses these problems while **leaving your code and your flow +control as unmodified as possible** (no framework, no new paradigms). + +Main features +------------------ + +1) **Transparent and fast disk-caching of output value:** a memoize or + make-like functionality for Python functions that works well for + arbitrary Python objects, including very large numpy arrays. Separate + persistence and flow-execution logic from domain logic or algorithmic + code by writing the operations as a set of steps with well-defined + inputs and outputs: Python functions. Joblib can save their + computation to disk and rerun it only if necessary:: + + >>> from joblib import Memory + >>> cachedir = 'your_cache_dir_goes_here' + >>> mem = Memory(cachedir) + >>> import numpy as np + >>> a = np.vander(np.arange(3)).astype(float) + >>> square = mem.cache(np.square) + >>> b = square(a) # doctest: +ELLIPSIS + ______________________________________________________________________... + [Memory] Calling square... 
+ square(array([[0., 0., 1.], + [1., 1., 1.], + [4., 2., 1.]])) + _________________________________________________...square - ...s, 0.0min + + >>> c = square(a) + >>> # The above call did not trigger an evaluation + +2) **Embarrassingly parallel helper:** to make it easy to write readable + parallel code and debug it quickly:: + + >>> from joblib import Parallel, delayed + >>> from math import sqrt + >>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10)) + [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0] + + +3) **Fast compressed Persistence**: a replacement for pickle to work + efficiently on Python objects containing large data ( + *joblib.dump* & *joblib.load* ). + +.. + >>> import shutil ; shutil.rmtree(cachedir) + +""" + +# PEP0440 compatible formatted version, see: +# https://www.python.org/dev/peps/pep-0440/ +# +# Generic release markers: +# X.Y +# X.Y.Z # For bugfix releases +# +# Admissible pre-release markers: +# X.YaN # Alpha release +# X.YbN # Beta release +# X.YrcN # Release Candidate +# X.Y # Final release +# +# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer. +# 'X.Y.dev0' is the canonical version of 'X.Y.dev' +# +__version__ = '1.4.2' + + +import os + +from .memory import Memory +from .memory import MemorizedResult +from .memory import register_store_backend +from .memory import expires_after + +from .logger import PrintTime +from .logger import Logger + +from .hashing import hash + +from .numpy_pickle import dump +from .numpy_pickle import load + +from .compressor import register_compressor + +from .parallel import Parallel +from .parallel import delayed +from .parallel import cpu_count +from .parallel import register_parallel_backend +from .parallel import parallel_backend +from .parallel import parallel_config +from .parallel import effective_n_jobs +from ._cloudpickle_wrapper import wrap_non_picklable_objects + + +__all__ = ['Memory', 'MemorizedResult', 'PrintTime', 'Logger', 'hash', 'dump', + 'load', 'Parallel', 'delayed', 'cpu_count', 'effective_n_jobs', + 'register_parallel_backend', 'parallel_backend', 'expires_after', + 'register_store_backend', 'register_compressor', + 'wrap_non_picklable_objects', 'parallel_config'] + + +# Workaround issue discovered in intel-openmp 2019.5: +# https://github.com/ContinuumIO/anaconda-issues/issues/11294 +os.environ.setdefault("KMP_INIT_AT_FORK", "FALSE") diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/_dask.py b/llmeval-env/lib/python3.10/site-packages/joblib/_dask.py new file mode 100644 index 0000000000000000000000000000000000000000..4288ed05cd9290a1db19044ea9ceb4ec28974082 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/_dask.py @@ -0,0 +1,379 @@ +from __future__ import print_function, division, absolute_import + +import asyncio +import concurrent.futures +import contextlib + +import time +from uuid import uuid4 +import weakref + +from .parallel import parallel_config +from .parallel import AutoBatchingMixin, ParallelBackendBase + +from ._utils import ( + _TracebackCapturingWrapper, + _retrieve_traceback_capturing_wrapped_call +) + +try: + import dask + import distributed +except ImportError: + dask = None + distributed = None + +if dask is not None and distributed is not None: + from dask.utils import funcname + from dask.sizeof import sizeof + from dask.distributed import ( + Client, + as_completed, + get_client, + secede, + rejoin, + ) + from distributed.utils import thread_state + + try: + # asyncio.TimeoutError, Python3-only error thrown by recent versions of 
+ # distributed + from distributed.utils import TimeoutError as _TimeoutError + except ImportError: + from tornado.gen import TimeoutError as _TimeoutError + + +def is_weakrefable(obj): + try: + weakref.ref(obj) + return True + except TypeError: + return False + + +class _WeakKeyDictionary: + """A variant of weakref.WeakKeyDictionary for unhashable objects. + + This datastructure is used to store futures for broadcasted data objects + such as large numpy arrays or pandas dataframes that are not hashable and + therefore cannot be used as keys of traditional python dicts. + + Furthermore using a dict with id(array) as key is not safe because the + Python is likely to reuse id of recently collected arrays. + """ + + def __init__(self): + self._data = {} + + def __getitem__(self, obj): + ref, val = self._data[id(obj)] + if ref() is not obj: + # In case of a race condition with on_destroy. + raise KeyError(obj) + return val + + def __setitem__(self, obj, value): + key = id(obj) + try: + ref, _ = self._data[key] + if ref() is not obj: + # In case of race condition with on_destroy. + raise KeyError(obj) + except KeyError: + # Insert the new entry in the mapping along with a weakref + # callback to automatically delete the entry from the mapping + # as soon as the object used as key is garbage collected. + def on_destroy(_): + del self._data[key] + ref = weakref.ref(obj, on_destroy) + self._data[key] = ref, value + + def __len__(self): + return len(self._data) + + def clear(self): + self._data.clear() + + +def _funcname(x): + try: + if isinstance(x, list): + x = x[0][0] + except Exception: + pass + return funcname(x) + + +def _make_tasks_summary(tasks): + """Summarize of list of (func, args, kwargs) function calls""" + unique_funcs = {func for func, args, kwargs in tasks} + + if len(unique_funcs) == 1: + mixed = False + else: + mixed = True + return len(tasks), mixed, _funcname(tasks) + + +class Batch: + """dask-compatible wrapper that executes a batch of tasks""" + def __init__(self, tasks): + # collect some metadata from the tasks to ease Batch calls + # introspection when debugging + self._num_tasks, self._mixed, self._funcname = _make_tasks_summary( + tasks + ) + + def __call__(self, tasks=None): + results = [] + with parallel_config(backend='dask'): + for func, args, kwargs in tasks: + results.append(func(*args, **kwargs)) + return results + + def __repr__(self): + descr = f"batch_of_{self._funcname}_{self._num_tasks}_calls" + if self._mixed: + descr = "mixed_" + descr + return descr + + +def _joblib_probe_task(): + # Noop used by the joblib connector to probe when workers are ready. + pass + + +class DaskDistributedBackend(AutoBatchingMixin, ParallelBackendBase): + MIN_IDEAL_BATCH_DURATION = 0.2 + MAX_IDEAL_BATCH_DURATION = 1.0 + supports_retrieve_callback = True + default_n_jobs = -1 + + def __init__(self, scheduler_host=None, scatter=None, + client=None, loop=None, wait_for_workers_timeout=10, + **submit_kwargs): + super().__init__() + + if distributed is None: + msg = ("You are trying to use 'dask' as a joblib parallel backend " + "but dask is not installed. 
Please install dask " + "to fix this error.") + raise ValueError(msg) + + if client is None: + if scheduler_host: + client = Client(scheduler_host, loop=loop, + set_as_default=False) + else: + try: + client = get_client() + except ValueError as e: + msg = ("To use Joblib with Dask first create a Dask Client" + "\n\n" + " from dask.distributed import Client\n" + " client = Client()\n" + "or\n" + " client = Client('scheduler-address:8786')") + raise ValueError(msg) from e + + self.client = client + + if scatter is not None and not isinstance(scatter, (list, tuple)): + raise TypeError("scatter must be a list/tuple, got " + "`%s`" % type(scatter).__name__) + + if scatter is not None and len(scatter) > 0: + # Keep a reference to the scattered data to keep the ids the same + self._scatter = list(scatter) + scattered = self.client.scatter(scatter, broadcast=True) + self.data_futures = {id(x): f for x, f in zip(scatter, scattered)} + else: + self._scatter = [] + self.data_futures = {} + self.wait_for_workers_timeout = wait_for_workers_timeout + self.submit_kwargs = submit_kwargs + self.waiting_futures = as_completed( + [], + loop=client.loop, + with_results=True, + raise_errors=False + ) + self._results = {} + self._callbacks = {} + + async def _collect(self): + while self._continue: + async for future, result in self.waiting_futures: + cf_future = self._results.pop(future) + callback = self._callbacks.pop(future) + if future.status == "error": + typ, exc, tb = result + cf_future.set_exception(exc) + else: + cf_future.set_result(result) + callback(result) + await asyncio.sleep(0.01) + + def __reduce__(self): + return (DaskDistributedBackend, ()) + + def get_nested_backend(self): + return DaskDistributedBackend(client=self.client), -1 + + def configure(self, n_jobs=1, parallel=None, **backend_args): + self.parallel = parallel + return self.effective_n_jobs(n_jobs) + + def start_call(self): + self._continue = True + self.client.loop.add_callback(self._collect) + self.call_data_futures = _WeakKeyDictionary() + + def stop_call(self): + # The explicit call to clear is required to break a cycling reference + # to the futures. + self._continue = False + # wait for the future collection routine (self._backend._collect) to + # finish in order to limit asyncio warnings due to aborting _collect + # during a following backend termination call + time.sleep(0.01) + self.call_data_futures.clear() + + def effective_n_jobs(self, n_jobs): + effective_n_jobs = sum(self.client.ncores().values()) + if effective_n_jobs != 0 or not self.wait_for_workers_timeout: + return effective_n_jobs + + # If there is no worker, schedule a probe task to wait for the workers + # to come up and be available. If the dask cluster is in adaptive mode + # task might cause the cluster to provision some workers. + try: + self.client.submit(_joblib_probe_task).result( + timeout=self.wait_for_workers_timeout + ) + except _TimeoutError as e: + error_msg = ( + "DaskDistributedBackend has no worker after {} seconds. 
" + "Make sure that workers are started and can properly connect " + "to the scheduler and increase the joblib/dask connection " + "timeout with:\n\n" + "parallel_config(backend='dask', wait_for_workers_timeout={})" + ).format(self.wait_for_workers_timeout, + max(10, 2 * self.wait_for_workers_timeout)) + raise TimeoutError(error_msg) from e + return sum(self.client.ncores().values()) + + async def _to_func_args(self, func): + itemgetters = dict() + + # Futures that are dynamically generated during a single call to + # Parallel.__call__. + call_data_futures = getattr(self, 'call_data_futures', None) + + async def maybe_to_futures(args): + out = [] + for arg in args: + arg_id = id(arg) + if arg_id in itemgetters: + out.append(itemgetters[arg_id]) + continue + + f = self.data_futures.get(arg_id, None) + if f is None and call_data_futures is not None: + try: + f = await call_data_futures[arg] + except KeyError: + pass + if f is None: + if is_weakrefable(arg) and sizeof(arg) > 1e3: + # Automatically scatter large objects to some of + # the workers to avoid duplicated data transfers. + # Rely on automated inter-worker data stealing if + # more workers need to reuse this data + # concurrently. + # set hash=False - nested scatter calls (i.e + # calling client.scatter inside a dask worker) + # using hash=True often raise CancelledError, + # see dask/distributed#3703 + _coro = self.client.scatter( + arg, + asynchronous=True, + hash=False + ) + # Centralize the scattering of identical arguments + # between concurrent apply_async callbacks by + # exposing the running coroutine in + # call_data_futures before it completes. + t = asyncio.Task(_coro) + call_data_futures[arg] = t + + f = await t + + if f is not None: + out.append(f) + else: + out.append(arg) + return out + + tasks = [] + for f, args, kwargs in func.items: + args = list(await maybe_to_futures(args)) + kwargs = dict(zip(kwargs.keys(), + await maybe_to_futures(kwargs.values()))) + tasks.append((f, args, kwargs)) + + return (Batch(tasks), tasks) + + def apply_async(self, func, callback=None): + + cf_future = concurrent.futures.Future() + cf_future.get = cf_future.result # achieve AsyncResult API + + async def f(func, callback): + batch, tasks = await self._to_func_args(func) + key = f'{repr(batch)}-{uuid4().hex}' + + dask_future = self.client.submit( + _TracebackCapturingWrapper(batch), + tasks=tasks, + key=key, + **self.submit_kwargs + ) + self.waiting_futures.add(dask_future) + self._callbacks[dask_future] = callback + self._results[dask_future] = cf_future + + self.client.loop.add_callback(f, func, callback) + + return cf_future + + def retrieve_result_callback(self, out): + return _retrieve_traceback_capturing_wrapped_call(out) + + def abort_everything(self, ensure_ready=True): + """ Tell the client to cancel any task submitted via this instance + + joblib.Parallel will never access those results + """ + with self.waiting_futures.lock: + self.waiting_futures.futures.clear() + while not self.waiting_futures.queue.empty(): + self.waiting_futures.queue.get() + + @contextlib.contextmanager + def retrieval_context(self): + """Override ParallelBackendBase.retrieval_context to avoid deadlocks. + + This removes thread from the worker's thread pool (using 'secede'). + Seceding avoids deadlock in nested parallelism settings. + """ + # See 'joblib.Parallel.__call__' and 'joblib.Parallel.retrieve' for how + # this is used. + if hasattr(thread_state, 'execution_state'): + # we are in a worker. Secede to avoid deadlock. 
+ secede() + + yield + + if hasattr(thread_state, 'execution_state'): + rejoin() diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/_store_backends.py b/llmeval-env/lib/python3.10/site-packages/joblib/_store_backends.py new file mode 100644 index 0000000000000000000000000000000000000000..68e207c17d452faf7539fe0185dc0739a569774b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/_store_backends.py @@ -0,0 +1,474 @@ +"""Storage providers backends for Memory caching.""" + +from pickle import PicklingError +import re +import os +import os.path +import datetime +import json +import shutil +import time +import warnings +import collections +import operator +import threading +from abc import ABCMeta, abstractmethod + +from .backports import concurrency_safe_rename +from .disk import mkdirp, memstr_to_bytes, rm_subdirs +from .logger import format_time +from . import numpy_pickle + +CacheItemInfo = collections.namedtuple('CacheItemInfo', + 'path size last_access') + + +class CacheWarning(Warning): + """Warning to capture dump failures except for PicklingError.""" + pass + + +def concurrency_safe_write(object_to_write, filename, write_func): + """Writes an object into a unique file in a concurrency-safe way.""" + thread_id = id(threading.current_thread()) + temporary_filename = '{}.thread-{}-pid-{}'.format( + filename, thread_id, os.getpid()) + write_func(object_to_write, temporary_filename) + + return temporary_filename + + +class StoreBackendBase(metaclass=ABCMeta): + """Helper Abstract Base Class which defines all methods that + a StorageBackend must implement.""" + + location = None + + @abstractmethod + def _open_item(self, f, mode): + """Opens an item on the store and return a file-like object. + + This method is private and only used by the StoreBackendMixin object. + + Parameters + ---------- + f: a file-like object + The file-like object where an item is stored and retrieved + mode: string, optional + the mode in which the file-like object is opened allowed valued are + 'rb', 'wb' + + Returns + ------- + a file-like object + """ + + @abstractmethod + def _item_exists(self, location): + """Checks if an item location exists in the store. + + This method is private and only used by the StoreBackendMixin object. + + Parameters + ---------- + location: string + The location of an item. On a filesystem, this corresponds to the + absolute path, including the filename, of a file. + + Returns + ------- + True if the item exists, False otherwise + """ + + @abstractmethod + def _move_item(self, src, dst): + """Moves an item from src to dst in the store. + + This method is private and only used by the StoreBackendMixin object. + + Parameters + ---------- + src: string + The source location of an item + dst: string + The destination location of an item + """ + + @abstractmethod + def create_location(self, location): + """Creates a location on the store. + + Parameters + ---------- + location: string + The location in the store. On a filesystem, this corresponds to a + directory. + """ + + @abstractmethod + def clear_location(self, location): + """Clears a location on the store. + + Parameters + ---------- + location: string + The location in the store. On a filesystem, this corresponds to a + directory or a filename absolute path + """ + + @abstractmethod + def get_items(self): + """Returns the whole list of items available in the store. + + Returns + ------- + The list of items identified by their ids (e.g filename in a + filesystem). 
+ """ + + @abstractmethod + def configure(self, location, verbose=0, backend_options=dict()): + """Configures the store. + + Parameters + ---------- + location: string + The base location used by the store. On a filesystem, this + corresponds to a directory. + verbose: int + The level of verbosity of the store + backend_options: dict + Contains a dictionary of named parameters used to configure the + store backend. + """ + + +class StoreBackendMixin(object): + """Class providing all logic for managing the store in a generic way. + + The StoreBackend subclass has to implement 3 methods: create_location, + clear_location and configure. The StoreBackend also has to provide + a private _open_item, _item_exists and _move_item methods. The _open_item + method has to have the same signature as the builtin open and return a + file-like object. + """ + + def load_item(self, call_id, verbose=1, timestamp=None, metadata=None): + """Load an item from the store given its id as a list of str.""" + full_path = os.path.join(self.location, *call_id) + + if verbose > 1: + ts_string = ('{: <16}'.format(format_time(time.time() - timestamp)) + if timestamp is not None else '') + signature = os.path.basename(call_id[0]) + if metadata is not None and 'input_args' in metadata: + kwargs = ', '.join('{}={}'.format(*item) + for item in metadata['input_args'].items()) + signature += '({})'.format(kwargs) + msg = '[Memory]{}: Loading {}'.format(ts_string, signature) + if verbose < 10: + print('{0}...'.format(msg)) + else: + print('{0} from {1}'.format(msg, full_path)) + + mmap_mode = (None if not hasattr(self, 'mmap_mode') + else self.mmap_mode) + + filename = os.path.join(full_path, 'output.pkl') + if not self._item_exists(filename): + raise KeyError("Non-existing item (may have been " + "cleared).\nFile %s does not exist" % filename) + + # file-like object cannot be used when mmap_mode is set + if mmap_mode is None: + with self._open_item(filename, "rb") as f: + item = numpy_pickle.load(f) + else: + item = numpy_pickle.load(filename, mmap_mode=mmap_mode) + return item + + def dump_item(self, call_id, item, verbose=1): + """Dump an item in the store at the id given as a list of str.""" + try: + item_path = os.path.join(self.location, *call_id) + if not self._item_exists(item_path): + self.create_location(item_path) + filename = os.path.join(item_path, 'output.pkl') + if verbose > 10: + print('Persisting in %s' % item_path) + + def write_func(to_write, dest_filename): + with self._open_item(dest_filename, "wb") as f: + try: + numpy_pickle.dump(to_write, f, compress=self.compress) + except PicklingError as e: + # TODO(1.5) turn into error + warnings.warn( + "Unable to cache to disk: failed to pickle " + "output. In version 1.5 this will raise an " + f"exception. Exception: {e}.", + FutureWarning + ) + + self._concurrency_safe_write(item, filename, write_func) + except Exception as e: # noqa: E722 + warnings.warn( + "Unable to cache to disk. Possibly a race condition in the " + f"creation of the directory. 
Exception: {e}.", + CacheWarning + ) + + def clear_item(self, call_id): + """Clear the item at the id, given as a list of str.""" + item_path = os.path.join(self.location, *call_id) + if self._item_exists(item_path): + self.clear_location(item_path) + + def contains_item(self, call_id): + """Check if there is an item at the id, given as a list of str.""" + item_path = os.path.join(self.location, *call_id) + filename = os.path.join(item_path, 'output.pkl') + + return self._item_exists(filename) + + def get_item_info(self, call_id): + """Return information about item.""" + return {'location': os.path.join(self.location, *call_id)} + + def get_metadata(self, call_id): + """Return actual metadata of an item.""" + try: + item_path = os.path.join(self.location, *call_id) + filename = os.path.join(item_path, 'metadata.json') + with self._open_item(filename, 'rb') as f: + return json.loads(f.read().decode('utf-8')) + except: # noqa: E722 + return {} + + def store_metadata(self, call_id, metadata): + """Store metadata of a computation.""" + try: + item_path = os.path.join(self.location, *call_id) + self.create_location(item_path) + filename = os.path.join(item_path, 'metadata.json') + + def write_func(to_write, dest_filename): + with self._open_item(dest_filename, "wb") as f: + f.write(json.dumps(to_write).encode('utf-8')) + + self._concurrency_safe_write(metadata, filename, write_func) + except: # noqa: E722 + pass + + def contains_path(self, call_id): + """Check cached function is available in store.""" + func_path = os.path.join(self.location, *call_id) + return self.object_exists(func_path) + + def clear_path(self, call_id): + """Clear all items with a common path in the store.""" + func_path = os.path.join(self.location, *call_id) + if self._item_exists(func_path): + self.clear_location(func_path) + + def store_cached_func_code(self, call_id, func_code=None): + """Store the code of the cached function.""" + func_path = os.path.join(self.location, *call_id) + if not self._item_exists(func_path): + self.create_location(func_path) + + if func_code is not None: + filename = os.path.join(func_path, "func_code.py") + with self._open_item(filename, 'wb') as f: + f.write(func_code.encode('utf-8')) + + def get_cached_func_code(self, call_id): + """Store the code of the cached function.""" + filename = os.path.join(self.location, *call_id, 'func_code.py') + try: + with self._open_item(filename, 'rb') as f: + return f.read().decode('utf-8') + except: # noqa: E722 + raise + + def get_cached_func_info(self, call_id): + """Return information related to the cached function if it exists.""" + return {'location': os.path.join(self.location, *call_id)} + + def clear(self): + """Clear the whole store content.""" + self.clear_location(self.location) + + def enforce_store_limits( + self, bytes_limit, items_limit=None, age_limit=None + ): + """ + Remove the store's oldest files to enforce item, byte, and age limits. + """ + items_to_delete = self._get_items_to_delete( + bytes_limit, items_limit, age_limit + ) + + for item in items_to_delete: + if self.verbose > 10: + print('Deleting item {0}'.format(item)) + try: + self.clear_location(item.path) + except OSError: + # Even with ignore_errors=True shutil.rmtree can raise OSError + # with: + # [Errno 116] Stale file handle if another process has deleted + # the folder already. + pass + + def _get_items_to_delete( + self, bytes_limit, items_limit=None, age_limit=None + ): + """ + Get items to delete to keep the store under size, file, & age limits. 
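# Illustrative sketch (not part of the patched file): the usual entry point
# into the eviction helpers here is Memory.reduce_size(), which forwards its
# limits to enforce_store_limits()/_get_items_to_delete(). The cache
# directory below is hypothetical.
import datetime

from joblib import Memory

mem = Memory('/tmp/joblib_demo_cache', verbose=0)
mem.reduce_size(bytes_limit='1M', items_limit=100,
                age_limit=datetime.timedelta(days=7))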
+ """ + if isinstance(bytes_limit, str): + bytes_limit = memstr_to_bytes(bytes_limit) + + items = self.get_items() + if not items: + return [] + + size = sum(item.size for item in items) + + if bytes_limit is not None: + to_delete_size = size - bytes_limit + else: + to_delete_size = 0 + + if items_limit is not None: + to_delete_items = len(items) - items_limit + else: + to_delete_items = 0 + + if age_limit is not None: + older_item = min(item.last_access for item in items) + deadline = datetime.datetime.now() - age_limit + else: + deadline = None + + if ( + to_delete_size <= 0 and to_delete_items <= 0 + and (deadline is None or older_item > deadline) + ): + return [] + + # We want to delete first the cache items that were accessed a + # long time ago + items.sort(key=operator.attrgetter('last_access')) + + items_to_delete = [] + size_so_far = 0 + items_so_far = 0 + + for item in items: + if ( + (size_so_far >= to_delete_size) + and items_so_far >= to_delete_items + and (deadline is None or deadline < item.last_access) + ): + break + + items_to_delete.append(item) + size_so_far += item.size + items_so_far += 1 + + return items_to_delete + + def _concurrency_safe_write(self, to_write, filename, write_func): + """Writes an object into a file in a concurrency-safe way.""" + temporary_filename = concurrency_safe_write(to_write, + filename, write_func) + self._move_item(temporary_filename, filename) + + def __repr__(self): + """Printable representation of the store location.""" + return '{class_name}(location="{location}")'.format( + class_name=self.__class__.__name__, location=self.location) + + +class FileSystemStoreBackend(StoreBackendBase, StoreBackendMixin): + """A StoreBackend used with local or network file systems.""" + + _open_item = staticmethod(open) + _item_exists = staticmethod(os.path.exists) + _move_item = staticmethod(concurrency_safe_rename) + + def clear_location(self, location): + """Delete location on store.""" + if (location == self.location): + rm_subdirs(location) + else: + shutil.rmtree(location, ignore_errors=True) + + def create_location(self, location): + """Create object location on store""" + mkdirp(location) + + def get_items(self): + """Returns the whole list of items available in the store.""" + items = [] + + for dirpath, _, filenames in os.walk(self.location): + is_cache_hash_dir = re.match('[a-f0-9]{32}', + os.path.basename(dirpath)) + + if is_cache_hash_dir: + output_filename = os.path.join(dirpath, 'output.pkl') + try: + last_access = os.path.getatime(output_filename) + except OSError: + try: + last_access = os.path.getatime(dirpath) + except OSError: + # The directory has already been deleted + continue + + last_access = datetime.datetime.fromtimestamp(last_access) + try: + full_filenames = [os.path.join(dirpath, fn) + for fn in filenames] + dirsize = sum(os.path.getsize(fn) + for fn in full_filenames) + except OSError: + # Either output_filename or one of the files in + # dirpath does not exist any more. We assume this + # directory is being cleaned by another process already + continue + + items.append(CacheItemInfo(dirpath, dirsize, + last_access)) + + return items + + def configure(self, location, verbose=1, backend_options=None): + """Configure the store backend. 
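# Sketch (outside this diff) of extending the store machinery above: a
# FileSystemStoreBackend subclass registered through register_store_backend,
# which joblib/__init__.py earlier in this patch re-exports. The backend
# name, class and cache path are hypothetical.
from joblib import Memory, register_store_backend
from joblib._store_backends import FileSystemStoreBackend


class LoggingFSBackend(FileSystemStoreBackend):
    """Hypothetical backend that traces every cache write."""

    def dump_item(self, call_id, item, verbose=1):
        print('[cache] storing', '/'.join(call_id))
        super().dump_item(call_id, item, verbose=verbose)


register_store_backend('logging_fs', LoggingFSBackend)
mem = Memory('/tmp/joblib_demo_cache', backend='logging_fs', verbose=0)


def square(x):
    return x * x


cached_square = mem.cache(square)
cached_square(3)  # the first call goes through dump_item and prints the trace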
+ + For this backend, valid store options are 'compress' and 'mmap_mode' + """ + if backend_options is None: + backend_options = {} + + # setup location directory + self.location = location + if not os.path.exists(self.location): + mkdirp(self.location) + + # item can be stored compressed for faster I/O + self.compress = backend_options.get('compress', False) + + # FileSystemStoreBackend can be used with mmap_mode options under + # certain conditions. + mmap_mode = backend_options.get('mmap_mode') + if self.compress and mmap_mode is not None: + warnings.warn('Compressed items cannot be memmapped in a ' + 'filesystem store. Option will be ignored.', + stacklevel=2) + + self.mmap_mode = mmap_mode + self.verbose = verbose diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/compressor.py b/llmeval-env/lib/python3.10/site-packages/joblib/compressor.py new file mode 100644 index 0000000000000000000000000000000000000000..0d9e2618a48339e1af8cf2da573fd0af8c96f0b0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/compressor.py @@ -0,0 +1,570 @@ +"""Classes and functions for managing compressors.""" + +import io +import zlib +from joblib.backports import LooseVersion + +try: + from threading import RLock +except ImportError: + from dummy_threading import RLock + +try: + import bz2 +except ImportError: + bz2 = None + +try: + import lz4 + from lz4.frame import LZ4FrameFile +except ImportError: + lz4 = None + +try: + import lzma +except ImportError: + lzma = None + + +LZ4_NOT_INSTALLED_ERROR = ('LZ4 is not installed. Install it with pip: ' + 'https://python-lz4.readthedocs.io/') + +# Registered compressors +_COMPRESSORS = {} + +# Magic numbers of supported compression file formats. +_ZFILE_PREFIX = b'ZF' # used with pickle files created before 0.9.3. +_ZLIB_PREFIX = b'\x78' +_GZIP_PREFIX = b'\x1f\x8b' +_BZ2_PREFIX = b'BZ' +_XZ_PREFIX = b'\xfd\x37\x7a\x58\x5a' +_LZMA_PREFIX = b'\x5d\x00' +_LZ4_PREFIX = b'\x04\x22\x4D\x18' + + +def register_compressor(compressor_name, compressor, + force=False): + """Register a new compressor. + + Parameters + ---------- + compressor_name: str. + The name of the compressor. + compressor: CompressorWrapper + An instance of a 'CompressorWrapper'. + """ + global _COMPRESSORS + if not isinstance(compressor_name, str): + raise ValueError("Compressor name should be a string, " + "'{}' given.".format(compressor_name)) + + if not isinstance(compressor, CompressorWrapper): + raise ValueError("Compressor should implement the CompressorWrapper " + "interface, '{}' given.".format(compressor)) + + if (compressor.fileobj_factory is not None and + (not hasattr(compressor.fileobj_factory, 'read') or + not hasattr(compressor.fileobj_factory, 'write') or + not hasattr(compressor.fileobj_factory, 'seek') or + not hasattr(compressor.fileobj_factory, 'tell'))): + raise ValueError("Compressor 'fileobj_factory' attribute should " + "implement the file object interface, '{}' given." + .format(compressor.fileobj_factory)) + + if compressor_name in _COMPRESSORS and not force: + raise ValueError("Compressor '{}' already registered." + .format(compressor_name)) + + _COMPRESSORS[compressor_name] = compressor + + +class CompressorWrapper(): + """A wrapper around a compressor file object. + + Attributes + ---------- + obj: a file-like object + The object must implement the buffer interface and will be used + internally to compress/decompress the data. 
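# Small standalone sketch of what the magic prefixes above enable:
# joblib.load() sniffs the file header, so the reader never has to name the
# compressor explicitly. File paths are hypothetical.
import joblib

data = {'weights': list(range(1000))}
joblib.dump(data, '/tmp/demo.pkl.gz', compress=('gzip', 3))
joblib.dump(data, '/tmp/demo.pkl.xz', compress=('xz', 3))
assert joblib.load('/tmp/demo.pkl.gz') == joblib.load('/tmp/demo.pkl.xz') == data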
+ prefix: bytestring + A bytestring corresponding to the magic number that identifies the + file format associated to the compressor. + extension: str + The file extension used to automatically select this compressor during + a dump to a file. + """ + + def __init__(self, obj, prefix=b'', extension=''): + self.fileobj_factory = obj + self.prefix = prefix + self.extension = extension + + def compressor_file(self, fileobj, compresslevel=None): + """Returns an instance of a compressor file object.""" + if compresslevel is None: + return self.fileobj_factory(fileobj, 'wb') + else: + return self.fileobj_factory(fileobj, 'wb', + compresslevel=compresslevel) + + def decompressor_file(self, fileobj): + """Returns an instance of a decompressor file object.""" + return self.fileobj_factory(fileobj, 'rb') + + +class BZ2CompressorWrapper(CompressorWrapper): + + prefix = _BZ2_PREFIX + extension = '.bz2' + + def __init__(self): + if bz2 is not None: + self.fileobj_factory = bz2.BZ2File + else: + self.fileobj_factory = None + + def _check_versions(self): + if bz2 is None: + raise ValueError('bz2 module is not compiled on your python ' + 'standard library.') + + def compressor_file(self, fileobj, compresslevel=None): + """Returns an instance of a compressor file object.""" + self._check_versions() + if compresslevel is None: + return self.fileobj_factory(fileobj, 'wb') + else: + return self.fileobj_factory(fileobj, 'wb', + compresslevel=compresslevel) + + def decompressor_file(self, fileobj): + """Returns an instance of a decompressor file object.""" + self._check_versions() + fileobj = self.fileobj_factory(fileobj, 'rb') + return fileobj + + +class LZMACompressorWrapper(CompressorWrapper): + + prefix = _LZMA_PREFIX + extension = '.lzma' + _lzma_format_name = 'FORMAT_ALONE' + + def __init__(self): + if lzma is not None: + self.fileobj_factory = lzma.LZMAFile + self._lzma_format = getattr(lzma, self._lzma_format_name) + else: + self.fileobj_factory = None + + def _check_versions(self): + if lzma is None: + raise ValueError('lzma module is not compiled on your python ' + 'standard library.') + + def compressor_file(self, fileobj, compresslevel=None): + """Returns an instance of a compressor file object.""" + if compresslevel is None: + return self.fileobj_factory(fileobj, 'wb', + format=self._lzma_format) + else: + return self.fileobj_factory(fileobj, 'wb', + format=self._lzma_format, + preset=compresslevel) + + def decompressor_file(self, fileobj): + """Returns an instance of a decompressor file object.""" + return lzma.LZMAFile(fileobj, 'rb') + + +class XZCompressorWrapper(LZMACompressorWrapper): + + prefix = _XZ_PREFIX + extension = '.xz' + _lzma_format_name = 'FORMAT_XZ' + + +class LZ4CompressorWrapper(CompressorWrapper): + + prefix = _LZ4_PREFIX + extension = '.lz4' + + def __init__(self): + if lz4 is not None: + self.fileobj_factory = LZ4FrameFile + else: + self.fileobj_factory = None + + def _check_versions(self): + if lz4 is None: + raise ValueError(LZ4_NOT_INSTALLED_ERROR) + lz4_version = lz4.__version__ + if lz4_version.startswith("v"): + lz4_version = lz4_version[1:] + if LooseVersion(lz4_version) < LooseVersion('0.19'): + raise ValueError(LZ4_NOT_INSTALLED_ERROR) + + def compressor_file(self, fileobj, compresslevel=None): + """Returns an instance of a compressor file object.""" + self._check_versions() + if compresslevel is None: + return self.fileobj_factory(fileobj, 'wb') + else: + return self.fileobj_factory(fileobj, 'wb', + compression_level=compresslevel) + + def decompressor_file(self, 
fileobj): + """Returns an instance of a decompressor file object.""" + self._check_versions() + return self.fileobj_factory(fileobj, 'rb') + + +############################################################################### +# base file compression/decompression object definition +_MODE_CLOSED = 0 +_MODE_READ = 1 +_MODE_READ_EOF = 2 +_MODE_WRITE = 3 +_BUFFER_SIZE = 8192 + + +class BinaryZlibFile(io.BufferedIOBase): + """A file object providing transparent zlib (de)compression. + + TODO python2_drop: is it still needed since we dropped Python 2 support A + BinaryZlibFile can act as a wrapper for an existing file object, or refer + directly to a named file on disk. + + Note that BinaryZlibFile provides only a *binary* file interface: data read + is returned as bytes, and data to be written should be given as bytes. + + This object is an adaptation of the BZ2File object and is compatible with + versions of python >= 2.7. + + If filename is a str or bytes object, it gives the name + of the file to be opened. Otherwise, it should be a file object, + which will be used to read or write the compressed data. + + mode can be 'rb' for reading (default) or 'wb' for (over)writing + + If mode is 'wb', compresslevel can be a number between 1 + and 9 specifying the level of compression: 1 produces the least + compression, and 9 produces the most compression. 3 is the default. + """ + + wbits = zlib.MAX_WBITS + + def __init__(self, filename, mode="rb", compresslevel=3): + # This lock must be recursive, so that BufferedIOBase's + # readline(), readlines() and writelines() don't deadlock. + self._lock = RLock() + self._fp = None + self._closefp = False + self._mode = _MODE_CLOSED + self._pos = 0 + self._size = -1 + self.compresslevel = compresslevel + + if not isinstance(compresslevel, int) or not (1 <= compresslevel <= 9): + raise ValueError("'compresslevel' must be an integer " + "between 1 and 9. You provided 'compresslevel={}'" + .format(compresslevel)) + + if mode == "rb": + self._mode = _MODE_READ + self._decompressor = zlib.decompressobj(self.wbits) + self._buffer = b"" + self._buffer_offset = 0 + elif mode == "wb": + self._mode = _MODE_WRITE + self._compressor = zlib.compressobj(self.compresslevel, + zlib.DEFLATED, self.wbits, + zlib.DEF_MEM_LEVEL, 0) + else: + raise ValueError("Invalid mode: %r" % (mode,)) + + if isinstance(filename, str): + self._fp = io.open(filename, mode) + self._closefp = True + elif hasattr(filename, "read") or hasattr(filename, "write"): + self._fp = filename + else: + raise TypeError("filename must be a str or bytes object, " + "or a file") + + def close(self): + """Flush and close the file. + + May be called more than once without error. Once the file is + closed, any other operation on it will raise a ValueError. 
+ """ + with self._lock: + if self._mode == _MODE_CLOSED: + return + try: + if self._mode in (_MODE_READ, _MODE_READ_EOF): + self._decompressor = None + elif self._mode == _MODE_WRITE: + self._fp.write(self._compressor.flush()) + self._compressor = None + finally: + try: + if self._closefp: + self._fp.close() + finally: + self._fp = None + self._closefp = False + self._mode = _MODE_CLOSED + self._buffer = b"" + self._buffer_offset = 0 + + @property + def closed(self): + """True if this file is closed.""" + return self._mode == _MODE_CLOSED + + def fileno(self): + """Return the file descriptor for the underlying file.""" + self._check_not_closed() + return self._fp.fileno() + + def seekable(self): + """Return whether the file supports seeking.""" + return self.readable() and self._fp.seekable() + + def readable(self): + """Return whether the file was opened for reading.""" + self._check_not_closed() + return self._mode in (_MODE_READ, _MODE_READ_EOF) + + def writable(self): + """Return whether the file was opened for writing.""" + self._check_not_closed() + return self._mode == _MODE_WRITE + + # Mode-checking helper functions. + + def _check_not_closed(self): + if self.closed: + fname = getattr(self._fp, 'name', None) + msg = "I/O operation on closed file" + if fname is not None: + msg += " {}".format(fname) + msg += "." + raise ValueError(msg) + + def _check_can_read(self): + if self._mode not in (_MODE_READ, _MODE_READ_EOF): + self._check_not_closed() + raise io.UnsupportedOperation("File not open for reading") + + def _check_can_write(self): + if self._mode != _MODE_WRITE: + self._check_not_closed() + raise io.UnsupportedOperation("File not open for writing") + + def _check_can_seek(self): + if self._mode not in (_MODE_READ, _MODE_READ_EOF): + self._check_not_closed() + raise io.UnsupportedOperation("Seeking is only supported " + "on files open for reading") + if not self._fp.seekable(): + raise io.UnsupportedOperation("The underlying file object " + "does not support seeking") + + # Fill the readahead buffer if it is empty. Returns False on EOF. + def _fill_buffer(self): + if self._mode == _MODE_READ_EOF: + return False + # Depending on the input data, our call to the decompressor may not + # return any data. In this case, try again after reading another block. + while self._buffer_offset == len(self._buffer): + try: + rawblock = (self._decompressor.unused_data or + self._fp.read(_BUFFER_SIZE)) + if not rawblock: + raise EOFError + except EOFError: + # End-of-stream marker and end of file. We're good. + self._mode = _MODE_READ_EOF + self._size = self._pos + return False + else: + self._buffer = self._decompressor.decompress(rawblock) + self._buffer_offset = 0 + return True + + # Read data until EOF. + # If return_data is false, consume the data without returning it. + def _read_all(self, return_data=True): + # The loop assumes that _buffer_offset is 0. Ensure that this is true. + self._buffer = self._buffer[self._buffer_offset:] + self._buffer_offset = 0 + + blocks = [] + while self._fill_buffer(): + if return_data: + blocks.append(self._buffer) + self._pos += len(self._buffer) + self._buffer = b"" + if return_data: + return b"".join(blocks) + + # Read a block of up to n bytes. + # If return_data is false, consume the data without returning it. + def _read_block(self, n_bytes, return_data=True): + # If we have enough data buffered, return immediately. 
+ end = self._buffer_offset + n_bytes + if end <= len(self._buffer): + data = self._buffer[self._buffer_offset: end] + self._buffer_offset = end + self._pos += len(data) + return data if return_data else None + + # The loop assumes that _buffer_offset is 0. Ensure that this is true. + self._buffer = self._buffer[self._buffer_offset:] + self._buffer_offset = 0 + + blocks = [] + while n_bytes > 0 and self._fill_buffer(): + if n_bytes < len(self._buffer): + data = self._buffer[:n_bytes] + self._buffer_offset = n_bytes + else: + data = self._buffer + self._buffer = b"" + if return_data: + blocks.append(data) + self._pos += len(data) + n_bytes -= len(data) + if return_data: + return b"".join(blocks) + + def read(self, size=-1): + """Read up to size uncompressed bytes from the file. + + If size is negative or omitted, read until EOF is reached. + Returns b'' if the file is already at EOF. + """ + with self._lock: + self._check_can_read() + if size == 0: + return b"" + elif size < 0: + return self._read_all() + else: + return self._read_block(size) + + def readinto(self, b): + """Read up to len(b) bytes into b. + + Returns the number of bytes read (0 for EOF). + """ + with self._lock: + return io.BufferedIOBase.readinto(self, b) + + def write(self, data): + """Write a byte string to the file. + + Returns the number of uncompressed bytes written, which is + always len(data). Note that due to buffering, the file on disk + may not reflect the data written until close() is called. + """ + with self._lock: + self._check_can_write() + # Convert data type if called by io.BufferedWriter. + if isinstance(data, memoryview): + data = data.tobytes() + + compressed = self._compressor.compress(data) + self._fp.write(compressed) + self._pos += len(data) + return len(data) + + # Rewind the file to the beginning of the data stream. + def _rewind(self): + self._fp.seek(0, 0) + self._mode = _MODE_READ + self._pos = 0 + self._decompressor = zlib.decompressobj(self.wbits) + self._buffer = b"" + self._buffer_offset = 0 + + def seek(self, offset, whence=0): + """Change the file position. + + The new position is specified by offset, relative to the + position indicated by whence. Values for whence are: + + 0: start of stream (default); offset must not be negative + 1: current stream position + 2: end of stream; offset must not be positive + + Returns the new file position. + + Note that seeking is emulated, so depending on the parameters, + this operation may be extremely slow. + """ + with self._lock: + self._check_can_seek() + + # Recalculate offset as an absolute file position. + if whence == 0: + pass + elif whence == 1: + offset = self._pos + offset + elif whence == 2: + # Seeking relative to EOF - we need to know the file's size. + if self._size < 0: + self._read_all(return_data=False) + offset = self._size + offset + else: + raise ValueError("Invalid value for whence: %s" % (whence,)) + + # Make it so that offset is the number of bytes to skip forward. + if offset < self._pos: + self._rewind() + else: + offset -= self._pos + + # Read and discard data until we reach the desired position. 
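# Minimal round-trip sketch for the BinaryZlibFile class above; it exposes a
# binary-only interface, so bytes go in and come out. The path is
# hypothetical.
from joblib.compressor import BinaryZlibFile

payload = b'x' * 10_000
with BinaryZlibFile('/tmp/demo_payload.z', 'wb', compresslevel=5) as f:
    f.write(payload)
with BinaryZlibFile('/tmp/demo_payload.z', 'rb') as f:
    assert f.read() == payload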
+ self._read_block(offset, return_data=False) + + return self._pos + + def tell(self): + """Return the current file position.""" + with self._lock: + self._check_not_closed() + return self._pos + + +class ZlibCompressorWrapper(CompressorWrapper): + + def __init__(self): + CompressorWrapper.__init__(self, obj=BinaryZlibFile, + prefix=_ZLIB_PREFIX, extension='.z') + + +class BinaryGzipFile(BinaryZlibFile): + """A file object providing transparent gzip (de)compression. + + If filename is a str or bytes object, it gives the name + of the file to be opened. Otherwise, it should be a file object, + which will be used to read or write the compressed data. + + mode can be 'rb' for reading (default) or 'wb' for (over)writing + + If mode is 'wb', compresslevel can be a number between 1 + and 9 specifying the level of compression: 1 produces the least + compression, and 9 produces the most compression. 3 is the default. + """ + + wbits = 31 # zlib compressor/decompressor wbits value for gzip format. + + +class GzipCompressorWrapper(CompressorWrapper): + + def __init__(self): + CompressorWrapper.__init__(self, obj=BinaryGzipFile, + prefix=_GZIP_PREFIX, extension='.gz') diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/disk.py b/llmeval-env/lib/python3.10/site-packages/joblib/disk.py new file mode 100644 index 0000000000000000000000000000000000000000..32fbb89f6dc6c9c7df532c5fefa14934f16321f6 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/disk.py @@ -0,0 +1,136 @@ +""" +Disk management utilities. +""" + +# Authors: Gael Varoquaux +# Lars Buitinck +# Copyright (c) 2010 Gael Varoquaux +# License: BSD Style, 3 clauses. + + +import os +import sys +import time +import errno +import shutil + +from multiprocessing import util + + +try: + WindowsError +except NameError: + WindowsError = OSError + + +def disk_used(path): + """ Return the disk usage in a directory.""" + size = 0 + for file in os.listdir(path) + ['.']: + stat = os.stat(os.path.join(path, file)) + if hasattr(stat, 'st_blocks'): + size += stat.st_blocks * 512 + else: + # on some platform st_blocks is not available (e.g., Windows) + # approximate by rounding to next multiple of 512 + size += (stat.st_size // 512 + 1) * 512 + # We need to convert to int to avoid having longs on some systems (we + # don't want longs to avoid problems we SQLite) + return int(size / 1024.) + + +def memstr_to_bytes(text): + """ Convert a memory text to its value in bytes. + """ + kilo = 1024 + units = dict(K=kilo, M=kilo ** 2, G=kilo ** 3) + try: + size = int(units[text[-1]] * float(text[:-1])) + except (KeyError, ValueError) as e: + raise ValueError( + "Invalid literal for size give: %s (type %s) should be " + "alike '10G', '500M', '50K'." % (text, type(text))) from e + return size + + +def mkdirp(d): + """Ensure directory d exists (like mkdir -p on Unix) + No guarantee that the directory is writable. + """ + try: + os.makedirs(d) + except OSError as e: + if e.errno != errno.EEXIST: + raise + + +# if a rmtree operation fails in rm_subdirs, wait for this much time (in secs), +# then retry up to RM_SUBDIRS_N_RETRY times. If it still fails, raise the +# exception. this mechanism ensures that the sub-process gc have the time to +# collect and close the memmaps before we fail. +RM_SUBDIRS_RETRY_TIME = 0.1 +RM_SUBDIRS_N_RETRY = 10 + + +def rm_subdirs(path, onerror=None): + """Remove all subdirectories in this path. + + The directory indicated by `path` is left in place, and its subdirectories + are erased. 
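# Quick check of memstr_to_bytes() defined above; the units are binary, so
# '1K' means 1024 bytes.
from joblib.disk import memstr_to_bytes

assert memstr_to_bytes('10K') == 10 * 1024
assert memstr_to_bytes('500M') == 500 * 1024 ** 2
assert memstr_to_bytes('2G') == 2 * 1024 ** 3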
+ + If onerror is set, it is called to handle the error with arguments (func, + path, exc_info) where func is os.listdir, os.remove, or os.rmdir; + path is the argument to that function that caused it to fail; and + exc_info is a tuple returned by sys.exc_info(). If onerror is None, + an exception is raised. + """ + + # NOTE this code is adapted from the one in shutil.rmtree, and is + # just as fast + + names = [] + try: + names = os.listdir(path) + except os.error: + if onerror is not None: + onerror(os.listdir, path, sys.exc_info()) + else: + raise + + for name in names: + fullname = os.path.join(path, name) + delete_folder(fullname, onerror=onerror) + + +def delete_folder(folder_path, onerror=None, allow_non_empty=True): + """Utility function to cleanup a temporary folder if it still exists.""" + if os.path.isdir(folder_path): + if onerror is not None: + shutil.rmtree(folder_path, False, onerror) + else: + # allow the rmtree to fail once, wait and re-try. + # if the error is raised again, fail + err_count = 0 + while True: + files = os.listdir(folder_path) + try: + if len(files) == 0 or allow_non_empty: + shutil.rmtree( + folder_path, ignore_errors=False, onerror=None + ) + util.debug( + "Successfully deleted {}".format(folder_path)) + break + else: + raise OSError( + "Expected empty folder {} but got {} " + "files.".format(folder_path, len(files)) + ) + except (OSError, WindowsError): + err_count += 1 + if err_count > RM_SUBDIRS_N_RETRY: + # the folder cannot be deleted right now. It maybe + # because some temporary files have not been deleted + # yet. + raise + time.sleep(RM_SUBDIRS_RETRY_TIME) diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/executor.py b/llmeval-env/lib/python3.10/site-packages/joblib/executor.py new file mode 100644 index 0000000000000000000000000000000000000000..6837a7d147411cd74034a078ff98cab916ec36ce --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/executor.py @@ -0,0 +1,117 @@ +"""Utility function to construct a loky.ReusableExecutor with custom pickler. + +This module provides efficient ways of working with data stored in +shared memory with numpy.memmap arrays without inducing any memory +copy between the parent and child processes. +""" +# Author: Thomas Moreau +# Copyright: 2017, Thomas Moreau +# License: BSD 3 clause + +from ._memmapping_reducer import get_memmapping_reducers +from ._memmapping_reducer import TemporaryResourcesManager +from .externals.loky.reusable_executor import _ReusablePoolExecutor + + +_executor_args = None + + +def get_memmapping_executor(n_jobs, **kwargs): + return MemmappingExecutor.get_memmapping_executor(n_jobs, **kwargs) + + +class MemmappingExecutor(_ReusablePoolExecutor): + + @classmethod + def get_memmapping_executor(cls, n_jobs, timeout=300, initializer=None, + initargs=(), env=None, temp_folder=None, + context_id=None, **backend_args): + """Factory for ReusableExecutor with automatic memmapping for large + numpy arrays. + """ + global _executor_args + # Check if we can reuse the executor here instead of deferring the test + # to loky as the reducers are objects that changes at each call. 
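# Sketch (outside this diff) of the user-facing route into
# get_memmapping_executor(): Parallel's default loky backend builds this
# executor internally and memmaps inputs larger than max_nbytes. The 1M
# threshold below is deliberately small to force memmapping in a demo.
import numpy as np
from joblib import Parallel, delayed

big = np.random.rand(1_000_000)
sums = Parallel(n_jobs=2, max_nbytes='1M')(
    delayed(np.sum)(big) for _ in range(4))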
+ executor_args = backend_args.copy() + executor_args.update(env if env else {}) + executor_args.update(dict( + timeout=timeout, initializer=initializer, initargs=initargs)) + reuse = _executor_args is None or _executor_args == executor_args + _executor_args = executor_args + + manager = TemporaryResourcesManager(temp_folder) + + # reducers access the temporary folder in which to store temporary + # pickles through a call to manager.resolve_temp_folder_name. resolving + # the folder name dynamically is useful to use different folders across + # calls of a same reusable executor + job_reducers, result_reducers = get_memmapping_reducers( + unlink_on_gc_collect=True, + temp_folder_resolver=manager.resolve_temp_folder_name, + **backend_args) + _executor, executor_is_reused = super().get_reusable_executor( + n_jobs, job_reducers=job_reducers, result_reducers=result_reducers, + reuse=reuse, timeout=timeout, initializer=initializer, + initargs=initargs, env=env + ) + + if not executor_is_reused: + # Only set a _temp_folder_manager for new executors. Reused + # executors already have a _temporary_folder_manager that must not + # be re-assigned like that because it is referenced in various + # places in the reducing machinery of the executor. + _executor._temp_folder_manager = manager + + if context_id is not None: + # Only register the specified context once we know which manager + # the current executor is using, in order to not register an atexit + # finalizer twice for the same folder. + _executor._temp_folder_manager.register_new_context(context_id) + + return _executor + + def terminate(self, kill_workers=False): + + self.shutdown(kill_workers=kill_workers) + + # When workers are killed in a brutal manner, they cannot execute the + # finalizer of their shared memmaps. The refcount of those memmaps may + # be off by an unknown number, so instead of decref'ing them, we force + # delete the whole temporary folder, and unregister them. There is no + # risk of PermissionError at folder deletion because at this + # point, all child processes are dead, so all references to temporary + # memmaps are closed. Otherwise, just try to delete as much as possible + # with allow_non_empty=True but if we can't, it will be clean up later + # on by the resource_tracker. + with self._submit_resize_lock: + self._temp_folder_manager._clean_temporary_resources( + force=kill_workers, allow_non_empty=True + ) + + @property + def _temp_folder(self): + # Legacy property in tests. could be removed if we refactored the + # memmapping tests. SHOULD ONLY BE USED IN TESTS! + # We cache this property because it is called late in the tests - at + # this point, all context have been unregistered, and + # resolve_temp_folder_name raises an error. + if getattr(self, '_cached_temp_folder', None) is not None: + return self._cached_temp_folder + else: + self._cached_temp_folder = self._temp_folder_manager.resolve_temp_folder_name() # noqa + return self._cached_temp_folder + + +class _TestingMemmappingExecutor(MemmappingExecutor): + """Wrapper around ReusableExecutor to ease memmapping testing with Pool + and Executor. This is only for testing purposes. 
+ + """ + def apply_async(self, func, args): + """Schedule a func to be run""" + future = self.submit(func, *args) + future.get = future.result + return future + + def map(self, f, *args): + return list(super().map(f, *args)) diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/hashing.py b/llmeval-env/lib/python3.10/site-packages/joblib/hashing.py new file mode 100644 index 0000000000000000000000000000000000000000..6c081f06997ae3b6c18fc76b286ef95e008f23cc --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/hashing.py @@ -0,0 +1,265 @@ +""" +Fast cryptographic hash of Python objects, with a special case for fast +hashing of numpy arrays. +""" + +# Author: Gael Varoquaux +# Copyright (c) 2009 Gael Varoquaux +# License: BSD Style, 3 clauses. + +import pickle +import hashlib +import sys +import types +import struct +import io +import decimal + + +Pickler = pickle._Pickler + + +class _ConsistentSet(object): + """ Class used to ensure the hash of Sets is preserved + whatever the order of its items. + """ + def __init__(self, set_sequence): + # Forces order of elements in set to ensure consistent hash. + try: + # Trying first to order the set assuming the type of elements is + # consistent and orderable. + # This fails on python 3 when elements are unorderable + # but we keep it in a try as it's faster. + self._sequence = sorted(set_sequence) + except (TypeError, decimal.InvalidOperation): + # If elements are unorderable, sorting them using their hash. + # This is slower but works in any case. + self._sequence = sorted((hash(e) for e in set_sequence)) + + +class _MyHash(object): + """ Class used to hash objects that won't normally pickle """ + + def __init__(self, *args): + self.args = args + + +class Hasher(Pickler): + """ A subclass of pickler, to do cryptographic hashing, rather than + pickling. + """ + + def __init__(self, hash_name='md5'): + self.stream = io.BytesIO() + # By default we want a pickle protocol that only changes with + # the major python version and not the minor one + protocol = 3 + Pickler.__init__(self, self.stream, protocol=protocol) + # Initialise the hash obj + self._hash = hashlib.new(hash_name) + + def hash(self, obj, return_digest=True): + try: + self.dump(obj) + except pickle.PicklingError as e: + e.args += ('PicklingError while hashing %r: %r' % (obj, e),) + raise + dumps = self.stream.getvalue() + self._hash.update(dumps) + if return_digest: + return self._hash.hexdigest() + + def save(self, obj): + if isinstance(obj, (types.MethodType, type({}.pop))): + # the Pickler cannot pickle instance methods; here we decompose + # them into components that make them uniquely identifiable + if hasattr(obj, '__func__'): + func_name = obj.__func__.__name__ + else: + func_name = obj.__name__ + inst = obj.__self__ + if type(inst) is type(pickle): + obj = _MyHash(func_name, inst.__name__) + elif inst is None: + # type(None) or type(module) do not pickle + obj = _MyHash(func_name, inst) + else: + cls = obj.__self__.__class__ + obj = _MyHash(func_name, inst, cls) + Pickler.save(self, obj) + + def memoize(self, obj): + # We want hashing to be sensitive to value instead of reference. + # For example we want ['aa', 'aa'] and ['aa', 'aaZ'[:2]] + # to hash to the same value and that's why we disable memoization + # for strings + if isinstance(obj, (bytes, str)): + return + Pickler.memoize(self, obj) + + # The dispatch table of the pickler is not accessible in Python + # 3, as these lines are only bugware for IPython, we skip them. 
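# Quick check of the memoize() note above: hashing is value-sensitive for
# strings, so equal content gives equal hashes even across distinct objects.
from joblib import hash as joblib_hash

assert joblib_hash(['aa', 'aa']) == joblib_hash(['aa', 'aaZ'[:2]])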
+ def save_global(self, obj, name=None, pack=struct.pack): + # We have to override this method in order to deal with objects + # defined interactively in IPython that are not injected in + # __main__ + kwargs = dict(name=name, pack=pack) + del kwargs['pack'] + try: + Pickler.save_global(self, obj, **kwargs) + except pickle.PicklingError: + Pickler.save_global(self, obj, **kwargs) + module = getattr(obj, "__module__", None) + if module == '__main__': + my_name = name + if my_name is None: + my_name = obj.__name__ + mod = sys.modules[module] + if not hasattr(mod, my_name): + # IPython doesn't inject the variables define + # interactively in __main__ + setattr(mod, my_name, obj) + + dispatch = Pickler.dispatch.copy() + # builtin + dispatch[type(len)] = save_global + # type + dispatch[type(object)] = save_global + # classobj + dispatch[type(Pickler)] = save_global + # function + dispatch[type(pickle.dump)] = save_global + + def _batch_setitems(self, items): + # forces order of keys in dict to ensure consistent hash. + try: + # Trying first to compare dict assuming the type of keys is + # consistent and orderable. + # This fails on python 3 when keys are unorderable + # but we keep it in a try as it's faster. + Pickler._batch_setitems(self, iter(sorted(items))) + except TypeError: + # If keys are unorderable, sorting them using their hash. This is + # slower but works in any case. + Pickler._batch_setitems(self, iter(sorted((hash(k), v) + for k, v in items))) + + def save_set(self, set_items): + # forces order of items in Set to ensure consistent hash + Pickler.save(self, _ConsistentSet(set_items)) + + dispatch[type(set())] = save_set + + +class NumpyHasher(Hasher): + """ Special case the hasher for when numpy is loaded. + """ + + def __init__(self, hash_name='md5', coerce_mmap=False): + """ + Parameters + ---------- + hash_name: string + The hash algorithm to be used + coerce_mmap: boolean + Make no difference between np.memmap and np.ndarray + objects. + """ + self.coerce_mmap = coerce_mmap + Hasher.__init__(self, hash_name=hash_name) + # delayed import of numpy, to avoid tight coupling + import numpy as np + self.np = np + if hasattr(np, 'getbuffer'): + self._getbuffer = np.getbuffer + else: + self._getbuffer = memoryview + + def save(self, obj): + """ Subclass the save method, to hash ndarray subclass, rather + than pickling them. Off course, this is a total abuse of + the Pickler class. + """ + if isinstance(obj, self.np.ndarray) and not obj.dtype.hasobject: + # Compute a hash of the object + # The update function of the hash requires a c_contiguous buffer. + if obj.shape == (): + # 0d arrays need to be flattened because viewing them as bytes + # raises a ValueError exception. + obj_c_contiguous = obj.flatten() + elif obj.flags.c_contiguous: + obj_c_contiguous = obj + elif obj.flags.f_contiguous: + obj_c_contiguous = obj.T + else: + # Cater for non-single-segment arrays: this creates a + # copy, and thus alleviates this issue. + # XXX: There might be a more efficient way of doing this + obj_c_contiguous = obj.flatten() + + # memoryview is not supported for some dtypes, e.g. datetime64, see + # https://github.com/numpy/numpy/issues/4983. The + # workaround is to view the array as bytes before + # taking the memoryview. + self._hash.update( + self._getbuffer(obj_c_contiguous.view(self.np.uint8))) + + # We store the class, to be able to distinguish between + # Objects with the same binary content, but different + # classes. 
+ if self.coerce_mmap and isinstance(obj, self.np.memmap): + # We don't make the difference between memmap and + # normal ndarrays, to be able to reload previously + # computed results with memmap. + klass = self.np.ndarray + else: + klass = obj.__class__ + # We also return the dtype and the shape, to distinguish + # different views on the same data with different dtypes. + + # The object will be pickled by the pickler hashed at the end. + obj = (klass, ('HASHED', obj.dtype, obj.shape, obj.strides)) + elif isinstance(obj, self.np.dtype): + # numpy.dtype consistent hashing is tricky to get right. This comes + # from the fact that atomic np.dtype objects are interned: + # ``np.dtype('f4') is np.dtype('f4')``. The situation is + # complicated by the fact that this interning does not resist a + # simple pickle.load/dump roundtrip: + # ``pickle.loads(pickle.dumps(np.dtype('f4'))) is not + # np.dtype('f4') Because pickle relies on memoization during + # pickling, it is easy to + # produce different hashes for seemingly identical objects, such as + # ``[np.dtype('f4'), np.dtype('f4')]`` + # and ``[np.dtype('f4'), pickle.loads(pickle.dumps('f4'))]``. + # To prevent memoization from interfering with hashing, we isolate + # the serialization (and thus the pickle memoization) of each dtype + # using each time a different ``pickle.dumps`` call unrelated to + # the current Hasher instance. + self._hash.update("_HASHED_DTYPE".encode('utf-8')) + self._hash.update(pickle.dumps(obj)) + return + Hasher.save(self, obj) + + +def hash(obj, hash_name='md5', coerce_mmap=False): + """ Quick calculation of a hash to identify uniquely Python objects + containing numpy arrays. + + Parameters + ---------- + hash_name: 'md5' or 'sha1' + Hashing algorithm used. sha1 is supposedly safer, but md5 is + faster. + coerce_mmap: boolean + Make no difference between np.memmap and np.ndarray + """ + valid_hash_names = ('md5', 'sha1') + if hash_name not in valid_hash_names: + raise ValueError("Valid options for 'hash_name' are {}. " + "Got hash_name={!r} instead." + .format(valid_hash_names, hash_name)) + if 'numpy' in sys.modules: + hasher = NumpyHasher(hash_name=hash_name, coerce_mmap=coerce_mmap) + else: + hasher = Hasher(hash_name=hash_name) + return hasher.hash(obj) diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/numpy_pickle_compat.py b/llmeval-env/lib/python3.10/site-packages/joblib/numpy_pickle_compat.py new file mode 100644 index 0000000000000000000000000000000000000000..32612849bdb8320990f0433d3ded42898046c913 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/numpy_pickle_compat.py @@ -0,0 +1,244 @@ +"""Numpy pickle compatibility functions.""" + +import pickle +import os +import zlib +import inspect + +from io import BytesIO + +from .numpy_pickle_utils import _ZFILE_PREFIX +from .numpy_pickle_utils import Unpickler +from .numpy_pickle_utils import _ensure_native_byte_order + + +def hex_str(an_int): + """Convert an int to an hexadecimal string.""" + return '{:#x}'.format(an_int) + + +def asbytes(s): + if isinstance(s, bytes): + return s + return s.encode('latin1') + + +_MAX_LEN = len(hex_str(2 ** 64)) +_CHUNK_SIZE = 64 * 1024 + + +def read_zfile(file_handle): + """Read the z-file and return the content as a string. + + Z-files are raw data compressed with zlib used internally by joblib + for persistence. Backward compatibility is not guaranteed. Do not + use for external purposes. 
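+ + The payload length stored in the header is used to size the zlib + decompression buffer exactly when reading the data back.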
+ """ + file_handle.seek(0) + header_length = len(_ZFILE_PREFIX) + _MAX_LEN + length = file_handle.read(header_length) + length = length[len(_ZFILE_PREFIX):] + length = int(length, 16) + + # With python2 and joblib version <= 0.8.4 compressed pickle header is one + # character wider so we need to ignore an additional space if present. + # Note: the first byte of the zlib data is guaranteed not to be a + # space according to + # https://tools.ietf.org/html/rfc6713#section-2.1 + next_byte = file_handle.read(1) + if next_byte != b' ': + # The zlib compressed data has started and we need to go back + # one byte + file_handle.seek(header_length) + + # We use the known length of the data to tell Zlib the size of the + # buffer to allocate. + data = zlib.decompress(file_handle.read(), 15, length) + assert len(data) == length, ( + "Incorrect data length while decompressing %s." + "The file could be corrupted." % file_handle) + return data + + +def write_zfile(file_handle, data, compress=1): + """Write the data in the given file as a Z-file. + + Z-files are raw data compressed with zlib used internally by joblib + for persistence. Backward compatibility is not guaranteed. Do not + use for external purposes. + """ + file_handle.write(_ZFILE_PREFIX) + length = hex_str(len(data)) + # Store the length of the data + file_handle.write(asbytes(length.ljust(_MAX_LEN))) + file_handle.write(zlib.compress(asbytes(data), compress)) + +############################################################################### +# Utility objects for persistence. + + +class NDArrayWrapper(object): + """An object to be persisted instead of numpy arrays. + + The only thing this object does, is to carry the filename in which + the array has been persisted, and the array subclass. + """ + + def __init__(self, filename, subclass, allow_mmap=True): + """Constructor. Store the useful information for later.""" + self.filename = filename + self.subclass = subclass + self.allow_mmap = allow_mmap + + def read(self, unpickler): + """Reconstruct the array.""" + filename = os.path.join(unpickler._dirname, self.filename) + # Load the array from the disk + # use getattr instead of self.allow_mmap to ensure backward compat + # with NDArrayWrapper instances pickled with joblib < 0.9.0 + allow_mmap = getattr(self, 'allow_mmap', True) + kwargs = {} + if allow_mmap: + kwargs['mmap_mode'] = unpickler.mmap_mode + if "allow_pickle" in inspect.signature(unpickler.np.load).parameters: + # Required in numpy 1.16.3 and later to aknowledge the security + # risk. + kwargs["allow_pickle"] = True + array = unpickler.np.load(filename, **kwargs) + + # Detect byte order mismatch and swap as needed. + array = _ensure_native_byte_order(array) + + # Reconstruct subclasses. This does not work with old + # versions of numpy + if (hasattr(array, '__array_prepare__') and + self.subclass not in (unpickler.np.ndarray, + unpickler.np.memmap)): + # We need to reconstruct another subclass + new_array = unpickler.np.core.multiarray._reconstruct( + self.subclass, (0,), 'b') + return new_array.__array_prepare__(array) + else: + return array + + +class ZNDArrayWrapper(NDArrayWrapper): + """An object to be persisted instead of numpy arrays. + + This object store the Zfile filename in which + the data array has been persisted, and the meta information to + retrieve it. 
+ The reason that we store the raw buffer data of the array and + the meta information, rather than array representation routine + (tobytes) is that it enables us to use completely the strided + model to avoid memory copies (a and a.T store as fast). In + addition saving the heavy information separately can avoid + creating large temporary buffers when unpickling data with + large arrays. + """ + + def __init__(self, filename, init_args, state): + """Constructor. Store the useful information for later.""" + self.filename = filename + self.state = state + self.init_args = init_args + + def read(self, unpickler): + """Reconstruct the array from the meta-information and the z-file.""" + # Here we a simply reproducing the unpickling mechanism for numpy + # arrays + filename = os.path.join(unpickler._dirname, self.filename) + array = unpickler.np.core.multiarray._reconstruct(*self.init_args) + with open(filename, 'rb') as f: + data = read_zfile(f) + state = self.state + (data,) + array.__setstate__(state) + return array + + +class ZipNumpyUnpickler(Unpickler): + """A subclass of the Unpickler to unpickle our numpy pickles.""" + + dispatch = Unpickler.dispatch.copy() + + def __init__(self, filename, file_handle, mmap_mode=None): + """Constructor.""" + self._filename = os.path.basename(filename) + self._dirname = os.path.dirname(filename) + self.mmap_mode = mmap_mode + self.file_handle = self._open_pickle(file_handle) + Unpickler.__init__(self, self.file_handle) + try: + import numpy as np + except ImportError: + np = None + self.np = np + + def _open_pickle(self, file_handle): + return BytesIO(read_zfile(file_handle)) + + def load_build(self): + """Set the state of a newly created object. + + We capture it to replace our place-holder objects, + NDArrayWrapper, by the array we are interested in. We + replace them directly in the stack of pickler. + """ + Unpickler.load_build(self) + if isinstance(self.stack[-1], NDArrayWrapper): + if self.np is None: + raise ImportError("Trying to unpickle an ndarray, " + "but numpy didn't import correctly") + nd_array_wrapper = self.stack.pop() + array = nd_array_wrapper.read(self) + self.stack.append(array) + + dispatch[pickle.BUILD[0]] = load_build + + +def load_compatibility(filename): + """Reconstruct a Python object from a file persisted with joblib.dump. + + This function ensures the compatibility with joblib old persistence format + (<= 0.9.3). + + Parameters + ---------- + filename: string + The name of the file from which to load the object + + Returns + ------- + result: any Python object + The object stored in the file. + + See Also + -------- + joblib.dump : function to save an object + + Notes + ----- + + This function can load numpy array files saved separately during the + dump. + """ + with open(filename, 'rb') as file_handle: + # We are careful to open the file handle early and keep it open to + # avoid race-conditions on renames. That said, if data is stored in + # companion files, moving the directory will create a race when + # joblib tries to access the companion files. + unpickler = ZipNumpyUnpickler(filename, file_handle=file_handle) + try: + obj = unpickler.load() + except UnicodeDecodeError as exc: + # More user-friendly error message + new_exc = ValueError( + 'You may be trying to read with ' + 'python 3 a joblib pickle generated with python 2. 
' + 'This feature is not supported by joblib.') + new_exc.__cause__ = exc + raise new_exc + finally: + if hasattr(unpickler, 'file_handle'): + unpickler.file_handle.close() + return obj diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/__init__.py b/llmeval-env/lib/python3.10/site-packages/joblib/test/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_cloudpickle_wrapper.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_cloudpickle_wrapper.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ad57ba1007170aee51ac1d79cc80a9a94738c225 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_cloudpickle_wrapper.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_config.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..152e49b2ae5ed59cd246eccaec8f04d4512aaf9f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_config.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_func_inspect.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_func_inspect.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b6bf1460f24acf5fca74c90053a0446dad6be516 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_func_inspect.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_func_inspect_special_encoding.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_func_inspect_special_encoding.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0f79945da4d52b479de014809a58eb9da9757940 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_func_inspect_special_encoding.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_hashing.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_hashing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5fc844c4f8f9caef2103505026a499cefa1228a2 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_hashing.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_logger.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_logger.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9f884118f879752b1074ac108f25bfc62d6ef15d Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_logger.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_memmapping.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_memmapping.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7d3699c9c3f336a3c38f7e4f8050cde2d0b0104e Binary files /dev/null 
and b/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_memmapping.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_memory_async.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_memory_async.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8ada3d65fcb1072e93856691289e411410e56eff Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_memory_async.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_missing_multiprocessing.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_missing_multiprocessing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7aa8654b2f18501ec1b7c459e4f3987a37c4ca6d Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_missing_multiprocessing.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_numpy_pickle.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_numpy_pickle.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f54f186e0945eb161fd1b2082defc5b661a50bf9 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_numpy_pickle.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_numpy_pickle_compat.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_numpy_pickle_compat.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..40c79949f1f0a4ed3b9d7b0daa353585fc16220b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_numpy_pickle_compat.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_numpy_pickle_utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_numpy_pickle_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a84dc4ff963194d7dfd93fb06f4a579a973a37f4 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_numpy_pickle_utils.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_store_backends.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_store_backends.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3a47423b8cbabf0ac6242e929e8e402a81f276be Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_store_backends.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_testing.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_testing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0db2671733f1f835a6b74699fdae56ae439a3825 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_testing.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_utils.cpython-310.pyc 
b/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..875989ee3dbb2f5b4951f739f7d85c95d20c801d Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_utils.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/data/create_numpy_pickle.py b/llmeval-env/lib/python3.10/site-packages/joblib/test/data/create_numpy_pickle.py new file mode 100644 index 0000000000000000000000000000000000000000..ba903d6cc2cd75879eed60ff31ecdf7ffe230d45 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/test/data/create_numpy_pickle.py @@ -0,0 +1,95 @@ +""" +This script is used to generate test data for joblib/test/test_numpy_pickle.py +""" + +import sys +import re + +# pytest needs to be able to import this module even when numpy is +# not installed +try: + import numpy as np +except ImportError: + np = None + +import joblib + + +def get_joblib_version(joblib_version=joblib.__version__): + """Normalize joblib version by removing suffix. + + >>> get_joblib_version('0.8.4') + '0.8.4' + >>> get_joblib_version('0.8.4b1') + '0.8.4' + >>> get_joblib_version('0.9.dev0') + '0.9' + """ + matches = [re.match(r'(\d+).*', each) + for each in joblib_version.split('.')] + return '.'.join([m.group(1) for m in matches if m is not None]) + + +def write_test_pickle(to_pickle, args): + kwargs = {} + compress = args.compress + method = args.method + joblib_version = get_joblib_version() + py_version = '{0[0]}{0[1]}'.format(sys.version_info) + numpy_version = ''.join(np.__version__.split('.')[:2]) + + # The game here is to generate the right filename according to the options. + body = '_compressed' if (compress and method == 'zlib') else '' + if compress: + if method == 'zlib': + kwargs['compress'] = True + extension = '.gz' + else: + kwargs['compress'] = (method, 3) + extension = '.pkl.{}'.format(method) + if args.cache_size: + kwargs['cache_size'] = 0 + body += '_cache_size' + else: + extension = '.pkl' + + pickle_filename = 'joblib_{}{}_pickle_py{}_np{}{}'.format( + joblib_version, body, py_version, numpy_version, extension) + + try: + joblib.dump(to_pickle, pickle_filename, **kwargs) + except Exception as e: + # With old python version (=< 3.3.), we can arrive there when + # dumping compressed pickle with LzmaFile. + print("Error: cannot generate file '{}' with arguments '{}'. " + "Error was: {}".format(pickle_filename, kwargs, e)) + else: + print("File '{}' generated successfully.".format(pickle_filename)) + + +if __name__ == '__main__': + import argparse + parser = argparse.ArgumentParser(description="Joblib pickle data " + "generator.") + parser.add_argument('--cache_size', action="store_true", + help="Force creation of companion numpy " + "files for pickled arrays.") + parser.add_argument('--compress', action="store_true", + help="Generate compress pickles.") + parser.add_argument('--method', type=str, default='zlib', + choices=['zlib', 'gzip', 'bz2', 'xz', 'lzma', 'lz4'], + help="Set compression method.") + # We need to be specific about dtypes in particular endianness + # because the pickles can be generated on one architecture and + # the tests run on another one. See + # https://github.com/joblib/joblib/issues/279. 
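+ # e.g. prefer dtypes with an explicit byte order (little- or big-endian, + # as in np.dtype('>i8')) over the platform-dependent native 'int64'.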
+ to_pickle = [np.arange(5, dtype=np.dtype(' 0 + + +@with_numpy +@with_multiprocessing +def test_parallel_config_params_explicit_set(tmpdir): + with parallel_config(n_jobs=3, max_nbytes=1, temp_folder=tmpdir): + with Parallel(n_jobs=2, prefer="processes", max_nbytes='1M') as p: + assert isinstance(p._backend, LokyBackend) + assert p.n_jobs == 2 + + # Checks that memmapping is disabled + with raises(TypeError, match="Expected np.memmap instance"): + p(delayed(check_memmap)(a) for a in [np.random.random(10)] * 2) + + +@parametrize("param", ["prefer", "require"]) +def test_parallel_config_bad_params(param): + # Check that an error is raised when setting a wrong backend + # hint or constraint + with raises(ValueError, match=f"{param}=wrong is not a valid"): + with parallel_config(**{param: "wrong"}): + Parallel() + + +def test_parallel_config_constructor_params(): + # Check that an error is raised when backend is None + # but backend constructor params are given + with raises(ValueError, match="only supported when backend is not None"): + with parallel_config(inner_max_num_threads=1): + pass + + with raises(ValueError, match="only supported when backend is not None"): + with parallel_config(backend_param=1): + pass + + +def test_parallel_config_nested(): + # Check that nested configuration retrieves the info from the + # parent config and do not reset them. + + with parallel_config(n_jobs=2): + p = Parallel() + assert isinstance(p._backend, BACKENDS[DEFAULT_BACKEND]) + assert p.n_jobs == 2 + + with parallel_config(backend='threading'): + with parallel_config(n_jobs=2): + p = Parallel() + assert isinstance(p._backend, ThreadingBackend) + assert p.n_jobs == 2 + + with parallel_config(verbose=100): + with parallel_config(n_jobs=2): + p = Parallel() + assert p.verbose == 100 + assert p.n_jobs == 2 + + +@with_numpy +@with_multiprocessing +@parametrize('backend', ['multiprocessing', 'threading', + MultiprocessingBackend(), ThreadingBackend()]) +@parametrize("context", [parallel_config, parallel_backend]) +def test_threadpool_limitation_in_child_context_error(context, backend): + + with raises(AssertionError, match=r"does not acc.*inner_max_num_threads"): + context(backend, inner_max_num_threads=1) + + +@parametrize("context", [parallel_config, parallel_backend]) +def test_parallel_n_jobs_none(context): + # Check that n_jobs=None is interpreted as "unset" in Parallel + # non regression test for #1473 + with context(backend="threading", n_jobs=2): + with Parallel(n_jobs=None) as p: + assert p.n_jobs == 2 + + with context(backend="threading"): + default_n_jobs = Parallel().n_jobs + with Parallel(n_jobs=None) as p: + assert p.n_jobs == default_n_jobs + + +@parametrize("context", [parallel_config, parallel_backend]) +def test_parallel_config_n_jobs_none(context): + # Check that n_jobs=None is interpreted as "explicitly set" in + # parallel_(config/backend) + # non regression test for #1473 + with context(backend="threading", n_jobs=2): + with context(backend="threading", n_jobs=None): + # n_jobs=None resets n_jobs to backend's default + with Parallel() as p: + assert p.n_jobs == 1 diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/test_dask.py b/llmeval-env/lib/python3.10/site-packages/joblib/test/test_dask.py new file mode 100644 index 0000000000000000000000000000000000000000..aebe65525fc55f4593e6c83b5bfd4ffc76d945a7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/test/test_dask.py @@ -0,0 +1,499 @@ +from __future__ import print_function, division, absolute_import 
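+# Tests for the dask.distributed backend; they are skipped as a whole when +# dask or distributed cannot be imported (see the importorskip calls below).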
+import os +import warnings + +import pytest +from random import random +from uuid import uuid4 +from time import sleep + +from .. import Parallel, delayed, parallel_config +from ..parallel import ThreadingBackend, AutoBatchingMixin +from .._dask import DaskDistributedBackend + +distributed = pytest.importorskip('distributed') +dask = pytest.importorskip('dask') + +# These imports need to be after the pytest.importorskip hence the noqa: E402 +from distributed import Client, LocalCluster, get_client # noqa: E402 +from distributed.metrics import time # noqa: E402 +# Note: pytest requires to manually import all fixtures used in the test +# and their dependencies. +from distributed.utils_test import cluster, inc, cleanup # noqa: E402, F401 + + +def noop(*args, **kwargs): + pass + + +def slow_raise_value_error(condition, duration=0.05): + sleep(duration) + if condition: + raise ValueError("condition evaluated to True") + + +def count_events(event_name, client): + worker_events = client.run(lambda dask_worker: dask_worker.log) + event_counts = {} + for w, events in worker_events.items(): + event_counts[w] = len([event for event in list(events) + if event[1] == event_name]) + return event_counts + + +def test_simple(loop): + with cluster() as (s, [a, b]): + with Client(s['address'], loop=loop) as client: # noqa: F841 + with parallel_config(backend='dask'): + seq = Parallel()(delayed(inc)(i) for i in range(10)) + assert seq == [inc(i) for i in range(10)] + + with pytest.raises(ValueError): + Parallel()(delayed(slow_raise_value_error)(i == 3) + for i in range(10)) + + seq = Parallel()(delayed(inc)(i) for i in range(10)) + assert seq == [inc(i) for i in range(10)] + + +def test_dask_backend_uses_autobatching(loop): + assert (DaskDistributedBackend.compute_batch_size + is AutoBatchingMixin.compute_batch_size) + + with cluster() as (s, [a, b]): + with Client(s['address'], loop=loop) as client: # noqa: F841 + with parallel_config(backend='dask'): + with Parallel() as parallel: + # The backend should be initialized with a default + # batch size of 1: + backend = parallel._backend + assert isinstance(backend, DaskDistributedBackend) + assert backend.parallel is parallel + assert backend._effective_batch_size == 1 + + # Launch many short tasks that should trigger + # auto-batching: + parallel( + delayed(lambda: None)() + for _ in range(int(1e4)) + ) + assert backend._effective_batch_size > 10 + + +def random2(): + return random() + + +def test_dont_assume_function_purity(loop): + with cluster() as (s, [a, b]): + with Client(s['address'], loop=loop) as client: # noqa: F841 + with parallel_config(backend='dask'): + x, y = Parallel()(delayed(random2)() for i in range(2)) + assert x != y + + +@pytest.mark.parametrize("mixed", [True, False]) +def test_dask_funcname(loop, mixed): + from joblib._dask import Batch + if not mixed: + tasks = [delayed(inc)(i) for i in range(4)] + batch_repr = 'batch_of_inc_4_calls' + else: + tasks = [ + delayed(abs)(i) if i % 2 else delayed(inc)(i) for i in range(4) + ] + batch_repr = 'mixed_batch_of_inc_4_calls' + + assert repr(Batch(tasks)) == batch_repr + + with cluster() as (s, [a, b]): + with Client(s['address'], loop=loop) as client: + with parallel_config(backend='dask'): + _ = Parallel(batch_size=2, pre_dispatch='all')(tasks) + + def f(dask_scheduler): + return list(dask_scheduler.transition_log) + batch_repr = batch_repr.replace('4', '2') + log = client.run_on_scheduler(f) + assert all('batch_of_inc' in tup[0] for tup in log) + + +def 
test_no_undesired_distributed_cache_hit(): + # Dask has a pickle cache for callables that are called many times. Because + # the dask backends used to wrap both the functions and the arguments + # under instances of the Batch callable class this caching mechanism could + # lead to bugs as described in: https://github.com/joblib/joblib/pull/1055 + # The joblib-dask backend has been refactored to avoid bundling the + # arguments as an attribute of the Batch instance to avoid this problem. + # This test serves as non-regression problem. + + # Use a large number of input arguments to give the AutoBatchingMixin + # enough tasks to kick-in. + lists = [[] for _ in range(100)] + np = pytest.importorskip('numpy') + X = np.arange(int(1e6)) + + def isolated_operation(list_, data=None): + if data is not None: + np.testing.assert_array_equal(data, X) + list_.append(uuid4().hex) + return list_ + + cluster = LocalCluster(n_workers=1, threads_per_worker=2) + client = Client(cluster) + try: + with parallel_config(backend='dask'): + # dispatches joblib.parallel.BatchedCalls + res = Parallel()( + delayed(isolated_operation)(list_) for list_ in lists + ) + + # The original arguments should not have been mutated as the mutation + # happens in the dask worker process. + assert lists == [[] for _ in range(100)] + + # Here we did not pass any large numpy array as argument to + # isolated_operation so no scattering event should happen under the + # hood. + counts = count_events('receive-from-scatter', client) + assert sum(counts.values()) == 0 + assert all([len(r) == 1 for r in res]) + + with parallel_config(backend='dask'): + # Append a large array which will be scattered by dask, and + # dispatch joblib._dask.Batch + res = Parallel()( + delayed(isolated_operation)(list_, data=X) for list_ in lists + ) + + # This time, auto-scattering should have kicked it. + counts = count_events('receive-from-scatter', client) + assert sum(counts.values()) > 0 + assert all([len(r) == 1 for r in res]) + finally: + client.close(timeout=30) + cluster.close(timeout=30) + + +class CountSerialized(object): + def __init__(self, x): + self.x = x + self.count = 0 + + def __add__(self, other): + return self.x + getattr(other, 'x', other) + + __radd__ = __add__ + + def __reduce__(self): + self.count += 1 + return (CountSerialized, (self.x,)) + + +def add5(a, b, c, d=0, e=0): + return a + b + c + d + e + + +def test_manual_scatter(loop): + x = CountSerialized(1) + y = CountSerialized(2) + z = CountSerialized(3) + + with cluster() as (s, [a, b]): + with Client(s['address'], loop=loop) as client: # noqa: F841 + with parallel_config(backend='dask', scatter=[x, y]): + f = delayed(add5) + tasks = [f(x, y, z, d=4, e=5), + f(x, z, y, d=5, e=4), + f(y, x, z, d=x, e=5), + f(z, z, x, d=z, e=y)] + expected = [func(*args, **kwargs) + for func, args, kwargs in tasks] + results = Parallel()(tasks) + + # Scatter must take a list/tuple + with pytest.raises(TypeError): + with parallel_config(backend='dask', loop=loop, scatter=1): + pass + + assert results == expected + + # Scattered variables only serialized once + assert x.count == 1 + assert y.count == 1 + # Depending on the version of distributed, the unscattered z variable + # is either pickled 4 or 6 times, possibly because of the memoization + # of objects that appear several times in the arguments of a delayed + # task. + assert z.count in (4, 6) + + +# When the same IOLoop is used for multiple clients in a row, use +# loop_in_thread instead of loop to prevent the Client from closing it. 
See +# dask/distributed #4112 +def test_auto_scatter(loop_in_thread): + np = pytest.importorskip('numpy') + data1 = np.ones(int(1e4), dtype=np.uint8) + data2 = np.ones(int(1e4), dtype=np.uint8) + data_to_process = ([data1] * 3) + ([data2] * 3) + + with cluster() as (s, [a, b]): + with Client(s['address'], loop=loop_in_thread) as client: + with parallel_config(backend='dask'): + # Passing the same data as arg and kwarg triggers a single + # scatter operation whose result is reused. + Parallel()(delayed(noop)(data, data, i, opt=data) + for i, data in enumerate(data_to_process)) + # By default large array are automatically scattered with + # broadcast=1 which means that one worker must directly receive + # the data from the scatter operation once. + counts = count_events('receive-from-scatter', client) + assert counts[a['address']] + counts[b['address']] == 2 + + with cluster() as (s, [a, b]): + with Client(s['address'], loop=loop_in_thread) as client: + with parallel_config(backend='dask'): + Parallel()(delayed(noop)(data1[:3], i) for i in range(5)) + # Small arrays are passed within the task definition without going + # through a scatter operation. + counts = count_events('receive-from-scatter', client) + assert counts[a['address']] == 0 + assert counts[b['address']] == 0 + + +@pytest.mark.parametrize("retry_no", list(range(2))) +def test_nested_scatter(loop, retry_no): + + np = pytest.importorskip('numpy') + + NUM_INNER_TASKS = 10 + NUM_OUTER_TASKS = 10 + + def my_sum(x, i, j): + return np.sum(x) + + def outer_function_joblib(array, i): + client = get_client() # noqa + with parallel_config(backend="dask"): + results = Parallel()( + delayed(my_sum)(array[j:], i, j) for j in range( + NUM_INNER_TASKS) + ) + return sum(results) + + with cluster() as (s, [a, b]): + with Client(s['address'], loop=loop) as _: + with parallel_config(backend="dask"): + my_array = np.ones(10000) + _ = Parallel()( + delayed(outer_function_joblib)( + my_array[i:], i) for i in range(NUM_OUTER_TASKS) + ) + + +def test_nested_backend_context_manager(loop_in_thread): + def get_nested_pids(): + pids = set(Parallel(n_jobs=2)(delayed(os.getpid)() for _ in range(2))) + pids |= set(Parallel(n_jobs=2)(delayed(os.getpid)() for _ in range(2))) + return pids + + with cluster() as (s, [a, b]): + with Client(s['address'], loop=loop_in_thread) as client: + with parallel_config(backend='dask'): + pid_groups = Parallel(n_jobs=2)( + delayed(get_nested_pids)() + for _ in range(10) + ) + for pid_group in pid_groups: + assert len(set(pid_group)) <= 2 + + # No deadlocks + with Client(s['address'], loop=loop_in_thread) as client: # noqa: F841 + with parallel_config(backend='dask'): + pid_groups = Parallel(n_jobs=2)( + delayed(get_nested_pids)() + for _ in range(10) + ) + for pid_group in pid_groups: + assert len(set(pid_group)) <= 2 + + +def test_nested_backend_context_manager_implicit_n_jobs(loop): + # Check that Parallel with no explicit n_jobs value automatically selects + # all the dask workers, including in nested calls. 
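+ # With the dask backend, n_jobs == -1 means "use all the workers of the + # connected cluster"; the assertions below check that this implicit value + # also propagates to Parallel calls nested inside dask tasks.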
+ + def _backend_type(p): + return p._backend.__class__.__name__ + + def get_nested_implicit_n_jobs(): + with Parallel() as p: + return _backend_type(p), p.n_jobs + + with cluster() as (s, [a, b]): + with Client(s['address'], loop=loop) as client: # noqa: F841 + with parallel_config(backend='dask'): + with Parallel() as p: + assert _backend_type(p) == "DaskDistributedBackend" + assert p.n_jobs == -1 + all_nested_n_jobs = p( + delayed(get_nested_implicit_n_jobs)() + for _ in range(2) + ) + for backend_type, nested_n_jobs in all_nested_n_jobs: + assert backend_type == "DaskDistributedBackend" + assert nested_n_jobs == -1 + + +def test_errors(loop): + with pytest.raises(ValueError) as info: + with parallel_config(backend='dask'): + pass + + assert "create a dask client" in str(info.value).lower() + + +def test_correct_nested_backend(loop): + with cluster() as (s, [a, b]): + with Client(s['address'], loop=loop) as client: # noqa: F841 + # No requirement, should be us + with parallel_config(backend='dask'): + result = Parallel(n_jobs=2)( + delayed(outer)(nested_require=None) for _ in range(1)) + assert isinstance(result[0][0][0], DaskDistributedBackend) + + # Require threads, should be threading + with parallel_config(backend='dask'): + result = Parallel(n_jobs=2)( + delayed(outer)(nested_require='sharedmem') + for _ in range(1)) + assert isinstance(result[0][0][0], ThreadingBackend) + + +def outer(nested_require): + return Parallel(n_jobs=2, prefer='threads')( + delayed(middle)(nested_require) for _ in range(1) + ) + + +def middle(require): + return Parallel(n_jobs=2, require=require)( + delayed(inner)() for _ in range(1) + ) + + +def inner(): + return Parallel()._backend + + +def test_secede_with_no_processes(loop): + # https://github.com/dask/distributed/issues/1775 + with Client(loop=loop, processes=False, set_as_default=True): + with parallel_config(backend='dask'): + Parallel(n_jobs=4)(delayed(id)(i) for i in range(2)) + + +def _worker_address(_): + from distributed import get_worker + return get_worker().address + + +def test_dask_backend_keywords(loop): + with cluster() as (s, [a, b]): + with Client(s['address'], loop=loop) as client: # noqa: F841 + with parallel_config(backend='dask', workers=a['address']): + seq = Parallel()( + delayed(_worker_address)(i) for i in range(10)) + assert seq == [a['address']] * 10 + + with parallel_config(backend='dask', workers=b['address']): + seq = Parallel()( + delayed(_worker_address)(i) for i in range(10)) + assert seq == [b['address']] * 10 + + +def test_scheduler_tasks_cleanup(loop): + with Client(processes=False, loop=loop) as client: + with parallel_config(backend='dask'): + Parallel()(delayed(inc)(i) for i in range(10)) + + start = time() + while client.cluster.scheduler.tasks: + sleep(0.01) + assert time() < start + 5 + + assert not client.futures + + +@pytest.mark.parametrize("cluster_strategy", ["adaptive", "late_scaling"]) +@pytest.mark.skipif( + distributed.__version__ <= '2.1.1' and distributed.__version__ >= '1.28.0', + reason="distributed bug - https://github.com/dask/distributed/pull/2841") +def test_wait_for_workers(cluster_strategy): + cluster = LocalCluster(n_workers=0, processes=False, threads_per_worker=2) + client = Client(cluster) + if cluster_strategy == "adaptive": + cluster.adapt(minimum=0, maximum=2) + elif cluster_strategy == "late_scaling": + # Tell the cluster to start workers but this is a non-blocking call + # and new workers might take time to connect. 
In this case the Parallel + # call should wait for at least one worker to come up before starting + # to schedule work. + cluster.scale(2) + try: + with parallel_config(backend='dask'): + # The following should wait a bit for at least one worker to + # become available. + Parallel()(delayed(inc)(i) for i in range(10)) + finally: + client.close() + cluster.close() + + +def test_wait_for_workers_timeout(): + # Start a cluster with 0 worker: + cluster = LocalCluster(n_workers=0, processes=False, threads_per_worker=2) + client = Client(cluster) + try: + with parallel_config(backend='dask', wait_for_workers_timeout=0.1): + # Short timeout: DaskDistributedBackend + msg = "DaskDistributedBackend has no worker after 0.1 seconds." + with pytest.raises(TimeoutError, match=msg): + Parallel()(delayed(inc)(i) for i in range(10)) + + with parallel_config(backend='dask', wait_for_workers_timeout=0): + # No timeout: fallback to generic joblib failure: + msg = "DaskDistributedBackend has no active worker" + with pytest.raises(RuntimeError, match=msg): + Parallel()(delayed(inc)(i) for i in range(10)) + finally: + client.close() + cluster.close() + + +@pytest.mark.parametrize("backend", ["loky", "multiprocessing"]) +def test_joblib_warning_inside_dask_daemonic_worker(backend): + cluster = LocalCluster(n_workers=2) + client = Client(cluster) + try: + + def func_using_joblib_parallel(): + # Somehow trying to check the warning type here (e.g. with + # pytest.warns(UserWarning)) make the test hang. Work-around: + # return the warning record to the client and the warning check is + # done client-side. + with warnings.catch_warnings(record=True) as record: + Parallel(n_jobs=2, backend=backend)( + delayed(inc)(i) for i in range(10)) + + return record + + fut = client.submit(func_using_joblib_parallel) + record = fut.result() + + assert len(record) == 1 + warning = record[0].message + assert isinstance(warning, UserWarning) + assert "distributed.worker.daemon" in str(warning) + finally: + client.close(timeout=30) + cluster.close(timeout=30) diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/test_disk.py b/llmeval-env/lib/python3.10/site-packages/joblib/test/test_disk.py new file mode 100644 index 0000000000000000000000000000000000000000..b825a8b3a5c18a3114f34ed9d7c90cce62799085 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/test/test_disk.py @@ -0,0 +1,71 @@ +""" +Unit tests for the disk utilities. +""" + +# Authors: Gael Varoquaux +# Lars Buitinck +# Copyright (c) 2010 Gael Varoquaux +# License: BSD Style, 3 clauses. + +from __future__ import with_statement +import array +import os + +from joblib.disk import disk_used, memstr_to_bytes, mkdirp, rm_subdirs +from joblib.testing import parametrize, raises + +############################################################################### + + +def test_disk_used(tmpdir): + cachedir = tmpdir.strpath + # Not write a file that is 1M big in this directory, and check the + # size. The reason we use such a big file is that it makes us robust + # to errors due to block allocation. 
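+ # Build a ~1 MiB array of C ints, dump it to a file in the cache dir and + # check that disk_used (which reports usage in kiB) lands close to the + # 1024 kiB target.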
+ a = array.array('i') + sizeof_i = a.itemsize + target_size = 1024 + n = int(target_size * 1024 / sizeof_i) + a = array.array('i', n * (1,)) + with open(os.path.join(cachedir, 'test'), 'wb') as output: + a.tofile(output) + assert disk_used(cachedir) >= target_size + assert disk_used(cachedir) < target_size + 12 + + +@parametrize('text,value', + [('80G', 80 * 1024 ** 3), + ('1.4M', int(1.4 * 1024 ** 2)), + ('120M', 120 * 1024 ** 2), + ('53K', 53 * 1024)]) +def test_memstr_to_bytes(text, value): + assert memstr_to_bytes(text) == value + + +@parametrize('text,exception,regex', + [('fooG', ValueError, r'Invalid literal for size.*fooG.*'), + ('1.4N', ValueError, r'Invalid literal for size.*1.4N.*')]) +def test_memstr_to_bytes_exception(text, exception, regex): + with raises(exception) as excinfo: + memstr_to_bytes(text) + assert excinfo.match(regex) + + +def test_mkdirp(tmpdir): + mkdirp(os.path.join(tmpdir.strpath, 'ham')) + mkdirp(os.path.join(tmpdir.strpath, 'ham')) + mkdirp(os.path.join(tmpdir.strpath, 'spam', 'spam')) + + # Not all OSErrors are ignored + with raises(OSError): + mkdirp('') + + +def test_rm_subdirs(tmpdir): + sub_path = os.path.join(tmpdir.strpath, "am", "stram") + full_path = os.path.join(sub_path, "gram") + mkdirp(os.path.join(full_path)) + + rm_subdirs(sub_path) + assert os.path.exists(sub_path) + assert not os.path.exists(full_path) diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/test_func_inspect.py b/llmeval-env/lib/python3.10/site-packages/joblib/test/test_func_inspect.py new file mode 100644 index 0000000000000000000000000000000000000000..dba237d48578e5d6386e67e80f3e6d31761108d6 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/test/test_func_inspect.py @@ -0,0 +1,310 @@ +""" +Test the func_inspect module. +""" + +# Author: Gael Varoquaux +# Copyright (c) 2009 Gael Varoquaux +# License: BSD Style, 3 clauses. + +import functools + +from joblib.func_inspect import filter_args, get_func_name, get_func_code +from joblib.func_inspect import _clean_win_chars, format_signature +from joblib.memory import Memory +from joblib.test.common import with_numpy +from joblib.testing import fixture, parametrize, raises + + +############################################################################### +# Module-level functions and fixture, for tests +def f(x, y=0): + pass + + +def g(x): + pass + + +def h(x, y=0, *args, **kwargs): + pass + + +def i(x=1): + pass + + +def j(x, y, **kwargs): + pass + + +def k(*args, **kwargs): + pass + + +def m1(x, *, y): + pass + + +def m2(x, *, y, z=3): + pass + + +@fixture(scope='module') +def cached_func(tmpdir_factory): + # Create a Memory object to test decorated functions. + # We should be careful not to call the decorated functions, so that + # cache directories are not created in the temp dir. 
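+ # tmpdir_factory.mktemp returns a fresh unique directory, so this Memory + # cache is isolated from the caches used by other tests.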
+ cachedir = tmpdir_factory.mktemp("joblib_test_func_inspect") + mem = Memory(cachedir.strpath) + + @mem.cache + def cached_func_inner(x): + return x + + return cached_func_inner + + +class Klass(object): + + def f(self, x): + return x + + +############################################################################### +# Tests + +@parametrize('func,args,filtered_args', + [(f, [[], (1, )], {'x': 1, 'y': 0}), + (f, [['x'], (1, )], {'y': 0}), + (f, [['y'], (0, )], {'x': 0}), + (f, [['y'], (0, ), {'y': 1}], {'x': 0}), + (f, [['x', 'y'], (0, )], {}), + (f, [[], (0,), {'y': 1}], {'x': 0, 'y': 1}), + (f, [['y'], (), {'x': 2, 'y': 1}], {'x': 2}), + (g, [[], (), {'x': 1}], {'x': 1}), + (i, [[], (2, )], {'x': 2})]) +def test_filter_args(func, args, filtered_args): + assert filter_args(func, *args) == filtered_args + + +def test_filter_args_method(): + obj = Klass() + assert filter_args(obj.f, [], (1, )) == {'x': 1, 'self': obj} + + +@parametrize('func,args,filtered_args', + [(h, [[], (1, )], + {'x': 1, 'y': 0, '*': [], '**': {}}), + (h, [[], (1, 2, 3, 4)], + {'x': 1, 'y': 2, '*': [3, 4], '**': {}}), + (h, [[], (1, 25), {'ee': 2}], + {'x': 1, 'y': 25, '*': [], '**': {'ee': 2}}), + (h, [['*'], (1, 2, 25), {'ee': 2}], + {'x': 1, 'y': 2, '**': {'ee': 2}})]) +def test_filter_varargs(func, args, filtered_args): + assert filter_args(func, *args) == filtered_args + + +test_filter_kwargs_extra_params = [ + (m1, [[], (1,), {'y': 2}], {'x': 1, 'y': 2}), + (m2, [[], (1,), {'y': 2}], {'x': 1, 'y': 2, 'z': 3}) +] + + +@parametrize('func,args,filtered_args', + [(k, [[], (1, 2), {'ee': 2}], + {'*': [1, 2], '**': {'ee': 2}}), + (k, [[], (3, 4)], + {'*': [3, 4], '**': {}})] + + test_filter_kwargs_extra_params) +def test_filter_kwargs(func, args, filtered_args): + assert filter_args(func, *args) == filtered_args + + +def test_filter_args_2(): + assert (filter_args(j, [], (1, 2), {'ee': 2}) == + {'x': 1, 'y': 2, '**': {'ee': 2}}) + + ff = functools.partial(f, 1) + # filter_args has to special-case partial + assert filter_args(ff, [], (1, )) == {'*': [1], '**': {}} + assert filter_args(ff, ['y'], (1, )) == {'*': [1], '**': {}} + + +@parametrize('func,funcname', [(f, 'f'), (g, 'g'), + (cached_func, 'cached_func')]) +def test_func_name(func, funcname): + # Check that we are not confused by decoration + # here testcase 'cached_func' is the function itself + assert get_func_name(func)[1] == funcname + + +def test_func_name_on_inner_func(cached_func): + # Check that we are not confused by decoration + # here testcase 'cached_func' is the 'cached_func_inner' function + # returned by 'cached_func' fixture + assert get_func_name(cached_func)[1] == 'cached_func_inner' + + +def test_func_name_collision_on_inner_func(): + # Check that two functions defining and caching an inner function + # with the same do not cause (module, name) collision + def f(): + def inner_func(): + return # pragma: no cover + return get_func_name(inner_func) + + def g(): + def inner_func(): + return # pragma: no cover + return get_func_name(inner_func) + + module, name = f() + other_module, other_name = g() + + assert name == other_name + assert module != other_module + + +def test_func_inspect_errors(): + # Check that func_inspect is robust and will work on weird objects + assert get_func_name('a'.lower)[-1] == 'lower' + assert get_func_code('a'.lower)[1:] == (None, -1) + ff = lambda x: x # noqa: E731 + assert get_func_name(ff, win_characters=False)[-1] == '' + assert get_func_code(ff)[1] == __file__.replace('.pyc', '.py') + # Simulate a function 
defined in __main__ + ff.__module__ = '__main__' + assert get_func_name(ff, win_characters=False)[-1] == '' + assert get_func_code(ff)[1] == __file__.replace('.pyc', '.py') + + +def func_with_kwonly_args(a, b, *, kw1='kw1', kw2='kw2'): + pass + + +def func_with_signature(a: int, b: int) -> None: + pass + + +def test_filter_args_edge_cases(): + assert ( + filter_args(func_with_kwonly_args, [], (1, 2), + {'kw1': 3, 'kw2': 4}) == + {'a': 1, 'b': 2, 'kw1': 3, 'kw2': 4}) + + # filter_args doesn't care about keyword-only arguments so you + # can pass 'kw1' into *args without any problem + with raises(ValueError) as excinfo: + filter_args(func_with_kwonly_args, [], (1, 2, 3), {'kw2': 2}) + excinfo.match("Keyword-only parameter 'kw1' was passed as positional " + "parameter") + + assert ( + filter_args(func_with_kwonly_args, ['b', 'kw2'], (1, 2), + {'kw1': 3, 'kw2': 4}) == + {'a': 1, 'kw1': 3}) + + assert (filter_args(func_with_signature, ['b'], (1, 2)) == {'a': 1}) + + +def test_bound_methods(): + """ Make sure that calling the same method on two different instances + of the same class does resolv to different signatures. + """ + a = Klass() + b = Klass() + assert filter_args(a.f, [], (1, )) != filter_args(b.f, [], (1, )) + + +@parametrize('exception,regex,func,args', + [(ValueError, 'ignore_lst must be a list of parameters to ignore', + f, ['bar', (None, )]), + (ValueError, r'Ignore list: argument \'(.*)\' is not defined', + g, [['bar'], (None, )]), + (ValueError, 'Wrong number of arguments', + h, [[]])]) +def test_filter_args_error_msg(exception, regex, func, args): + """ Make sure that filter_args returns decent error messages, for the + sake of the user. + """ + with raises(exception) as excinfo: + filter_args(func, *args) + excinfo.match(regex) + + +def test_filter_args_no_kwargs_mutation(): + """None-regression test against 0.12.0 changes. + + https://github.com/joblib/joblib/pull/75 + + Make sure filter args doesn't mutate the kwargs dict that gets passed in. + """ + kwargs = {'x': 0} + filter_args(g, [], [], kwargs) + assert kwargs == {'x': 0} + + +def test_clean_win_chars(): + string = r'C:\foo\bar\main.py' + mangled_string = _clean_win_chars(string) + for char in ('\\', ':', '<', '>', '!'): + assert char not in mangled_string + + +@parametrize('func,args,kwargs,sgn_expected', + [(g, [list(range(5))], {}, 'g([0, 1, 2, 3, 4])'), + (k, [1, 2, (3, 4)], {'y': True}, 'k(1, 2, (3, 4), y=True)')]) +def test_format_signature(func, args, kwargs, sgn_expected): + # Test signature formatting. + path, sgn_result = format_signature(func, *args, **kwargs) + assert sgn_result == sgn_expected + + +def test_format_signature_long_arguments(): + shortening_threshold = 1500 + # shortening gets it down to 700 characters but there is the name + # of the function in the signature and a few additional things + # like dots for the ellipsis + shortening_target = 700 + 10 + + arg = 'a' * shortening_threshold + _, signature = format_signature(h, arg) + assert len(signature) < shortening_target + + nb_args = 5 + args = [arg for _ in range(nb_args)] + _, signature = format_signature(h, *args) + assert len(signature) < shortening_target * nb_args + + kwargs = {str(i): arg for i, arg in enumerate(args)} + _, signature = format_signature(h, **kwargs) + assert len(signature) < shortening_target * nb_args + + _, signature = format_signature(h, *args, **kwargs) + assert len(signature) < shortening_target * 2 * nb_args + + +@with_numpy +def test_format_signature_numpy(): + """ Test the format signature formatting with numpy. 
+ """ + + +def test_special_source_encoding(): + from joblib.test.test_func_inspect_special_encoding import big5_f + func_code, source_file, first_line = get_func_code(big5_f) + assert first_line == 5 + assert "def big5_f():" in func_code + assert "test_func_inspect_special_encoding" in source_file + + +def _get_code(): + from joblib.test.test_func_inspect_special_encoding import big5_f + return get_func_code(big5_f)[0] + + +def test_func_code_consistency(): + from joblib.parallel import Parallel, delayed + codes = Parallel(n_jobs=2)(delayed(_get_code)() for _ in range(5)) + assert len(set(codes)) == 1 diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/test_hashing.py b/llmeval-env/lib/python3.10/site-packages/joblib/test/test_hashing.py new file mode 100644 index 0000000000000000000000000000000000000000..85593d297f6e2387c58cad8d5bceba4d21b1c0aa --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/test/test_hashing.py @@ -0,0 +1,495 @@ +""" +Test the hashing module. +""" + +# Author: Gael Varoquaux +# Copyright (c) 2009 Gael Varoquaux +# License: BSD Style, 3 clauses. + +import time +import hashlib +import sys +import gc +import io +import collections +import itertools +import pickle +import random +from concurrent.futures import ProcessPoolExecutor +from decimal import Decimal + +from joblib.hashing import hash +from joblib.func_inspect import filter_args +from joblib.memory import Memory +from joblib.testing import raises, skipif, fixture, parametrize +from joblib.test.common import np, with_numpy + + +def unicode(s): + return s + + +############################################################################### +# Helper functions for the tests +def time_func(func, *args): + """ Time function func on *args. + """ + times = list() + for _ in range(3): + t1 = time.time() + func(*args) + times.append(time.time() - t1) + return min(times) + + +def relative_time(func1, func2, *args): + """ Return the relative time between func1 and func2 applied on + *args. + """ + time_func1 = time_func(func1, *args) + time_func2 = time_func(func2, *args) + relative_diff = 0.5 * (abs(time_func1 - time_func2) + / (time_func1 + time_func2)) + return relative_diff + + +class Klass(object): + + def f(self, x): + return x + + +class KlassWithCachedMethod(object): + + def __init__(self, cachedir): + mem = Memory(location=cachedir) + self.f = mem.cache(self.f) + + def f(self, x): + return x + + +############################################################################### +# Tests + +input_list = [1, 2, 1., 2., 1 + 1j, 2. + 1j, + 'a', 'b', + (1,), (1, 1,), [1, ], [1, 1, ], + {1: 1}, {1: 2}, {2: 1}, + None, + gc.collect, + [1, ].append, + # Next 2 sets have unorderable elements in python 3. + set(('a', 1)), + set(('a', 1, ('a', 1))), + # Next 2 dicts have unorderable type of keys in python 3. + {'a': 1, 1: 2}, + {'a': 1, 1: 2, 'd': {'a': 1}}] + + +@parametrize('obj1', input_list) +@parametrize('obj2', input_list) +def test_trivial_hash(obj1, obj2): + """Smoke test hash on various types.""" + # Check that 2 objects have the same hash only if they are the same. 
+ are_hashes_equal = hash(obj1) == hash(obj2) + are_objs_identical = obj1 is obj2 + assert are_hashes_equal == are_objs_identical + + +def test_hash_methods(): + # Check that hashing instance methods works + a = io.StringIO(unicode('a')) + assert hash(a.flush) == hash(a.flush) + a1 = collections.deque(range(10)) + a2 = collections.deque(range(9)) + assert hash(a1.extend) != hash(a2.extend) + + +@fixture(scope='function') +@with_numpy +def three_np_arrays(): + rnd = np.random.RandomState(0) + arr1 = rnd.random_sample((10, 10)) + arr2 = arr1.copy() + arr3 = arr2.copy() + arr3[0] += 1 + return arr1, arr2, arr3 + + +def test_hash_numpy_arrays(three_np_arrays): + arr1, arr2, arr3 = three_np_arrays + + for obj1, obj2 in itertools.product(three_np_arrays, repeat=2): + are_hashes_equal = hash(obj1) == hash(obj2) + are_arrays_equal = np.all(obj1 == obj2) + assert are_hashes_equal == are_arrays_equal + + assert hash(arr1) != hash(arr1.T) + + +def test_hash_numpy_dict_of_arrays(three_np_arrays): + arr1, arr2, arr3 = three_np_arrays + + d1 = {1: arr1, 2: arr2} + d2 = {1: arr2, 2: arr1} + d3 = {1: arr2, 2: arr3} + + assert hash(d1) == hash(d2) + assert hash(d1) != hash(d3) + + +@with_numpy +@parametrize('dtype', ['datetime64[s]', 'timedelta64[D]']) +def test_numpy_datetime_array(dtype): + # memoryview is not supported for some dtypes e.g. datetime64 + # see https://github.com/joblib/joblib/issues/188 for more details + a_hash = hash(np.arange(10)) + array = np.arange(0, 10, dtype=dtype) + assert hash(array) != a_hash + + +@with_numpy +def test_hash_numpy_noncontiguous(): + a = np.asarray(np.arange(6000).reshape((1000, 2, 3)), + order='F')[:, :1, :] + b = np.ascontiguousarray(a) + assert hash(a) != hash(b) + + c = np.asfortranarray(a) + assert hash(a) != hash(c) + + +@with_numpy +@parametrize('coerce_mmap', [True, False]) +def test_hash_memmap(tmpdir, coerce_mmap): + """Check that memmap and arrays hash identically if coerce_mmap is True.""" + filename = tmpdir.join('memmap_temp').strpath + try: + m = np.memmap(filename, shape=(10, 10), mode='w+') + a = np.asarray(m) + are_hashes_equal = (hash(a, coerce_mmap=coerce_mmap) == + hash(m, coerce_mmap=coerce_mmap)) + assert are_hashes_equal == coerce_mmap + finally: + if 'm' in locals(): + del m + # Force a garbage-collection cycle, to be certain that the + # object is delete, and we don't run in a problem under + # Windows with a file handle still open. 
+ gc.collect() + + +@with_numpy +@skipif(sys.platform == 'win32', reason='This test is not stable under windows' + ' for some reason') +def test_hash_numpy_performance(): + """ Check the performance of hashing numpy arrays: + + In [22]: a = np.random.random(1000000) + + In [23]: %timeit hashlib.md5(a).hexdigest() + 100 loops, best of 3: 20.7 ms per loop + + In [24]: %timeit hashlib.md5(pickle.dumps(a, protocol=2)).hexdigest() + 1 loops, best of 3: 73.1 ms per loop + + In [25]: %timeit hashlib.md5(cPickle.dumps(a, protocol=2)).hexdigest() + 10 loops, best of 3: 53.9 ms per loop + + In [26]: %timeit hash(a) + 100 loops, best of 3: 20.8 ms per loop + """ + rnd = np.random.RandomState(0) + a = rnd.random_sample(1000000) + + def md5_hash(x): + return hashlib.md5(memoryview(x)).hexdigest() + + relative_diff = relative_time(md5_hash, hash, a) + assert relative_diff < 0.3 + + # Check that hashing an tuple of 3 arrays takes approximately + # 3 times as much as hashing one array + time_hashlib = 3 * time_func(md5_hash, a) + time_hash = time_func(hash, (a, a, a)) + relative_diff = 0.5 * (abs(time_hash - time_hashlib) + / (time_hash + time_hashlib)) + assert relative_diff < 0.3 + + +def test_bound_methods_hash(): + """ Make sure that calling the same method on two different instances + of the same class does resolve to the same hashes. + """ + a = Klass() + b = Klass() + assert (hash(filter_args(a.f, [], (1, ))) == + hash(filter_args(b.f, [], (1, )))) + + +def test_bound_cached_methods_hash(tmpdir): + """ Make sure that calling the same _cached_ method on two different + instances of the same class does resolve to the same hashes. + """ + a = KlassWithCachedMethod(tmpdir.strpath) + b = KlassWithCachedMethod(tmpdir.strpath) + assert (hash(filter_args(a.f.func, [], (1, ))) == + hash(filter_args(b.f.func, [], (1, )))) + + +@with_numpy +def test_hash_object_dtype(): + """ Make sure that ndarrays with dtype `object' hash correctly.""" + + a = np.array([np.arange(i) for i in range(6)], dtype=object) + b = np.array([np.arange(i) for i in range(6)], dtype=object) + + assert hash(a) == hash(b) + + +@with_numpy +def test_numpy_scalar(): + # Numpy scalars are built from compiled functions, and lead to + # strange pickling paths explored, that can give hash collisions + a = np.float64(2.0) + b = np.float64(3.0) + assert hash(a) != hash(b) + + +def test_dict_hash(tmpdir): + # Check that dictionaries hash consistently, even though the ordering + # of the keys is not guaranteed + k = KlassWithCachedMethod(tmpdir.strpath) + + d = {'#s12069__c_maps.nii.gz': [33], + '#s12158__c_maps.nii.gz': [33], + '#s12258__c_maps.nii.gz': [33], + '#s12277__c_maps.nii.gz': [33], + '#s12300__c_maps.nii.gz': [33], + '#s12401__c_maps.nii.gz': [33], + '#s12430__c_maps.nii.gz': [33], + '#s13817__c_maps.nii.gz': [33], + '#s13903__c_maps.nii.gz': [33], + '#s13916__c_maps.nii.gz': [33], + '#s13981__c_maps.nii.gz': [33], + '#s13982__c_maps.nii.gz': [33], + '#s13983__c_maps.nii.gz': [33]} + + a = k.f(d) + b = k.f(a) + + assert hash(a) == hash(b) + + +def test_set_hash(tmpdir): + # Check that sets hash consistently, even though their ordering + # is not guaranteed + k = KlassWithCachedMethod(tmpdir.strpath) + + s = set(['#s12069__c_maps.nii.gz', + '#s12158__c_maps.nii.gz', + '#s12258__c_maps.nii.gz', + '#s12277__c_maps.nii.gz', + '#s12300__c_maps.nii.gz', + '#s12401__c_maps.nii.gz', + '#s12430__c_maps.nii.gz', + '#s13817__c_maps.nii.gz', + '#s13903__c_maps.nii.gz', + '#s13916__c_maps.nii.gz', + '#s13981__c_maps.nii.gz', + 
'#s13982__c_maps.nii.gz', + '#s13983__c_maps.nii.gz']) + + a = k.f(s) + b = k.f(a) + + assert hash(a) == hash(b) + + +def test_set_decimal_hash(): + # Check that sets containing decimals hash consistently, even though + # ordering is not guaranteed + assert (hash(set([Decimal(0), Decimal('NaN')])) == + hash(set([Decimal('NaN'), Decimal(0)]))) + + +def test_string(): + # Test that we obtain the same hash for object owning several strings, + # whatever the past of these strings (which are immutable in Python) + string = 'foo' + a = {string: 'bar'} + b = {string: 'bar'} + c = pickle.loads(pickle.dumps(b)) + assert hash([a, b]) == hash([a, c]) + + +@with_numpy +def test_numpy_dtype_pickling(): + # numpy dtype hashing is tricky to get right: see #231, #239, #251 #1080, + # #1082, and explanatory comments inside + # ``joblib.hashing.NumpyHasher.save``. + + # In this test, we make sure that the pickling of numpy dtypes is robust to + # object identity and object copy. + + dt1 = np.dtype('f4') + dt2 = np.dtype('f4') + + # simple dtypes objects are interned + assert dt1 is dt2 + assert hash(dt1) == hash(dt2) + + dt1_roundtripped = pickle.loads(pickle.dumps(dt1)) + assert dt1 is not dt1_roundtripped + assert hash(dt1) == hash(dt1_roundtripped) + + assert hash([dt1, dt1]) == hash([dt1_roundtripped, dt1_roundtripped]) + assert hash([dt1, dt1]) == hash([dt1, dt1_roundtripped]) + + complex_dt1 = np.dtype( + [('name', np.str_, 16), ('grades', np.float64, (2,))] + ) + complex_dt2 = np.dtype( + [('name', np.str_, 16), ('grades', np.float64, (2,))] + ) + + # complex dtypes objects are not interned + assert hash(complex_dt1) == hash(complex_dt2) + + complex_dt1_roundtripped = pickle.loads(pickle.dumps(complex_dt1)) + assert complex_dt1_roundtripped is not complex_dt1 + assert hash(complex_dt1) == hash(complex_dt1_roundtripped) + + assert hash([complex_dt1, complex_dt1]) == hash( + [complex_dt1_roundtripped, complex_dt1_roundtripped] + ) + assert hash([complex_dt1, complex_dt1]) == hash( + [complex_dt1_roundtripped, complex_dt1] + ) + + +@parametrize('to_hash,expected', + [('This is a string to hash', + '71b3f47df22cb19431d85d92d0b230b2'), + (u"C'est l\xe9t\xe9", + '2d8d189e9b2b0b2e384d93c868c0e576'), + ((123456, 54321, -98765), + 'e205227dd82250871fa25aa0ec690aa3'), + ([random.Random(42).random() for _ in range(5)], + 'a11ffad81f9682a7d901e6edc3d16c84'), + ({'abcde': 123, 'sadfas': [-9999, 2, 3]}, + 'aeda150553d4bb5c69f0e69d51b0e2ef')]) +def test_hashes_stay_the_same(to_hash, expected): + # We want to make sure that hashes don't change with joblib + # version. For end users, that would mean that they have to + # regenerate their cache from scratch, which potentially means + # lengthy recomputations. + # Expected results have been generated with joblib 0.9.2 + assert hash(to_hash) == expected + + +@with_numpy +def test_hashes_are_different_between_c_and_fortran_contiguous_arrays(): + # We want to be sure that the c-contiguous and f-contiguous versions of the + # same array produce 2 different hashes. 
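+    # The hash reflects the in-memory layout: arr_f stores the same values
+    # with different strides/buffer ordering, so the two digests should differ
+    # even though ``np.array_equal(arr_c, arr_f)`` holds (same reasoning as
+    # the ``hash(arr1) != hash(arr1.T)`` check earlier in this file).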
+ rng = np.random.RandomState(0) + arr_c = rng.random_sample((10, 10)) + arr_f = np.asfortranarray(arr_c) + assert hash(arr_c) != hash(arr_f) + + +@with_numpy +def test_0d_array(): + hash(np.array(0)) + + +@with_numpy +def test_0d_and_1d_array_hashing_is_different(): + assert hash(np.array(0)) != hash(np.array([0])) + + +@with_numpy +def test_hashes_stay_the_same_with_numpy_objects(): + # Note: joblib used to test numpy objects hashing by comparing the produced + # hash of an object with some hard-coded target value to guarantee that + # hashing remains the same across joblib versions. However, since numpy + # 1.20 and joblib 1.0, joblib relies on potentially unstable implementation + # details of numpy to hash np.dtype objects, which makes the stability of + # hash values across different environments hard to guarantee and to test. + # As a result, hashing stability across joblib versions becomes best-effort + # only, and we only test the consistency within a single environment by + # making sure: + # - the hash of two copies of the same objects is the same + # - hashing some object in two different python processes produces the same + # value. This should be viewed as a proxy for testing hash consistency + # through time between Python sessions (provided no change in the + # environment was done between sessions). + + def create_objects_to_hash(): + rng = np.random.RandomState(42) + # Being explicit about dtypes in order to avoid + # architecture-related differences. Also using 'f4' rather than + # 'f8' for float arrays because 'f8' arrays generated by + # rng.random.randn don't seem to be bit-identical on 32bit and + # 64bit machines. + to_hash_list = [ + rng.randint(-1000, high=1000, size=50).astype(' +# Copyright (c) 2009 Gael Varoquaux +# License: BSD Style, 3 clauses. +import re + +from joblib.logger import PrintTime + + +def test_print_time(tmpdir, capsys): + # A simple smoke test for PrintTime. + logfile = tmpdir.join('test.log').strpath + print_time = PrintTime(logfile=logfile) + print_time('Foo') + # Create a second time, to smoke test log rotation. 
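+    # Re-creating PrintTime on an existing logfile is expected to rotate the
+    # previous log (e.g. to something like 'test.log.1') rather than raise;
+    # only the absence of errors is smoke tested here.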
+    print_time = PrintTime(logfile=logfile)
+    print_time('Foo')
+    # And a third time
+    print_time = PrintTime(logfile=logfile)
+    print_time('Foo')
+
+    out_printed_text, err_printed_text = capsys.readouterr()
+    # Use regexps to be robust to time variations
+    match = r"Foo: 0\..s, 0\..min\nFoo: 0\..s, 0\..min\nFoo: " + \
+            r".\..s, 0\..min\n"
+    if not re.match(match, err_printed_text):
+        raise AssertionError('Expected %s, got %s' %
+                             (match, err_printed_text))
diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/test_memmapping.py b/llmeval-env/lib/python3.10/site-packages/joblib/test/test_memmapping.py
new file mode 100644
index 0000000000000000000000000000000000000000..42a297a9e445d38c0daa03bc7d78e4c4f1fd4571
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/joblib/test/test_memmapping.py
@@ -0,0 +1,1191 @@
+import os
+import mmap
+import sys
+import platform
+import gc
+import pickle
+import itertools
+from time import sleep
+import subprocess
+import threading
+import faulthandler
+
+import pytest
+
+from joblib.test.common import with_numpy, np
+from joblib.test.common import with_multiprocessing
+from joblib.test.common import with_dev_shm
+from joblib.testing import raises, parametrize, skipif
+from joblib.backports import make_memmap
+from joblib.parallel import Parallel, delayed
+
+from joblib.pool import MemmappingPool
+from joblib.executor import _TestingMemmappingExecutor as TestExecutor
+from joblib._memmapping_reducer import has_shareable_memory
+from joblib._memmapping_reducer import ArrayMemmapForwardReducer
+from joblib._memmapping_reducer import _strided_from_memmap
+from joblib._memmapping_reducer import _get_temp_dir
+from joblib._memmapping_reducer import _WeakArrayKeyMap
+from joblib._memmapping_reducer import _get_backing_memmap
+import joblib._memmapping_reducer as jmr
+
+
+def setup_module():
+    faulthandler.dump_traceback_later(timeout=300, exit=True)
+
+
+def teardown_module():
+    faulthandler.cancel_dump_traceback_later()
+
+
+def check_memmap_and_send_back(array):
+    assert _get_backing_memmap(array) is not None
+    return array
+
+
+def check_array(args):
+    """Dummy helper function to be executed in subprocesses
+
+    Check that the provided array has the expected values in the provided
+    range.
+
+    """
+    data, position, expected = args
+    np.testing.assert_array_equal(data[position], expected)
+
+
+def inplace_double(args):
+    """Dummy helper function to be executed in subprocesses
+
+    Check that the input array has the right values in the provided range
+    and perform an inplace modification to multiply the values in the range
+    by two.
+ + """ + data, position, expected = args + assert data[position] == expected + data[position] *= 2 + np.testing.assert_array_equal(data[position], 2 * expected) + + +@with_numpy +@with_multiprocessing +def test_memmap_based_array_reducing(tmpdir): + """Check that it is possible to reduce a memmap backed array""" + assert_array_equal = np.testing.assert_array_equal + filename = tmpdir.join('test.mmap').strpath + + # Create a file larger than what will be used by a + buffer = np.memmap(filename, dtype=np.float64, shape=500, mode='w+') + + # Fill the original buffer with negative markers to detect over of + # underflow in case of test failures + buffer[:] = - 1.0 * np.arange(buffer.shape[0], dtype=buffer.dtype) + buffer.flush() + + # Memmap a 2D fortran array on a offsetted subsection of the previous + # buffer + a = np.memmap(filename, dtype=np.float64, shape=(3, 5, 4), + mode='r+', order='F', offset=4) + a[:] = np.arange(60).reshape(a.shape) + + # Build various views that share the buffer with the original memmap + + # b is an memmap sliced view on an memmap instance + b = a[1:-1, 2:-1, 2:4] + + # c and d are array views + c = np.asarray(b) + d = c.T + + # Array reducer with auto dumping disabled + reducer = ArrayMemmapForwardReducer(None, tmpdir.strpath, 'c', True) + + def reconstruct_array_or_memmap(x): + cons, args = reducer(x) + return cons(*args) + + # Reconstruct original memmap + a_reconstructed = reconstruct_array_or_memmap(a) + assert has_shareable_memory(a_reconstructed) + assert isinstance(a_reconstructed, np.memmap) + assert_array_equal(a_reconstructed, a) + + # Reconstruct strided memmap view + b_reconstructed = reconstruct_array_or_memmap(b) + assert has_shareable_memory(b_reconstructed) + assert_array_equal(b_reconstructed, b) + + # Reconstruct arrays views on memmap base + c_reconstructed = reconstruct_array_or_memmap(c) + assert not isinstance(c_reconstructed, np.memmap) + assert has_shareable_memory(c_reconstructed) + assert_array_equal(c_reconstructed, c) + + d_reconstructed = reconstruct_array_or_memmap(d) + assert not isinstance(d_reconstructed, np.memmap) + assert has_shareable_memory(d_reconstructed) + assert_array_equal(d_reconstructed, d) + + # Test graceful degradation on fake memmap instances with in-memory + # buffers + a3 = a * 3 + assert not has_shareable_memory(a3) + a3_reconstructed = reconstruct_array_or_memmap(a3) + assert not has_shareable_memory(a3_reconstructed) + assert not isinstance(a3_reconstructed, np.memmap) + assert_array_equal(a3_reconstructed, a * 3) + + # Test graceful degradation on arrays derived from fake memmap instances + b3 = np.asarray(a3) + assert not has_shareable_memory(b3) + + b3_reconstructed = reconstruct_array_or_memmap(b3) + assert isinstance(b3_reconstructed, np.ndarray) + assert not has_shareable_memory(b3_reconstructed) + assert_array_equal(b3_reconstructed, b3) + + +@with_multiprocessing +@skipif((sys.platform != "win32") or (), + reason="PermissionError only easily triggerable on Windows") +def test_resource_tracker_retries_when_permissionerror(tmpdir): + # Test resource_tracker retry mechanism when unlinking memmaps. See more + # thorough information in the ``unlink_file`` documentation of joblib. 
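+    # The child script below registers the memmap's backing file with the
+    # resource_tracker and asks for its deletion while a np.memmap still holds
+    # an open handle on it.  On Windows this triggers a PermissionError that
+    # the resource_tracker is expected to log and retry instead of crashing.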
+ filename = tmpdir.join('test.mmap').strpath + cmd = """if 1: + import os + import numpy as np + import time + from joblib.externals.loky.backend import resource_tracker + resource_tracker.VERBOSE = 1 + + # Start the resource tracker + resource_tracker.ensure_running() + time.sleep(1) + + # Create a file containing numpy data + memmap = np.memmap(r"{filename}", dtype=np.float64, shape=10, mode='w+') + memmap[:] = np.arange(10).astype(np.int8).data + memmap.flush() + assert os.path.exists(r"{filename}") + del memmap + + # Create a np.memmap backed by this file + memmap = np.memmap(r"{filename}", dtype=np.float64, shape=10, mode='w+') + resource_tracker.register(r"{filename}", "file") + + # Ask the resource_tracker to delete the file backing the np.memmap , this + # should raise PermissionError that the resource_tracker will log. + resource_tracker.maybe_unlink(r"{filename}", "file") + + # Wait for the resource_tracker to process the maybe_unlink before cleaning + # up the memmap + time.sleep(2) + """.format(filename=filename) + p = subprocess.Popen([sys.executable, '-c', cmd], stderr=subprocess.PIPE, + stdout=subprocess.PIPE) + p.wait() + out, err = p.communicate() + assert p.returncode == 0 + assert out == b'' + msg = 'tried to unlink {}, got PermissionError'.format(filename) + assert msg in err.decode() + + +@with_numpy +@with_multiprocessing +def test_high_dimension_memmap_array_reducing(tmpdir): + assert_array_equal = np.testing.assert_array_equal + + filename = tmpdir.join('test.mmap').strpath + + # Create a high dimensional memmap + a = np.memmap(filename, dtype=np.float64, shape=(100, 15, 15, 3), + mode='w+') + a[:] = np.arange(100 * 15 * 15 * 3).reshape(a.shape) + + # Create some slices/indices at various dimensions + b = a[0:10] + c = a[:, 5:10] + d = a[:, :, :, 0] + e = a[1:3:4] + + # Array reducer with auto dumping disabled + reducer = ArrayMemmapForwardReducer(None, tmpdir.strpath, 'c', True) + + def reconstruct_array_or_memmap(x): + cons, args = reducer(x) + return cons(*args) + + a_reconstructed = reconstruct_array_or_memmap(a) + assert has_shareable_memory(a_reconstructed) + assert isinstance(a_reconstructed, np.memmap) + assert_array_equal(a_reconstructed, a) + + b_reconstructed = reconstruct_array_or_memmap(b) + assert has_shareable_memory(b_reconstructed) + assert_array_equal(b_reconstructed, b) + + c_reconstructed = reconstruct_array_or_memmap(c) + assert has_shareable_memory(c_reconstructed) + assert_array_equal(c_reconstructed, c) + + d_reconstructed = reconstruct_array_or_memmap(d) + assert has_shareable_memory(d_reconstructed) + assert_array_equal(d_reconstructed, d) + + e_reconstructed = reconstruct_array_or_memmap(e) + assert has_shareable_memory(e_reconstructed) + assert_array_equal(e_reconstructed, e) + + +@with_numpy +def test__strided_from_memmap(tmpdir): + fname = tmpdir.join('test.mmap').strpath + size = 5 * mmap.ALLOCATIONGRANULARITY + offset = mmap.ALLOCATIONGRANULARITY + 1 + # This line creates the mmap file that is reused later + memmap_obj = np.memmap(fname, mode='w+', shape=size + offset) + # filename, dtype, mode, offset, order, shape, strides, total_buffer_len + memmap_obj = _strided_from_memmap(fname, dtype='uint8', mode='r', + offset=offset, order='C', shape=size, + strides=None, total_buffer_len=None, + unlink_on_gc_collect=False) + assert isinstance(memmap_obj, np.memmap) + assert memmap_obj.offset == offset + memmap_backed_obj = _strided_from_memmap( + fname, dtype='uint8', mode='r', offset=offset, order='C', + shape=(size // 2,), strides=(2,), 
total_buffer_len=size, + unlink_on_gc_collect=False + ) + assert _get_backing_memmap(memmap_backed_obj).offset == offset + + +@with_numpy +@with_multiprocessing +@parametrize("factory", [MemmappingPool, TestExecutor.get_memmapping_executor], + ids=["multiprocessing", "loky"]) +def test_pool_with_memmap(factory, tmpdir): + """Check that subprocess can access and update shared memory memmap""" + assert_array_equal = np.testing.assert_array_equal + + # Fork the subprocess before allocating the objects to be passed + pool_temp_folder = tmpdir.mkdir('pool').strpath + p = factory(10, max_nbytes=2, temp_folder=pool_temp_folder) + try: + filename = tmpdir.join('test.mmap').strpath + a = np.memmap(filename, dtype=np.float32, shape=(3, 5), mode='w+') + a.fill(1.0) + + p.map(inplace_double, [(a, (i, j), 1.0) + for i in range(a.shape[0]) + for j in range(a.shape[1])]) + + assert_array_equal(a, 2 * np.ones(a.shape)) + + # Open a copy-on-write view on the previous data + b = np.memmap(filename, dtype=np.float32, shape=(5, 3), mode='c') + + p.map(inplace_double, [(b, (i, j), 2.0) + for i in range(b.shape[0]) + for j in range(b.shape[1])]) + + # Passing memmap instances to the pool should not trigger the creation + # of new files on the FS + assert os.listdir(pool_temp_folder) == [] + + # the original data is untouched + assert_array_equal(a, 2 * np.ones(a.shape)) + assert_array_equal(b, 2 * np.ones(b.shape)) + + # readonly maps can be read but not updated + c = np.memmap(filename, dtype=np.float32, shape=(10,), mode='r', + offset=5 * 4) + + with raises(AssertionError): + p.map(check_array, [(c, i, 3.0) for i in range(c.shape[0])]) + + # depending on the version of numpy one can either get a RuntimeError + # or a ValueError + with raises((RuntimeError, ValueError)): + p.map(inplace_double, [(c, i, 2.0) for i in range(c.shape[0])]) + finally: + # Clean all filehandlers held by the pool + p.terminate() + del p + + +@with_numpy +@with_multiprocessing +@parametrize("factory", [MemmappingPool, TestExecutor.get_memmapping_executor], + ids=["multiprocessing", "loky"]) +def test_pool_with_memmap_array_view(factory, tmpdir): + """Check that subprocess can access and update shared memory array""" + assert_array_equal = np.testing.assert_array_equal + + # Fork the subprocess before allocating the objects to be passed + pool_temp_folder = tmpdir.mkdir('pool').strpath + p = factory(10, max_nbytes=2, temp_folder=pool_temp_folder) + try: + + filename = tmpdir.join('test.mmap').strpath + a = np.memmap(filename, dtype=np.float32, shape=(3, 5), mode='w+') + a.fill(1.0) + + # Create an ndarray view on the memmap instance + a_view = np.asarray(a) + assert not isinstance(a_view, np.memmap) + assert has_shareable_memory(a_view) + + p.map(inplace_double, [(a_view, (i, j), 1.0) + for i in range(a.shape[0]) + for j in range(a.shape[1])]) + + # Both a and the a_view have been updated + assert_array_equal(a, 2 * np.ones(a.shape)) + assert_array_equal(a_view, 2 * np.ones(a.shape)) + + # Passing memmap array view to the pool should not trigger the + # creation of new files on the FS + assert os.listdir(pool_temp_folder) == [] + + finally: + p.terminate() + del p + + +@with_numpy +@with_multiprocessing +@parametrize("backend", ["multiprocessing", "loky"]) +def test_permission_error_windows_reference_cycle(backend): + # Non regression test for: + # https://github.com/joblib/joblib/issues/806 + # + # The issue happens when trying to delete a memory mapped file that has + # not yet been closed by one of the worker processes. 
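+    # The script builds a self-referencing list around the large array so that
+    # its memmapped copy in the workers is only garbage collected late; the
+    # parent process should nevertheless exit cleanly, which is asserted via
+    # the returncode check below.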
+ cmd = """if 1: + import numpy as np + from joblib import Parallel, delayed + + + data = np.random.rand(int(2e6)).reshape((int(1e6), 2)) + + # Build a complex cyclic reference that is likely to delay garbage + # collection of the memmapped array in the worker processes. + first_list = current_list = [data] + for i in range(10): + current_list = [current_list] + first_list.append(current_list) + + if __name__ == "__main__": + results = Parallel(n_jobs=2, backend="{b}")( + delayed(len)(current_list) for i in range(10)) + assert results == [1] * 10 + """.format(b=backend) + p = subprocess.Popen([sys.executable, '-c', cmd], stderr=subprocess.PIPE, + stdout=subprocess.PIPE) + p.wait() + out, err = p.communicate() + assert p.returncode == 0, out.decode() + "\n\n" + err.decode() + + +@with_numpy +@with_multiprocessing +@parametrize("backend", ["multiprocessing", "loky"]) +def test_permission_error_windows_memmap_sent_to_parent(backend): + # Second non-regression test for: + # https://github.com/joblib/joblib/issues/806 + # previously, child process would not convert temporary memmaps to numpy + # arrays when sending the data back to the parent process. This would lead + # to permission errors on windows when deleting joblib's temporary folder, + # as the memmaped files handles would still opened in the parent process. + cmd = '''if 1: + import os + import time + + import numpy as np + + from joblib import Parallel, delayed + from testutils import return_slice_of_data + + data = np.ones(int(2e6)) + + if __name__ == '__main__': + # warm-up call to launch the workers and start the resource_tracker + _ = Parallel(n_jobs=2, verbose=5, backend='{b}')( + delayed(id)(i) for i in range(20)) + + time.sleep(0.5) + + slice_of_data = Parallel(n_jobs=2, verbose=5, backend='{b}')( + delayed(return_slice_of_data)(data, 0, 20) for _ in range(10)) + '''.format(b=backend) + + for _ in range(3): + env = os.environ.copy() + env['PYTHONPATH'] = os.path.dirname(__file__) + p = subprocess.Popen([sys.executable, '-c', cmd], + stderr=subprocess.PIPE, + stdout=subprocess.PIPE, env=env) + p.wait() + out, err = p.communicate() + assert p.returncode == 0, err + assert out == b'' + if sys.version_info[:3] not in [(3, 8, 0), (3, 8, 1)]: + # In early versions of Python 3.8, a reference leak + # https://github.com/cloudpipe/cloudpickle/issues/327, holds + # references to pickled objects, generating race condition during + # cleanup finalizers of joblib and noisy resource_tracker outputs. + assert b'resource_tracker' not in err + + +@with_numpy +@with_multiprocessing +@parametrize("backend", ["multiprocessing", "loky"]) +def test_parallel_isolated_temp_folders(backend): + # Test that consecutive Parallel call use isolated subfolders, even + # for the loky backend that reuses its executor instance across calls. + array = np.arange(int(1e2)) + [filename_1] = Parallel(n_jobs=2, backend=backend, max_nbytes=10)( + delayed(getattr)(array, 'filename') for _ in range(1) + ) + [filename_2] = Parallel(n_jobs=2, backend=backend, max_nbytes=10)( + delayed(getattr)(array, 'filename') for _ in range(1) + ) + assert os.path.dirname(filename_2) != os.path.dirname(filename_1) + + +@with_numpy +@with_multiprocessing +@parametrize("backend", ["multiprocessing", "loky"]) +def test_managed_backend_reuse_temp_folder(backend): + # Test that calls to a managed parallel object reuse the same memmaps. 
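+    # Unlike the isolated-folder test above, the Parallel object is kept alive
+    # by the ``with`` block, so both calls dispatch through the same backend
+    # and their auto-memmapped arrays should land in the same temporary folder.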
+ array = np.arange(int(1e2)) + with Parallel(n_jobs=2, backend=backend, max_nbytes=10) as p: + [filename_1] = p( + delayed(getattr)(array, 'filename') for _ in range(1) + ) + [filename_2] = p( + delayed(getattr)(array, 'filename') for _ in range(1) + ) + assert os.path.dirname(filename_2) == os.path.dirname(filename_1) + + +@with_numpy +@with_multiprocessing +def test_memmapping_temp_folder_thread_safety(): + # Concurrent calls to Parallel with the loky backend will use the same + # executor, and thus the same reducers. Make sure that those reducers use + # different temporary folders depending on which Parallel objects called + # them, which is necessary to limit potential race conditions during the + # garbage collection of temporary memmaps. + array = np.arange(int(1e2)) + + temp_dirs_thread_1 = set() + temp_dirs_thread_2 = set() + + def concurrent_get_filename(array, temp_dirs): + with Parallel(backend='loky', n_jobs=2, max_nbytes=10) as p: + for i in range(10): + [filename] = p( + delayed(getattr)(array, 'filename') for _ in range(1) + ) + temp_dirs.add(os.path.dirname(filename)) + + t1 = threading.Thread( + target=concurrent_get_filename, args=(array, temp_dirs_thread_1) + ) + t2 = threading.Thread( + target=concurrent_get_filename, args=(array, temp_dirs_thread_2) + ) + + t1.start() + t2.start() + + t1.join() + t2.join() + + assert len(temp_dirs_thread_1) == 1 + assert len(temp_dirs_thread_2) == 1 + + assert temp_dirs_thread_1 != temp_dirs_thread_2 + + +@with_numpy +@with_multiprocessing +def test_multithreaded_parallel_termination_resource_tracker_silent(): + # test that concurrent termination attempts of a same executor does not + # emit any spurious error from the resource_tracker. We test various + # situations making 0, 1 or both parallel call sending a task that will + # make the worker (and thus the whole Parallel call) error out. + cmd = '''if 1: + import os + import numpy as np + from joblib import Parallel, delayed + from joblib.externals.loky.backend import resource_tracker + from concurrent.futures import ThreadPoolExecutor, wait + + resource_tracker.VERBOSE = 0 + + array = np.arange(int(1e2)) + + temp_dirs_thread_1 = set() + temp_dirs_thread_2 = set() + + + def raise_error(array): + raise ValueError + + + def parallel_get_filename(array, temp_dirs): + with Parallel(backend="loky", n_jobs=2, max_nbytes=10) as p: + for i in range(10): + [filename] = p( + delayed(getattr)(array, "filename") for _ in range(1) + ) + temp_dirs.add(os.path.dirname(filename)) + + + def parallel_raise(array, temp_dirs): + with Parallel(backend="loky", n_jobs=2, max_nbytes=10) as p: + for i in range(10): + [filename] = p( + delayed(raise_error)(array) for _ in range(1) + ) + temp_dirs.add(os.path.dirname(filename)) + + + executor = ThreadPoolExecutor(max_workers=2) + + # both function calls will use the same loky executor, but with a + # different Parallel object. 
+ future_1 = executor.submit({f1}, array, temp_dirs_thread_1) + future_2 = executor.submit({f2}, array, temp_dirs_thread_2) + + # Wait for both threads to terminate their backend + wait([future_1, future_2]) + + future_1.result() + future_2.result() + ''' + functions_and_returncodes = [ + ("parallel_get_filename", "parallel_get_filename", 0), + ("parallel_get_filename", "parallel_raise", 1), + ("parallel_raise", "parallel_raise", 1) + ] + + for f1, f2, returncode in functions_and_returncodes: + p = subprocess.Popen([sys.executable, '-c', cmd.format(f1=f1, f2=f2)], + stderr=subprocess.PIPE, stdout=subprocess.PIPE) + p.wait() + out, err = p.communicate() + assert p.returncode == returncode, out.decode() + assert b"resource_tracker" not in err, err.decode() + + +@with_numpy +@with_multiprocessing +@parametrize("backend", ["multiprocessing", "loky"]) +def test_many_parallel_calls_on_same_object(backend): + # After #966 got merged, consecutive Parallel objects were sharing temp + # folder, which would lead to race conditions happening during the + # temporary resources management with the resource_tracker. This is a + # non-regression test that makes sure that consecutive Parallel operations + # on the same object do not error out. + cmd = '''if 1: + import os + import time + + import numpy as np + + from joblib import Parallel, delayed + from testutils import return_slice_of_data + + data = np.ones(100) + + if __name__ == '__main__': + for i in range(5): + slice_of_data = Parallel( + n_jobs=2, max_nbytes=1, backend='{b}')( + delayed(return_slice_of_data)(data, 0, 20) + for _ in range(10) + ) + '''.format(b=backend) + env = os.environ.copy() + env['PYTHONPATH'] = os.path.dirname(__file__) + p = subprocess.Popen( + [sys.executable, '-c', cmd], + stderr=subprocess.PIPE, + stdout=subprocess.PIPE, + env=env, + ) + p.wait() + out, err = p.communicate() + assert p.returncode == 0, err + assert out == b'' + if sys.version_info[:3] not in [(3, 8, 0), (3, 8, 1)]: + # In early versions of Python 3.8, a reference leak + # https://github.com/cloudpipe/cloudpickle/issues/327, holds + # references to pickled objects, generating race condition during + # cleanup finalizers of joblib and noisy resource_tracker outputs. + assert b'resource_tracker' not in err + + +@with_numpy +@with_multiprocessing +@parametrize("backend", ["multiprocessing", "loky"]) +def test_memmap_returned_as_regular_array(backend): + data = np.ones(int(1e3)) + # Check that child processes send temporary memmaps back as numpy arrays. + [result] = Parallel(n_jobs=2, backend=backend, max_nbytes=100)( + delayed(check_memmap_and_send_back)(data) for _ in range(1)) + assert _get_backing_memmap(result) is None + + +@with_numpy +@with_multiprocessing +@parametrize("backend", ["multiprocessing", "loky"]) +def test_resource_tracker_silent_when_reference_cycles(backend): + # There is a variety of reasons that can make joblib with loky backend + # output noisy warnings when a reference cycle is preventing a memmap from + # being garbage collected. Especially, joblib's main process finalizer + # deletes the temporary folder if it was not done before, which can + # interact badly with the resource_tracker. We don't risk leaking any + # resources, but this will likely make joblib output a lot of low-level + # confusing messages. + # + # This test makes sure that the resource_tracker is silent when a reference + # has been collected concurrently on non-Windows platforms. 
+ # + # Note that the script in ``cmd`` is the exact same script as in + # test_permission_error_windows_reference_cycle. + if backend == "loky" and sys.platform.startswith('win'): + # XXX: on Windows, reference cycles can delay timely garbage collection + # and make it impossible to properly delete the temporary folder in the + # main process because of permission errors. + pytest.xfail( + "The temporary folder cannot be deleted on Windows in the " + "presence of a reference cycle" + ) + + cmd = """if 1: + import numpy as np + from joblib import Parallel, delayed + + + data = np.random.rand(int(2e6)).reshape((int(1e6), 2)) + + # Build a complex cyclic reference that is likely to delay garbage + # collection of the memmapped array in the worker processes. + first_list = current_list = [data] + for i in range(10): + current_list = [current_list] + first_list.append(current_list) + + if __name__ == "__main__": + results = Parallel(n_jobs=2, backend="{b}")( + delayed(len)(current_list) for i in range(10)) + assert results == [1] * 10 + """.format(b=backend) + p = subprocess.Popen([sys.executable, '-c', cmd], stderr=subprocess.PIPE, + stdout=subprocess.PIPE) + p.wait() + out, err = p.communicate() + out = out.decode() + err = err.decode() + assert p.returncode == 0, out + "\n\n" + err + assert "resource_tracker" not in err, err + + +@with_numpy +@with_multiprocessing +@parametrize("factory", [MemmappingPool, TestExecutor.get_memmapping_executor], + ids=["multiprocessing", "loky"]) +def test_memmapping_pool_for_large_arrays(factory, tmpdir): + """Check that large arrays are not copied in memory""" + + # Check that the tempfolder is empty + assert os.listdir(tmpdir.strpath) == [] + + # Build an array reducers that automatically dump large array content + # to filesystem backed memmap instances to avoid memory explosion + p = factory(3, max_nbytes=40, temp_folder=tmpdir.strpath, verbose=2) + try: + # The temporary folder for the pool is not provisioned in advance + assert os.listdir(tmpdir.strpath) == [] + assert not os.path.exists(p._temp_folder) + + small = np.ones(5, dtype=np.float32) + assert small.nbytes == 20 + p.map(check_array, [(small, i, 1.0) for i in range(small.shape[0])]) + + # Memory has been copied, the pool filesystem folder is unused + assert os.listdir(tmpdir.strpath) == [] + + # Try with a file larger than the memmap threshold of 40 bytes + large = np.ones(100, dtype=np.float64) + assert large.nbytes == 800 + p.map(check_array, [(large, i, 1.0) for i in range(large.shape[0])]) + + # The data has been dumped in a temp folder for subprocess to share it + # without per-child memory copies + assert os.path.isdir(p._temp_folder) + dumped_filenames = os.listdir(p._temp_folder) + assert len(dumped_filenames) == 1 + + # Check that memory mapping is not triggered for arrays with + # dtype='object' + objects = np.array(['abc'] * 100, dtype='object') + results = p.map(has_shareable_memory, [objects]) + assert not results[0] + + finally: + # check FS garbage upon pool termination + p.terminate() + for i in range(10): + sleep(.1) + if not os.path.exists(p._temp_folder): + break + else: # pragma: no cover + raise AssertionError( + 'temporary folder {} was not deleted'.format(p._temp_folder) + ) + del p + + +@with_numpy +@with_multiprocessing +@parametrize( + "backend", + [ + pytest.param( + "multiprocessing", + marks=pytest.mark.xfail( + reason='https://github.com/joblib/joblib/issues/1086' + ), + ), + "loky", + ] +) +def test_child_raises_parent_exits_cleanly(backend): + # When a task 
executed by a child process raises an error, the parent + # process's backend is notified, and calls abort_everything. + # In loky, abort_everything itself calls shutdown(kill_workers=True) which + # sends SIGKILL to the worker, preventing it from running the finalizers + # supposed to signal the resource_tracker when the worker is done using + # objects relying on a shared resource (e.g np.memmaps). Because this + # behavior is prone to : + # - cause a resource leak + # - make the resource tracker emit noisy resource warnings + # we explicitly test that, when the said situation occurs: + # - no resources are actually leaked + # - the temporary resources are deleted as soon as possible (typically, at + # the end of the failing Parallel call) + # - the resource_tracker does not emit any warnings. + cmd = """if 1: + import os + from pathlib import Path + from time import sleep + + import numpy as np + from joblib import Parallel, delayed + from testutils import print_filename_and_raise + + data = np.random.rand(1000) + + def get_temp_folder(parallel_obj, backend): + if "{b}" == "loky": + return Path(parallel_obj._backend._workers._temp_folder) + else: + return Path(parallel_obj._backend._pool._temp_folder) + + + if __name__ == "__main__": + try: + with Parallel(n_jobs=2, backend="{b}", max_nbytes=100) as p: + temp_folder = get_temp_folder(p, "{b}") + p(delayed(print_filename_and_raise)(data) + for i in range(1)) + except ValueError as e: + # the temporary folder should be deleted by the end of this + # call but apparently on some file systems, this takes + # some time to be visible. + # + # We attempt to write into the temporary folder to test for + # its existence and we wait for a maximum of 10 seconds. + for i in range(100): + try: + with open(temp_folder / "some_file.txt", "w") as f: + f.write("some content") + except FileNotFoundError: + # temp_folder has been deleted, all is fine + break + + # ... else, wait a bit and try again + sleep(.1) + else: + raise AssertionError( + str(temp_folder) + " was not deleted" + ) from e + """.format(b=backend) + env = os.environ.copy() + env['PYTHONPATH'] = os.path.dirname(__file__) + p = subprocess.Popen([sys.executable, '-c', cmd], stderr=subprocess.PIPE, + stdout=subprocess.PIPE, env=env) + p.wait() + out, err = p.communicate() + out, err = out.decode(), err.decode() + filename = out.split('\n')[0] + assert p.returncode == 0, err or out + assert err == '' # no resource_tracker warnings. 
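+    # ``filename`` is the path of the temporary memmap printed by the worker
+    # (first line of the captured stdout); it must have been cleaned up by now
+    # even though the failing worker was killed before running its finalizers.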
+ assert not os.path.exists(filename) + + +@with_numpy +@with_multiprocessing +@parametrize("factory", [MemmappingPool, TestExecutor.get_memmapping_executor], + ids=["multiprocessing", "loky"]) +def test_memmapping_pool_for_large_arrays_disabled(factory, tmpdir): + """Check that large arrays memmapping can be disabled""" + # Set max_nbytes to None to disable the auto memmapping feature + p = factory(3, max_nbytes=None, temp_folder=tmpdir.strpath) + try: + + # Check that the tempfolder is empty + assert os.listdir(tmpdir.strpath) == [] + + # Try with a file largish than the memmap threshold of 40 bytes + large = np.ones(100, dtype=np.float64) + assert large.nbytes == 800 + p.map(check_array, [(large, i, 1.0) for i in range(large.shape[0])]) + + # Check that the tempfolder is still empty + assert os.listdir(tmpdir.strpath) == [] + + finally: + # Cleanup open file descriptors + p.terminate() + del p + + +@with_numpy +@with_multiprocessing +@with_dev_shm +@parametrize("factory", [MemmappingPool, TestExecutor.get_memmapping_executor], + ids=["multiprocessing", "loky"]) +def test_memmapping_on_large_enough_dev_shm(factory): + """Check that memmapping uses /dev/shm when possible""" + orig_size = jmr.SYSTEM_SHARED_MEM_FS_MIN_SIZE + try: + # Make joblib believe that it can use /dev/shm even when running on a + # CI container where the size of the /dev/shm is not very large (that + # is at least 32 MB instead of 2 GB by default). + jmr.SYSTEM_SHARED_MEM_FS_MIN_SIZE = int(32e6) + p = factory(3, max_nbytes=10) + try: + # Check that the pool has correctly detected the presence of the + # shared memory filesystem. + pool_temp_folder = p._temp_folder + folder_prefix = '/dev/shm/joblib_memmapping_folder_' + assert pool_temp_folder.startswith(folder_prefix) + assert os.path.exists(pool_temp_folder) + + # Try with a file larger than the memmap threshold of 10 bytes + a = np.ones(100, dtype=np.float64) + assert a.nbytes == 800 + p.map(id, [a] * 10) + # a should have been memmapped to the pool temp folder: the joblib + # pickling procedure generate one .pkl file: + assert len(os.listdir(pool_temp_folder)) == 1 + + # create a new array with content that is different from 'a' so + # that it is mapped to a different file in the temporary folder of + # the pool. + b = np.ones(100, dtype=np.float64) * 2 + assert b.nbytes == 800 + p.map(id, [b] * 10) + # A copy of both a and b are now stored in the shared memory folder + assert len(os.listdir(pool_temp_folder)) == 2 + finally: + # Cleanup open file descriptors + p.terminate() + del p + + for i in range(100): + # The temp folder is cleaned up upon pool termination + if not os.path.exists(pool_temp_folder): + break + sleep(.1) + else: # pragma: no cover + raise AssertionError('temporary folder of pool was not deleted') + finally: + jmr.SYSTEM_SHARED_MEM_FS_MIN_SIZE = orig_size + + +@with_numpy +@with_multiprocessing +@with_dev_shm +@parametrize("factory", [MemmappingPool, TestExecutor.get_memmapping_executor], + ids=["multiprocessing", "loky"]) +def test_memmapping_on_too_small_dev_shm(factory): + orig_size = jmr.SYSTEM_SHARED_MEM_FS_MIN_SIZE + try: + # Make joblib believe that it cannot use /dev/shm unless there is + # 42 exabytes of available shared memory in /dev/shm + jmr.SYSTEM_SHARED_MEM_FS_MIN_SIZE = int(42e18) + + p = factory(3, max_nbytes=10) + try: + # Check that the pool has correctly detected the presence of the + # shared memory filesystem. 
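+            # With such an impossibly large size requirement, /dev/shm is
+            # rejected and the pool must fall back to a regular on-disk
+            # temporary folder.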
+ pool_temp_folder = p._temp_folder + assert not pool_temp_folder.startswith('/dev/shm') + finally: + # Cleanup open file descriptors + p.terminate() + del p + + # The temp folder is cleaned up upon pool termination + assert not os.path.exists(pool_temp_folder) + finally: + jmr.SYSTEM_SHARED_MEM_FS_MIN_SIZE = orig_size + + +@with_numpy +@with_multiprocessing +@parametrize("factory", [MemmappingPool, TestExecutor.get_memmapping_executor], + ids=["multiprocessing", "loky"]) +def test_memmapping_pool_for_large_arrays_in_return(factory, tmpdir): + """Check that large arrays are not copied in memory in return""" + assert_array_equal = np.testing.assert_array_equal + + # Build an array reducers that automatically dump large array content + # but check that the returned datastructure are regular arrays to avoid + # passing a memmap array pointing to a pool controlled temp folder that + # might be confusing to the user + + # The MemmappingPool user can always return numpy.memmap object explicitly + # to avoid memory copy + p = factory(3, max_nbytes=10, temp_folder=tmpdir.strpath) + try: + res = p.apply_async(np.ones, args=(1000,)) + large = res.get() + assert not has_shareable_memory(large) + assert_array_equal(large, np.ones(1000)) + finally: + p.terminate() + del p + + +def _worker_multiply(a, n_times): + """Multiplication function to be executed by subprocess""" + assert has_shareable_memory(a) + return a * n_times + + +@with_numpy +@with_multiprocessing +@parametrize("factory", [MemmappingPool, TestExecutor.get_memmapping_executor], + ids=["multiprocessing", "loky"]) +def test_workaround_against_bad_memmap_with_copied_buffers(factory, tmpdir): + """Check that memmaps with a bad buffer are returned as regular arrays + + Unary operations and ufuncs on memmap instances return a new memmap + instance with an in-memory buffer (probably a numpy bug). + """ + assert_array_equal = np.testing.assert_array_equal + + p = factory(3, max_nbytes=10, temp_folder=tmpdir.strpath) + try: + # Send a complex, large-ish view on a array that will be converted to + # a memmap in the worker process + a = np.asarray(np.arange(6000).reshape((1000, 2, 3)), + order='F')[:, :1, :] + + # Call a non-inplace multiply operation on the worker and memmap and + # send it back to the parent. + b = p.apply_async(_worker_multiply, args=(a, 3)).get() + assert not has_shareable_memory(b) + assert_array_equal(b, 3 * a) + finally: + p.terminate() + del p + + +def identity(arg): + return arg + + +@with_numpy +@with_multiprocessing +@parametrize( + "factory,retry_no", + list(itertools.product( + [MemmappingPool, TestExecutor.get_memmapping_executor], range(3))), + ids=['{}, {}'.format(x, y) for x, y in itertools.product( + ["multiprocessing", "loky"], map(str, range(3)))]) +def test_pool_memmap_with_big_offset(factory, retry_no, tmpdir): + # Test that numpy memmap offset is set correctly if greater than + # mmap.ALLOCATIONGRANULARITY, see + # https://github.com/joblib/joblib/issues/451 and + # https://github.com/numpy/numpy/pull/8443 for more details. 
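+    # make_memmap is used here because, as discussed in the issues linked
+    # above, offsets larger than mmap.ALLOCATIONGRANULARITY could be
+    # mishandled when a memmap was pickled and rebuilt in a worker process;
+    # the round-trip below checks that ``result.offset`` is preserved.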
+ fname = tmpdir.join('test.mmap').strpath + size = 5 * mmap.ALLOCATIONGRANULARITY + offset = mmap.ALLOCATIONGRANULARITY + 1 + obj = make_memmap(fname, mode='w+', shape=size, dtype='uint8', + offset=offset) + + p = factory(2, temp_folder=tmpdir.strpath) + result = p.apply_async(identity, args=(obj,)).get() + assert isinstance(result, np.memmap) + assert result.offset == offset + np.testing.assert_array_equal(obj, result) + p.terminate() + + +def test_pool_get_temp_dir(tmpdir): + pool_folder_name = 'test.tmpdir' + pool_folder, shared_mem = _get_temp_dir(pool_folder_name, tmpdir.strpath) + assert shared_mem is False + assert pool_folder == tmpdir.join('test.tmpdir').strpath + + pool_folder, shared_mem = _get_temp_dir(pool_folder_name, temp_folder=None) + if sys.platform.startswith('win'): + assert shared_mem is False + assert pool_folder.endswith(pool_folder_name) + + +def test_pool_get_temp_dir_no_statvfs(tmpdir, monkeypatch): + """Check that _get_temp_dir works when os.statvfs is not defined + + Regression test for #902 + """ + pool_folder_name = 'test.tmpdir' + import joblib._memmapping_reducer + if hasattr(joblib._memmapping_reducer.os, 'statvfs'): + # We are on Unix, since Windows doesn't have this function + monkeypatch.delattr(joblib._memmapping_reducer.os, 'statvfs') + + pool_folder, shared_mem = _get_temp_dir(pool_folder_name, temp_folder=None) + if sys.platform.startswith('win'): + assert shared_mem is False + assert pool_folder.endswith(pool_folder_name) + + +@with_numpy +@skipif(sys.platform == 'win32', reason='This test fails with a ' + 'PermissionError on Windows') +@parametrize("mmap_mode", ["r+", "w+"]) +def test_numpy_arrays_use_different_memory(mmap_mode): + def func(arr, value): + arr[:] = value + return arr + + arrays = [np.zeros((10, 10), dtype='float64') for i in range(10)] + + results = Parallel(mmap_mode=mmap_mode, max_nbytes=0, n_jobs=2)( + delayed(func)(arr, i) for i, arr in enumerate(arrays)) + + for i, arr in enumerate(results): + np.testing.assert_array_equal(arr, i) + + +@with_numpy +def test_weak_array_key_map(): + + def assert_empty_after_gc_collect(container, retries=100): + for i in range(retries): + if len(container) == 0: + return + gc.collect() + sleep(.1) + assert len(container) == 0 + + a = np.ones(42) + m = _WeakArrayKeyMap() + m.set(a, 'a') + assert m.get(a) == 'a' + + b = a + assert m.get(b) == 'a' + m.set(b, 'b') + assert m.get(a) == 'b' + + del a + gc.collect() + assert len(m._data) == 1 + assert m.get(b) == 'b' + + del b + assert_empty_after_gc_collect(m._data) + + c = np.ones(42) + m.set(c, 'c') + assert len(m._data) == 1 + assert m.get(c) == 'c' + + with raises(KeyError): + m.get(np.ones(42)) + + del c + assert_empty_after_gc_collect(m._data) + + # Check that creating and dropping numpy arrays with potentially the same + # object id will not cause the map to get confused. + def get_set_get_collect(m, i): + a = np.ones(42) + with raises(KeyError): + m.get(a) + m.set(a, i) + assert m.get(a) == i + return id(a) + + unique_ids = set([get_set_get_collect(m, i) for i in range(1000)]) + if platform.python_implementation() == 'CPython': + # On CPython (at least) the same id is often reused many times for the + # temporary arrays created under the local scope of the + # get_set_get_collect function without causing any spurious lookups / + # insertions in the map. Apparently on Python nogil, the id is not + # reused as often. 
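+        # The bound is a heuristic: we only require far fewer than 1000
+        # distinct ids, which shows that id reuse did happen while the map
+        # still returned the right value for every lookup above.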
+ max_len_unique_ids = 400 if getattr(sys.flags, 'nogil', False) else 100 + assert len(unique_ids) < max_len_unique_ids + + +def test_weak_array_key_map_no_pickling(): + m = _WeakArrayKeyMap() + with raises(pickle.PicklingError): + pickle.dumps(m) + + +@with_numpy +@with_multiprocessing +def test_direct_mmap(tmpdir): + testfile = str(tmpdir.join('arr.dat')) + a = np.arange(10, dtype='uint8') + a.tofile(testfile) + + def _read_array(): + with open(testfile) as fd: + mm = mmap.mmap(fd.fileno(), 0, access=mmap.ACCESS_READ, offset=0) + return np.ndarray((10,), dtype=np.uint8, buffer=mm, offset=0) + + def func(x): + return x**2 + + arr = _read_array() + + # this is expected to work and gives the reference + ref = Parallel(n_jobs=2)(delayed(func)(x) for x in [a]) + + # now test that it work with the mmap array + results = Parallel(n_jobs=2)(delayed(func)(x) for x in [arr]) + np.testing.assert_array_equal(results, ref) + + # also test with a mmap array read in the subprocess + def worker(): + return _read_array() + + results = Parallel(n_jobs=2)(delayed(worker)() for _ in range(1)) + np.testing.assert_array_equal(results[0], arr) diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/test_memory_async.py b/llmeval-env/lib/python3.10/site-packages/joblib/test/test_memory_async.py new file mode 100644 index 0000000000000000000000000000000000000000..53ddeffda4a3e75590d0d02a3628d0d1e81ab6a2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/test/test_memory_async.py @@ -0,0 +1,170 @@ +import asyncio +import gc +import shutil + +import pytest + +from joblib.memory import (AsyncMemorizedFunc, AsyncNotMemorizedFunc, + MemorizedResult, Memory, NotMemorizedResult) +from joblib.test.common import np, with_numpy +from joblib.testing import raises + +from .test_memory import (corrupt_single_cache_item, + monkeypatch_cached_func_warn) + + +async def check_identity_lazy_async(func, accumulator, location): + """ Similar to check_identity_lazy_async for coroutine functions""" + memory = Memory(location=location, verbose=0) + func = memory.cache(func) + for i in range(3): + for _ in range(2): + value = await func(i) + assert value == i + assert len(accumulator) == i + 1 + + +@pytest.mark.asyncio +async def test_memory_integration_async(tmpdir): + accumulator = list() + + async def f(n): + await asyncio.sleep(0.1) + accumulator.append(1) + return n + + await check_identity_lazy_async(f, accumulator, tmpdir.strpath) + + # Now test clearing + for compress in (False, True): + for mmap_mode in ('r', None): + memory = Memory(location=tmpdir.strpath, verbose=10, + mmap_mode=mmap_mode, compress=compress) + # First clear the cache directory, to check that our code can + # handle that + # NOTE: this line would raise an exception, as the database + # file is still open; we ignore the error since we want to + # test what happens if the directory disappears + shutil.rmtree(tmpdir.strpath, ignore_errors=True) + g = memory.cache(f) + await g(1) + g.clear(warn=False) + current_accumulator = len(accumulator) + out = await g(1) + + assert len(accumulator) == current_accumulator + 1 + # Also, check that Memory.eval works similarly + evaled = await memory.eval(f, 1) + assert evaled == out + assert len(accumulator) == current_accumulator + 1 + + # Now do a smoke test with a function defined in __main__, as the name + # mangling rules are more complex + f.__module__ = '__main__' + memory = Memory(location=tmpdir.strpath, verbose=0) + await memory.cache(f)(1) + + +@pytest.mark.asyncio +async def 
test_no_memory_async(): + accumulator = list() + + async def ff(x): + await asyncio.sleep(0.1) + accumulator.append(1) + return x + + memory = Memory(location=None, verbose=0) + gg = memory.cache(ff) + for _ in range(4): + current_accumulator = len(accumulator) + await gg(1) + assert len(accumulator) == current_accumulator + 1 + + +@with_numpy +@pytest.mark.asyncio +async def test_memory_numpy_check_mmap_mode_async(tmpdir, monkeypatch): + """Check that mmap_mode is respected even at the first call""" + + memory = Memory(location=tmpdir.strpath, mmap_mode='r', verbose=0) + + @memory.cache() + async def twice(a): + return a * 2 + + a = np.ones(3) + b = await twice(a) + c = await twice(a) + + assert isinstance(c, np.memmap) + assert c.mode == 'r' + + assert isinstance(b, np.memmap) + assert b.mode == 'r' + + # Corrupts the file, Deleting b and c mmaps + # is necessary to be able edit the file + del b + del c + gc.collect() + corrupt_single_cache_item(memory) + + # Make sure that corrupting the file causes recomputation and that + # a warning is issued. + recorded_warnings = monkeypatch_cached_func_warn(twice, monkeypatch) + d = await twice(a) + assert len(recorded_warnings) == 1 + exception_msg = 'Exception while loading results' + assert exception_msg in recorded_warnings[0] + # Asserts that the recomputation returns a mmap + assert isinstance(d, np.memmap) + assert d.mode == 'r' + + +@pytest.mark.asyncio +async def test_call_and_shelve_async(tmpdir): + async def f(x, y=1): + await asyncio.sleep(0.1) + return x ** 2 + y + + # Test MemorizedFunc outputting a reference to cache. + for func, Result in zip((AsyncMemorizedFunc(f, tmpdir.strpath), + AsyncNotMemorizedFunc(f), + Memory(location=tmpdir.strpath, + verbose=0).cache(f), + Memory(location=None).cache(f), + ), + (MemorizedResult, NotMemorizedResult, + MemorizedResult, NotMemorizedResult, + )): + for _ in range(2): + result = await func.call_and_shelve(2) + assert isinstance(result, Result) + assert result.get() == 5 + + result.clear() + with raises(KeyError): + result.get() + result.clear() # Do nothing if there is no cache. + + +@pytest.mark.asyncio +async def test_memorized_func_call_async(memory): + + async def ff(x, counter): + await asyncio.sleep(0.1) + counter[x] = counter.get(x, 0) + 1 + return counter[x] + + gg = memory.cache(ff, ignore=['counter']) + + counter = {} + assert await gg(2, counter) == 1 + assert await gg(2, counter) == 1 + + x, meta = await gg.call(2, counter) + assert x == 2, "f has not been called properly" + assert isinstance(meta, dict), ( + "Metadata are not returned by MemorizedFunc.call." + ) diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/test_missing_multiprocessing.py b/llmeval-env/lib/python3.10/site-packages/joblib/test/test_missing_multiprocessing.py new file mode 100644 index 0000000000000000000000000000000000000000..251925ced5208b4aaf09d9aab305eb44c7102818 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/test/test_missing_multiprocessing.py @@ -0,0 +1,32 @@ +""" +Pyodide and other single-threaded Python builds will be missing the +_multiprocessing module. Test that joblib still works in this environment. +""" + +import os +import subprocess +import sys + + +def test_missing_multiprocessing(tmp_path): + """ + Test that import joblib works even if _multiprocessing is missing. + + pytest has already imported everything from joblib. The most reasonable way + to test importing joblib with modified environment is to invoke a separate + Python process. 
This also ensures that we don't break other tests by + importing a bad `_multiprocessing` module. + """ + (tmp_path / "_multiprocessing.py").write_text( + 'raise ImportError("No _multiprocessing module!")' + ) + env = dict(os.environ) + # For subprocess, use current sys.path with our custom version of + # multiprocessing inserted. + env["PYTHONPATH"] = ":".join([str(tmp_path)] + sys.path) + subprocess.check_call( + [sys.executable, "-c", + "import joblib, math; " + "joblib.Parallel(n_jobs=1)(" + "joblib.delayed(math.sqrt)(i**2) for i in range(10))" + ], env=env) diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/test_module.py b/llmeval-env/lib/python3.10/site-packages/joblib/test/test_module.py new file mode 100644 index 0000000000000000000000000000000000000000..a2257a4142d79996f3d299cb820927ae48a05810 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/test/test_module.py @@ -0,0 +1,53 @@ +import sys +import joblib +from joblib.testing import check_subprocess_call +from joblib.test.common import with_multiprocessing + + +def test_version(): + assert hasattr(joblib, '__version__'), ( + "There are no __version__ argument on the joblib module") + + +@with_multiprocessing +def test_no_start_method_side_effect_on_import(): + # check that importing joblib does not implicitly set the global + # start_method for multiprocessing. + code = """if True: + import joblib + import multiprocessing as mp + # The following line would raise RuntimeError if the + # start_method is already set. + mp.set_start_method("loky") + """ + check_subprocess_call([sys.executable, '-c', code]) + + +@with_multiprocessing +def test_no_semaphore_tracker_on_import(): + # check that importing joblib does not implicitly spawn a resource tracker + # or a semaphore tracker + code = """if True: + import joblib + from multiprocessing import semaphore_tracker + # The following line would raise RuntimeError if the + # start_method is already set. + msg = "multiprocessing.semaphore_tracker has been spawned on import" + assert semaphore_tracker._semaphore_tracker._fd is None, msg""" + if sys.version_info >= (3, 8): + # semaphore_tracker was renamed in Python 3.8: + code = code.replace("semaphore_tracker", "resource_tracker") + check_subprocess_call([sys.executable, '-c', code]) + + +@with_multiprocessing +def test_no_resource_tracker_on_import(): + code = """if True: + import joblib + from joblib.externals.loky.backend import resource_tracker + # The following line would raise RuntimeError if the + # start_method is already set. 
+ msg = "loky.resource_tracker has been spawned on import" + assert resource_tracker._resource_tracker._fd is None, msg + """ + check_subprocess_call([sys.executable, '-c', code]) diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/test_numpy_pickle_utils.py b/llmeval-env/lib/python3.10/site-packages/joblib/test/test_numpy_pickle_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..5c414a2227cbf6094dc594f6712139d4fd397a9d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/test/test_numpy_pickle_utils.py @@ -0,0 +1,9 @@ +from joblib.compressor import BinaryZlibFile +from joblib.testing import parametrize + + +@parametrize('filename', ['test', u'test']) # testing str and unicode names +def test_binary_zlib_file(tmpdir, filename): + """Testing creation of files depending on the type of the filenames.""" + binary_file = BinaryZlibFile(tmpdir.join(filename).strpath, mode='wb') + binary_file.close() diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/test_parallel.py b/llmeval-env/lib/python3.10/site-packages/joblib/test/test_parallel.py new file mode 100644 index 0000000000000000000000000000000000000000..f171375a872bccafc1711f935ec5369737bc617c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/test/test_parallel.py @@ -0,0 +1,2056 @@ +""" +Test the parallel module. +""" + +# Author: Gael Varoquaux +# Copyright (c) 2010-2011 Gael Varoquaux +# License: BSD Style, 3 clauses. + +import os +import sys +import time +import mmap +import weakref +import warnings +import threading +from traceback import format_exception +from math import sqrt +from time import sleep +from pickle import PicklingError +from contextlib import nullcontext +from multiprocessing import TimeoutError +import pytest + +import joblib +from joblib import parallel +from joblib import dump, load + +from joblib._multiprocessing_helpers import mp + +from joblib.test.common import np, with_numpy +from joblib.test.common import with_multiprocessing +from joblib.test.common import IS_PYPY, force_gc_pypy +from joblib.testing import (parametrize, raises, check_subprocess_call, + skipif, warns) + +if mp is not None: + # Loky is not available if multiprocessing is not + from joblib.externals.loky import get_reusable_executor + +from queue import Queue + +try: + import posix +except ImportError: + posix = None + +try: + from ._openmp_test_helper.parallel_sum import parallel_sum +except ImportError: + parallel_sum = None + +try: + import distributed +except ImportError: + distributed = None + +from joblib._parallel_backends import SequentialBackend +from joblib._parallel_backends import ThreadingBackend +from joblib._parallel_backends import MultiprocessingBackend +from joblib._parallel_backends import ParallelBackendBase +from joblib._parallel_backends import LokyBackend + +from joblib.parallel import Parallel, delayed +from joblib.parallel import parallel_config +from joblib.parallel import parallel_backend +from joblib.parallel import register_parallel_backend +from joblib.parallel import effective_n_jobs, cpu_count + +from joblib.parallel import mp, BACKENDS, DEFAULT_BACKEND + + +RETURN_GENERATOR_BACKENDS = BACKENDS.copy() +RETURN_GENERATOR_BACKENDS.pop("multiprocessing", None) + +ALL_VALID_BACKENDS = [None] + sorted(BACKENDS.keys()) +# Add instances of backend classes deriving from ParallelBackendBase +ALL_VALID_BACKENDS += [BACKENDS[backend_str]() for backend_str in BACKENDS] +if mp is None: + PROCESS_BACKENDS = [] +else: + PROCESS_BACKENDS = 
['multiprocessing', 'loky'] +PARALLEL_BACKENDS = PROCESS_BACKENDS + ['threading'] + +if hasattr(mp, 'get_context'): + # Custom multiprocessing context in Python 3.4+ + ALL_VALID_BACKENDS.append(mp.get_context('spawn')) + +DefaultBackend = BACKENDS[DEFAULT_BACKEND] + + +def get_workers(backend): + return getattr(backend, '_pool', getattr(backend, '_workers', None)) + + +def division(x, y): + return x / y + + +def square(x): + return x ** 2 + + +class MyExceptionWithFinickyInit(Exception): + """An exception class with non trivial __init__ + """ + def __init__(self, a, b, c, d): + pass + + +def exception_raiser(x, custom_exception=False): + if x == 7: + raise (MyExceptionWithFinickyInit('a', 'b', 'c', 'd') + if custom_exception else ValueError) + return x + + +def interrupt_raiser(x): + time.sleep(.05) + raise KeyboardInterrupt + + +def f(x, y=0, z=0): + """ A module-level function so that it can be spawn with + multiprocessing. + """ + return x ** 2 + y + z + + +def _active_backend_type(): + return type(parallel.get_active_backend()[0]) + + +def parallel_func(inner_n_jobs, backend): + return Parallel(n_jobs=inner_n_jobs, backend=backend)( + delayed(square)(i) for i in range(3)) + + +############################################################################### +def test_cpu_count(): + assert cpu_count() > 0 + + +def test_effective_n_jobs(): + assert effective_n_jobs() > 0 + + +@parametrize("context", [parallel_config, parallel_backend]) +@pytest.mark.parametrize( + "backend_n_jobs, expected_n_jobs", + [(3, 3), (-1, effective_n_jobs(n_jobs=-1)), (None, 1)], + ids=["positive-int", "negative-int", "None"] +) +@with_multiprocessing +def test_effective_n_jobs_None(context, backend_n_jobs, expected_n_jobs): + # check the number of effective jobs when `n_jobs=None` + # non-regression test for https://github.com/joblib/joblib/issues/984 + with context("threading", n_jobs=backend_n_jobs): + # when using a backend, the default of number jobs will be the one set + # in the backend + assert effective_n_jobs(n_jobs=None) == expected_n_jobs + # without any backend, None will default to a single job + assert effective_n_jobs(n_jobs=None) == 1 + + +############################################################################### +# Test parallel + +@parametrize('backend', ALL_VALID_BACKENDS) +@parametrize('n_jobs', [1, 2, -1, -2]) +@parametrize('verbose', [2, 11, 100]) +def test_simple_parallel(backend, n_jobs, verbose): + assert ([square(x) for x in range(5)] == + Parallel(n_jobs=n_jobs, backend=backend, + verbose=verbose)( + delayed(square)(x) for x in range(5))) + + +@parametrize('backend', ALL_VALID_BACKENDS) +def test_main_thread_renamed_no_warning(backend, monkeypatch): + # Check that no default backend relies on the name of the main thread: + # https://github.com/joblib/joblib/issues/180#issuecomment-253266247 + # Some programs use a different name for the main thread. This is the case + # for uWSGI apps for instance. + monkeypatch.setattr(target=threading.current_thread(), name='name', + value='some_new_name_for_the_main_thread') + + with warnings.catch_warnings(record=True) as warninfo: + results = Parallel(n_jobs=2, backend=backend)( + delayed(square)(x) for x in range(3)) + assert results == [0, 1, 4] + + # Due to the default parameters of LokyBackend, there is a chance that + # warninfo catches Warnings from worker timeouts. 
We remove it if it exists + warninfo = [w for w in warninfo if "worker timeout" not in str(w.message)] + + # The multiprocessing backend will raise a warning when detecting that is + # started from the non-main thread. Let's check that there is no false + # positive because of the name change. + assert len(warninfo) == 0 + + +def _assert_warning_nested(backend, inner_n_jobs, expected): + with warnings.catch_warnings(record=True) as warninfo: + warnings.simplefilter("always") + parallel_func(backend=backend, inner_n_jobs=inner_n_jobs) + + warninfo = [w.message for w in warninfo] + if expected: + if warninfo: + warnings_are_correct = all( + 'backed parallel loops cannot' in each.args[0] + for each in warninfo + ) + # With Python nogil, when the outer backend is threading, we might + # see more that one warning + warnings_have_the_right_length = ( + len(warninfo) >= 1 if getattr(sys.flags, 'nogil', False) + else len(warninfo) == 1) + return warnings_are_correct and warnings_have_the_right_length + + return False + else: + assert not warninfo + return True + + +@with_multiprocessing +@parametrize('parent_backend,child_backend,expected', [ + ('loky', 'multiprocessing', True), + ('loky', 'loky', False), + ('multiprocessing', 'multiprocessing', True), + ('multiprocessing', 'loky', True), + ('threading', 'multiprocessing', True), + ('threading', 'loky', True), +]) +def test_nested_parallel_warnings(parent_backend, child_backend, expected): + + # no warnings if inner_n_jobs=1 + Parallel(n_jobs=2, backend=parent_backend)( + delayed(_assert_warning_nested)( + backend=child_backend, inner_n_jobs=1, + expected=False) + for _ in range(5)) + + # warnings if inner_n_jobs != 1 and expected + res = Parallel(n_jobs=2, backend=parent_backend)( + delayed(_assert_warning_nested)( + backend=child_backend, inner_n_jobs=2, + expected=expected) + for _ in range(5)) + + # warning handling is not thread safe. One thread might see multiple + # warning or no warning at all. + if parent_backend == "threading": + if IS_PYPY and not any(res): + # Related to joblib#1426, should be removed once it is solved. 
+ pytest.xfail(reason="This test often fails in PyPy.") + assert any(res) + else: + assert all(res) + + +@with_multiprocessing +@parametrize('backend', ['loky', 'multiprocessing', 'threading']) +def test_background_thread_parallelism(backend): + is_run_parallel = [False] + + def background_thread(is_run_parallel): + with warnings.catch_warnings(record=True) as warninfo: + Parallel(n_jobs=2)( + delayed(sleep)(.1) for _ in range(4)) + print(len(warninfo)) + is_run_parallel[0] = len(warninfo) == 0 + + t = threading.Thread(target=background_thread, args=(is_run_parallel,)) + t.start() + t.join() + assert is_run_parallel[0] + + +def nested_loop(backend): + Parallel(n_jobs=2, backend=backend)( + delayed(square)(.01) for _ in range(2)) + + +@parametrize('child_backend', BACKENDS) +@parametrize('parent_backend', BACKENDS) +def test_nested_loop(parent_backend, child_backend): + Parallel(n_jobs=2, backend=parent_backend)( + delayed(nested_loop)(child_backend) for _ in range(2)) + + +def raise_exception(backend): + raise ValueError + + +@with_multiprocessing +def test_nested_loop_with_exception_with_loky(): + with raises(ValueError): + with Parallel(n_jobs=2, backend="loky") as parallel: + parallel([delayed(nested_loop)("loky"), + delayed(raise_exception)("loky")]) + + +def test_mutate_input_with_threads(): + """Input is mutable when using the threading backend""" + q = Queue(maxsize=5) + Parallel(n_jobs=2, backend="threading")( + delayed(q.put)(1) for _ in range(5)) + assert q.full() + + +@parametrize('n_jobs', [1, 2, 3]) +def test_parallel_kwargs(n_jobs): + """Check the keyword argument processing of pmap.""" + lst = range(10) + assert ([f(x, y=1) for x in lst] == + Parallel(n_jobs=n_jobs)(delayed(f)(x, y=1) for x in lst)) + + +@parametrize('backend', PARALLEL_BACKENDS) +def test_parallel_as_context_manager(backend): + lst = range(10) + expected = [f(x, y=1) for x in lst] + + with Parallel(n_jobs=4, backend=backend) as p: + # Internally a pool instance has been eagerly created and is managed + # via the context manager protocol + managed_backend = p._backend + + # We make call with the managed parallel object several times inside + # the managed block: + assert expected == p(delayed(f)(x, y=1) for x in lst) + assert expected == p(delayed(f)(x, y=1) for x in lst) + + # Those calls have all used the same pool instance: + if mp is not None: + assert get_workers(managed_backend) is get_workers(p._backend) + + # As soon as we exit the context manager block, the pool is terminated and + # no longer referenced from the parallel object: + if mp is not None: + assert get_workers(p._backend) is None + + # It's still possible to use the parallel instance in non-managed mode: + assert expected == p(delayed(f)(x, y=1) for x in lst) + if mp is not None: + assert get_workers(p._backend) is None + + +@with_multiprocessing +def test_parallel_pickling(): + """ Check that pmap captures the errors when it is passed an object + that cannot be pickled. 
+ """ + class UnpicklableObject(object): + def __reduce__(self): + raise RuntimeError('123') + + with raises(PicklingError, match=r"the task to send"): + Parallel(n_jobs=2, backend='loky')(delayed(id)( + UnpicklableObject()) for _ in range(10)) + + +@with_numpy +@with_multiprocessing +@parametrize('byteorder', ['<', '>', '=']) +def test_parallel_byteorder_corruption(byteorder): + + def inspect_byteorder(x): + return x, x.dtype.byteorder + + x = np.arange(6).reshape((2, 3)).view(f'{byteorder}i4') + + initial_np_byteorder = x.dtype.byteorder + + result = Parallel(n_jobs=2, backend='loky')( + delayed(inspect_byteorder)(x) for _ in range(3) + ) + + for x_returned, byteorder_in_worker in result: + assert byteorder_in_worker == initial_np_byteorder + assert byteorder_in_worker == x_returned.dtype.byteorder + np.testing.assert_array_equal(x, x_returned) + + +@parametrize('backend', PARALLEL_BACKENDS) +def test_parallel_timeout_success(backend): + # Check that timeout isn't thrown when function is fast enough + assert len(Parallel(n_jobs=2, backend=backend, timeout=30)( + delayed(sleep)(0.001) for x in range(10))) == 10 + + +@with_multiprocessing +@parametrize('backend', PARALLEL_BACKENDS) +def test_parallel_timeout_fail(backend): + # Check that timeout properly fails when function is too slow + with raises(TimeoutError): + Parallel(n_jobs=2, backend=backend, timeout=0.01)( + delayed(sleep)(10) for x in range(10)) + + +@with_multiprocessing +@parametrize('backend', PROCESS_BACKENDS) +def test_error_capture(backend): + # Check that error are captured, and that correct exceptions + # are raised. + if mp is not None: + with raises(ZeroDivisionError): + Parallel(n_jobs=2, backend=backend)( + [delayed(division)(x, y) + for x, y in zip((0, 1), (1, 0))]) + + with raises(KeyboardInterrupt): + Parallel(n_jobs=2, backend=backend)( + [delayed(interrupt_raiser)(x) for x in (1, 0)]) + + # Try again with the context manager API + with Parallel(n_jobs=2, backend=backend) as parallel: + assert get_workers(parallel._backend) is not None + original_workers = get_workers(parallel._backend) + + with raises(ZeroDivisionError): + parallel([delayed(division)(x, y) + for x, y in zip((0, 1), (1, 0))]) + + # The managed pool should still be available and be in a working + # state despite the previously raised (and caught) exception + assert get_workers(parallel._backend) is not None + + # The pool should have been interrupted and restarted: + assert get_workers(parallel._backend) is not original_workers + + assert ([f(x, y=1) for x in range(10)] == + parallel(delayed(f)(x, y=1) for x in range(10))) + + original_workers = get_workers(parallel._backend) + with raises(KeyboardInterrupt): + parallel([delayed(interrupt_raiser)(x) for x in (1, 0)]) + + # The pool should still be available despite the exception + assert get_workers(parallel._backend) is not None + + # The pool should have been interrupted and restarted: + assert get_workers(parallel._backend) is not original_workers + + assert ([f(x, y=1) for x in range(10)] == + parallel(delayed(f)(x, y=1) for x in range(10))), ( + parallel._iterating, parallel.n_completed_tasks, + parallel.n_dispatched_tasks, parallel._aborting + ) + + # Check that the inner pool has been terminated when exiting the + # context manager + assert get_workers(parallel._backend) is None + else: + with raises(KeyboardInterrupt): + Parallel(n_jobs=2)( + [delayed(interrupt_raiser)(x) for x in (1, 0)]) + + # wrapped exceptions should inherit from the class of the original + # exception to make it easy 
to catch them + with raises(ZeroDivisionError): + Parallel(n_jobs=2)( + [delayed(division)(x, y) for x, y in zip((0, 1), (1, 0))]) + + with raises(MyExceptionWithFinickyInit): + Parallel(n_jobs=2, verbose=0)( + (delayed(exception_raiser)(i, custom_exception=True) + for i in range(30))) + + +@with_multiprocessing +@parametrize('backend', BACKENDS) +def test_error_in_task_iterator(backend): + + def my_generator(raise_at=0): + for i in range(20): + if i == raise_at: + raise ValueError("Iterator Raising Error") + yield i + + with Parallel(n_jobs=2, backend=backend) as p: + # The error is raised in the pre-dispatch phase + with raises(ValueError, match="Iterator Raising Error"): + p(delayed(square)(i) for i in my_generator(raise_at=0)) + + # The error is raised when dispatching a new task after the + # pre-dispatch (likely to happen in a different thread) + with raises(ValueError, match="Iterator Raising Error"): + p(delayed(square)(i) for i in my_generator(raise_at=5)) + + # Same, but raises long after the pre-dispatch phase + with raises(ValueError, match="Iterator Raising Error"): + p(delayed(square)(i) for i in my_generator(raise_at=19)) + + +def consumer(queue, item): + queue.append('Consumed %s' % item) + + +@parametrize('backend', BACKENDS) +@parametrize('batch_size, expected_queue', + [(1, ['Produced 0', 'Consumed 0', + 'Produced 1', 'Consumed 1', + 'Produced 2', 'Consumed 2', + 'Produced 3', 'Consumed 3', + 'Produced 4', 'Consumed 4', + 'Produced 5', 'Consumed 5']), + (4, [ # First Batch + 'Produced 0', 'Produced 1', 'Produced 2', 'Produced 3', + 'Consumed 0', 'Consumed 1', 'Consumed 2', 'Consumed 3', + # Second batch + 'Produced 4', 'Produced 5', 'Consumed 4', 'Consumed 5'])]) +def test_dispatch_one_job(backend, batch_size, expected_queue): + """ Test that with only one job, Parallel does act as a iterator. + """ + queue = list() + + def producer(): + for i in range(6): + queue.append('Produced %i' % i) + yield i + + Parallel(n_jobs=1, batch_size=batch_size, backend=backend)( + delayed(consumer)(queue, x) for x in producer()) + assert queue == expected_queue + assert len(queue) == 12 + + +@with_multiprocessing +@parametrize('backend', PARALLEL_BACKENDS) +def test_dispatch_multiprocessing(backend): + """ Check that using pre_dispatch Parallel does indeed dispatch items + lazily. + """ + manager = mp.Manager() + queue = manager.list() + + def producer(): + for i in range(6): + queue.append('Produced %i' % i) + yield i + + Parallel(n_jobs=2, batch_size=1, pre_dispatch=3, backend=backend)( + delayed(consumer)(queue, 'any') for _ in producer()) + + queue_contents = list(queue) + assert queue_contents[0] == 'Produced 0' + + # Only 3 tasks are pre-dispatched out of 6. The 4th task is dispatched only + # after any of the first 3 jobs have completed. + first_consumption_index = queue_contents[:4].index('Consumed any') + assert first_consumption_index > -1 + + produced_3_index = queue_contents.index('Produced 3') # 4th task produced + assert produced_3_index > first_consumption_index + + assert len(queue) == 12 + + +def test_batching_auto_threading(): + # batching='auto' with the threading backend leaves the effective batch + # size to 1 (no batching) as it has been found to never be beneficial with + # this low-overhead backend. 
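+    # (Editor's note, added for clarity) automatic batching exists to amortize
+    # per-task dispatch overhead, which is dominated by pickling and IPC for
+    # process-based backends; threads share memory and avoid that cost, hence
+    # the fixed effective batch size of 1 asserted below.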
+ + with Parallel(n_jobs=2, batch_size='auto', backend='threading') as p: + p(delayed(id)(i) for i in range(5000)) # many very fast tasks + assert p._backend.compute_batch_size() == 1 + + +@with_multiprocessing +@parametrize('backend', PROCESS_BACKENDS) +def test_batching_auto_subprocesses(backend): + with Parallel(n_jobs=2, batch_size='auto', backend=backend) as p: + p(delayed(id)(i) for i in range(5000)) # many very fast tasks + + # It should be strictly larger than 1 but as we don't want heisen + # failures on clogged CI worker environment be safe and only check that + # it's a strictly positive number. + assert p._backend.compute_batch_size() > 0 + + +def test_exception_dispatch(): + """Make sure that exception raised during dispatch are indeed captured""" + with raises(ValueError): + Parallel(n_jobs=2, pre_dispatch=16, verbose=0)( + delayed(exception_raiser)(i) for i in range(30)) + + +def nested_function_inner(i): + Parallel(n_jobs=2)( + delayed(exception_raiser)(j) for j in range(30)) + + +def nested_function_outer(i): + Parallel(n_jobs=2)( + delayed(nested_function_inner)(j) for j in range(30)) + + +@with_multiprocessing +@parametrize('backend', PARALLEL_BACKENDS) +@pytest.mark.xfail(reason="https://github.com/joblib/loky/pull/255") +def test_nested_exception_dispatch(backend): + """Ensure errors for nested joblib cases gets propagated + + We rely on the Python 3 built-in __cause__ system that already + report this kind of information to the user. + """ + with raises(ValueError) as excinfo: + Parallel(n_jobs=2, backend=backend)( + delayed(nested_function_outer)(i) for i in range(30)) + + # Check that important information such as function names are visible + # in the final error message reported to the user + report_lines = format_exception(excinfo.type, excinfo.value, excinfo.tb) + report = "".join(report_lines) + assert 'nested_function_outer' in report + assert 'nested_function_inner' in report + assert 'exception_raiser' in report + + assert type(excinfo.value) is ValueError + + +class FakeParallelBackend(SequentialBackend): + """Pretends to run concurrently while running sequentially.""" + + def configure(self, n_jobs=1, parallel=None, **backend_args): + self.n_jobs = self.effective_n_jobs(n_jobs) + self.parallel = parallel + return n_jobs + + def effective_n_jobs(self, n_jobs=1): + if n_jobs < 0: + n_jobs = max(mp.cpu_count() + 1 + n_jobs, 1) + return n_jobs + + +def test_invalid_backend(): + with raises(ValueError, match="Invalid backend:"): + Parallel(backend='unit-testing') + + with raises(ValueError, match="Invalid backend:"): + with parallel_config(backend='unit-testing'): + pass + + with raises(ValueError, match="Invalid backend:"): + with parallel_config(backend='unit-testing'): + pass + + +@parametrize('backend', ALL_VALID_BACKENDS) +def test_invalid_njobs(backend): + with raises(ValueError) as excinfo: + Parallel(n_jobs=0, backend=backend)._initialize_backend() + assert "n_jobs == 0 in Parallel has no meaning" in str(excinfo.value) + + with raises(ValueError) as excinfo: + Parallel(n_jobs=0.5, backend=backend)._initialize_backend() + assert "n_jobs == 0 in Parallel has no meaning" in str(excinfo.value) + + with raises(ValueError) as excinfo: + Parallel(n_jobs="2.3", backend=backend)._initialize_backend() + assert "n_jobs could not be converted to int" in str(excinfo.value) + + with raises(ValueError) as excinfo: + Parallel(n_jobs="invalid_str", backend=backend)._initialize_backend() + assert "n_jobs could not be converted to int" in str(excinfo.value) + + 
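+# (Editor's illustrative sketch, not part of joblib's test suite.) The test
+# below exercises Parallel's coercion of string and float n_jobs values to
+# int. A minimal, self-contained usage example relying only on the public
+# API; the helper name is ours and the threading backend is chosen merely to
+# keep the example lightweight:
+def _example_njobs_string_is_coerced_to_int():
+    from joblib import Parallel, delayed
+    # n_jobs="2" behaves exactly like n_jobs=2 once the backend is configured.
+    results = Parallel(n_jobs="2", backend="threading")(
+        delayed(abs)(-i) for i in range(3))
+    assert results == [0, 1, 2]
+
+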
+@with_multiprocessing +@parametrize('backend', PARALLEL_BACKENDS) +@parametrize('n_jobs', ['2', 2.3, 2]) +def test_njobs_converted_to_int(backend, n_jobs): + p = Parallel(n_jobs=n_jobs, backend=backend) + assert p._effective_n_jobs() == 2 + + res = p(delayed(square)(i) for i in range(10)) + assert all(r == square(i) for i, r in enumerate(res)) + + +def test_register_parallel_backend(): + try: + register_parallel_backend("test_backend", FakeParallelBackend) + assert "test_backend" in BACKENDS + assert BACKENDS["test_backend"] == FakeParallelBackend + finally: + del BACKENDS["test_backend"] + + +def test_overwrite_default_backend(): + assert _active_backend_type() == DefaultBackend + try: + register_parallel_backend("threading", BACKENDS["threading"], + make_default=True) + assert _active_backend_type() == ThreadingBackend + finally: + # Restore the global default manually + parallel.DEFAULT_BACKEND = DEFAULT_BACKEND + assert _active_backend_type() == DefaultBackend + + +@skipif(mp is not None, reason="Only without multiprocessing") +def test_backend_no_multiprocessing(): + with warns(UserWarning, + match="joblib backend '.*' is not available on.*"): + Parallel(backend='loky')(delayed(square)(i) for i in range(3)) + + # The below should now work without problems + with parallel_config(backend='loky'): + Parallel()(delayed(square)(i) for i in range(3)) + + +def check_backend_context_manager(context, backend_name): + with context(backend_name, n_jobs=3): + active_backend, active_n_jobs = parallel.get_active_backend() + assert active_n_jobs == 3 + assert effective_n_jobs(3) == 3 + p = Parallel() + assert p.n_jobs == 3 + if backend_name == 'multiprocessing': + assert type(active_backend) is MultiprocessingBackend + assert type(p._backend) is MultiprocessingBackend + elif backend_name == 'loky': + assert type(active_backend) is LokyBackend + assert type(p._backend) is LokyBackend + elif backend_name == 'threading': + assert type(active_backend) is ThreadingBackend + assert type(p._backend) is ThreadingBackend + elif backend_name.startswith('test_'): + assert type(active_backend) is FakeParallelBackend + assert type(p._backend) is FakeParallelBackend + + +all_backends_for_context_manager = PARALLEL_BACKENDS[:] +all_backends_for_context_manager.extend( + ['test_backend_%d' % i for i in range(3)] +) + + +@with_multiprocessing +@parametrize('backend', all_backends_for_context_manager) +@parametrize('context', [parallel_backend, parallel_config]) +def test_backend_context_manager(monkeypatch, backend, context): + if backend not in BACKENDS: + monkeypatch.setitem(BACKENDS, backend, FakeParallelBackend) + + assert _active_backend_type() == DefaultBackend + # check that this possible to switch parallel backends sequentially + check_backend_context_manager(context, backend) + + # The default backend is restored + assert _active_backend_type() == DefaultBackend + + # Check that context manager switching is thread safe: + Parallel(n_jobs=2, backend='threading')( + delayed(check_backend_context_manager)(context, b) + for b in all_backends_for_context_manager if not b) + + # The default backend is again restored + assert _active_backend_type() == DefaultBackend + + +class ParameterizedParallelBackend(SequentialBackend): + """Pretends to run conncurrently while running sequentially.""" + + def __init__(self, param=None): + if param is None: + raise ValueError('param should not be None') + self.param = param + + +@parametrize("context", [parallel_config, parallel_backend]) +def 
test_parameterized_backend_context_manager(monkeypatch, context): + monkeypatch.setitem(BACKENDS, 'param_backend', + ParameterizedParallelBackend) + assert _active_backend_type() == DefaultBackend + + with context('param_backend', param=42, n_jobs=3): + active_backend, active_n_jobs = parallel.get_active_backend() + assert type(active_backend) is ParameterizedParallelBackend + assert active_backend.param == 42 + assert active_n_jobs == 3 + p = Parallel() + assert p.n_jobs == 3 + assert p._backend is active_backend + results = p(delayed(sqrt)(i) for i in range(5)) + assert results == [sqrt(i) for i in range(5)] + + # The default backend is again restored + assert _active_backend_type() == DefaultBackend + + +@parametrize("context", [parallel_config, parallel_backend]) +def test_directly_parameterized_backend_context_manager(context): + assert _active_backend_type() == DefaultBackend + + # Check that it's possible to pass a backend instance directly, + # without registration + with context(ParameterizedParallelBackend(param=43), n_jobs=5): + active_backend, active_n_jobs = parallel.get_active_backend() + assert type(active_backend) is ParameterizedParallelBackend + assert active_backend.param == 43 + assert active_n_jobs == 5 + p = Parallel() + assert p.n_jobs == 5 + assert p._backend is active_backend + results = p(delayed(sqrt)(i) for i in range(5)) + assert results == [sqrt(i) for i in range(5)] + + # The default backend is again restored + assert _active_backend_type() == DefaultBackend + + +def sleep_and_return_pid(): + sleep(.1) + return os.getpid() + + +def get_nested_pids(): + assert _active_backend_type() == ThreadingBackend + # Assert that the nested backend does not change the default number of + # jobs used in Parallel + assert Parallel()._effective_n_jobs() == 1 + + # Assert that the tasks are running only on one process + return Parallel(n_jobs=2)(delayed(sleep_and_return_pid)() + for _ in range(2)) + + +class MyBackend(joblib._parallel_backends.LokyBackend): + """Backend to test backward compatibility with older backends""" + def get_nested_backend(self, ): + # Older backends only return a backend, without n_jobs indications. + return super(MyBackend, self).get_nested_backend()[0] + + +register_parallel_backend('back_compat_backend', MyBackend) + + +@with_multiprocessing +@parametrize('backend', ['threading', 'loky', 'multiprocessing', + 'back_compat_backend']) +@parametrize("context", [parallel_config, parallel_backend]) +def test_nested_backend_context_manager(context, backend): + # Check that by default, nested parallel calls will always use the + # ThreadingBackend + + with context(backend): + pid_groups = Parallel(n_jobs=2)( + delayed(get_nested_pids)() + for _ in range(10) + ) + for pid_group in pid_groups: + assert len(set(pid_group)) == 1 + + +@with_multiprocessing +@parametrize('n_jobs', [2, -1, None]) +@parametrize('backend', PARALLEL_BACKENDS) +@parametrize("context", [parallel_config, parallel_backend]) +def test_nested_backend_in_sequential(backend, n_jobs, context): + # Check that by default, nested parallel calls will always use the + # ThreadingBackend + + def check_nested_backend(expected_backend_type, expected_n_job): + # Assert that the sequential backend at top level, does not change the + # backend for nested calls. 
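+        # (Editor's note) BACKENDS maps backend names to backend classes, so
+        # the assertion below compares the *type* of the active backend rather
+        # than a specific instance.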
+ assert _active_backend_type() == BACKENDS[expected_backend_type] + + # Assert that the nested backend in SequentialBackend does not change + # the default number of jobs used in Parallel + expected_n_job = effective_n_jobs(expected_n_job) + assert Parallel()._effective_n_jobs() == expected_n_job + + Parallel(n_jobs=1)( + delayed(check_nested_backend)(DEFAULT_BACKEND, 1) + for _ in range(10) + ) + + with context(backend, n_jobs=n_jobs): + Parallel(n_jobs=1)( + delayed(check_nested_backend)(backend, n_jobs) + for _ in range(10) + ) + + +def check_nesting_level(context, inner_backend, expected_level): + with context(inner_backend) as ctx: + if context is parallel_config: + backend = ctx["backend"] + if context is parallel_backend: + backend = ctx[0] + assert backend.nesting_level == expected_level + + +@with_multiprocessing +@parametrize('outer_backend', PARALLEL_BACKENDS) +@parametrize('inner_backend', PARALLEL_BACKENDS) +@parametrize("context", [parallel_config, parallel_backend]) +def test_backend_nesting_level(context, outer_backend, inner_backend): + # Check that the nesting level for the backend is correctly set + check_nesting_level(context, outer_backend, 0) + + Parallel(n_jobs=2, backend=outer_backend)( + delayed(check_nesting_level)(context, inner_backend, 1) + for _ in range(10) + ) + + with context(inner_backend, n_jobs=2): + Parallel()(delayed(check_nesting_level)(context, inner_backend, 1) + for _ in range(10)) + + +@with_multiprocessing +@parametrize("context", [parallel_config, parallel_backend]) +@parametrize('with_retrieve_callback', [True, False]) +def test_retrieval_context(context, with_retrieve_callback): + import contextlib + + class MyBackend(ThreadingBackend): + i = 0 + supports_retrieve_callback = with_retrieve_callback + + @contextlib.contextmanager + def retrieval_context(self): + self.i += 1 + yield + + register_parallel_backend("retrieval", MyBackend) + + def nested_call(n): + return Parallel(n_jobs=2)(delayed(id)(i) for i in range(n)) + + with context("retrieval") as ctx: + Parallel(n_jobs=2)( + delayed(nested_call)(i) + for i in range(5) + ) + if context is parallel_config: + assert ctx["backend"].i == 1 + if context is parallel_backend: + assert ctx[0].i == 1 + + +############################################################################### +# Test helpers + +@parametrize('batch_size', [0, -1, 1.42]) +def test_invalid_batch_size(batch_size): + with raises(ValueError): + Parallel(batch_size=batch_size) + + +@parametrize('n_tasks, n_jobs, pre_dispatch, batch_size', + [(2, 2, 'all', 'auto'), + (2, 2, 'n_jobs', 'auto'), + (10, 2, 'n_jobs', 'auto'), + (517, 2, 'n_jobs', 'auto'), + (10, 2, 'n_jobs', 'auto'), + (10, 4, 'n_jobs', 'auto'), + (200, 12, 'n_jobs', 'auto'), + (25, 12, '2 * n_jobs', 1), + (250, 12, 'all', 1), + (250, 12, '2 * n_jobs', 7), + (200, 12, '2 * n_jobs', 'auto')]) +def test_dispatch_race_condition(n_tasks, n_jobs, pre_dispatch, batch_size): + # Check that using (async-)dispatch does not yield a race condition on the + # iterable generator that is not thread-safe natively. 
+ # This is a non-regression test for the "Pool seems closed" class of error + params = {'n_jobs': n_jobs, 'pre_dispatch': pre_dispatch, + 'batch_size': batch_size} + expected = [square(i) for i in range(n_tasks)] + results = Parallel(**params)(delayed(square)(i) for i in range(n_tasks)) + assert results == expected + + +@with_multiprocessing +def test_default_mp_context(): + mp_start_method = mp.get_start_method() + p = Parallel(n_jobs=2, backend='multiprocessing') + context = p._backend_args.get('context') + start_method = context.get_start_method() + assert start_method == mp_start_method + + +@with_numpy +@with_multiprocessing +@parametrize('backend', PROCESS_BACKENDS) +def test_no_blas_crash_or_freeze_with_subprocesses(backend): + if backend == 'multiprocessing': + # Use the spawn backend that is both robust and available on all + # platforms + backend = mp.get_context('spawn') + + # Check that on recent Python version, the 'spawn' start method can make + # it possible to use multiprocessing in conjunction of any BLAS + # implementation that happens to be used by numpy with causing a freeze or + # a crash + rng = np.random.RandomState(42) + + # call BLAS DGEMM to force the initialization of the internal thread-pool + # in the main process + a = rng.randn(1000, 1000) + np.dot(a, a.T) + + # check that the internal BLAS thread-pool is not in an inconsistent state + # in the worker processes managed by multiprocessing + Parallel(n_jobs=2, backend=backend)( + delayed(np.dot)(a, a.T) for i in range(2)) + + +UNPICKLABLE_CALLABLE_SCRIPT_TEMPLATE_NO_MAIN = """\ +from joblib import Parallel, delayed + +def square(x): + return x ** 2 + +backend = "{}" +if backend == "spawn": + from multiprocessing import get_context + backend = get_context(backend) + +print(Parallel(n_jobs=2, backend=backend)( + delayed(square)(i) for i in range(5))) +""" + + +@with_multiprocessing +@parametrize('backend', PROCESS_BACKENDS) +def test_parallel_with_interactively_defined_functions(backend): + # When using the "-c" flag, interactive functions defined in __main__ + # should work with any backend. + if backend == "multiprocessing" and mp.get_start_method() != "fork": + pytest.skip("Require fork start method to use interactively defined " + "functions with multiprocessing.") + code = UNPICKLABLE_CALLABLE_SCRIPT_TEMPLATE_NO_MAIN.format(backend) + check_subprocess_call( + [sys.executable, '-c', code], timeout=10, + stdout_regex=r'\[0, 1, 4, 9, 16\]') + + +UNPICKLABLE_CALLABLE_SCRIPT_TEMPLATE_MAIN = """\ +import sys +# Make sure that joblib is importable in the subprocess launching this +# script. 
This is needed in case we run the tests from the joblib root +# folder without having installed joblib +sys.path.insert(0, {joblib_root_folder!r}) + +from joblib import Parallel, delayed + +def run(f, x): + return f(x) + +{define_func} + +if __name__ == "__main__": + backend = "{backend}" + if backend == "spawn": + from multiprocessing import get_context + backend = get_context(backend) + + callable_position = "{callable_position}" + if callable_position == "delayed": + print(Parallel(n_jobs=2, backend=backend)( + delayed(square)(i) for i in range(5))) + elif callable_position == "args": + print(Parallel(n_jobs=2, backend=backend)( + delayed(run)(square, i) for i in range(5))) + else: + print(Parallel(n_jobs=2, backend=backend)( + delayed(run)(f=square, x=i) for i in range(5))) +""" + +SQUARE_MAIN = """\ +def square(x): + return x ** 2 +""" +SQUARE_LOCAL = """\ +def gen_square(): + def square(x): + return x ** 2 + return square +square = gen_square() +""" +SQUARE_LAMBDA = """\ +square = lambda x: x ** 2 +""" + + +@with_multiprocessing +@parametrize('backend', PROCESS_BACKENDS + ([] if mp is None else ['spawn'])) +@parametrize('define_func', [SQUARE_MAIN, SQUARE_LOCAL, SQUARE_LAMBDA]) +@parametrize('callable_position', ['delayed', 'args', 'kwargs']) +def test_parallel_with_unpicklable_functions_in_args( + backend, define_func, callable_position, tmpdir): + if backend in ['multiprocessing', 'spawn'] and ( + define_func != SQUARE_MAIN or sys.platform == "win32"): + pytest.skip("Not picklable with pickle") + code = UNPICKLABLE_CALLABLE_SCRIPT_TEMPLATE_MAIN.format( + define_func=define_func, backend=backend, + callable_position=callable_position, + joblib_root_folder=os.path.dirname(os.path.dirname(joblib.__file__))) + code_file = tmpdir.join("unpicklable_func_script.py") + code_file.write(code) + check_subprocess_call( + [sys.executable, code_file.strpath], timeout=10, + stdout_regex=r'\[0, 1, 4, 9, 16\]') + + +INTERACTIVE_DEFINED_FUNCTION_AND_CLASS_SCRIPT_CONTENT = """\ +import sys +import faulthandler +# Make sure that joblib is importable in the subprocess launching this +# script. This is needed in case we run the tests from the joblib root +# folder without having installed joblib +sys.path.insert(0, {joblib_root_folder!r}) + +from joblib import Parallel, delayed +from functools import partial + +class MyClass: + '''Class defined in the __main__ namespace''' + def __init__(self, value): + self.value = value + + +def square(x, ignored=None, ignored2=None): + '''Function defined in the __main__ namespace''' + return x.value ** 2 + + +square2 = partial(square, ignored2='something') + +# Here, we do not need the `if __name__ == "__main__":` safeguard when +# using the default `loky` backend (even on Windows). + +# To make debugging easier +faulthandler.dump_traceback_later(30, exit=True) + +# The following baroque function call is meant to check that joblib +# introspection rightfully uses cloudpickle instead of the (faster) pickle +# module of the standard library when necessary. In particular cloudpickle is +# necessary for functions and instances of classes interactively defined in the +# __main__ module. 
+ +print(Parallel(backend="loky", n_jobs=2)( + delayed(square2)(MyClass(i), ignored=[dict(a=MyClass(1))]) + for i in range(5) +)) +""".format(joblib_root_folder=os.path.dirname( + os.path.dirname(joblib.__file__))) + + +@with_multiprocessing +def test_parallel_with_interactively_defined_functions_loky(tmpdir): + # loky accepts interactive functions defined in __main__ and does not + # require if __name__ == '__main__' even when the __main__ module is + # defined by the result of the execution of a filesystem script. + script = tmpdir.join('joblib_interactively_defined_function.py') + script.write(INTERACTIVE_DEFINED_FUNCTION_AND_CLASS_SCRIPT_CONTENT) + check_subprocess_call( + [sys.executable, script.strpath], + stdout_regex=r'\[0, 1, 4, 9, 16\]', + timeout=None, # rely on faulthandler to kill the process + ) + + +INTERACTIVELY_DEFINED_SUBCLASS_WITH_METHOD_SCRIPT_CONTENT = """\ +import sys +# Make sure that joblib is importable in the subprocess launching this +# script. This is needed in case we run the tests from the joblib root +# folder without having installed joblib +sys.path.insert(0, {joblib_root_folder!r}) + +from joblib import Parallel, delayed, hash +import multiprocessing as mp +mp.util.log_to_stderr(5) + +class MyList(list): + '''MyList is interactively defined by MyList.append is a built-in''' + def __hash__(self): + # XXX: workaround limitation in cloudpickle + return hash(self).__hash__() + +l = MyList() + +print(Parallel(backend="loky", n_jobs=2)( + delayed(l.append)(i) for i in range(3) +)) +""".format(joblib_root_folder=os.path.dirname( + os.path.dirname(joblib.__file__))) + + +@with_multiprocessing +def test_parallel_with_interactively_defined_bound_method_loky(tmpdir): + script = tmpdir.join('joblib_interactive_bound_method_script.py') + script.write(INTERACTIVELY_DEFINED_SUBCLASS_WITH_METHOD_SCRIPT_CONTENT) + check_subprocess_call([sys.executable, script.strpath], + stdout_regex=r'\[None, None, None\]', + stderr_regex=r'LokyProcess', + timeout=15) + + +def test_parallel_with_exhausted_iterator(): + exhausted_iterator = iter([]) + assert Parallel(n_jobs=2)(exhausted_iterator) == [] + + +def _cleanup_worker(): + """Helper function to force gc in each worker.""" + force_gc_pypy() + time.sleep(.1) + + +def check_memmap(a): + if not isinstance(a, np.memmap): + raise TypeError('Expected np.memmap instance, got %r', + type(a)) + return a.copy() # return a regular array instead of a memmap + + +@with_numpy +@with_multiprocessing +@parametrize('backend', PROCESS_BACKENDS) +def test_auto_memmap_on_arrays_from_generator(backend): + # Non-regression test for a problem with a bad interaction between the + # GC collecting arrays recently created during iteration inside the + # parallel dispatch loop and the auto-memmap feature of Parallel. + # See: https://github.com/joblib/joblib/pull/294 + def generate_arrays(n): + for i in range(n): + yield np.ones(10, dtype=np.float32) * i + # Use max_nbytes=1 to force the use of memory-mapping even for small + # arrays + results = Parallel(n_jobs=2, max_nbytes=1, backend=backend)( + delayed(check_memmap)(a) for a in generate_arrays(100)) + for result, expected in zip(results, generate_arrays(len(results))): + np.testing.assert_array_equal(expected, result) + + # Second call to force loky to adapt the executor by growing the number + # of worker processes. This is a non-regression test for: + # https://github.com/joblib/joblib/issues/629. 
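+    # (Editor's illustrative note) outside of tests, the same auto-memmapping
+    # is typically enabled with a human-readable threshold, e.g.
+    # Parallel(n_jobs=2, max_nbytes='1M'): arrays larger than the threshold
+    # are dumped to a temporary file and handed to the workers as np.memmap
+    # views.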
+ results = Parallel(n_jobs=4, max_nbytes=1, backend=backend)( + delayed(check_memmap)(a) for a in generate_arrays(100)) + for result, expected in zip(results, generate_arrays(len(results))): + np.testing.assert_array_equal(expected, result) + + +def identity(arg): + return arg + + +@with_numpy +@with_multiprocessing +def test_memmap_with_big_offset(tmpdir): + fname = tmpdir.join('test.mmap').strpath + size = mmap.ALLOCATIONGRANULARITY + obj = [np.zeros(size, dtype='uint8'), np.ones(size, dtype='uint8')] + dump(obj, fname) + memmap = load(fname, mmap_mode='r') + result, = Parallel(n_jobs=2)(delayed(identity)(memmap) for _ in [0]) + assert isinstance(memmap[1], np.memmap) + assert memmap[1].offset > size + np.testing.assert_array_equal(obj, result) + + +def test_warning_about_timeout_not_supported_by_backend(): + with warnings.catch_warnings(record=True) as warninfo: + Parallel(n_jobs=1, timeout=1)(delayed(square)(i) for i in range(50)) + assert len(warninfo) == 1 + w = warninfo[0] + assert isinstance(w.message, UserWarning) + assert str(w.message) == ( + "The backend class 'SequentialBackend' does not support timeout. " + "You have set 'timeout=1' in Parallel but the 'timeout' parameter " + "will not be used.") + + +def set_list_value(input_list, index, value): + input_list[index] = value + return value + + +@pytest.mark.parametrize('n_jobs', [1, 2, 4]) +def test_parallel_return_order_with_return_as_generator_parameter(n_jobs): + # This test inserts values in a list in some expected order + # in sequential computing, and then checks that this order has been + # respected by Parallel output generator. + input_list = [0] * 5 + result = Parallel(n_jobs=n_jobs, return_as="generator", + backend='threading')( + delayed(set_list_value)(input_list, i, i) for i in range(5)) + + # Ensure that all the tasks are completed before checking the result + result = list(result) + + assert all(v == r for v, r in zip(input_list, result)) + + +def _sqrt_with_delay(e, delay): + if delay: + sleep(30) + return sqrt(e) + + +def _test_parallel_unordered_generator_returns_fastest_first(backend, n_jobs): + # This test submits 10 tasks, but the second task is super slow. This test + # checks that the 9 other tasks return before the slow task is done, when + # `return_as` parameter is set to `'generator_unordered'` + result = Parallel(n_jobs=n_jobs, return_as="generator_unordered", + backend=backend)( + delayed(_sqrt_with_delay)(i**2, (i == 1)) for i in range(10)) + + quickly_returned = sorted(next(result) for _ in range(9)) + + expected_quickly_returned = [0] + list(range(2, 10)) + + assert all( + v == r for v, r in zip(expected_quickly_returned, quickly_returned) + ) + + del result + force_gc_pypy() + + +@pytest.mark.parametrize('n_jobs', [2, 4]) +# NB: for this test to work, the backend must be allowed to process tasks +# concurrently, so at least two jobs with a non-sequential backend are +# mandatory. 
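+# (Editor's note) return_as="generator_unordered" yields results in completion
+# order, unlike return_as="generator" which preserves submission order; this
+# is what allows the helper above to consume the nine fast results while the
+# deliberately slow task is still running.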
+@with_multiprocessing +@parametrize('backend', set(RETURN_GENERATOR_BACKENDS) - {"sequential"}) +def test_parallel_unordered_generator_returns_fastest_first(backend, n_jobs): + _test_parallel_unordered_generator_returns_fastest_first(backend, n_jobs) + + +@pytest.mark.parametrize('n_jobs', [2, -1]) +@parametrize("context", [parallel_config, parallel_backend]) +@skipif(distributed is None, reason='This test requires dask') +def test_parallel_unordered_generator_returns_fastest_first_with_dask( + n_jobs, context +): + with distributed.Client( + n_workers=2, threads_per_worker=2 + ), context("dask"): + _test_parallel_unordered_generator_returns_fastest_first(None, n_jobs) + + +@parametrize('backend', ALL_VALID_BACKENDS) +@parametrize('n_jobs', [1, 2, -2, -1]) +def test_abort_backend(n_jobs, backend): + delays = ["a"] + [10] * 100 + with raises(TypeError): + t_start = time.time() + Parallel(n_jobs=n_jobs, backend=backend)( + delayed(time.sleep)(i) for i in delays) + dt = time.time() - t_start + assert dt < 20 + + +def get_large_object(arg): + result = np.ones(int(5 * 1e5), dtype=bool) + result[0] = False + return result + + +def _test_deadlock_with_generator(backend, return_as, n_jobs): + # Non-regression test for a race condition in the backends when the pickler + # is delayed by a large object. + with Parallel(n_jobs=n_jobs, backend=backend, + return_as=return_as) as parallel: + result = parallel(delayed(get_large_object)(i) for i in range(10)) + next(result) + next(result) + del result + # The gc in pypy can be delayed. Force it to make sure this test does + # not cause timeout on the CI. + force_gc_pypy() + + +@with_numpy +@parametrize('backend', RETURN_GENERATOR_BACKENDS) +@parametrize('return_as', ["generator", "generator_unordered"]) +@parametrize('n_jobs', [1, 2, -2, -1]) +def test_deadlock_with_generator(backend, return_as, n_jobs): + _test_deadlock_with_generator(backend, return_as, n_jobs) + + +@with_numpy +@pytest.mark.parametrize('n_jobs', [2, -1]) +@parametrize('return_as', ["generator", "generator_unordered"]) +@parametrize("context", [parallel_config, parallel_backend]) +@skipif(distributed is None, reason='This test requires dask') +def test_deadlock_with_generator_and_dask(context, return_as, n_jobs): + with distributed.Client( + n_workers=2, threads_per_worker=2 + ), context("dask"): + _test_deadlock_with_generator(None, return_as, n_jobs) + + +@parametrize('backend', RETURN_GENERATOR_BACKENDS) +@parametrize('return_as', ["generator", "generator_unordered"]) +@parametrize('n_jobs', [1, 2, -2, -1]) +def test_multiple_generator_call(backend, return_as, n_jobs): + # Non-regression test that ensures the dispatch of the tasks starts + # immediately when Parallel.__call__ is called. This test relies on the + # assumption that only one generator can be submitted at a time. + with raises(RuntimeError, + match="This Parallel instance is already running"): + parallel = Parallel(n_jobs, backend=backend, return_as=return_as) + g = parallel(delayed(sleep)(1) for _ in range(10)) # noqa: F841 + t_start = time.time() + gen2 = parallel(delayed(id)(i) for i in range(100)) # noqa: F841 + + # Make sure that the error is raised quickly + assert time.time() - t_start < 2, ( + "The error should be raised immediatly when submitting a new task " + "but it took more than 2s." + ) + + del g + # The gc in pypy can be delayed. Force it to make sure this test does not + # cause timeout on the CI. 
+ force_gc_pypy() + + +@parametrize('backend', RETURN_GENERATOR_BACKENDS) +@parametrize('return_as', ["generator", "generator_unordered"]) +@parametrize('n_jobs', [1, 2, -2, -1]) +def test_multiple_generator_call_managed(backend, return_as, n_jobs): + # Non-regression test that ensures the dispatch of the tasks starts + # immediately when Parallel.__call__ is called. This test relies on the + # assumption that only one generator can be submitted at a time. + with Parallel(n_jobs, backend=backend, + return_as=return_as) as parallel: + g = parallel(delayed(sleep)(10) for _ in range(10)) # noqa: F841 + t_start = time.time() + with raises(RuntimeError, + match="This Parallel instance is already running"): + g2 = parallel(delayed(id)(i) for i in range(100)) # noqa: F841 + + # Make sure that the error is raised quickly + assert time.time() - t_start < 2, ( + "The error should be raised immediatly when submitting a new task " + "but it took more than 2s." + ) + + # The gc in pypy can be delayed. Force it to make sure this test does not + # cause timeout on the CI. + del g + force_gc_pypy() + + +@parametrize('backend', RETURN_GENERATOR_BACKENDS) +@parametrize('return_as_1', ["generator", "generator_unordered"]) +@parametrize('return_as_2', ["generator", "generator_unordered"]) +@parametrize('n_jobs', [1, 2, -2, -1]) +def test_multiple_generator_call_separated( + backend, return_as_1, return_as_2, n_jobs +): + # Check that for separated Parallel, both tasks are correctly returned. + g = Parallel(n_jobs, backend=backend, return_as=return_as_1)( + delayed(sqrt)(i ** 2) for i in range(10) + ) + g2 = Parallel(n_jobs, backend=backend, return_as=return_as_2)( + delayed(sqrt)(i ** 2) for i in range(10, 20) + ) + + if return_as_1 == "generator_unordered": + g = sorted(g) + + if return_as_2 == "generator_unordered": + g2 = sorted(g2) + + assert all(res == i for res, i in zip(g, range(10))) + assert all(res == i for res, i in zip(g2, range(10, 20))) + + +@parametrize('backend, error', [ + ('loky', True), + ('threading', False), + ('sequential', False), +]) +@parametrize('return_as_1', ["generator", "generator_unordered"]) +@parametrize('return_as_2', ["generator", "generator_unordered"]) +def test_multiple_generator_call_separated_gc( + backend, return_as_1, return_as_2, error +): + + if (backend == 'loky') and (mp is None): + pytest.skip("Requires multiprocessing") + + # Check that in loky, only one call can be run at a time with + # a single executor. + parallel = Parallel(2, backend=backend, return_as=return_as_1) + g = parallel(delayed(sleep)(10) for i in range(10)) + g_wr = weakref.finalize(g, lambda: print("Generator collected")) + ctx = ( + raises(RuntimeError, match="The executor underlying Parallel") + if error else nullcontext() + ) + with ctx: + # For loky, this call will raise an error as the gc of the previous + # generator will shutdown the shared executor. + # For the other backends, as the worker pools are not shared between + # the two calls, this should proceed correctly. + t_start = time.time() + g = Parallel(2, backend=backend, return_as=return_as_2)( + delayed(sqrt)(i ** 2) for i in range(10, 20) + ) + + # The gc in pypy can be delayed. Force it to test the behavior when it + # will eventually be collected. 
+ force_gc_pypy() + + if return_as_2 == "generator_unordered": + g = sorted(g) + + assert all(res == i for res, i in zip(g, range(10, 20))) + + assert time.time() - t_start < 5 + + # Make sure that the computation are stopped for the gc'ed generator + retry = 0 + while g_wr.alive and retry < 3: + retry += 1 + time.sleep(.5) + assert time.time() - t_start < 5 + + if parallel._effective_n_jobs() != 1: + # check that the first parallel object is aborting (the final _aborted + # state might be delayed). + assert parallel._aborting + + +@with_numpy +@with_multiprocessing +@parametrize('backend', PROCESS_BACKENDS) +def test_memmapping_leaks(backend, tmpdir): + # Non-regression test for memmapping backends. Ensure that the data + # does not stay too long in memory + tmpdir = tmpdir.strpath + + # Use max_nbytes=1 to force the use of memory-mapping even for small + # arrays + with Parallel(n_jobs=2, max_nbytes=1, backend=backend, + temp_folder=tmpdir) as p: + p(delayed(check_memmap)(a) for a in [np.random.random(10)] * 2) + + # The memmap folder should not be clean in the context scope + assert len(os.listdir(tmpdir)) > 0 + + # Cleaning of the memmap folder is triggered by the garbage + # collection. With pypy the garbage collection has been observed to be + # delayed, sometimes up until the shutdown of the interpreter. This + # cleanup job executed in the worker ensures that it's triggered + # immediately. + p(delayed(_cleanup_worker)() for _ in range(2)) + + # Make sure that the shared memory is cleaned at the end when we exit + # the context + for _ in range(100): + if not os.listdir(tmpdir): + break + sleep(.1) + else: + raise AssertionError('temporary directory of Parallel was not removed') + + # Make sure that the shared memory is cleaned at the end of a call + p = Parallel(n_jobs=2, max_nbytes=1, backend=backend) + p(delayed(check_memmap)(a) for a in [np.random.random(10)] * 2) + p(delayed(_cleanup_worker)() for _ in range(2)) + + for _ in range(100): + if not os.listdir(tmpdir): + break + sleep(.1) + else: + raise AssertionError('temporary directory of Parallel was not removed') + + +@parametrize('backend', + ([None, 'threading'] if mp is None + else [None, 'loky', 'threading']) + ) +def test_lambda_expression(backend): + # cloudpickle is used to pickle delayed callables + results = Parallel(n_jobs=2, backend=backend)( + delayed(lambda x: x ** 2)(i) for i in range(10)) + assert results == [i ** 2 for i in range(10)] + + +@with_multiprocessing +@parametrize('backend', PROCESS_BACKENDS) +def test_backend_batch_statistics_reset(backend): + """Test that a parallel backend correctly resets its batch statistics.""" + n_jobs = 2 + n_inputs = 500 + task_time = 2. 
/ n_inputs + + p = Parallel(verbose=10, n_jobs=n_jobs, backend=backend) + p(delayed(time.sleep)(task_time) for i in range(n_inputs)) + assert (p._backend._effective_batch_size == + p._backend._DEFAULT_EFFECTIVE_BATCH_SIZE) + assert (p._backend._smoothed_batch_duration == + p._backend._DEFAULT_SMOOTHED_BATCH_DURATION) + + p(delayed(time.sleep)(task_time) for i in range(n_inputs)) + assert (p._backend._effective_batch_size == + p._backend._DEFAULT_EFFECTIVE_BATCH_SIZE) + assert (p._backend._smoothed_batch_duration == + p._backend._DEFAULT_SMOOTHED_BATCH_DURATION) + + +@with_multiprocessing +@parametrize("context", [parallel_config, parallel_backend]) +def test_backend_hinting_and_constraints(context): + for n_jobs in [1, 2, -1]: + assert type(Parallel(n_jobs=n_jobs)._backend) == DefaultBackend + + p = Parallel(n_jobs=n_jobs, prefer='threads') + assert type(p._backend) is ThreadingBackend + + p = Parallel(n_jobs=n_jobs, prefer='processes') + assert type(p._backend) is DefaultBackend + + p = Parallel(n_jobs=n_jobs, require='sharedmem') + assert type(p._backend) is ThreadingBackend + + # Explicit backend selection can override backend hinting although it + # is useless to pass a hint when selecting a backend. + p = Parallel(n_jobs=2, backend='loky', prefer='threads') + assert type(p._backend) is LokyBackend + + with context('loky', n_jobs=2): + # Explicit backend selection by the user with the context manager + # should be respected when combined with backend hints only. + p = Parallel(prefer='threads') + assert type(p._backend) is LokyBackend + assert p.n_jobs == 2 + + with context('loky', n_jobs=2): + # Locally hard-coded n_jobs value is respected. + p = Parallel(n_jobs=3, prefer='threads') + assert type(p._backend) is LokyBackend + assert p.n_jobs == 3 + + with context('loky', n_jobs=2): + # Explicit backend selection by the user with the context manager + # should be ignored when the Parallel call has hard constraints. + # In this case, the default backend that supports shared mem is + # used an the default number of processes is used. 
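+        # (Editor's note) 'require' is a hard constraint whereas 'prefer' is
+        # only a hint, which is why the sharedmem requirement below overrides
+        # the 'loky' context manager while prefer='threads' above did not.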
+ p = Parallel(require='sharedmem') + assert type(p._backend) is ThreadingBackend + assert p.n_jobs == 1 + + with context('loky', n_jobs=2): + p = Parallel(n_jobs=3, require='sharedmem') + assert type(p._backend) is ThreadingBackend + assert p.n_jobs == 3 + + +@parametrize("context", [parallel_config, parallel_backend]) +def test_backend_hinting_and_constraints_with_custom_backends( + capsys, context +): + # Custom backends can declare that they use threads and have shared memory + # semantics: + class MyCustomThreadingBackend(ParallelBackendBase): + supports_sharedmem = True + use_threads = True + + def apply_async(self): + pass + + def effective_n_jobs(self, n_jobs): + return n_jobs + + with context(MyCustomThreadingBackend()): + p = Parallel(n_jobs=2, prefer='processes') # ignored + assert type(p._backend) is MyCustomThreadingBackend + + p = Parallel(n_jobs=2, require='sharedmem') + assert type(p._backend) is MyCustomThreadingBackend + + class MyCustomProcessingBackend(ParallelBackendBase): + supports_sharedmem = False + use_threads = False + + def apply_async(self): + pass + + def effective_n_jobs(self, n_jobs): + return n_jobs + + with context(MyCustomProcessingBackend()): + p = Parallel(n_jobs=2, prefer='processes') + assert type(p._backend) is MyCustomProcessingBackend + + out, err = capsys.readouterr() + assert out == "" + assert err == "" + + p = Parallel(n_jobs=2, require='sharedmem', verbose=10) + assert type(p._backend) is ThreadingBackend + + out, err = capsys.readouterr() + expected = ("Using ThreadingBackend as joblib backend " + "instead of MyCustomProcessingBackend as the latter " + "does not provide shared memory semantics.") + assert out.strip() == expected + assert err == "" + + with raises(ValueError): + Parallel(backend=MyCustomProcessingBackend(), require='sharedmem') + + +def test_invalid_backend_hinting_and_constraints(): + with raises(ValueError): + Parallel(prefer='invalid') + + with raises(ValueError): + Parallel(require='invalid') + + with raises(ValueError): + # It is inconsistent to prefer process-based parallelism while + # requiring shared memory semantics. + Parallel(prefer='processes', require='sharedmem') + + if mp is not None: + # It is inconsistent to ask explicitly for a process-based + # parallelism while requiring shared memory semantics. 
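+        # (Editor's note) unlike the hint-based fallback tested above, an
+        # explicitly requested process-based backend cannot be silently
+        # replaced, so these combinations raise ValueError instead.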
+ with raises(ValueError): + Parallel(backend='loky', require='sharedmem') + with raises(ValueError): + Parallel(backend='multiprocessing', require='sharedmem') + + +def _recursive_backend_info(limit=3, **kwargs): + """Perform nested parallel calls and introspect the backend on the way""" + + with Parallel(n_jobs=2) as p: + this_level = [(type(p._backend).__name__, p._backend.nesting_level)] + if limit == 0: + return this_level + results = p(delayed(_recursive_backend_info)(limit=limit - 1, **kwargs) + for i in range(1)) + return this_level + results[0] + + +@with_multiprocessing +@parametrize('backend', ['loky', 'threading']) +@parametrize("context", [parallel_config, parallel_backend]) +def test_nested_parallelism_limit(context, backend): + with context(backend, n_jobs=2): + backend_types_and_levels = _recursive_backend_info() + + if cpu_count() == 1: + second_level_backend_type = 'SequentialBackend' + max_level = 1 + else: + second_level_backend_type = 'ThreadingBackend' + max_level = 2 + + top_level_backend_type = backend.title() + 'Backend' + expected_types_and_levels = [ + (top_level_backend_type, 0), + (second_level_backend_type, 1), + ('SequentialBackend', max_level), + ('SequentialBackend', max_level) + ] + assert backend_types_and_levels == expected_types_and_levels + + +@with_numpy +@parametrize("context", [parallel_config, parallel_backend]) +@skipif(distributed is None, reason='This test requires dask') +def test_nested_parallelism_with_dask(context): + with distributed.Client(n_workers=2, threads_per_worker=2): + # 10 MB of data as argument to trigger implicit scattering + data = np.ones(int(1e7), dtype=np.uint8) + for i in range(2): + with context('dask'): + backend_types_and_levels = _recursive_backend_info(data=data) + assert len(backend_types_and_levels) == 4 + assert all(name == 'DaskDistributedBackend' + for name, _ in backend_types_and_levels) + + # No argument + with context('dask'): + backend_types_and_levels = _recursive_backend_info() + assert len(backend_types_and_levels) == 4 + assert all(name == 'DaskDistributedBackend' + for name, _ in backend_types_and_levels) + + +def _recursive_parallel(nesting_limit=None): + """A horrible function that does recursive parallel calls""" + return Parallel()(delayed(_recursive_parallel)() for i in range(2)) + + +@pytest.mark.no_cover +@parametrize("context", [parallel_config, parallel_backend]) +@parametrize( + 'backend', (['threading'] if mp is None else ['loky', 'threading']) +) +def test_thread_bomb_mitigation(context, backend): + # Test that recursive parallelism raises a recursion rather than + # saturating the operating system resources by creating a unbounded number + # of threads. + with context(backend, n_jobs=2): + with raises(BaseException) as excinfo: + _recursive_parallel() + exc = excinfo.value + if backend == "loky": + # Local import because loky may not be importable for lack of + # multiprocessing + from joblib.externals.loky.process_executor import TerminatedWorkerError # noqa + if isinstance(exc, (TerminatedWorkerError, PicklingError)): + # The recursion exception can itself cause an error when + # pickling it to be send back to the parent process. In this + # case the worker crashes but the original traceback is still + # printed on stderr. This could be improved but does not seem + # simple to do and this is not critical for users (as long + # as there is no process or thread bomb happening). 
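+                # xfail rather than fail: the thread bomb mitigation itself
+                # worked, only the crashed worker's error report is degraded.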
+ pytest.xfail("Loky worker crash when serializing RecursionError") + + assert isinstance(exc, RecursionError) + + +def _run_parallel_sum(): + env_vars = {} + for var in ['OMP_NUM_THREADS', 'OPENBLAS_NUM_THREADS', 'MKL_NUM_THREADS', + 'VECLIB_MAXIMUM_THREADS', 'NUMEXPR_NUM_THREADS', + 'NUMBA_NUM_THREADS', 'ENABLE_IPC']: + env_vars[var] = os.environ.get(var) + return env_vars, parallel_sum(100) + + +@parametrize("backend", ([None, 'loky'] if mp is not None else [None])) +@skipif(parallel_sum is None, reason="Need OpenMP helper compiled") +def test_parallel_thread_limit(backend): + results = Parallel(n_jobs=2, backend=backend)( + delayed(_run_parallel_sum)() for _ in range(2) + ) + expected_num_threads = max(cpu_count() // 2, 1) + for worker_env_vars, omp_num_threads in results: + assert omp_num_threads == expected_num_threads + for name, value in worker_env_vars.items(): + if name.endswith("_THREADS"): + assert value == str(expected_num_threads) + else: + assert name == "ENABLE_IPC" + assert value == "1" + + +@parametrize("context", [parallel_config, parallel_backend]) +@skipif(distributed is not None, reason='This test requires dask') +def test_dask_backend_when_dask_not_installed(context): + with raises(ValueError, match='Please install dask'): + context('dask') + + +@parametrize("context", [parallel_config, parallel_backend]) +def test_zero_worker_backend(context): + # joblib.Parallel should reject with an explicit error message parallel + # backends that have no worker. + class ZeroWorkerBackend(ThreadingBackend): + def configure(self, *args, **kwargs): + return 0 + + def apply_async(self, func, callback=None): # pragma: no cover + raise TimeoutError("No worker available") + + def effective_n_jobs(self, n_jobs): # pragma: no cover + return 0 + + expected_msg = "ZeroWorkerBackend has no active worker" + with context(ZeroWorkerBackend()): + with pytest.raises(RuntimeError, match=expected_msg): + Parallel(n_jobs=2)(delayed(id)(i) for i in range(2)) + + +def test_globals_update_at_each_parallel_call(): + # This is a non-regression test related to joblib issues #836 and #833. + # Cloudpickle versions between 0.5.4 and 0.7 introduced a bug where global + # variables changes in a parent process between two calls to + # joblib.Parallel would not be propagated into the workers. + global MY_GLOBAL_VARIABLE + MY_GLOBAL_VARIABLE = "original value" + + def check_globals(): + global MY_GLOBAL_VARIABLE + return MY_GLOBAL_VARIABLE + + assert check_globals() == "original value" + + workers_global_variable = Parallel(n_jobs=2)( + delayed(check_globals)() for i in range(2)) + assert set(workers_global_variable) == {"original value"} + + # Change the value of MY_GLOBAL_VARIABLE, and make sure this change gets + # propagated into the workers environment + MY_GLOBAL_VARIABLE = "changed value" + assert check_globals() == "changed value" + + workers_global_variable = Parallel(n_jobs=2)( + delayed(check_globals)() for i in range(2)) + assert set(workers_global_variable) == {"changed value"} + + +############################################################################## +# Test environment variable in child env, in particular for limiting +# the maximal number of threads in C-library threadpools. +# + +def _check_numpy_threadpool_limits(): + import numpy as np + # Let's call BLAS on a Matrix Matrix multiplication with dimensions large + # enough to ensure that the threadpool managed by the underlying BLAS + # implementation is actually used so as to force its initialization. 
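+    # The matrix product below warms up the BLAS threadpool so that
+    # threadpool_info() reports the limits that are actually in effect.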
+ a = np.random.randn(100, 100) + np.dot(a, a) + from threadpoolctl import threadpool_info + return threadpool_info() + + +def _parent_max_num_threads_for(child_module, parent_info): + for parent_module in parent_info: + if parent_module['filepath'] == child_module['filepath']: + return parent_module['num_threads'] + raise ValueError("An unexpected module was loaded in child:\n{}" + .format(child_module)) + + +def check_child_num_threads(workers_info, parent_info, num_threads): + # Check that the number of threads reported in workers_info is consistent + # with the expectation. We need to be careful to handle the cases where + # the requested number of threads is below max_num_thread for the library. + for child_threadpool_info in workers_info: + for child_module in child_threadpool_info: + parent_max_num_threads = _parent_max_num_threads_for( + child_module, parent_info) + expected = {min(num_threads, parent_max_num_threads), num_threads} + assert child_module['num_threads'] in expected + + +@with_numpy +@with_multiprocessing +@parametrize('n_jobs', [2, 4, -2, -1]) +def test_threadpool_limitation_in_child_loky(n_jobs): + # Check that the protection against oversubscription in workers is working + # using threadpoolctl functionalities. + + # Skip this test if numpy is not linked to a BLAS library + parent_info = _check_numpy_threadpool_limits() + if len(parent_info) == 0: + pytest.skip(reason="Need a version of numpy linked to BLAS") + + workers_threadpool_infos = Parallel(backend="loky", n_jobs=n_jobs)( + delayed(_check_numpy_threadpool_limits)() for i in range(2)) + + n_jobs = effective_n_jobs(n_jobs) + expected_child_num_threads = max(cpu_count() // n_jobs, 1) + + check_child_num_threads(workers_threadpool_infos, parent_info, + expected_child_num_threads) + + +@with_numpy +@with_multiprocessing +@parametrize('inner_max_num_threads', [1, 2, 4, None]) +@parametrize('n_jobs', [2, -1]) +@parametrize("context", [parallel_config, parallel_backend]) +def test_threadpool_limitation_in_child_context( + context, n_jobs, inner_max_num_threads +): + # Check that the protection against oversubscription in workers is working + # using threadpoolctl functionalities. + + # Skip this test if numpy is not linked to a BLAS library + parent_info = _check_numpy_threadpool_limits() + if len(parent_info) == 0: + pytest.skip(reason="Need a version of numpy linked to BLAS") + + with context('loky', inner_max_num_threads=inner_max_num_threads): + workers_threadpool_infos = Parallel(n_jobs=n_jobs)( + delayed(_check_numpy_threadpool_limits)() for i in range(2)) + + n_jobs = effective_n_jobs(n_jobs) + if inner_max_num_threads is None: + expected_child_num_threads = max(cpu_count() // n_jobs, 1) + else: + expected_child_num_threads = inner_max_num_threads + + check_child_num_threads(workers_threadpool_infos, parent_info, + expected_child_num_threads) + + +@with_multiprocessing +@parametrize('n_jobs', [2, -1]) +@parametrize('var_name', ["OPENBLAS_NUM_THREADS", + "MKL_NUM_THREADS", + "OMP_NUM_THREADS"]) +@parametrize("context", [parallel_config, parallel_backend]) +def test_threadpool_limitation_in_child_override(context, n_jobs, var_name): + # Check that environment variables set by the user on the main process + # always have the priority. + + # Clean up the existing executor because we change the environment of the + # parent at runtime and it is not detected in loky intentionally. 
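+    # A fresh executor spawned after this shutdown will inherit the
+    # environment variables set further down in this test.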
+ get_reusable_executor(reuse=True).shutdown() + + def _get_env(var_name): + return os.environ.get(var_name) + + original_var_value = os.environ.get(var_name) + try: + os.environ[var_name] = "4" + # Skip this test if numpy is not linked to a BLAS library + results = Parallel(n_jobs=n_jobs)( + delayed(_get_env)(var_name) for i in range(2)) + assert results == ["4", "4"] + + with context('loky', inner_max_num_threads=1): + results = Parallel(n_jobs=n_jobs)( + delayed(_get_env)(var_name) for i in range(2)) + assert results == ["1", "1"] + + finally: + if original_var_value is None: + del os.environ[var_name] + else: + os.environ[var_name] = original_var_value + + +@with_multiprocessing +@parametrize('n_jobs', [2, 4, -1]) +def test_loky_reuse_workers(n_jobs): + # Non-regression test for issue #967 where the workers are not reused when + # calling multiple Parallel loops. + + def parallel_call(n_jobs): + x = range(10) + Parallel(n_jobs=n_jobs)(delayed(sum)(x) for i in range(10)) + + # Run a parallel loop and get the workers used for computations + parallel_call(n_jobs) + first_executor = get_reusable_executor(reuse=True) + + # Ensure that the workers are reused for the next calls, as the executor is + # not restarted. + for _ in range(10): + parallel_call(n_jobs) + executor = get_reusable_executor(reuse=True) + assert executor == first_executor diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/test_store_backends.py b/llmeval-env/lib/python3.10/site-packages/joblib/test/test_store_backends.py new file mode 100644 index 0000000000000000000000000000000000000000..e5db16757ea45fd6761e1b8cfd35e5f5a920752f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/test/test_store_backends.py @@ -0,0 +1,94 @@ + +try: + # Python 2.7: use the C pickle to speed up + # test_concurrency_safe_write which pickles big python objects + import cPickle as cpickle +except ImportError: + import pickle as cpickle +import functools +from pickle import PicklingError +import time + +import pytest + +from joblib.testing import parametrize, timeout +from joblib.test.common import with_multiprocessing +from joblib.backports import concurrency_safe_rename +from joblib import Parallel, delayed +from joblib._store_backends import ( + concurrency_safe_write, + FileSystemStoreBackend, + CacheWarning, +) + + +def write_func(output, filename): + with open(filename, 'wb') as f: + cpickle.dump(output, f) + + +def load_func(expected, filename): + for i in range(10): + try: + with open(filename, 'rb') as f: + reloaded = cpickle.load(f) + break + except (OSError, IOError): + # On Windows you can have WindowsError ([Error 5] Access + # is denied or [Error 13] Permission denied) when reading the file, + # probably because a writer process has a lock on the file + time.sleep(0.1) + else: + raise + assert expected == reloaded + + +def concurrency_safe_write_rename(to_write, filename, write_func): + temporary_filename = concurrency_safe_write(to_write, + filename, write_func) + concurrency_safe_rename(temporary_filename, filename) + + +@timeout(0) # No timeout as this test can be long +@with_multiprocessing +@parametrize('backend', ['multiprocessing', 'loky', 'threading']) +def test_concurrency_safe_write(tmpdir, backend): + # Add one item to cache + filename = tmpdir.join('test.pkl').strpath + + obj = {str(i): i for i in range(int(1e5))} + funcs = [functools.partial(concurrency_safe_write_rename, + write_func=write_func) + if i % 3 != 2 else load_func for i in range(12)] + Parallel(n_jobs=2, backend=backend)( + 
delayed(func)(obj, filename) for func in funcs) + + +def test_warning_on_dump_failure(tmpdir): + # Check that a warning is raised when the dump fails for any reason but + # a PicklingError. + class UnpicklableObject(object): + def __reduce__(self): + raise RuntimeError("some exception") + + backend = FileSystemStoreBackend() + backend.location = tmpdir.join('test_warning_on_pickling_error').strpath + backend.compress = None + + with pytest.warns(CacheWarning, match="some exception"): + backend.dump_item("testpath", UnpicklableObject()) + + +def test_warning_on_pickling_error(tmpdir): + # This is separate from test_warning_on_dump_failure because in the + # future we will turn this into an exception. + class UnpicklableObject(object): + def __reduce__(self): + raise PicklingError("not picklable") + + backend = FileSystemStoreBackend() + backend.location = tmpdir.join('test_warning_on_pickling_error').strpath + backend.compress = None + + with pytest.warns(FutureWarning, match="not picklable"): + backend.dump_item("testpath", UnpicklableObject()) diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/test_testing.py b/llmeval-env/lib/python3.10/site-packages/joblib/test/test_testing.py new file mode 100644 index 0000000000000000000000000000000000000000..e8095aa67040ce868849b89927b325895b5d8e34 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/test/test_testing.py @@ -0,0 +1,75 @@ +import sys +import re + +from joblib.testing import raises, check_subprocess_call + + +def test_check_subprocess_call(): + code = '\n'.join(['result = 1 + 2 * 3', + 'print(result)', + 'my_list = [1, 2, 3]', + 'print(my_list)']) + + check_subprocess_call([sys.executable, '-c', code]) + + # Now checking stdout with a regex + check_subprocess_call([sys.executable, '-c', code], + # Regex needed for platform-specific line endings + stdout_regex=r'7\s{1,2}\[1, 2, 3\]') + + +def test_check_subprocess_call_non_matching_regex(): + code = '42' + non_matching_pattern = '_no_way_this_matches_anything_' + + with raises(ValueError) as excinfo: + check_subprocess_call([sys.executable, '-c', code], + stdout_regex=non_matching_pattern) + excinfo.match('Unexpected stdout.+{}'.format(non_matching_pattern)) + + +def test_check_subprocess_call_wrong_command(): + wrong_command = '_a_command_that_does_not_exist_' + with raises(OSError): + check_subprocess_call([wrong_command]) + + +def test_check_subprocess_call_non_zero_return_code(): + code_with_non_zero_exit = '\n'.join([ + 'import sys', + 'print("writing on stdout")', + 'sys.stderr.write("writing on stderr")', + 'sys.exit(123)']) + + pattern = re.compile('Non-zero return code: 123.+' + 'Stdout:\nwriting on stdout.+' + 'Stderr:\nwriting on stderr', re.DOTALL) + + with raises(ValueError) as excinfo: + check_subprocess_call([sys.executable, '-c', code_with_non_zero_exit]) + excinfo.match(pattern) + + +def test_check_subprocess_call_timeout(): + code_timing_out = '\n'.join([ + 'import time', + 'import sys', + 'print("before sleep on stdout")', + 'sys.stdout.flush()', + 'sys.stderr.write("before sleep on stderr")', + 'sys.stderr.flush()', + # We need to sleep for at least 2 * timeout seconds in case the SIGKILL + # is triggered. 
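+        # timeout=1 is passed below, so sleeping 10 seconds guarantees the
+        # child is still alive when check_subprocess_call kills it.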
+ 'time.sleep(10)', + 'print("process should have be killed before")', + 'sys.stdout.flush()']) + + pattern = re.compile('Non-zero return code:.+' + 'Stdout:\nbefore sleep on stdout\\s+' + 'Stderr:\nbefore sleep on stderr', + re.DOTALL) + + with raises(ValueError) as excinfo: + check_subprocess_call([sys.executable, '-c', code_timing_out], + timeout=1) + excinfo.match(pattern) diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/test_utils.py b/llmeval-env/lib/python3.10/site-packages/joblib/test/test_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..4999a212c462bdb6c10e9e08fdfba74d03b05294 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/test/test_utils.py @@ -0,0 +1,27 @@ +import pytest + +from joblib._utils import eval_expr + + +@pytest.mark.parametrize( + "expr", + ["exec('import os')", "print(1)", "import os", "1+1; import os", "1^1"], +) +def test_eval_expr_invalid(expr): + with pytest.raises( + ValueError, match="is not a valid or supported arithmetic" + ): + eval_expr(expr) + + +@pytest.mark.parametrize( + "expr, result", + [ + ("2*6", 12), + ("2**6", 64), + ("1 + 2*3**(4) / (6 + -7)", -161.0), + ("(20 // 3) % 5", 1), + ], +) +def test_eval_expr_valid(expr, result): + assert eval_expr(expr) == result diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/testutils.py b/llmeval-env/lib/python3.10/site-packages/joblib/test/testutils.py new file mode 100644 index 0000000000000000000000000000000000000000..20ec8c1ba0a50da6be9fce3686ac1950b1a55ab5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/test/testutils.py @@ -0,0 +1,8 @@ +def return_slice_of_data(arr, start_idx, end_idx): + return arr[start_idx:end_idx] + + +def print_filename_and_raise(arr): + from joblib._memmapping_reducer import _get_backing_memmap + print(_get_backing_memmap(arr).filename) + raise ValueError diff --git a/llmeval-env/lib/python3.10/site-packages/numpy/_typing/__init__.py b/llmeval-env/lib/python3.10/site-packages/numpy/_typing/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..29922d958efbdfa6ddee19f8b3904498f9222585 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/numpy/_typing/__init__.py @@ -0,0 +1,221 @@ +"""Private counterpart of ``numpy.typing``.""" + +from __future__ import annotations + +from .. import ufunc +from .._utils import set_module +from typing import TYPE_CHECKING, final + + +@final # Disallow the creation of arbitrary `NBitBase` subclasses +@set_module("numpy.typing") +class NBitBase: + """ + A type representing `numpy.number` precision during static type checking. + + Used exclusively for the purpose static type checking, `NBitBase` + represents the base of a hierarchical set of subclasses. + Each subsequent subclass is herein used for representing a lower level + of precision, *e.g.* ``64Bit > 32Bit > 16Bit``. + + .. versionadded:: 1.20 + + Examples + -------- + Below is a typical usage example: `NBitBase` is herein used for annotating + a function that takes a float and integer of arbitrary precision + as arguments and returns a new float of whichever precision is largest + (*e.g.* ``np.float16 + np.int64 -> np.float64``). + + .. 
code-block:: python + + >>> from __future__ import annotations + >>> from typing import TypeVar, TYPE_CHECKING + >>> import numpy as np + >>> import numpy.typing as npt + + >>> T1 = TypeVar("T1", bound=npt.NBitBase) + >>> T2 = TypeVar("T2", bound=npt.NBitBase) + + >>> def add(a: np.floating[T1], b: np.integer[T2]) -> np.floating[T1 | T2]: + ... return a + b + + >>> a = np.float16() + >>> b = np.int64() + >>> out = add(a, b) + + >>> if TYPE_CHECKING: + ... reveal_locals() + ... # note: Revealed local types are: + ... # note: a: numpy.floating[numpy.typing._16Bit*] + ... # note: b: numpy.signedinteger[numpy.typing._64Bit*] + ... # note: out: numpy.floating[numpy.typing._64Bit*] + + """ + + def __init_subclass__(cls) -> None: + allowed_names = { + "NBitBase", "_256Bit", "_128Bit", "_96Bit", "_80Bit", + "_64Bit", "_32Bit", "_16Bit", "_8Bit", + } + if cls.__name__ not in allowed_names: + raise TypeError('cannot inherit from final class "NBitBase"') + super().__init_subclass__() + + +# Silence errors about subclassing a `@final`-decorated class +class _256Bit(NBitBase): # type: ignore[misc] + pass + +class _128Bit(_256Bit): # type: ignore[misc] + pass + +class _96Bit(_128Bit): # type: ignore[misc] + pass + +class _80Bit(_96Bit): # type: ignore[misc] + pass + +class _64Bit(_80Bit): # type: ignore[misc] + pass + +class _32Bit(_64Bit): # type: ignore[misc] + pass + +class _16Bit(_32Bit): # type: ignore[misc] + pass + +class _8Bit(_16Bit): # type: ignore[misc] + pass + + +from ._nested_sequence import ( + _NestedSequence as _NestedSequence, +) +from ._nbit import ( + _NBitByte as _NBitByte, + _NBitShort as _NBitShort, + _NBitIntC as _NBitIntC, + _NBitIntP as _NBitIntP, + _NBitInt as _NBitInt, + _NBitLongLong as _NBitLongLong, + _NBitHalf as _NBitHalf, + _NBitSingle as _NBitSingle, + _NBitDouble as _NBitDouble, + _NBitLongDouble as _NBitLongDouble, +) +from ._char_codes import ( + _BoolCodes as _BoolCodes, + _UInt8Codes as _UInt8Codes, + _UInt16Codes as _UInt16Codes, + _UInt32Codes as _UInt32Codes, + _UInt64Codes as _UInt64Codes, + _Int8Codes as _Int8Codes, + _Int16Codes as _Int16Codes, + _Int32Codes as _Int32Codes, + _Int64Codes as _Int64Codes, + _Float16Codes as _Float16Codes, + _Float32Codes as _Float32Codes, + _Float64Codes as _Float64Codes, + _Complex64Codes as _Complex64Codes, + _Complex128Codes as _Complex128Codes, + _ByteCodes as _ByteCodes, + _ShortCodes as _ShortCodes, + _IntCCodes as _IntCCodes, + _IntPCodes as _IntPCodes, + _IntCodes as _IntCodes, + _LongLongCodes as _LongLongCodes, + _UByteCodes as _UByteCodes, + _UShortCodes as _UShortCodes, + _UIntCCodes as _UIntCCodes, + _UIntPCodes as _UIntPCodes, + _UIntCodes as _UIntCodes, + _ULongLongCodes as _ULongLongCodes, + _HalfCodes as _HalfCodes, + _SingleCodes as _SingleCodes, + _DoubleCodes as _DoubleCodes, + _LongDoubleCodes as _LongDoubleCodes, + _CSingleCodes as _CSingleCodes, + _CDoubleCodes as _CDoubleCodes, + _CLongDoubleCodes as _CLongDoubleCodes, + _DT64Codes as _DT64Codes, + _TD64Codes as _TD64Codes, + _StrCodes as _StrCodes, + _BytesCodes as _BytesCodes, + _VoidCodes as _VoidCodes, + _ObjectCodes as _ObjectCodes, +) +from ._scalars import ( + _CharLike_co as _CharLike_co, + _BoolLike_co as _BoolLike_co, + _UIntLike_co as _UIntLike_co, + _IntLike_co as _IntLike_co, + _FloatLike_co as _FloatLike_co, + _ComplexLike_co as _ComplexLike_co, + _TD64Like_co as _TD64Like_co, + _NumberLike_co as _NumberLike_co, + _ScalarLike_co as _ScalarLike_co, + _VoidLike_co as _VoidLike_co, +) +from ._shape import ( + _Shape as _Shape, + _ShapeLike as 
_ShapeLike, +) +from ._dtype_like import ( + DTypeLike as DTypeLike, + _DTypeLike as _DTypeLike, + _SupportsDType as _SupportsDType, + _VoidDTypeLike as _VoidDTypeLike, + _DTypeLikeBool as _DTypeLikeBool, + _DTypeLikeUInt as _DTypeLikeUInt, + _DTypeLikeInt as _DTypeLikeInt, + _DTypeLikeFloat as _DTypeLikeFloat, + _DTypeLikeComplex as _DTypeLikeComplex, + _DTypeLikeTD64 as _DTypeLikeTD64, + _DTypeLikeDT64 as _DTypeLikeDT64, + _DTypeLikeObject as _DTypeLikeObject, + _DTypeLikeVoid as _DTypeLikeVoid, + _DTypeLikeStr as _DTypeLikeStr, + _DTypeLikeBytes as _DTypeLikeBytes, + _DTypeLikeComplex_co as _DTypeLikeComplex_co, +) +from ._array_like import ( + NDArray as NDArray, + ArrayLike as ArrayLike, + _ArrayLike as _ArrayLike, + _FiniteNestedSequence as _FiniteNestedSequence, + _SupportsArray as _SupportsArray, + _SupportsArrayFunc as _SupportsArrayFunc, + _ArrayLikeInt as _ArrayLikeInt, + _ArrayLikeBool_co as _ArrayLikeBool_co, + _ArrayLikeUInt_co as _ArrayLikeUInt_co, + _ArrayLikeInt_co as _ArrayLikeInt_co, + _ArrayLikeFloat_co as _ArrayLikeFloat_co, + _ArrayLikeComplex_co as _ArrayLikeComplex_co, + _ArrayLikeNumber_co as _ArrayLikeNumber_co, + _ArrayLikeTD64_co as _ArrayLikeTD64_co, + _ArrayLikeDT64_co as _ArrayLikeDT64_co, + _ArrayLikeObject_co as _ArrayLikeObject_co, + _ArrayLikeVoid_co as _ArrayLikeVoid_co, + _ArrayLikeStr_co as _ArrayLikeStr_co, + _ArrayLikeBytes_co as _ArrayLikeBytes_co, + _ArrayLikeUnknown as _ArrayLikeUnknown, + _UnknownType as _UnknownType, +) + +if TYPE_CHECKING: + from ._ufunc import ( + _UFunc_Nin1_Nout1 as _UFunc_Nin1_Nout1, + _UFunc_Nin2_Nout1 as _UFunc_Nin2_Nout1, + _UFunc_Nin1_Nout2 as _UFunc_Nin1_Nout2, + _UFunc_Nin2_Nout2 as _UFunc_Nin2_Nout2, + _GUFunc_Nin2_Nout1 as _GUFunc_Nin2_Nout1, + ) +else: + # Declare the (type-check-only) ufunc subclasses as ufunc aliases during + # runtime; this helps autocompletion tools such as Jedi (numpy/numpy#19834) + _UFunc_Nin1_Nout1 = ufunc + _UFunc_Nin2_Nout1 = ufunc + _UFunc_Nin1_Nout2 = ufunc + _UFunc_Nin2_Nout2 = ufunc + _GUFunc_Nin2_Nout1 = ufunc diff --git a/llmeval-env/lib/python3.10/site-packages/numpy/_typing/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/numpy/_typing/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..97d9879f62524c8af3ecec867322c124d7fafd2d Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/numpy/_typing/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/numpy/_typing/__pycache__/_add_docstring.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/numpy/_typing/__pycache__/_add_docstring.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7dc3ea0fee834bbae63e4ba332ab1142f9a164b0 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/numpy/_typing/__pycache__/_add_docstring.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/numpy/_typing/__pycache__/_array_like.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/numpy/_typing/__pycache__/_array_like.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..95634b7a385019b441c5d6f0cc36e33159aef01f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/numpy/_typing/__pycache__/_array_like.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/numpy/_typing/__pycache__/_char_codes.cpython-310.pyc 
b/llmeval-env/lib/python3.10/site-packages/numpy/_typing/__pycache__/_char_codes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..45e5a9134e18cacdec30141243ffe2a0d5e57a8e Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/numpy/_typing/__pycache__/_char_codes.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/numpy/_typing/__pycache__/_dtype_like.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/numpy/_typing/__pycache__/_dtype_like.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..85bfadf1b1ab6b1a853976a4a20d1ec6fc9a06f6 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/numpy/_typing/__pycache__/_dtype_like.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/numpy/_typing/__pycache__/_extended_precision.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/numpy/_typing/__pycache__/_extended_precision.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2ce8770dc7894704d93566eee1754d0329711d57 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/numpy/_typing/__pycache__/_extended_precision.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/numpy/_typing/__pycache__/_nbit.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/numpy/_typing/__pycache__/_nbit.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..11e18d4a122ae7c00e2909066746221886dd6e31 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/numpy/_typing/__pycache__/_nbit.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/numpy/_typing/__pycache__/_nested_sequence.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/numpy/_typing/__pycache__/_nested_sequence.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..135ed7a1d29f7e8cbbbf4ad86a37553d8067c1e6 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/numpy/_typing/__pycache__/_nested_sequence.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/numpy/_typing/__pycache__/_scalars.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/numpy/_typing/__pycache__/_scalars.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9eadc272273e3bb4e5df87bc782339ede418051b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/numpy/_typing/__pycache__/_scalars.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/numpy/_typing/__pycache__/_shape.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/numpy/_typing/__pycache__/_shape.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a42bad40a15422ffbd84cf9cbfe8d1dde7a8f8d0 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/numpy/_typing/__pycache__/_shape.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/numpy/_typing/__pycache__/setup.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/numpy/_typing/__pycache__/setup.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5197a278f679ffa15b7c5e7c821abc8c82e147ca Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/numpy/_typing/__pycache__/setup.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/numpy/_typing/_add_docstring.py 
b/llmeval-env/lib/python3.10/site-packages/numpy/_typing/_add_docstring.py new file mode 100644 index 0000000000000000000000000000000000000000..f84d19271c23f19c86729545de59e8cae4a50f05 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/numpy/_typing/_add_docstring.py @@ -0,0 +1,152 @@ +"""A module for creating docstrings for sphinx ``data`` domains.""" + +import re +import textwrap + +from ._array_like import NDArray + +_docstrings_list = [] + + +def add_newdoc(name: str, value: str, doc: str) -> None: + """Append ``_docstrings_list`` with a docstring for `name`. + + Parameters + ---------- + name : str + The name of the object. + value : str + A string-representation of the object. + doc : str + The docstring of the object. + + """ + _docstrings_list.append((name, value, doc)) + + +def _parse_docstrings() -> str: + """Convert all docstrings in ``_docstrings_list`` into a single + sphinx-legible text block. + + """ + type_list_ret = [] + for name, value, doc in _docstrings_list: + s = textwrap.dedent(doc).replace("\n", "\n ") + + # Replace sections by rubrics + lines = s.split("\n") + new_lines = [] + indent = "" + for line in lines: + m = re.match(r'^(\s+)[-=]+\s*$', line) + if m and new_lines: + prev = textwrap.dedent(new_lines.pop()) + if prev == "Examples": + indent = "" + new_lines.append(f'{m.group(1)}.. rubric:: {prev}') + else: + indent = 4 * " " + new_lines.append(f'{m.group(1)}.. admonition:: {prev}') + new_lines.append("") + else: + new_lines.append(f"{indent}{line}") + + s = "\n".join(new_lines) + s_block = f""".. data:: {name}\n :value: {value}\n {s}""" + type_list_ret.append(s_block) + return "\n".join(type_list_ret) + + +add_newdoc('ArrayLike', 'typing.Union[...]', + """ + A `~typing.Union` representing objects that can be coerced + into an `~numpy.ndarray`. + + Among others this includes the likes of: + + * Scalars. + * (Nested) sequences. + * Objects implementing the `~class.__array__` protocol. + + .. versionadded:: 1.20 + + See Also + -------- + :term:`array_like`: + Any scalar or sequence that can be interpreted as an ndarray. + + Examples + -------- + .. code-block:: python + + >>> import numpy as np + >>> import numpy.typing as npt + + >>> def as_array(a: npt.ArrayLike) -> np.ndarray: + ... return np.array(a) + + """) + +add_newdoc('DTypeLike', 'typing.Union[...]', + """ + A `~typing.Union` representing objects that can be coerced + into a `~numpy.dtype`. + + Among others this includes the likes of: + + * :class:`type` objects. + * Character codes or the names of :class:`type` objects. + * Objects with the ``.dtype`` attribute. + + .. versionadded:: 1.20 + + See Also + -------- + :ref:`Specifying and constructing data types ` + A comprehensive overview of all objects that can be coerced + into data types. + + Examples + -------- + .. code-block:: python + + >>> import numpy as np + >>> import numpy.typing as npt + + >>> def as_dtype(d: npt.DTypeLike) -> np.dtype: + ... return np.dtype(d) + + """) + +add_newdoc('NDArray', repr(NDArray), + """ + A :term:`generic ` version of + `np.ndarray[Any, np.dtype[+ScalarType]] `. + + Can be used during runtime for typing arrays with a given dtype + and unspecified shape. + + .. versionadded:: 1.21 + + Examples + -------- + .. 
code-block:: python + + >>> import numpy as np + >>> import numpy.typing as npt + + >>> print(npt.NDArray) + numpy.ndarray[typing.Any, numpy.dtype[+ScalarType]] + + >>> print(npt.NDArray[np.float64]) + numpy.ndarray[typing.Any, numpy.dtype[numpy.float64]] + + >>> NDArrayInt = npt.NDArray[np.int_] + >>> a: NDArrayInt = np.arange(10) + + >>> def func(a: npt.ArrayLike) -> npt.NDArray[Any]: + ... return np.array(a) + + """) + +_docstrings = _parse_docstrings() diff --git a/llmeval-env/lib/python3.10/site-packages/numpy/_typing/_array_like.py b/llmeval-env/lib/python3.10/site-packages/numpy/_typing/_array_like.py new file mode 100644 index 0000000000000000000000000000000000000000..883e817d9a6c7927a8b1e722d2b0ca074fd37c19 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/numpy/_typing/_array_like.py @@ -0,0 +1,167 @@ +from __future__ import annotations + +import sys +from collections.abc import Collection, Callable, Sequence +from typing import Any, Protocol, Union, TypeVar, runtime_checkable + +from numpy import ( + ndarray, + dtype, + generic, + bool_, + unsignedinteger, + integer, + floating, + complexfloating, + number, + timedelta64, + datetime64, + object_, + void, + str_, + bytes_, +) +from ._nested_sequence import _NestedSequence + +_T = TypeVar("_T") +_ScalarType = TypeVar("_ScalarType", bound=generic) +_ScalarType_co = TypeVar("_ScalarType_co", bound=generic, covariant=True) +_DType = TypeVar("_DType", bound=dtype[Any]) +_DType_co = TypeVar("_DType_co", covariant=True, bound=dtype[Any]) + +NDArray = ndarray[Any, dtype[_ScalarType_co]] + +# The `_SupportsArray` protocol only cares about the default dtype +# (i.e. `dtype=None` or no `dtype` parameter at all) of the to-be returned +# array. +# Concrete implementations of the protocol are responsible for adding +# any and all remaining overloads +@runtime_checkable +class _SupportsArray(Protocol[_DType_co]): + def __array__(self) -> ndarray[Any, _DType_co]: ... + + +@runtime_checkable +class _SupportsArrayFunc(Protocol): + """A protocol class representing `~class.__array_function__`.""" + def __array_function__( + self, + func: Callable[..., Any], + types: Collection[type[Any]], + args: tuple[Any, ...], + kwargs: dict[str, Any], + ) -> object: ... + + +# TODO: Wait until mypy supports recursive objects in combination with typevars +_FiniteNestedSequence = Union[ + _T, + Sequence[_T], + Sequence[Sequence[_T]], + Sequence[Sequence[Sequence[_T]]], + Sequence[Sequence[Sequence[Sequence[_T]]]], +] + +# A subset of `npt.ArrayLike` that can be parametrized w.r.t. `np.generic` +_ArrayLike = Union[ + _SupportsArray[dtype[_ScalarType]], + _NestedSequence[_SupportsArray[dtype[_ScalarType]]], +] + +# A union representing array-like objects; consists of two typevars: +# One representing types that can be parametrized w.r.t. 
`np.dtype` +# and another one for the rest +_DualArrayLike = Union[ + _SupportsArray[_DType], + _NestedSequence[_SupportsArray[_DType]], + _T, + _NestedSequence[_T], +] + +if sys.version_info >= (3, 12): + from collections.abc import Buffer + + ArrayLike = Buffer | _DualArrayLike[ + dtype[Any], + Union[bool, int, float, complex, str, bytes], + ] +else: + ArrayLike = _DualArrayLike[ + dtype[Any], + Union[bool, int, float, complex, str, bytes], + ] + +# `ArrayLike_co`: array-like objects that can be coerced into `X` +# given the casting rules `same_kind` +_ArrayLikeBool_co = _DualArrayLike[ + dtype[bool_], + bool, +] +_ArrayLikeUInt_co = _DualArrayLike[ + dtype[Union[bool_, unsignedinteger[Any]]], + bool, +] +_ArrayLikeInt_co = _DualArrayLike[ + dtype[Union[bool_, integer[Any]]], + Union[bool, int], +] +_ArrayLikeFloat_co = _DualArrayLike[ + dtype[Union[bool_, integer[Any], floating[Any]]], + Union[bool, int, float], +] +_ArrayLikeComplex_co = _DualArrayLike[ + dtype[Union[ + bool_, + integer[Any], + floating[Any], + complexfloating[Any, Any], + ]], + Union[bool, int, float, complex], +] +_ArrayLikeNumber_co = _DualArrayLike[ + dtype[Union[bool_, number[Any]]], + Union[bool, int, float, complex], +] +_ArrayLikeTD64_co = _DualArrayLike[ + dtype[Union[bool_, integer[Any], timedelta64]], + Union[bool, int], +] +_ArrayLikeDT64_co = Union[ + _SupportsArray[dtype[datetime64]], + _NestedSequence[_SupportsArray[dtype[datetime64]]], +] +_ArrayLikeObject_co = Union[ + _SupportsArray[dtype[object_]], + _NestedSequence[_SupportsArray[dtype[object_]]], +] + +_ArrayLikeVoid_co = Union[ + _SupportsArray[dtype[void]], + _NestedSequence[_SupportsArray[dtype[void]]], +] +_ArrayLikeStr_co = _DualArrayLike[ + dtype[str_], + str, +] +_ArrayLikeBytes_co = _DualArrayLike[ + dtype[bytes_], + bytes, +] + +_ArrayLikeInt = _DualArrayLike[ + dtype[integer[Any]], + int, +] + +# Extra ArrayLike type so that pyright can deal with NDArray[Any] +# Used as the first overload, should only match NDArray[Any], +# not any actual types. +# https://github.com/numpy/numpy/pull/22193 +class _UnknownType: + ... + + +_ArrayLikeUnknown = _DualArrayLike[ + dtype[_UnknownType], + _UnknownType, +] diff --git a/llmeval-env/lib/python3.10/site-packages/numpy/_typing/_callable.pyi b/llmeval-env/lib/python3.10/site-packages/numpy/_typing/_callable.pyi new file mode 100644 index 0000000000000000000000000000000000000000..ee818e90575b62622e5802c3f2dc56b875cec38b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/numpy/_typing/_callable.pyi @@ -0,0 +1,338 @@ +""" +A module with various ``typing.Protocol`` subclasses that implement +the ``__call__`` magic method. + +See the `Mypy documentation`_ on protocols for more details. + +.. _`Mypy documentation`: https://mypy.readthedocs.io/en/stable/protocols.html#callback-protocols + +""" + +from __future__ import annotations + +from typing import ( + TypeVar, + overload, + Any, + NoReturn, + Protocol, +) + +from numpy import ( + ndarray, + dtype, + generic, + bool_, + timedelta64, + number, + integer, + unsignedinteger, + signedinteger, + int8, + int_, + floating, + float64, + complexfloating, + complex128, +) +from ._nbit import _NBitInt, _NBitDouble +from ._scalars import ( + _BoolLike_co, + _IntLike_co, + _FloatLike_co, + _NumberLike_co, +) +from . 
import NBitBase +from ._array_like import NDArray +from ._nested_sequence import _NestedSequence + +_T1 = TypeVar("_T1") +_T2 = TypeVar("_T2") +_T1_contra = TypeVar("_T1_contra", contravariant=True) +_T2_contra = TypeVar("_T2_contra", contravariant=True) +_2Tuple = tuple[_T1, _T1] + +_NBit1 = TypeVar("_NBit1", bound=NBitBase) +_NBit2 = TypeVar("_NBit2", bound=NBitBase) + +_IntType = TypeVar("_IntType", bound=integer) +_FloatType = TypeVar("_FloatType", bound=floating) +_NumberType = TypeVar("_NumberType", bound=number) +_NumberType_co = TypeVar("_NumberType_co", covariant=True, bound=number) +_GenericType_co = TypeVar("_GenericType_co", covariant=True, bound=generic) + +class _BoolOp(Protocol[_GenericType_co]): + @overload + def __call__(self, other: _BoolLike_co, /) -> _GenericType_co: ... + @overload # platform dependent + def __call__(self, other: int, /) -> int_: ... + @overload + def __call__(self, other: float, /) -> float64: ... + @overload + def __call__(self, other: complex, /) -> complex128: ... + @overload + def __call__(self, other: _NumberType, /) -> _NumberType: ... + +class _BoolBitOp(Protocol[_GenericType_co]): + @overload + def __call__(self, other: _BoolLike_co, /) -> _GenericType_co: ... + @overload # platform dependent + def __call__(self, other: int, /) -> int_: ... + @overload + def __call__(self, other: _IntType, /) -> _IntType: ... + +class _BoolSub(Protocol): + # Note that `other: bool_` is absent here + @overload + def __call__(self, other: bool, /) -> NoReturn: ... + @overload # platform dependent + def __call__(self, other: int, /) -> int_: ... + @overload + def __call__(self, other: float, /) -> float64: ... + @overload + def __call__(self, other: complex, /) -> complex128: ... + @overload + def __call__(self, other: _NumberType, /) -> _NumberType: ... + +class _BoolTrueDiv(Protocol): + @overload + def __call__(self, other: float | _IntLike_co, /) -> float64: ... + @overload + def __call__(self, other: complex, /) -> complex128: ... + @overload + def __call__(self, other: _NumberType, /) -> _NumberType: ... + +class _BoolMod(Protocol): + @overload + def __call__(self, other: _BoolLike_co, /) -> int8: ... + @overload # platform dependent + def __call__(self, other: int, /) -> int_: ... + @overload + def __call__(self, other: float, /) -> float64: ... + @overload + def __call__(self, other: _IntType, /) -> _IntType: ... + @overload + def __call__(self, other: _FloatType, /) -> _FloatType: ... + +class _BoolDivMod(Protocol): + @overload + def __call__(self, other: _BoolLike_co, /) -> _2Tuple[int8]: ... + @overload # platform dependent + def __call__(self, other: int, /) -> _2Tuple[int_]: ... + @overload + def __call__(self, other: float, /) -> _2Tuple[floating[_NBit1 | _NBitDouble]]: ... + @overload + def __call__(self, other: _IntType, /) -> _2Tuple[_IntType]: ... + @overload + def __call__(self, other: _FloatType, /) -> _2Tuple[_FloatType]: ... + +class _TD64Div(Protocol[_NumberType_co]): + @overload + def __call__(self, other: timedelta64, /) -> _NumberType_co: ... + @overload + def __call__(self, other: _BoolLike_co, /) -> NoReturn: ... + @overload + def __call__(self, other: _FloatLike_co, /) -> timedelta64: ... + +class _IntTrueDiv(Protocol[_NBit1]): + @overload + def __call__(self, other: bool, /) -> floating[_NBit1]: ... + @overload + def __call__(self, other: int, /) -> floating[_NBit1 | _NBitInt]: ... + @overload + def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ... 
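+    # A Python complex operand promotes the quotient to a complex floating
+    # type of at least double precision, as the next overload encodes.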
+ @overload + def __call__( + self, other: complex, /, + ) -> complexfloating[_NBit1 | _NBitDouble, _NBit1 | _NBitDouble]: ... + @overload + def __call__(self, other: integer[_NBit2], /) -> floating[_NBit1 | _NBit2]: ... + +class _UnsignedIntOp(Protocol[_NBit1]): + # NOTE: `uint64 + signedinteger -> float64` + @overload + def __call__(self, other: bool, /) -> unsignedinteger[_NBit1]: ... + @overload + def __call__( + self, other: int | signedinteger[Any], / + ) -> Any: ... + @overload + def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ... + @overload + def __call__( + self, other: complex, /, + ) -> complexfloating[_NBit1 | _NBitDouble, _NBit1 | _NBitDouble]: ... + @overload + def __call__( + self, other: unsignedinteger[_NBit2], / + ) -> unsignedinteger[_NBit1 | _NBit2]: ... + +class _UnsignedIntBitOp(Protocol[_NBit1]): + @overload + def __call__(self, other: bool, /) -> unsignedinteger[_NBit1]: ... + @overload + def __call__(self, other: int, /) -> signedinteger[Any]: ... + @overload + def __call__(self, other: signedinteger[Any], /) -> signedinteger[Any]: ... + @overload + def __call__( + self, other: unsignedinteger[_NBit2], / + ) -> unsignedinteger[_NBit1 | _NBit2]: ... + +class _UnsignedIntMod(Protocol[_NBit1]): + @overload + def __call__(self, other: bool, /) -> unsignedinteger[_NBit1]: ... + @overload + def __call__( + self, other: int | signedinteger[Any], / + ) -> Any: ... + @overload + def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ... + @overload + def __call__( + self, other: unsignedinteger[_NBit2], / + ) -> unsignedinteger[_NBit1 | _NBit2]: ... + +class _UnsignedIntDivMod(Protocol[_NBit1]): + @overload + def __call__(self, other: bool, /) -> _2Tuple[signedinteger[_NBit1]]: ... + @overload + def __call__( + self, other: int | signedinteger[Any], / + ) -> _2Tuple[Any]: ... + @overload + def __call__(self, other: float, /) -> _2Tuple[floating[_NBit1 | _NBitDouble]]: ... + @overload + def __call__( + self, other: unsignedinteger[_NBit2], / + ) -> _2Tuple[unsignedinteger[_NBit1 | _NBit2]]: ... + +class _SignedIntOp(Protocol[_NBit1]): + @overload + def __call__(self, other: bool, /) -> signedinteger[_NBit1]: ... + @overload + def __call__(self, other: int, /) -> signedinteger[_NBit1 | _NBitInt]: ... + @overload + def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ... + @overload + def __call__( + self, other: complex, /, + ) -> complexfloating[_NBit1 | _NBitDouble, _NBit1 | _NBitDouble]: ... + @overload + def __call__( + self, other: signedinteger[_NBit2], /, + ) -> signedinteger[_NBit1 | _NBit2]: ... + +class _SignedIntBitOp(Protocol[_NBit1]): + @overload + def __call__(self, other: bool, /) -> signedinteger[_NBit1]: ... + @overload + def __call__(self, other: int, /) -> signedinteger[_NBit1 | _NBitInt]: ... + @overload + def __call__( + self, other: signedinteger[_NBit2], /, + ) -> signedinteger[_NBit1 | _NBit2]: ... + +class _SignedIntMod(Protocol[_NBit1]): + @overload + def __call__(self, other: bool, /) -> signedinteger[_NBit1]: ... + @overload + def __call__(self, other: int, /) -> signedinteger[_NBit1 | _NBitInt]: ... + @overload + def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ... + @overload + def __call__( + self, other: signedinteger[_NBit2], /, + ) -> signedinteger[_NBit1 | _NBit2]: ... + +class _SignedIntDivMod(Protocol[_NBit1]): + @overload + def __call__(self, other: bool, /) -> _2Tuple[signedinteger[_NBit1]]: ... 
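+    # divmod returns a (quotient, remainder) pair, modelled here by _2Tuple;
+    # a plain Python int promotes to at least the platform integer precision.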
+ @overload + def __call__(self, other: int, /) -> _2Tuple[signedinteger[_NBit1 | _NBitInt]]: ... + @overload + def __call__(self, other: float, /) -> _2Tuple[floating[_NBit1 | _NBitDouble]]: ... + @overload + def __call__( + self, other: signedinteger[_NBit2], /, + ) -> _2Tuple[signedinteger[_NBit1 | _NBit2]]: ... + +class _FloatOp(Protocol[_NBit1]): + @overload + def __call__(self, other: bool, /) -> floating[_NBit1]: ... + @overload + def __call__(self, other: int, /) -> floating[_NBit1 | _NBitInt]: ... + @overload + def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ... + @overload + def __call__( + self, other: complex, /, + ) -> complexfloating[_NBit1 | _NBitDouble, _NBit1 | _NBitDouble]: ... + @overload + def __call__( + self, other: integer[_NBit2] | floating[_NBit2], / + ) -> floating[_NBit1 | _NBit2]: ... + +class _FloatMod(Protocol[_NBit1]): + @overload + def __call__(self, other: bool, /) -> floating[_NBit1]: ... + @overload + def __call__(self, other: int, /) -> floating[_NBit1 | _NBitInt]: ... + @overload + def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ... + @overload + def __call__( + self, other: integer[_NBit2] | floating[_NBit2], / + ) -> floating[_NBit1 | _NBit2]: ... + +class _FloatDivMod(Protocol[_NBit1]): + @overload + def __call__(self, other: bool, /) -> _2Tuple[floating[_NBit1]]: ... + @overload + def __call__(self, other: int, /) -> _2Tuple[floating[_NBit1 | _NBitInt]]: ... + @overload + def __call__(self, other: float, /) -> _2Tuple[floating[_NBit1 | _NBitDouble]]: ... + @overload + def __call__( + self, other: integer[_NBit2] | floating[_NBit2], / + ) -> _2Tuple[floating[_NBit1 | _NBit2]]: ... + +class _ComplexOp(Protocol[_NBit1]): + @overload + def __call__(self, other: bool, /) -> complexfloating[_NBit1, _NBit1]: ... + @overload + def __call__(self, other: int, /) -> complexfloating[_NBit1 | _NBitInt, _NBit1 | _NBitInt]: ... + @overload + def __call__( + self, other: complex, /, + ) -> complexfloating[_NBit1 | _NBitDouble, _NBit1 | _NBitDouble]: ... + @overload + def __call__( + self, + other: ( + integer[_NBit2] + | floating[_NBit2] + | complexfloating[_NBit2, _NBit2] + ), /, + ) -> complexfloating[_NBit1 | _NBit2, _NBit1 | _NBit2]: ... + +class _NumberOp(Protocol): + def __call__(self, other: _NumberLike_co, /) -> Any: ... + +class _SupportsLT(Protocol): + def __lt__(self, other: Any, /) -> object: ... + +class _SupportsGT(Protocol): + def __gt__(self, other: Any, /) -> object: ... + +class _ComparisonOp(Protocol[_T1_contra, _T2_contra]): + @overload + def __call__(self, other: _T1_contra, /) -> bool_: ... + @overload + def __call__(self, other: _T2_contra, /) -> NDArray[bool_]: ... + @overload + def __call__( + self, + other: _SupportsLT | _SupportsGT | _NestedSequence[_SupportsLT | _SupportsGT], + /, + ) -> Any: ... 
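The callback protocols above exist only for static type checking; the promotion rules they spell out mirror what NumPy scalar arithmetic already does at runtime. Purely as an illustrative sketch (this snippet is ours, not part of the patched files, and assumes the NumPy 1.x promotion behaviour targeted by this diff), a few of the encoded promotions can be observed directly:

    >>> import numpy as np
    >>> type(np.int32(7) / 0.5)        # _IntTrueDiv, float overload
    <class 'numpy.float64'>
    >>> type(np.True_ / np.True_)      # _BoolTrueDiv
    <class 'numpy.float64'>
    >>> type(np.uint64(1) + np.int64(1))
    <class 'numpy.float64'>

The last result is the ``uint64 + signedinteger -> float64`` case called out in the ``_UnsignedIntOp`` note above.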
diff --git a/llmeval-env/lib/python3.10/site-packages/numpy/_typing/_char_codes.py b/llmeval-env/lib/python3.10/site-packages/numpy/_typing/_char_codes.py new file mode 100644 index 0000000000000000000000000000000000000000..f840d17bbca0a56133bfc2d5f14bcbf4b7ebc747 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/numpy/_typing/_char_codes.py @@ -0,0 +1,111 @@ +from typing import Literal + +_BoolCodes = Literal["?", "=?", "?", "bool", "bool_", "bool8"] + +_UInt8Codes = Literal["uint8", "u1", "=u1", "u1"] +_UInt16Codes = Literal["uint16", "u2", "=u2", "u2"] +_UInt32Codes = Literal["uint32", "u4", "=u4", "u4"] +_UInt64Codes = Literal["uint64", "u8", "=u8", "u8"] + +_Int8Codes = Literal["int8", "i1", "=i1", "i1"] +_Int16Codes = Literal["int16", "i2", "=i2", "i2"] +_Int32Codes = Literal["int32", "i4", "=i4", "i4"] +_Int64Codes = Literal["int64", "i8", "=i8", "i8"] + +_Float16Codes = Literal["float16", "f2", "=f2", "f2"] +_Float32Codes = Literal["float32", "f4", "=f4", "f4"] +_Float64Codes = Literal["float64", "f8", "=f8", "f8"] + +_Complex64Codes = Literal["complex64", "c8", "=c8", "c8"] +_Complex128Codes = Literal["complex128", "c16", "=c16", "c16"] + +_ByteCodes = Literal["byte", "b", "=b", "b"] +_ShortCodes = Literal["short", "h", "=h", "h"] +_IntCCodes = Literal["intc", "i", "=i", "i"] +_IntPCodes = Literal["intp", "int0", "p", "=p", "p"] +_IntCodes = Literal["long", "int", "int_", "l", "=l", "l"] +_LongLongCodes = Literal["longlong", "q", "=q", "q"] + +_UByteCodes = Literal["ubyte", "B", "=B", "B"] +_UShortCodes = Literal["ushort", "H", "=H", "H"] +_UIntCCodes = Literal["uintc", "I", "=I", "I"] +_UIntPCodes = Literal["uintp", "uint0", "P", "=P", "P"] +_UIntCodes = Literal["ulong", "uint", "L", "=L", "L"] +_ULongLongCodes = Literal["ulonglong", "Q", "=Q", "Q"] + +_HalfCodes = Literal["half", "e", "=e", "e"] +_SingleCodes = Literal["single", "f", "=f", "f"] +_DoubleCodes = Literal["double", "float", "float_", "d", "=d", "d"] +_LongDoubleCodes = Literal["longdouble", "longfloat", "g", "=g", "g"] + +_CSingleCodes = Literal["csingle", "singlecomplex", "F", "=F", "F"] +_CDoubleCodes = Literal["cdouble", "complex", "complex_", "cfloat", "D", "=D", "D"] +_CLongDoubleCodes = Literal["clongdouble", "clongfloat", "longcomplex", "G", "=G", "G"] + +_StrCodes = Literal["str", "str_", "str0", "unicode", "unicode_", "U", "=U", "U"] +_BytesCodes = Literal["bytes", "bytes_", "bytes0", "S", "=S", "S"] +_VoidCodes = Literal["void", "void0", "V", "=V", "V"] +_ObjectCodes = Literal["object", "object_", "O", "=O", "O"] + +_DT64Codes = Literal[ + "datetime64", "=datetime64", "datetime64", + "datetime64[Y]", "=datetime64[Y]", "datetime64[Y]", + "datetime64[M]", "=datetime64[M]", "datetime64[M]", + "datetime64[W]", "=datetime64[W]", "datetime64[W]", + "datetime64[D]", "=datetime64[D]", "datetime64[D]", + "datetime64[h]", "=datetime64[h]", "datetime64[h]", + "datetime64[m]", "=datetime64[m]", "datetime64[m]", + "datetime64[s]", "=datetime64[s]", "datetime64[s]", + "datetime64[ms]", "=datetime64[ms]", "datetime64[ms]", + "datetime64[us]", "=datetime64[us]", "datetime64[us]", + "datetime64[ns]", "=datetime64[ns]", "datetime64[ns]", + "datetime64[ps]", "=datetime64[ps]", "datetime64[ps]", + "datetime64[fs]", "=datetime64[fs]", "datetime64[fs]", + "datetime64[as]", "=datetime64[as]", "datetime64[as]", + "M", "=M", "M", + "M8", "=M8", "M8", + "M8[Y]", "=M8[Y]", "M8[Y]", + "M8[M]", "=M8[M]", "M8[M]", + "M8[W]", "=M8[W]", "M8[W]", + "M8[D]", "=M8[D]", "M8[D]", + "M8[h]", "=M8[h]", "M8[h]", + "M8[m]", "=M8[m]", 
"M8[m]", + "M8[s]", "=M8[s]", "M8[s]", + "M8[ms]", "=M8[ms]", "M8[ms]", + "M8[us]", "=M8[us]", "M8[us]", + "M8[ns]", "=M8[ns]", "M8[ns]", + "M8[ps]", "=M8[ps]", "M8[ps]", + "M8[fs]", "=M8[fs]", "M8[fs]", + "M8[as]", "=M8[as]", "M8[as]", +] +_TD64Codes = Literal[ + "timedelta64", "=timedelta64", "timedelta64", + "timedelta64[Y]", "=timedelta64[Y]", "timedelta64[Y]", + "timedelta64[M]", "=timedelta64[M]", "timedelta64[M]", + "timedelta64[W]", "=timedelta64[W]", "timedelta64[W]", + "timedelta64[D]", "=timedelta64[D]", "timedelta64[D]", + "timedelta64[h]", "=timedelta64[h]", "timedelta64[h]", + "timedelta64[m]", "=timedelta64[m]", "timedelta64[m]", + "timedelta64[s]", "=timedelta64[s]", "timedelta64[s]", + "timedelta64[ms]", "=timedelta64[ms]", "timedelta64[ms]", + "timedelta64[us]", "=timedelta64[us]", "timedelta64[us]", + "timedelta64[ns]", "=timedelta64[ns]", "timedelta64[ns]", + "timedelta64[ps]", "=timedelta64[ps]", "timedelta64[ps]", + "timedelta64[fs]", "=timedelta64[fs]", "timedelta64[fs]", + "timedelta64[as]", "=timedelta64[as]", "timedelta64[as]", + "m", "=m", "m", + "m8", "=m8", "m8", + "m8[Y]", "=m8[Y]", "m8[Y]", + "m8[M]", "=m8[M]", "m8[M]", + "m8[W]", "=m8[W]", "m8[W]", + "m8[D]", "=m8[D]", "m8[D]", + "m8[h]", "=m8[h]", "m8[h]", + "m8[m]", "=m8[m]", "m8[m]", + "m8[s]", "=m8[s]", "m8[s]", + "m8[ms]", "=m8[ms]", "m8[ms]", + "m8[us]", "=m8[us]", "m8[us]", + "m8[ns]", "=m8[ns]", "m8[ns]", + "m8[ps]", "=m8[ps]", "m8[ps]", + "m8[fs]", "=m8[fs]", "m8[fs]", + "m8[as]", "=m8[as]", "m8[as]", +] diff --git a/llmeval-env/lib/python3.10/site-packages/numpy/_typing/_dtype_like.py b/llmeval-env/lib/python3.10/site-packages/numpy/_typing/_dtype_like.py new file mode 100644 index 0000000000000000000000000000000000000000..207a99c56b3cde87365992eff97d2da28d46c1f5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/numpy/_typing/_dtype_like.py @@ -0,0 +1,246 @@ +from collections.abc import Sequence +from typing import ( + Any, + Sequence, + Union, + TypeVar, + Protocol, + TypedDict, + runtime_checkable, +) + +import numpy as np + +from ._shape import _ShapeLike + +from ._char_codes import ( + _BoolCodes, + _UInt8Codes, + _UInt16Codes, + _UInt32Codes, + _UInt64Codes, + _Int8Codes, + _Int16Codes, + _Int32Codes, + _Int64Codes, + _Float16Codes, + _Float32Codes, + _Float64Codes, + _Complex64Codes, + _Complex128Codes, + _ByteCodes, + _ShortCodes, + _IntCCodes, + _IntPCodes, + _IntCodes, + _LongLongCodes, + _UByteCodes, + _UShortCodes, + _UIntCCodes, + _UIntPCodes, + _UIntCodes, + _ULongLongCodes, + _HalfCodes, + _SingleCodes, + _DoubleCodes, + _LongDoubleCodes, + _CSingleCodes, + _CDoubleCodes, + _CLongDoubleCodes, + _DT64Codes, + _TD64Codes, + _StrCodes, + _BytesCodes, + _VoidCodes, + _ObjectCodes, +) + +_SCT = TypeVar("_SCT", bound=np.generic) +_DType_co = TypeVar("_DType_co", covariant=True, bound=np.dtype[Any]) + +_DTypeLikeNested = Any # TODO: wait for support for recursive types + + +# Mandatory keys +class _DTypeDictBase(TypedDict): + names: Sequence[str] + formats: Sequence[_DTypeLikeNested] + + +# Mandatory + optional keys +class _DTypeDict(_DTypeDictBase, total=False): + # Only `str` elements are usable as indexing aliases, + # but `titles` can in principle accept any object + offsets: Sequence[int] + titles: Sequence[Any] + itemsize: int + aligned: bool + + +# A protocol for anything with the dtype attribute +@runtime_checkable +class _SupportsDType(Protocol[_DType_co]): + @property + def dtype(self) -> _DType_co: ... + + +# A subset of `npt.DTypeLike` that can be parametrized w.r.t. 
`np.generic` +_DTypeLike = Union[ + np.dtype[_SCT], + type[_SCT], + _SupportsDType[np.dtype[_SCT]], +] + + +# Would create a dtype[np.void] +_VoidDTypeLike = Union[ + # (flexible_dtype, itemsize) + tuple[_DTypeLikeNested, int], + # (fixed_dtype, shape) + tuple[_DTypeLikeNested, _ShapeLike], + # [(field_name, field_dtype, field_shape), ...] + # + # The type here is quite broad because NumPy accepts quite a wide + # range of inputs inside the list; see the tests for some + # examples. + list[Any], + # {'names': ..., 'formats': ..., 'offsets': ..., 'titles': ..., + # 'itemsize': ...} + _DTypeDict, + # (base_dtype, new_dtype) + tuple[_DTypeLikeNested, _DTypeLikeNested], +] + +# Anything that can be coerced into numpy.dtype. +# Reference: https://docs.scipy.org/doc/numpy/reference/arrays.dtypes.html +DTypeLike = Union[ + np.dtype[Any], + # default data type (float64) + None, + # array-scalar types and generic types + type[Any], # NOTE: We're stuck with `type[Any]` due to object dtypes + # anything with a dtype attribute + _SupportsDType[np.dtype[Any]], + # character codes, type strings or comma-separated fields, e.g., 'float64' + str, + _VoidDTypeLike, +] + +# NOTE: while it is possible to provide the dtype as a dict of +# dtype-like objects (e.g. `{'field1': ..., 'field2': ..., ...}`), +# this syntax is officially discourged and +# therefore not included in the Union defining `DTypeLike`. +# +# See https://github.com/numpy/numpy/issues/16891 for more details. + +# Aliases for commonly used dtype-like objects. +# Note that the precision of `np.number` subclasses is ignored herein. +_DTypeLikeBool = Union[ + type[bool], + type[np.bool_], + np.dtype[np.bool_], + _SupportsDType[np.dtype[np.bool_]], + _BoolCodes, +] +_DTypeLikeUInt = Union[ + type[np.unsignedinteger], + np.dtype[np.unsignedinteger], + _SupportsDType[np.dtype[np.unsignedinteger]], + _UInt8Codes, + _UInt16Codes, + _UInt32Codes, + _UInt64Codes, + _UByteCodes, + _UShortCodes, + _UIntCCodes, + _UIntPCodes, + _UIntCodes, + _ULongLongCodes, +] +_DTypeLikeInt = Union[ + type[int], + type[np.signedinteger], + np.dtype[np.signedinteger], + _SupportsDType[np.dtype[np.signedinteger]], + _Int8Codes, + _Int16Codes, + _Int32Codes, + _Int64Codes, + _ByteCodes, + _ShortCodes, + _IntCCodes, + _IntPCodes, + _IntCodes, + _LongLongCodes, +] +_DTypeLikeFloat = Union[ + type[float], + type[np.floating], + np.dtype[np.floating], + _SupportsDType[np.dtype[np.floating]], + _Float16Codes, + _Float32Codes, + _Float64Codes, + _HalfCodes, + _SingleCodes, + _DoubleCodes, + _LongDoubleCodes, +] +_DTypeLikeComplex = Union[ + type[complex], + type[np.complexfloating], + np.dtype[np.complexfloating], + _SupportsDType[np.dtype[np.complexfloating]], + _Complex64Codes, + _Complex128Codes, + _CSingleCodes, + _CDoubleCodes, + _CLongDoubleCodes, +] +_DTypeLikeDT64 = Union[ + type[np.timedelta64], + np.dtype[np.timedelta64], + _SupportsDType[np.dtype[np.timedelta64]], + _TD64Codes, +] +_DTypeLikeTD64 = Union[ + type[np.datetime64], + np.dtype[np.datetime64], + _SupportsDType[np.dtype[np.datetime64]], + _DT64Codes, +] +_DTypeLikeStr = Union[ + type[str], + type[np.str_], + np.dtype[np.str_], + _SupportsDType[np.dtype[np.str_]], + _StrCodes, +] +_DTypeLikeBytes = Union[ + type[bytes], + type[np.bytes_], + np.dtype[np.bytes_], + _SupportsDType[np.dtype[np.bytes_]], + _BytesCodes, +] +_DTypeLikeVoid = Union[ + type[np.void], + np.dtype[np.void], + _SupportsDType[np.dtype[np.void]], + _VoidCodes, + _VoidDTypeLike, +] +_DTypeLikeObject = Union[ + type, + np.dtype[np.object_], + 
_SupportsDType[np.dtype[np.object_]], + _ObjectCodes, +] + +_DTypeLikeComplex_co = Union[ + _DTypeLikeBool, + _DTypeLikeUInt, + _DTypeLikeInt, + _DTypeLikeFloat, + _DTypeLikeComplex, +] diff --git a/llmeval-env/lib/python3.10/site-packages/numpy/_typing/_extended_precision.py b/llmeval-env/lib/python3.10/site-packages/numpy/_typing/_extended_precision.py new file mode 100644 index 0000000000000000000000000000000000000000..7246b47d0ee1724f5697ec3e80965f6f5ec48330 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/numpy/_typing/_extended_precision.py @@ -0,0 +1,27 @@ +"""A module with platform-specific extended precision +`numpy.number` subclasses. + +The subclasses are defined here (instead of ``__init__.pyi``) such +that they can be imported conditionally via the numpy's mypy plugin. +""" + +import numpy as np +from . import ( + _80Bit, + _96Bit, + _128Bit, + _256Bit, +) + +uint128 = np.unsignedinteger[_128Bit] +uint256 = np.unsignedinteger[_256Bit] +int128 = np.signedinteger[_128Bit] +int256 = np.signedinteger[_256Bit] +float80 = np.floating[_80Bit] +float96 = np.floating[_96Bit] +float128 = np.floating[_128Bit] +float256 = np.floating[_256Bit] +complex160 = np.complexfloating[_80Bit, _80Bit] +complex192 = np.complexfloating[_96Bit, _96Bit] +complex256 = np.complexfloating[_128Bit, _128Bit] +complex512 = np.complexfloating[_256Bit, _256Bit] diff --git a/llmeval-env/lib/python3.10/site-packages/numpy/_typing/_nbit.py b/llmeval-env/lib/python3.10/site-packages/numpy/_typing/_nbit.py new file mode 100644 index 0000000000000000000000000000000000000000..b8d35db4f5947fc1fc7f4672c3510f4a4264da6f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/numpy/_typing/_nbit.py @@ -0,0 +1,16 @@ +"""A module with the precisions of platform-specific `~numpy.number`s.""" + +from typing import Any + +# To-be replaced with a `npt.NBitBase` subclass by numpy's mypy plugin +_NBitByte = Any +_NBitShort = Any +_NBitIntC = Any +_NBitIntP = Any +_NBitInt = Any +_NBitLongLong = Any + +_NBitHalf = Any +_NBitSingle = Any +_NBitDouble = Any +_NBitLongDouble = Any diff --git a/llmeval-env/lib/python3.10/site-packages/numpy/_typing/_nested_sequence.py b/llmeval-env/lib/python3.10/site-packages/numpy/_typing/_nested_sequence.py new file mode 100644 index 0000000000000000000000000000000000000000..3d0d25ae5b48a7c4375364c110f05af4dd38a5eb --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/numpy/_typing/_nested_sequence.py @@ -0,0 +1,86 @@ +"""A module containing the `_NestedSequence` protocol.""" + +from __future__ import annotations + +from collections.abc import Iterator +from typing import ( + Any, + TypeVar, + Protocol, + runtime_checkable, +) + +__all__ = ["_NestedSequence"] + +_T_co = TypeVar("_T_co", covariant=True) + + +@runtime_checkable +class _NestedSequence(Protocol[_T_co]): + """A protocol for representing nested sequences. + + Warning + ------- + `_NestedSequence` currently does not work in combination with typevars, + *e.g.* ``def func(a: _NestedSequnce[T]) -> T: ...``. + + See Also + -------- + collections.abc.Sequence + ABCs for read-only and mutable :term:`sequences`. + + Examples + -------- + .. code-block:: python + + >>> from __future__ import annotations + + >>> from typing import TYPE_CHECKING + >>> import numpy as np + >>> from numpy._typing import _NestedSequence + + >>> def get_dtype(seq: _NestedSequence[float]) -> np.dtype[np.float64]: + ... 
return np.asarray(seq).dtype + + >>> a = get_dtype([1.0]) + >>> b = get_dtype([[1.0]]) + >>> c = get_dtype([[[1.0]]]) + >>> d = get_dtype([[[[1.0]]]]) + + >>> if TYPE_CHECKING: + ... reveal_locals() + ... # note: Revealed local types are: + ... # note: a: numpy.dtype[numpy.floating[numpy._typing._64Bit]] + ... # note: b: numpy.dtype[numpy.floating[numpy._typing._64Bit]] + ... # note: c: numpy.dtype[numpy.floating[numpy._typing._64Bit]] + ... # note: d: numpy.dtype[numpy.floating[numpy._typing._64Bit]] + + """ + + def __len__(self, /) -> int: + """Implement ``len(self)``.""" + raise NotImplementedError + + def __getitem__(self, index: int, /) -> _T_co | _NestedSequence[_T_co]: + """Implement ``self[x]``.""" + raise NotImplementedError + + def __contains__(self, x: object, /) -> bool: + """Implement ``x in self``.""" + raise NotImplementedError + + def __iter__(self, /) -> Iterator[_T_co | _NestedSequence[_T_co]]: + """Implement ``iter(self)``.""" + raise NotImplementedError + + def __reversed__(self, /) -> Iterator[_T_co | _NestedSequence[_T_co]]: + """Implement ``reversed(self)``.""" + raise NotImplementedError + + def count(self, value: Any, /) -> int: + """Return the number of occurrences of `value`.""" + raise NotImplementedError + + def index(self, value: Any, /) -> int: + """Return the first index of `value`.""" + raise NotImplementedError diff --git a/llmeval-env/lib/python3.10/site-packages/numpy/_typing/_shape.py b/llmeval-env/lib/python3.10/site-packages/numpy/_typing/_shape.py new file mode 100644 index 0000000000000000000000000000000000000000..4f1204e47c6a20012e729514fdd78424126d45b8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/numpy/_typing/_shape.py @@ -0,0 +1,7 @@ +from collections.abc import Sequence +from typing import Union, SupportsIndex + +_Shape = tuple[int, ...] + +# Anything that can be coerced to a shape tuple +_ShapeLike = Union[SupportsIndex, Sequence[SupportsIndex]] diff --git a/llmeval-env/lib/python3.10/site-packages/numpy/_typing/_ufunc.pyi b/llmeval-env/lib/python3.10/site-packages/numpy/_typing/_ufunc.pyi new file mode 100644 index 0000000000000000000000000000000000000000..9f8e0d4edbfba4b29fb9ac8743009f3073c63e40 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/numpy/_typing/_ufunc.pyi @@ -0,0 +1,445 @@ +"""A module with private type-check-only `numpy.ufunc` subclasses. + +The signatures of the ufuncs are too varied to reasonably type +with a single class. So instead, `ufunc` has been expanded into +four private subclasses, one for each combination of +`~ufunc.nin` and `~ufunc.nout`. + +""" + +from typing import ( + Any, + Generic, + overload, + TypeVar, + Literal, + SupportsIndex, + Protocol, +) + +from numpy import ufunc, _CastingKind, _OrderKACF +from numpy.typing import NDArray + +from ._shape import _ShapeLike +from ._scalars import _ScalarLike_co +from ._array_like import ArrayLike, _ArrayLikeBool_co, _ArrayLikeInt_co +from ._dtype_like import DTypeLike + +_T = TypeVar("_T") +_2Tuple = tuple[_T, _T] +_3Tuple = tuple[_T, _T, _T] +_4Tuple = tuple[_T, _T, _T, _T] + +_NTypes = TypeVar("_NTypes", bound=int) +_IDType = TypeVar("_IDType", bound=Any) +_NameType = TypeVar("_NameType", bound=str) + + +class _SupportsArrayUFunc(Protocol): + def __array_ufunc__( + self, + ufunc: ufunc, + method: Literal["__call__", "reduce", "reduceat", "accumulate", "outer", "inner"], + *inputs: Any, + **kwargs: Any, + ) -> Any: ... 
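# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the upstream stub). The
# `_SupportsArrayUFunc` protocol above matches any object that defines
# `__array_ufunc__`, the hook NumPy consults before coercing unfamiliar
# operands. The class below is hypothetical and uses only public NumPy APIs;
# it shows the shape of an implementation the protocol would accept.

import numpy as np


class LoggingDuck:
    """Wrap an array and report every ufunc applied to it."""

    def __init__(self, data):
        self.data = np.asarray(data)

    def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
        # Unwrap LoggingDuck operands, delegate to the requested ufunc
        # method ("__call__", "reduce", "outer", ...), then re-wrap.
        unwrapped = [x.data if isinstance(x, LoggingDuck) else x
                     for x in inputs]
        print(f"intercepted {ufunc.__name__}.{method}")
        return LoggingDuck(getattr(ufunc, method)(*unwrapped, **kwargs))


# np.square(LoggingDuck([1.0, 2.0]))    -> prints "intercepted square.__call__"
# np.add.reduce(LoggingDuck([1, 2, 3])) -> prints "intercepted add.reduce"
# ---------------------------------------------------------------------------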
+ + +# NOTE: In reality `extobj` should be a length of list 3 containing an +# int, an int, and a callable, but there's no way to properly express +# non-homogenous lists. +# Use `Any` over `Union` to avoid issues related to lists invariance. + +# NOTE: `reduce`, `accumulate`, `reduceat` and `outer` raise a ValueError for +# ufuncs that don't accept two input arguments and return one output argument. +# In such cases the respective methods are simply typed as `None`. + +# NOTE: Similarly, `at` won't be defined for ufuncs that return +# multiple outputs; in such cases `at` is typed as `None` + +# NOTE: If 2 output types are returned then `out` must be a +# 2-tuple of arrays. Otherwise `None` or a plain array are also acceptable + +class _UFunc_Nin1_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] + @property + def __name__(self) -> _NameType: ... + @property + def ntypes(self) -> _NTypes: ... + @property + def identity(self) -> _IDType: ... + @property + def nin(self) -> Literal[1]: ... + @property + def nout(self) -> Literal[1]: ... + @property + def nargs(self) -> Literal[2]: ... + @property + def signature(self) -> None: ... + @property + def reduce(self) -> None: ... + @property + def accumulate(self) -> None: ... + @property + def reduceat(self) -> None: ... + @property + def outer(self) -> None: ... + + @overload + def __call__( + self, + __x1: _ScalarLike_co, + out: None = ..., + *, + where: None | _ArrayLikeBool_co = ..., + casting: _CastingKind = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: str | _2Tuple[None | str] = ..., + extobj: list[Any] = ..., + ) -> Any: ... + @overload + def __call__( + self, + __x1: ArrayLike, + out: None | NDArray[Any] | tuple[NDArray[Any]] = ..., + *, + where: None | _ArrayLikeBool_co = ..., + casting: _CastingKind = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: str | _2Tuple[None | str] = ..., + extobj: list[Any] = ..., + ) -> NDArray[Any]: ... + @overload + def __call__( + self, + __x1: _SupportsArrayUFunc, + out: None | NDArray[Any] | tuple[NDArray[Any]] = ..., + *, + where: None | _ArrayLikeBool_co = ..., + casting: _CastingKind = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: str | _2Tuple[None | str] = ..., + extobj: list[Any] = ..., + ) -> Any: ... + + def at( + self, + a: _SupportsArrayUFunc, + indices: _ArrayLikeInt_co, + /, + ) -> None: ... + +class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] + @property + def __name__(self) -> _NameType: ... + @property + def ntypes(self) -> _NTypes: ... + @property + def identity(self) -> _IDType: ... + @property + def nin(self) -> Literal[2]: ... + @property + def nout(self) -> Literal[1]: ... + @property + def nargs(self) -> Literal[3]: ... + @property + def signature(self) -> None: ... + + @overload + def __call__( + self, + __x1: _ScalarLike_co, + __x2: _ScalarLike_co, + out: None = ..., + *, + where: None | _ArrayLikeBool_co = ..., + casting: _CastingKind = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: str | _3Tuple[None | str] = ..., + extobj: list[Any] = ..., + ) -> Any: ... 
+ @overload + def __call__( + self, + __x1: ArrayLike, + __x2: ArrayLike, + out: None | NDArray[Any] | tuple[NDArray[Any]] = ..., + *, + where: None | _ArrayLikeBool_co = ..., + casting: _CastingKind = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: str | _3Tuple[None | str] = ..., + extobj: list[Any] = ..., + ) -> NDArray[Any]: ... + + def at( + self, + a: NDArray[Any], + indices: _ArrayLikeInt_co, + b: ArrayLike, + /, + ) -> None: ... + + def reduce( + self, + array: ArrayLike, + axis: None | _ShapeLike = ..., + dtype: DTypeLike = ..., + out: None | NDArray[Any] = ..., + keepdims: bool = ..., + initial: Any = ..., + where: _ArrayLikeBool_co = ..., + ) -> Any: ... + + def accumulate( + self, + array: ArrayLike, + axis: SupportsIndex = ..., + dtype: DTypeLike = ..., + out: None | NDArray[Any] = ..., + ) -> NDArray[Any]: ... + + def reduceat( + self, + array: ArrayLike, + indices: _ArrayLikeInt_co, + axis: SupportsIndex = ..., + dtype: DTypeLike = ..., + out: None | NDArray[Any] = ..., + ) -> NDArray[Any]: ... + + # Expand `**kwargs` into explicit keyword-only arguments + @overload + def outer( + self, + A: _ScalarLike_co, + B: _ScalarLike_co, + /, *, + out: None = ..., + where: None | _ArrayLikeBool_co = ..., + casting: _CastingKind = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: str | _3Tuple[None | str] = ..., + extobj: list[Any] = ..., + ) -> Any: ... + @overload + def outer( # type: ignore[misc] + self, + A: ArrayLike, + B: ArrayLike, + /, *, + out: None | NDArray[Any] | tuple[NDArray[Any]] = ..., + where: None | _ArrayLikeBool_co = ..., + casting: _CastingKind = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: str | _3Tuple[None | str] = ..., + extobj: list[Any] = ..., + ) -> NDArray[Any]: ... + +class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] + @property + def __name__(self) -> _NameType: ... + @property + def ntypes(self) -> _NTypes: ... + @property + def identity(self) -> _IDType: ... + @property + def nin(self) -> Literal[1]: ... + @property + def nout(self) -> Literal[2]: ... + @property + def nargs(self) -> Literal[3]: ... + @property + def signature(self) -> None: ... + @property + def at(self) -> None: ... + @property + def reduce(self) -> None: ... + @property + def accumulate(self) -> None: ... + @property + def reduceat(self) -> None: ... + @property + def outer(self) -> None: ... + + @overload + def __call__( + self, + __x1: _ScalarLike_co, + __out1: None = ..., + __out2: None = ..., + *, + where: None | _ArrayLikeBool_co = ..., + casting: _CastingKind = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: str | _3Tuple[None | str] = ..., + extobj: list[Any] = ..., + ) -> _2Tuple[Any]: ... + @overload + def __call__( + self, + __x1: ArrayLike, + __out1: None | NDArray[Any] = ..., + __out2: None | NDArray[Any] = ..., + *, + out: _2Tuple[NDArray[Any]] = ..., + where: None | _ArrayLikeBool_co = ..., + casting: _CastingKind = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: str | _3Tuple[None | str] = ..., + extobj: list[Any] = ..., + ) -> _2Tuple[NDArray[Any]]: ... 
+ @overload + def __call__( + self, + __x1: _SupportsArrayUFunc, + __out1: None | NDArray[Any] = ..., + __out2: None | NDArray[Any] = ..., + *, + out: _2Tuple[NDArray[Any]] = ..., + where: None | _ArrayLikeBool_co = ..., + casting: _CastingKind = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: str | _3Tuple[None | str] = ..., + extobj: list[Any] = ..., + ) -> _2Tuple[Any]: ... + +class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] + @property + def __name__(self) -> _NameType: ... + @property + def ntypes(self) -> _NTypes: ... + @property + def identity(self) -> _IDType: ... + @property + def nin(self) -> Literal[2]: ... + @property + def nout(self) -> Literal[2]: ... + @property + def nargs(self) -> Literal[4]: ... + @property + def signature(self) -> None: ... + @property + def at(self) -> None: ... + @property + def reduce(self) -> None: ... + @property + def accumulate(self) -> None: ... + @property + def reduceat(self) -> None: ... + @property + def outer(self) -> None: ... + + @overload + def __call__( + self, + __x1: _ScalarLike_co, + __x2: _ScalarLike_co, + __out1: None = ..., + __out2: None = ..., + *, + where: None | _ArrayLikeBool_co = ..., + casting: _CastingKind = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: str | _4Tuple[None | str] = ..., + extobj: list[Any] = ..., + ) -> _2Tuple[Any]: ... + @overload + def __call__( + self, + __x1: ArrayLike, + __x2: ArrayLike, + __out1: None | NDArray[Any] = ..., + __out2: None | NDArray[Any] = ..., + *, + out: _2Tuple[NDArray[Any]] = ..., + where: None | _ArrayLikeBool_co = ..., + casting: _CastingKind = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: str | _4Tuple[None | str] = ..., + extobj: list[Any] = ..., + ) -> _2Tuple[NDArray[Any]]: ... + +class _GUFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] + @property + def __name__(self) -> _NameType: ... + @property + def ntypes(self) -> _NTypes: ... + @property + def identity(self) -> _IDType: ... + @property + def nin(self) -> Literal[2]: ... + @property + def nout(self) -> Literal[1]: ... + @property + def nargs(self) -> Literal[3]: ... + + # NOTE: In practice the only gufunc in the main namespace is `matmul`, + # so we can use its signature here + @property + def signature(self) -> Literal["(n?,k),(k,m?)->(n?,m?)"]: ... + @property + def reduce(self) -> None: ... + @property + def accumulate(self) -> None: ... + @property + def reduceat(self) -> None: ... + @property + def outer(self) -> None: ... + @property + def at(self) -> None: ... + + # Scalar for 1D array-likes; ndarray otherwise + @overload + def __call__( + self, + __x1: ArrayLike, + __x2: ArrayLike, + out: None = ..., + *, + casting: _CastingKind = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: str | _3Tuple[None | str] = ..., + extobj: list[Any] = ..., + axes: list[_2Tuple[SupportsIndex]] = ..., + ) -> Any: ... + @overload + def __call__( + self, + __x1: ArrayLike, + __x2: ArrayLike, + out: NDArray[Any] | tuple[NDArray[Any]], + *, + casting: _CastingKind = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: str | _3Tuple[None | str] = ..., + extobj: list[Any] = ..., + axes: list[_2Tuple[SupportsIndex]] = ..., + ) -> NDArray[Any]: ... 
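# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the upstream package). The four
# private subclasses above encode how a ufunc's `nin`/`nout` combination
# determines which extra methods exist: for 2-in/1-out ufuncs the reduction
# helpers are real; for other arities NumPy raises ValueError at runtime,
# which the stubs model by typing those attributes as None. A few public
# ufuncs, assuming a recent NumPy:

import numpy as np

# np.add behaves like _UFunc_Nin2_Nout1: the extra methods are all usable.
print(np.add.nin, np.add.nout)          # 2 1
print(np.add.reduce([1, 2, 3, 4]))      # 10
print(np.add.outer([1, 2], [10, 20]))   # [[11 21]
                                        #  [12 22]]
buf = np.zeros(3)
np.add.at(buf, [0, 0, 2], 1)            # unbuffered in-place addition
print(buf)                              # [2. 0. 1.]

# np.frexp behaves like _UFunc_Nin1_Nout2: calling it yields a 2-tuple,
# and reduce/accumulate/reduceat/outer raise ValueError at runtime.
mantissa, exponent = np.frexp([1.0, 8.0])
print(mantissa, exponent)               # [0.5 0.5] [1 4]

# np.matmul is the generalized ufunc whose core signature the
# _GUFunc_Nin2_Nout1 stub hard-codes.
print(np.matmul.signature)              # (n?,k),(k,m?)->(n?,m?)
# ---------------------------------------------------------------------------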
diff --git a/llmeval-env/lib/python3.10/site-packages/numpy/lib/tests/data/py2-objarr.npy b/llmeval-env/lib/python3.10/site-packages/numpy/lib/tests/data/py2-objarr.npy new file mode 100644 index 0000000000000000000000000000000000000000..0d41ae5223b7235d16081318703875ef495bbfc7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/numpy/lib/tests/data/py2-objarr.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:178732502fbf4c1f504852c0a3673b738e2b0ba35460882b7c6c7d3ea58f48a9 +size 258 diff --git a/llmeval-env/lib/python3.10/site-packages/numpy/lib/tests/data/py2-objarr.npz b/llmeval-env/lib/python3.10/site-packages/numpy/lib/tests/data/py2-objarr.npz new file mode 100644 index 0000000000000000000000000000000000000000..85da7be0c3d77ff8d71ab7671e0d23188b7b36c4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/numpy/lib/tests/data/py2-objarr.npz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c68d771c14f415b159daabd9cf42d61836f74ae40049269787baca7d57098f1e +size 366 diff --git a/llmeval-env/lib/python3.10/site-packages/numpy/lib/tests/data/py3-objarr.npy b/llmeval-env/lib/python3.10/site-packages/numpy/lib/tests/data/py3-objarr.npy new file mode 100644 index 0000000000000000000000000000000000000000..4b833c1ce2ab92b9e02706a430f9ea58a1e086c9 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/numpy/lib/tests/data/py3-objarr.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a534d587c7b3a7e97000addf920bdd294f00ae9e4d30ace7543f8ce84c7fb80d +size 341 diff --git a/llmeval-env/lib/python3.10/site-packages/numpy/lib/tests/data/py3-objarr.npz b/llmeval-env/lib/python3.10/site-packages/numpy/lib/tests/data/py3-objarr.npz new file mode 100644 index 0000000000000000000000000000000000000000..df1ed78f455a17de597a2a1a125c13e595717298 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/numpy/lib/tests/data/py3-objarr.npz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a90474812e7b7bdb5ad7a77fbc242369a28cef880f765c023e4a79e4ffaaaddc +size 449 diff --git a/llmeval-env/lib/python3.10/site-packages/numpy/lib/tests/data/python3.npy b/llmeval-env/lib/python3.10/site-packages/numpy/lib/tests/data/python3.npy new file mode 100644 index 0000000000000000000000000000000000000000..1de3124ee4eaf42cfc3625cdfa88767de07df9c4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/numpy/lib/tests/data/python3.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5f469dde101a2c65e283d2ed487028f8180ebcb9457cf60c9d9b952314668fed +size 96 diff --git a/llmeval-env/lib/python3.10/site-packages/numpy/lib/tests/data/win64python2.npy b/llmeval-env/lib/python3.10/site-packages/numpy/lib/tests/data/win64python2.npy new file mode 100644 index 0000000000000000000000000000000000000000..46edfc4d6869ed05b8438bc5bfa5d77d9ec3e9ba --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/numpy/lib/tests/data/win64python2.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6a039c807558149ad5fa7ad12436c69d49c5e194cf617b92785f8cb60ec63297 +size 96 diff --git a/llmeval-env/lib/python3.10/site-packages/numpy/testing/tests/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/numpy/testing/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b71357c7d58a3f5ef5c0a0382766ea5d1b8b0d25 Binary files /dev/null and 
b/llmeval-env/lib/python3.10/site-packages/numpy/testing/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/numpy/testing/tests/test_utils.py b/llmeval-env/lib/python3.10/site-packages/numpy/testing/tests/test_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..0aaa508ee5d2e194f44c756c94ae5b3db194292e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/numpy/testing/tests/test_utils.py @@ -0,0 +1,1626 @@ +import warnings +import sys +import os +import itertools +import pytest +import weakref + +import numpy as np +from numpy.testing import ( + assert_equal, assert_array_equal, assert_almost_equal, + assert_array_almost_equal, assert_array_less, build_err_msg, + assert_raises, assert_warns, assert_no_warnings, assert_allclose, + assert_approx_equal, assert_array_almost_equal_nulp, assert_array_max_ulp, + clear_and_catch_warnings, suppress_warnings, assert_string_equal, assert_, + tempdir, temppath, assert_no_gc_cycles, HAS_REFCOUNT + ) + + +class _GenericTest: + + def _test_equal(self, a, b): + self._assert_func(a, b) + + def _test_not_equal(self, a, b): + with assert_raises(AssertionError): + self._assert_func(a, b) + + def test_array_rank1_eq(self): + """Test two equal array of rank 1 are found equal.""" + a = np.array([1, 2]) + b = np.array([1, 2]) + + self._test_equal(a, b) + + def test_array_rank1_noteq(self): + """Test two different array of rank 1 are found not equal.""" + a = np.array([1, 2]) + b = np.array([2, 2]) + + self._test_not_equal(a, b) + + def test_array_rank2_eq(self): + """Test two equal array of rank 2 are found equal.""" + a = np.array([[1, 2], [3, 4]]) + b = np.array([[1, 2], [3, 4]]) + + self._test_equal(a, b) + + def test_array_diffshape(self): + """Test two arrays with different shapes are found not equal.""" + a = np.array([1, 2]) + b = np.array([[1, 2], [1, 2]]) + + self._test_not_equal(a, b) + + def test_objarray(self): + """Test object arrays.""" + a = np.array([1, 1], dtype=object) + self._test_equal(a, 1) + + def test_array_likes(self): + self._test_equal([1, 2, 3], (1, 2, 3)) + + +class TestArrayEqual(_GenericTest): + + def setup_method(self): + self._assert_func = assert_array_equal + + def test_generic_rank1(self): + """Test rank 1 array for all dtypes.""" + def foo(t): + a = np.empty(2, t) + a.fill(1) + b = a.copy() + c = a.copy() + c.fill(0) + self._test_equal(a, b) + self._test_not_equal(c, b) + + # Test numeric types and object + for t in '?bhilqpBHILQPfdgFDG': + foo(t) + + # Test strings + for t in ['S1', 'U1']: + foo(t) + + def test_0_ndim_array(self): + x = np.array(473963742225900817127911193656584771) + y = np.array(18535119325151578301457182298393896) + assert_raises(AssertionError, self._assert_func, x, y) + + y = x + self._assert_func(x, y) + + x = np.array(43) + y = np.array(10) + assert_raises(AssertionError, self._assert_func, x, y) + + y = x + self._assert_func(x, y) + + def test_generic_rank3(self): + """Test rank 3 array for all dtypes.""" + def foo(t): + a = np.empty((4, 2, 3), t) + a.fill(1) + b = a.copy() + c = a.copy() + c.fill(0) + self._test_equal(a, b) + self._test_not_equal(c, b) + + # Test numeric types and object + for t in '?bhilqpBHILQPfdgFDG': + foo(t) + + # Test strings + for t in ['S1', 'U1']: + foo(t) + + def test_nan_array(self): + """Test arrays with nan values in them.""" + a = np.array([1, 2, np.nan]) + b = np.array([1, 2, np.nan]) + + self._test_equal(a, b) + + c = np.array([1, 2, 3]) + self._test_not_equal(c, b) + + def 
test_string_arrays(self): + """Test two arrays with different shapes are found not equal.""" + a = np.array(['floupi', 'floupa']) + b = np.array(['floupi', 'floupa']) + + self._test_equal(a, b) + + c = np.array(['floupipi', 'floupa']) + + self._test_not_equal(c, b) + + def test_recarrays(self): + """Test record arrays.""" + a = np.empty(2, [('floupi', float), ('floupa', float)]) + a['floupi'] = [1, 2] + a['floupa'] = [1, 2] + b = a.copy() + + self._test_equal(a, b) + + c = np.empty(2, [('floupipi', float), + ('floupi', float), ('floupa', float)]) + c['floupipi'] = a['floupi'].copy() + c['floupa'] = a['floupa'].copy() + + with pytest.raises(TypeError): + self._test_not_equal(c, b) + + def test_masked_nan_inf(self): + # Regression test for gh-11121 + a = np.ma.MaskedArray([3., 4., 6.5], mask=[False, True, False]) + b = np.array([3., np.nan, 6.5]) + self._test_equal(a, b) + self._test_equal(b, a) + a = np.ma.MaskedArray([3., 4., 6.5], mask=[True, False, False]) + b = np.array([np.inf, 4., 6.5]) + self._test_equal(a, b) + self._test_equal(b, a) + + def test_subclass_that_overrides_eq(self): + # While we cannot guarantee testing functions will always work for + # subclasses, the tests should ideally rely only on subclasses having + # comparison operators, not on them being able to store booleans + # (which, e.g., astropy Quantity cannot usefully do). See gh-8452. + class MyArray(np.ndarray): + def __eq__(self, other): + return bool(np.equal(self, other).all()) + + def __ne__(self, other): + return not self == other + + a = np.array([1., 2.]).view(MyArray) + b = np.array([2., 3.]).view(MyArray) + assert_(type(a == a), bool) + assert_(a == a) + assert_(a != b) + self._test_equal(a, a) + self._test_not_equal(a, b) + self._test_not_equal(b, a) + + def test_subclass_that_does_not_implement_npall(self): + class MyArray(np.ndarray): + def __array_function__(self, *args, **kwargs): + return NotImplemented + + a = np.array([1., 2.]).view(MyArray) + b = np.array([2., 3.]).view(MyArray) + with assert_raises(TypeError): + np.all(a) + self._test_equal(a, a) + self._test_not_equal(a, b) + self._test_not_equal(b, a) + + def test_suppress_overflow_warnings(self): + # Based on issue #18992 + with pytest.raises(AssertionError): + with np.errstate(all="raise"): + np.testing.assert_array_equal( + np.array([1, 2, 3], np.float32), + np.array([1, 1e-40, 3], np.float32)) + + def test_array_vs_scalar_is_equal(self): + """Test comparing an array with a scalar when all values are equal.""" + a = np.array([1., 1., 1.]) + b = 1. + + self._test_equal(a, b) + + def test_array_vs_scalar_not_equal(self): + """Test comparing an array with a scalar when not all values equal.""" + a = np.array([1., 2., 3.]) + b = 1. + + self._test_not_equal(a, b) + + def test_array_vs_scalar_strict(self): + """Test comparing an array with a scalar with strict option.""" + a = np.array([1., 1., 1.]) + b = 1. 
+ + with pytest.raises(AssertionError): + assert_array_equal(a, b, strict=True) + + def test_array_vs_array_strict(self): + """Test comparing two arrays with strict option.""" + a = np.array([1., 1., 1.]) + b = np.array([1., 1., 1.]) + + assert_array_equal(a, b, strict=True) + + def test_array_vs_float_array_strict(self): + """Test comparing two arrays with strict option.""" + a = np.array([1, 1, 1]) + b = np.array([1., 1., 1.]) + + with pytest.raises(AssertionError): + assert_array_equal(a, b, strict=True) + + +class TestBuildErrorMessage: + + def test_build_err_msg_defaults(self): + x = np.array([1.00001, 2.00002, 3.00003]) + y = np.array([1.00002, 2.00003, 3.00004]) + err_msg = 'There is a mismatch' + + a = build_err_msg([x, y], err_msg) + b = ('\nItems are not equal: There is a mismatch\n ACTUAL: array([' + '1.00001, 2.00002, 3.00003])\n DESIRED: array([1.00002, ' + '2.00003, 3.00004])') + assert_equal(a, b) + + def test_build_err_msg_no_verbose(self): + x = np.array([1.00001, 2.00002, 3.00003]) + y = np.array([1.00002, 2.00003, 3.00004]) + err_msg = 'There is a mismatch' + + a = build_err_msg([x, y], err_msg, verbose=False) + b = '\nItems are not equal: There is a mismatch' + assert_equal(a, b) + + def test_build_err_msg_custom_names(self): + x = np.array([1.00001, 2.00002, 3.00003]) + y = np.array([1.00002, 2.00003, 3.00004]) + err_msg = 'There is a mismatch' + + a = build_err_msg([x, y], err_msg, names=('FOO', 'BAR')) + b = ('\nItems are not equal: There is a mismatch\n FOO: array([' + '1.00001, 2.00002, 3.00003])\n BAR: array([1.00002, 2.00003, ' + '3.00004])') + assert_equal(a, b) + + def test_build_err_msg_custom_precision(self): + x = np.array([1.000000001, 2.00002, 3.00003]) + y = np.array([1.000000002, 2.00003, 3.00004]) + err_msg = 'There is a mismatch' + + a = build_err_msg([x, y], err_msg, precision=10) + b = ('\nItems are not equal: There is a mismatch\n ACTUAL: array([' + '1.000000001, 2.00002 , 3.00003 ])\n DESIRED: array([' + '1.000000002, 2.00003 , 3.00004 ])') + assert_equal(a, b) + + +class TestEqual(TestArrayEqual): + + def setup_method(self): + self._assert_func = assert_equal + + def test_nan_items(self): + self._assert_func(np.nan, np.nan) + self._assert_func([np.nan], [np.nan]) + self._test_not_equal(np.nan, [np.nan]) + self._test_not_equal(np.nan, 1) + + def test_inf_items(self): + self._assert_func(np.inf, np.inf) + self._assert_func([np.inf], [np.inf]) + self._test_not_equal(np.inf, [np.inf]) + + def test_datetime(self): + self._test_equal( + np.datetime64("2017-01-01", "s"), + np.datetime64("2017-01-01", "s") + ) + self._test_equal( + np.datetime64("2017-01-01", "s"), + np.datetime64("2017-01-01", "m") + ) + + # gh-10081 + self._test_not_equal( + np.datetime64("2017-01-01", "s"), + np.datetime64("2017-01-02", "s") + ) + self._test_not_equal( + np.datetime64("2017-01-01", "s"), + np.datetime64("2017-01-02", "m") + ) + + def test_nat_items(self): + # not a datetime + nadt_no_unit = np.datetime64("NaT") + nadt_s = np.datetime64("NaT", "s") + nadt_d = np.datetime64("NaT", "ns") + # not a timedelta + natd_no_unit = np.timedelta64("NaT") + natd_s = np.timedelta64("NaT", "s") + natd_d = np.timedelta64("NaT", "ns") + + dts = [nadt_no_unit, nadt_s, nadt_d] + tds = [natd_no_unit, natd_s, natd_d] + for a, b in itertools.product(dts, dts): + self._assert_func(a, b) + self._assert_func([a], [b]) + self._test_not_equal([a], b) + + for a, b in itertools.product(tds, tds): + self._assert_func(a, b) + self._assert_func([a], [b]) + self._test_not_equal([a], b) + + for a, b in 
itertools.product(tds, dts): + self._test_not_equal(a, b) + self._test_not_equal(a, [b]) + self._test_not_equal([a], [b]) + self._test_not_equal([a], np.datetime64("2017-01-01", "s")) + self._test_not_equal([b], np.datetime64("2017-01-01", "s")) + self._test_not_equal([a], np.timedelta64(123, "s")) + self._test_not_equal([b], np.timedelta64(123, "s")) + + def test_non_numeric(self): + self._assert_func('ab', 'ab') + self._test_not_equal('ab', 'abb') + + def test_complex_item(self): + self._assert_func(complex(1, 2), complex(1, 2)) + self._assert_func(complex(1, np.nan), complex(1, np.nan)) + self._test_not_equal(complex(1, np.nan), complex(1, 2)) + self._test_not_equal(complex(np.nan, 1), complex(1, np.nan)) + self._test_not_equal(complex(np.nan, np.inf), complex(np.nan, 2)) + + def test_negative_zero(self): + self._test_not_equal(np.PZERO, np.NZERO) + + def test_complex(self): + x = np.array([complex(1, 2), complex(1, np.nan)]) + y = np.array([complex(1, 2), complex(1, 2)]) + self._assert_func(x, x) + self._test_not_equal(x, y) + + def test_object(self): + #gh-12942 + import datetime + a = np.array([datetime.datetime(2000, 1, 1), + datetime.datetime(2000, 1, 2)]) + self._test_not_equal(a, a[::-1]) + + +class TestArrayAlmostEqual(_GenericTest): + + def setup_method(self): + self._assert_func = assert_array_almost_equal + + def test_closeness(self): + # Note that in the course of time we ended up with + # `abs(x - y) < 1.5 * 10**(-decimal)` + # instead of the previously documented + # `abs(x - y) < 0.5 * 10**(-decimal)` + # so this check serves to preserve the wrongness. + + # test scalars + self._assert_func(1.499999, 0.0, decimal=0) + assert_raises(AssertionError, + lambda: self._assert_func(1.5, 0.0, decimal=0)) + + # test arrays + self._assert_func([1.499999], [0.0], decimal=0) + assert_raises(AssertionError, + lambda: self._assert_func([1.5], [0.0], decimal=0)) + + def test_simple(self): + x = np.array([1234.2222]) + y = np.array([1234.2223]) + + self._assert_func(x, y, decimal=3) + self._assert_func(x, y, decimal=4) + assert_raises(AssertionError, + lambda: self._assert_func(x, y, decimal=5)) + + def test_nan(self): + anan = np.array([np.nan]) + aone = np.array([1]) + ainf = np.array([np.inf]) + self._assert_func(anan, anan) + assert_raises(AssertionError, + lambda: self._assert_func(anan, aone)) + assert_raises(AssertionError, + lambda: self._assert_func(anan, ainf)) + assert_raises(AssertionError, + lambda: self._assert_func(ainf, anan)) + + def test_inf(self): + a = np.array([[1., 2.], [3., 4.]]) + b = a.copy() + a[0, 0] = np.inf + assert_raises(AssertionError, + lambda: self._assert_func(a, b)) + b[0, 0] = -np.inf + assert_raises(AssertionError, + lambda: self._assert_func(a, b)) + + def test_subclass(self): + a = np.array([[1., 2.], [3., 4.]]) + b = np.ma.masked_array([[1., 2.], [0., 4.]], + [[False, False], [True, False]]) + self._assert_func(a, b) + self._assert_func(b, a) + self._assert_func(b, b) + + # Test fully masked as well (see gh-11123). + a = np.ma.MaskedArray(3.5, mask=True) + b = np.array([3., 4., 6.5]) + self._test_equal(a, b) + self._test_equal(b, a) + a = np.ma.masked + b = np.array([3., 4., 6.5]) + self._test_equal(a, b) + self._test_equal(b, a) + a = np.ma.MaskedArray([3., 4., 6.5], mask=[True, True, True]) + b = np.array([1., 2., 3.]) + self._test_equal(a, b) + self._test_equal(b, a) + a = np.ma.MaskedArray([3., 4., 6.5], mask=[True, True, True]) + b = np.array(1.) 
+ self._test_equal(a, b) + self._test_equal(b, a) + + def test_subclass_that_cannot_be_bool(self): + # While we cannot guarantee testing functions will always work for + # subclasses, the tests should ideally rely only on subclasses having + # comparison operators, not on them being able to store booleans + # (which, e.g., astropy Quantity cannot usefully do). See gh-8452. + class MyArray(np.ndarray): + def __eq__(self, other): + return super().__eq__(other).view(np.ndarray) + + def __lt__(self, other): + return super().__lt__(other).view(np.ndarray) + + def all(self, *args, **kwargs): + raise NotImplementedError + + a = np.array([1., 2.]).view(MyArray) + self._assert_func(a, a) + + +class TestAlmostEqual(_GenericTest): + + def setup_method(self): + self._assert_func = assert_almost_equal + + def test_closeness(self): + # Note that in the course of time we ended up with + # `abs(x - y) < 1.5 * 10**(-decimal)` + # instead of the previously documented + # `abs(x - y) < 0.5 * 10**(-decimal)` + # so this check serves to preserve the wrongness. + + # test scalars + self._assert_func(1.499999, 0.0, decimal=0) + assert_raises(AssertionError, + lambda: self._assert_func(1.5, 0.0, decimal=0)) + + # test arrays + self._assert_func([1.499999], [0.0], decimal=0) + assert_raises(AssertionError, + lambda: self._assert_func([1.5], [0.0], decimal=0)) + + def test_nan_item(self): + self._assert_func(np.nan, np.nan) + assert_raises(AssertionError, + lambda: self._assert_func(np.nan, 1)) + assert_raises(AssertionError, + lambda: self._assert_func(np.nan, np.inf)) + assert_raises(AssertionError, + lambda: self._assert_func(np.inf, np.nan)) + + def test_inf_item(self): + self._assert_func(np.inf, np.inf) + self._assert_func(-np.inf, -np.inf) + assert_raises(AssertionError, + lambda: self._assert_func(np.inf, 1)) + assert_raises(AssertionError, + lambda: self._assert_func(-np.inf, np.inf)) + + def test_simple_item(self): + self._test_not_equal(1, 2) + + def test_complex_item(self): + self._assert_func(complex(1, 2), complex(1, 2)) + self._assert_func(complex(1, np.nan), complex(1, np.nan)) + self._assert_func(complex(np.inf, np.nan), complex(np.inf, np.nan)) + self._test_not_equal(complex(1, np.nan), complex(1, 2)) + self._test_not_equal(complex(np.nan, 1), complex(1, np.nan)) + self._test_not_equal(complex(np.nan, np.inf), complex(np.nan, 2)) + + def test_complex(self): + x = np.array([complex(1, 2), complex(1, np.nan)]) + z = np.array([complex(1, 2), complex(np.nan, 1)]) + y = np.array([complex(1, 2), complex(1, 2)]) + self._assert_func(x, x) + self._test_not_equal(x, y) + self._test_not_equal(x, z) + + def test_error_message(self): + """Check the message is formatted correctly for the decimal value. + Also check the message when input includes inf or nan (gh12200)""" + x = np.array([1.00000000001, 2.00000000002, 3.00003]) + y = np.array([1.00000000002, 2.00000000003, 3.00004]) + + # Test with a different amount of decimal digits + with pytest.raises(AssertionError) as exc_info: + self._assert_func(x, y, decimal=12) + msgs = str(exc_info.value).split('\n') + assert_equal(msgs[3], 'Mismatched elements: 3 / 3 (100%)') + assert_equal(msgs[4], 'Max absolute difference: 1.e-05') + assert_equal(msgs[5], 'Max relative difference: 3.33328889e-06') + assert_equal( + msgs[6], + ' x: array([1.00000000001, 2.00000000002, 3.00003 ])') + assert_equal( + msgs[7], + ' y: array([1.00000000002, 2.00000000003, 3.00004 ])') + + # With the default value of decimal digits, only the 3rd element + # differs. 
Note that we only check for the formatting of the arrays + # themselves. + with pytest.raises(AssertionError) as exc_info: + self._assert_func(x, y) + msgs = str(exc_info.value).split('\n') + assert_equal(msgs[3], 'Mismatched elements: 1 / 3 (33.3%)') + assert_equal(msgs[4], 'Max absolute difference: 1.e-05') + assert_equal(msgs[5], 'Max relative difference: 3.33328889e-06') + assert_equal(msgs[6], ' x: array([1. , 2. , 3.00003])') + assert_equal(msgs[7], ' y: array([1. , 2. , 3.00004])') + + # Check the error message when input includes inf + x = np.array([np.inf, 0]) + y = np.array([np.inf, 1]) + with pytest.raises(AssertionError) as exc_info: + self._assert_func(x, y) + msgs = str(exc_info.value).split('\n') + assert_equal(msgs[3], 'Mismatched elements: 1 / 2 (50%)') + assert_equal(msgs[4], 'Max absolute difference: 1.') + assert_equal(msgs[5], 'Max relative difference: 1.') + assert_equal(msgs[6], ' x: array([inf, 0.])') + assert_equal(msgs[7], ' y: array([inf, 1.])') + + # Check the error message when dividing by zero + x = np.array([1, 2]) + y = np.array([0, 0]) + with pytest.raises(AssertionError) as exc_info: + self._assert_func(x, y) + msgs = str(exc_info.value).split('\n') + assert_equal(msgs[3], 'Mismatched elements: 2 / 2 (100%)') + assert_equal(msgs[4], 'Max absolute difference: 2') + assert_equal(msgs[5], 'Max relative difference: inf') + + def test_error_message_2(self): + """Check the message is formatted correctly when either x or y is a scalar.""" + x = 2 + y = np.ones(20) + with pytest.raises(AssertionError) as exc_info: + self._assert_func(x, y) + msgs = str(exc_info.value).split('\n') + assert_equal(msgs[3], 'Mismatched elements: 20 / 20 (100%)') + assert_equal(msgs[4], 'Max absolute difference: 1.') + assert_equal(msgs[5], 'Max relative difference: 1.') + + y = 2 + x = np.ones(20) + with pytest.raises(AssertionError) as exc_info: + self._assert_func(x, y) + msgs = str(exc_info.value).split('\n') + assert_equal(msgs[3], 'Mismatched elements: 20 / 20 (100%)') + assert_equal(msgs[4], 'Max absolute difference: 1.') + assert_equal(msgs[5], 'Max relative difference: 0.5') + + def test_subclass_that_cannot_be_bool(self): + # While we cannot guarantee testing functions will always work for + # subclasses, the tests should ideally rely only on subclasses having + # comparison operators, not on them being able to store booleans + # (which, e.g., astropy Quantity cannot usefully do). See gh-8452. 
+ class MyArray(np.ndarray): + def __eq__(self, other): + return super().__eq__(other).view(np.ndarray) + + def __lt__(self, other): + return super().__lt__(other).view(np.ndarray) + + def all(self, *args, **kwargs): + raise NotImplementedError + + a = np.array([1., 2.]).view(MyArray) + self._assert_func(a, a) + + +class TestApproxEqual: + + def setup_method(self): + self._assert_func = assert_approx_equal + + def test_simple_0d_arrays(self): + x = np.array(1234.22) + y = np.array(1234.23) + + self._assert_func(x, y, significant=5) + self._assert_func(x, y, significant=6) + assert_raises(AssertionError, + lambda: self._assert_func(x, y, significant=7)) + + def test_simple_items(self): + x = 1234.22 + y = 1234.23 + + self._assert_func(x, y, significant=4) + self._assert_func(x, y, significant=5) + self._assert_func(x, y, significant=6) + assert_raises(AssertionError, + lambda: self._assert_func(x, y, significant=7)) + + def test_nan_array(self): + anan = np.array(np.nan) + aone = np.array(1) + ainf = np.array(np.inf) + self._assert_func(anan, anan) + assert_raises(AssertionError, lambda: self._assert_func(anan, aone)) + assert_raises(AssertionError, lambda: self._assert_func(anan, ainf)) + assert_raises(AssertionError, lambda: self._assert_func(ainf, anan)) + + def test_nan_items(self): + anan = np.array(np.nan) + aone = np.array(1) + ainf = np.array(np.inf) + self._assert_func(anan, anan) + assert_raises(AssertionError, lambda: self._assert_func(anan, aone)) + assert_raises(AssertionError, lambda: self._assert_func(anan, ainf)) + assert_raises(AssertionError, lambda: self._assert_func(ainf, anan)) + + +class TestArrayAssertLess: + + def setup_method(self): + self._assert_func = assert_array_less + + def test_simple_arrays(self): + x = np.array([1.1, 2.2]) + y = np.array([1.2, 2.3]) + + self._assert_func(x, y) + assert_raises(AssertionError, lambda: self._assert_func(y, x)) + + y = np.array([1.0, 2.3]) + + assert_raises(AssertionError, lambda: self._assert_func(x, y)) + assert_raises(AssertionError, lambda: self._assert_func(y, x)) + + def test_rank2(self): + x = np.array([[1.1, 2.2], [3.3, 4.4]]) + y = np.array([[1.2, 2.3], [3.4, 4.5]]) + + self._assert_func(x, y) + assert_raises(AssertionError, lambda: self._assert_func(y, x)) + + y = np.array([[1.0, 2.3], [3.4, 4.5]]) + + assert_raises(AssertionError, lambda: self._assert_func(x, y)) + assert_raises(AssertionError, lambda: self._assert_func(y, x)) + + def test_rank3(self): + x = np.ones(shape=(2, 2, 2)) + y = np.ones(shape=(2, 2, 2))+1 + + self._assert_func(x, y) + assert_raises(AssertionError, lambda: self._assert_func(y, x)) + + y[0, 0, 0] = 0 + + assert_raises(AssertionError, lambda: self._assert_func(x, y)) + assert_raises(AssertionError, lambda: self._assert_func(y, x)) + + def test_simple_items(self): + x = 1.1 + y = 2.2 + + self._assert_func(x, y) + assert_raises(AssertionError, lambda: self._assert_func(y, x)) + + y = np.array([2.2, 3.3]) + + self._assert_func(x, y) + assert_raises(AssertionError, lambda: self._assert_func(y, x)) + + y = np.array([1.0, 3.3]) + + assert_raises(AssertionError, lambda: self._assert_func(x, y)) + + def test_nan_noncompare(self): + anan = np.array(np.nan) + aone = np.array(1) + ainf = np.array(np.inf) + self._assert_func(anan, anan) + assert_raises(AssertionError, lambda: self._assert_func(aone, anan)) + assert_raises(AssertionError, lambda: self._assert_func(anan, aone)) + assert_raises(AssertionError, lambda: self._assert_func(anan, ainf)) + assert_raises(AssertionError, lambda: 
self._assert_func(ainf, anan)) + + def test_nan_noncompare_array(self): + x = np.array([1.1, 2.2, 3.3]) + anan = np.array(np.nan) + + assert_raises(AssertionError, lambda: self._assert_func(x, anan)) + assert_raises(AssertionError, lambda: self._assert_func(anan, x)) + + x = np.array([1.1, 2.2, np.nan]) + + assert_raises(AssertionError, lambda: self._assert_func(x, anan)) + assert_raises(AssertionError, lambda: self._assert_func(anan, x)) + + y = np.array([1.0, 2.0, np.nan]) + + self._assert_func(y, x) + assert_raises(AssertionError, lambda: self._assert_func(x, y)) + + def test_inf_compare(self): + aone = np.array(1) + ainf = np.array(np.inf) + + self._assert_func(aone, ainf) + self._assert_func(-ainf, aone) + self._assert_func(-ainf, ainf) + assert_raises(AssertionError, lambda: self._assert_func(ainf, aone)) + assert_raises(AssertionError, lambda: self._assert_func(aone, -ainf)) + assert_raises(AssertionError, lambda: self._assert_func(ainf, ainf)) + assert_raises(AssertionError, lambda: self._assert_func(ainf, -ainf)) + assert_raises(AssertionError, lambda: self._assert_func(-ainf, -ainf)) + + def test_inf_compare_array(self): + x = np.array([1.1, 2.2, np.inf]) + ainf = np.array(np.inf) + + assert_raises(AssertionError, lambda: self._assert_func(x, ainf)) + assert_raises(AssertionError, lambda: self._assert_func(ainf, x)) + assert_raises(AssertionError, lambda: self._assert_func(x, -ainf)) + assert_raises(AssertionError, lambda: self._assert_func(-x, -ainf)) + assert_raises(AssertionError, lambda: self._assert_func(-ainf, -x)) + self._assert_func(-ainf, x) + + +class TestWarns: + + def test_warn(self): + def f(): + warnings.warn("yo") + return 3 + + before_filters = sys.modules['warnings'].filters[:] + assert_equal(assert_warns(UserWarning, f), 3) + after_filters = sys.modules['warnings'].filters + + assert_raises(AssertionError, assert_no_warnings, f) + assert_equal(assert_no_warnings(lambda x: x, 1), 1) + + # Check that the warnings state is unchanged + assert_equal(before_filters, after_filters, + "assert_warns does not preserver warnings state") + + def test_context_manager(self): + + before_filters = sys.modules['warnings'].filters[:] + with assert_warns(UserWarning): + warnings.warn("yo") + after_filters = sys.modules['warnings'].filters + + def no_warnings(): + with assert_no_warnings(): + warnings.warn("yo") + + assert_raises(AssertionError, no_warnings) + assert_equal(before_filters, after_filters, + "assert_warns does not preserver warnings state") + + def test_warn_wrong_warning(self): + def f(): + warnings.warn("yo", DeprecationWarning) + + failed = False + with warnings.catch_warnings(): + warnings.simplefilter("error", DeprecationWarning) + try: + # Should raise a DeprecationWarning + assert_warns(UserWarning, f) + failed = True + except DeprecationWarning: + pass + + if failed: + raise AssertionError("wrong warning caught by assert_warn") + + +class TestAssertAllclose: + + def test_simple(self): + x = 1e-3 + y = 1e-9 + + assert_allclose(x, y, atol=1) + assert_raises(AssertionError, assert_allclose, x, y) + + a = np.array([x, y, x, y]) + b = np.array([x, y, x, x]) + + assert_allclose(a, b, atol=1) + assert_raises(AssertionError, assert_allclose, a, b) + + b[-1] = y * (1 + 1e-8) + assert_allclose(a, b) + assert_raises(AssertionError, assert_allclose, a, b, rtol=1e-9) + + assert_allclose(6, 10, rtol=0.5) + assert_raises(AssertionError, assert_allclose, 10, 6, rtol=0.5) + + def test_min_int(self): + a = np.array([np.iinfo(np.int_).min], dtype=np.int_) + # Should not raise: + 
assert_allclose(a, a) + + def test_report_fail_percentage(self): + a = np.array([1, 1, 1, 1]) + b = np.array([1, 1, 1, 2]) + + with pytest.raises(AssertionError) as exc_info: + assert_allclose(a, b) + msg = str(exc_info.value) + assert_('Mismatched elements: 1 / 4 (25%)\n' + 'Max absolute difference: 1\n' + 'Max relative difference: 0.5' in msg) + + def test_equal_nan(self): + a = np.array([np.nan]) + b = np.array([np.nan]) + # Should not raise: + assert_allclose(a, b, equal_nan=True) + + def test_not_equal_nan(self): + a = np.array([np.nan]) + b = np.array([np.nan]) + assert_raises(AssertionError, assert_allclose, a, b, equal_nan=False) + + def test_equal_nan_default(self): + # Make sure equal_nan default behavior remains unchanged. (All + # of these functions use assert_array_compare under the hood.) + # None of these should raise. + a = np.array([np.nan]) + b = np.array([np.nan]) + assert_array_equal(a, b) + assert_array_almost_equal(a, b) + assert_array_less(a, b) + assert_allclose(a, b) + + def test_report_max_relative_error(self): + a = np.array([0, 1]) + b = np.array([0, 2]) + + with pytest.raises(AssertionError) as exc_info: + assert_allclose(a, b) + msg = str(exc_info.value) + assert_('Max relative difference: 0.5' in msg) + + def test_timedelta(self): + # see gh-18286 + a = np.array([[1, 2, 3, "NaT"]], dtype="m8[ns]") + assert_allclose(a, a) + + def test_error_message_unsigned(self): + """Check the the message is formatted correctly when overflow can occur + (gh21768)""" + # Ensure to test for potential overflow in the case of: + # x - y + # and + # y - x + x = np.asarray([0, 1, 8], dtype='uint8') + y = np.asarray([4, 4, 4], dtype='uint8') + with pytest.raises(AssertionError) as exc_info: + assert_allclose(x, y, atol=3) + msgs = str(exc_info.value).split('\n') + assert_equal(msgs[4], 'Max absolute difference: 4') + + +class TestArrayAlmostEqualNulp: + + def test_float64_pass(self): + # The number of units of least precision + # In this case, use a few places above the lowest level (ie nulp=1) + nulp = 5 + x = np.linspace(-20, 20, 50, dtype=np.float64) + x = 10**x + x = np.r_[-x, x] + + # Addition + eps = np.finfo(x.dtype).eps + y = x + x*eps*nulp/2. + assert_array_almost_equal_nulp(x, y, nulp) + + # Subtraction + epsneg = np.finfo(x.dtype).epsneg + y = x - x*epsneg*nulp/2. + assert_array_almost_equal_nulp(x, y, nulp) + + def test_float64_fail(self): + nulp = 5 + x = np.linspace(-20, 20, 50, dtype=np.float64) + x = 10**x + x = np.r_[-x, x] + + eps = np.finfo(x.dtype).eps + y = x + x*eps*nulp*2. + assert_raises(AssertionError, assert_array_almost_equal_nulp, + x, y, nulp) + + epsneg = np.finfo(x.dtype).epsneg + y = x - x*epsneg*nulp*2. + assert_raises(AssertionError, assert_array_almost_equal_nulp, + x, y, nulp) + + def test_float64_ignore_nan(self): + # Ignore ULP differences between various NAN's + # Note that MIPS may reverse quiet and signaling nans + # so we use the builtin version as a base. + offset = np.uint64(0xffffffff) + nan1_i64 = np.array(np.nan, dtype=np.float64).view(np.uint64) + nan2_i64 = nan1_i64 ^ offset # nan payload on MIPS is all ones. + nan1_f64 = nan1_i64.view(np.float64) + nan2_f64 = nan2_i64.view(np.float64) + assert_array_max_ulp(nan1_f64, nan2_f64, 0) + + def test_float32_pass(self): + nulp = 5 + x = np.linspace(-20, 20, 50, dtype=np.float32) + x = 10**x + x = np.r_[-x, x] + + eps = np.finfo(x.dtype).eps + y = x + x*eps*nulp/2. + assert_array_almost_equal_nulp(x, y, nulp) + + epsneg = np.finfo(x.dtype).epsneg + y = x - x*epsneg*nulp/2. 
+ assert_array_almost_equal_nulp(x, y, nulp) + + def test_float32_fail(self): + nulp = 5 + x = np.linspace(-20, 20, 50, dtype=np.float32) + x = 10**x + x = np.r_[-x, x] + + eps = np.finfo(x.dtype).eps + y = x + x*eps*nulp*2. + assert_raises(AssertionError, assert_array_almost_equal_nulp, + x, y, nulp) + + epsneg = np.finfo(x.dtype).epsneg + y = x - x*epsneg*nulp*2. + assert_raises(AssertionError, assert_array_almost_equal_nulp, + x, y, nulp) + + def test_float32_ignore_nan(self): + # Ignore ULP differences between various NAN's + # Note that MIPS may reverse quiet and signaling nans + # so we use the builtin version as a base. + offset = np.uint32(0xffff) + nan1_i32 = np.array(np.nan, dtype=np.float32).view(np.uint32) + nan2_i32 = nan1_i32 ^ offset # nan payload on MIPS is all ones. + nan1_f32 = nan1_i32.view(np.float32) + nan2_f32 = nan2_i32.view(np.float32) + assert_array_max_ulp(nan1_f32, nan2_f32, 0) + + def test_float16_pass(self): + nulp = 5 + x = np.linspace(-4, 4, 10, dtype=np.float16) + x = 10**x + x = np.r_[-x, x] + + eps = np.finfo(x.dtype).eps + y = x + x*eps*nulp/2. + assert_array_almost_equal_nulp(x, y, nulp) + + epsneg = np.finfo(x.dtype).epsneg + y = x - x*epsneg*nulp/2. + assert_array_almost_equal_nulp(x, y, nulp) + + def test_float16_fail(self): + nulp = 5 + x = np.linspace(-4, 4, 10, dtype=np.float16) + x = 10**x + x = np.r_[-x, x] + + eps = np.finfo(x.dtype).eps + y = x + x*eps*nulp*2. + assert_raises(AssertionError, assert_array_almost_equal_nulp, + x, y, nulp) + + epsneg = np.finfo(x.dtype).epsneg + y = x - x*epsneg*nulp*2. + assert_raises(AssertionError, assert_array_almost_equal_nulp, + x, y, nulp) + + def test_float16_ignore_nan(self): + # Ignore ULP differences between various NAN's + # Note that MIPS may reverse quiet and signaling nans + # so we use the builtin version as a base. + offset = np.uint16(0xff) + nan1_i16 = np.array(np.nan, dtype=np.float16).view(np.uint16) + nan2_i16 = nan1_i16 ^ offset # nan payload on MIPS is all ones. + nan1_f16 = nan1_i16.view(np.float16) + nan2_f16 = nan2_i16.view(np.float16) + assert_array_max_ulp(nan1_f16, nan2_f16, 0) + + def test_complex128_pass(self): + nulp = 5 + x = np.linspace(-20, 20, 50, dtype=np.float64) + x = 10**x + x = np.r_[-x, x] + xi = x + x*1j + + eps = np.finfo(x.dtype).eps + y = x + x*eps*nulp/2. + assert_array_almost_equal_nulp(xi, x + y*1j, nulp) + assert_array_almost_equal_nulp(xi, y + x*1j, nulp) + # The test condition needs to be at least a factor of sqrt(2) smaller + # because the real and imaginary parts both change + y = x + x*eps*nulp/4. + assert_array_almost_equal_nulp(xi, y + y*1j, nulp) + + epsneg = np.finfo(x.dtype).epsneg + y = x - x*epsneg*nulp/2. + assert_array_almost_equal_nulp(xi, x + y*1j, nulp) + assert_array_almost_equal_nulp(xi, y + x*1j, nulp) + y = x - x*epsneg*nulp/4. + assert_array_almost_equal_nulp(xi, y + y*1j, nulp) + + def test_complex128_fail(self): + nulp = 5 + x = np.linspace(-20, 20, 50, dtype=np.float64) + x = 10**x + x = np.r_[-x, x] + xi = x + x*1j + + eps = np.finfo(x.dtype).eps + y = x + x*eps*nulp*2. + assert_raises(AssertionError, assert_array_almost_equal_nulp, + xi, x + y*1j, nulp) + assert_raises(AssertionError, assert_array_almost_equal_nulp, + xi, y + x*1j, nulp) + # The test condition needs to be at least a factor of sqrt(2) smaller + # because the real and imaginary parts both change + y = x + x*eps*nulp + assert_raises(AssertionError, assert_array_almost_equal_nulp, + xi, y + y*1j, nulp) + + epsneg = np.finfo(x.dtype).epsneg + y = x - x*epsneg*nulp*2. 
+ assert_raises(AssertionError, assert_array_almost_equal_nulp, + xi, x + y*1j, nulp) + assert_raises(AssertionError, assert_array_almost_equal_nulp, + xi, y + x*1j, nulp) + y = x - x*epsneg*nulp + assert_raises(AssertionError, assert_array_almost_equal_nulp, + xi, y + y*1j, nulp) + + def test_complex64_pass(self): + nulp = 5 + x = np.linspace(-20, 20, 50, dtype=np.float32) + x = 10**x + x = np.r_[-x, x] + xi = x + x*1j + + eps = np.finfo(x.dtype).eps + y = x + x*eps*nulp/2. + assert_array_almost_equal_nulp(xi, x + y*1j, nulp) + assert_array_almost_equal_nulp(xi, y + x*1j, nulp) + y = x + x*eps*nulp/4. + assert_array_almost_equal_nulp(xi, y + y*1j, nulp) + + epsneg = np.finfo(x.dtype).epsneg + y = x - x*epsneg*nulp/2. + assert_array_almost_equal_nulp(xi, x + y*1j, nulp) + assert_array_almost_equal_nulp(xi, y + x*1j, nulp) + y = x - x*epsneg*nulp/4. + assert_array_almost_equal_nulp(xi, y + y*1j, nulp) + + def test_complex64_fail(self): + nulp = 5 + x = np.linspace(-20, 20, 50, dtype=np.float32) + x = 10**x + x = np.r_[-x, x] + xi = x + x*1j + + eps = np.finfo(x.dtype).eps + y = x + x*eps*nulp*2. + assert_raises(AssertionError, assert_array_almost_equal_nulp, + xi, x + y*1j, nulp) + assert_raises(AssertionError, assert_array_almost_equal_nulp, + xi, y + x*1j, nulp) + y = x + x*eps*nulp + assert_raises(AssertionError, assert_array_almost_equal_nulp, + xi, y + y*1j, nulp) + + epsneg = np.finfo(x.dtype).epsneg + y = x - x*epsneg*nulp*2. + assert_raises(AssertionError, assert_array_almost_equal_nulp, + xi, x + y*1j, nulp) + assert_raises(AssertionError, assert_array_almost_equal_nulp, + xi, y + x*1j, nulp) + y = x - x*epsneg*nulp + assert_raises(AssertionError, assert_array_almost_equal_nulp, + xi, y + y*1j, nulp) + + +class TestULP: + + def test_equal(self): + x = np.random.randn(10) + assert_array_max_ulp(x, x, maxulp=0) + + def test_single(self): + # Generate 1 + small deviation, check that adding eps gives a few UNL + x = np.ones(10).astype(np.float32) + x += 0.01 * np.random.randn(10).astype(np.float32) + eps = np.finfo(np.float32).eps + assert_array_max_ulp(x, x+eps, maxulp=20) + + def test_double(self): + # Generate 1 + small deviation, check that adding eps gives a few UNL + x = np.ones(10).astype(np.float64) + x += 0.01 * np.random.randn(10).astype(np.float64) + eps = np.finfo(np.float64).eps + assert_array_max_ulp(x, x+eps, maxulp=200) + + def test_inf(self): + for dt in [np.float32, np.float64]: + inf = np.array([np.inf]).astype(dt) + big = np.array([np.finfo(dt).max]) + assert_array_max_ulp(inf, big, maxulp=200) + + def test_nan(self): + # Test that nan is 'far' from small, tiny, inf, max and min + for dt in [np.float32, np.float64]: + if dt == np.float32: + maxulp = 1e6 + else: + maxulp = 1e12 + inf = np.array([np.inf]).astype(dt) + nan = np.array([np.nan]).astype(dt) + big = np.array([np.finfo(dt).max]) + tiny = np.array([np.finfo(dt).tiny]) + zero = np.array([np.PZERO]).astype(dt) + nzero = np.array([np.NZERO]).astype(dt) + assert_raises(AssertionError, + lambda: assert_array_max_ulp(nan, inf, + maxulp=maxulp)) + assert_raises(AssertionError, + lambda: assert_array_max_ulp(nan, big, + maxulp=maxulp)) + assert_raises(AssertionError, + lambda: assert_array_max_ulp(nan, tiny, + maxulp=maxulp)) + assert_raises(AssertionError, + lambda: assert_array_max_ulp(nan, zero, + maxulp=maxulp)) + assert_raises(AssertionError, + lambda: assert_array_max_ulp(nan, nzero, + maxulp=maxulp)) + + +class TestStringEqual: + def test_simple(self): + assert_string_equal("hello", "hello") + 
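# --- Editor's sketch (not part of the vendored numpy diff above) -------------
# assert_array_max_ulp, used by TestULP, measures the distance between two
# floats as a count of representable values and raises once that count exceeds
# maxulp. Assumes only the public numpy.testing API shown here.
import numpy as np
from numpy.testing import assert_array_max_ulp

a = np.array([1.0])
b = np.nextafter(a, 2.0)               # the very next representable double
assert_array_max_ulp(a, b, maxulp=1)   # one ULP apart: passes

try:
    assert_array_max_ulp(a, b, maxulp=0)  # zero ULPs demanded: fails
except AssertionError:
    pass
# --- end of sketch; vendored diff resumes below -------------------------------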
assert_string_equal("hello\nmultiline", "hello\nmultiline") + + with pytest.raises(AssertionError) as exc_info: + assert_string_equal("foo\nbar", "hello\nbar") + msg = str(exc_info.value) + assert_equal(msg, "Differences in strings:\n- foo\n+ hello") + + assert_raises(AssertionError, + lambda: assert_string_equal("foo", "hello")) + + def test_regex(self): + assert_string_equal("a+*b", "a+*b") + + assert_raises(AssertionError, + lambda: assert_string_equal("aaa", "a+b")) + + +def assert_warn_len_equal(mod, n_in_context): + try: + mod_warns = mod.__warningregistry__ + except AttributeError: + # the lack of a __warningregistry__ + # attribute means that no warning has + # occurred; this can be triggered in + # a parallel test scenario, while in + # a serial test scenario an initial + # warning (and therefore the attribute) + # are always created first + mod_warns = {} + + num_warns = len(mod_warns) + + if 'version' in mod_warns: + # Python 3 adds a 'version' entry to the registry, + # do not count it. + num_warns -= 1 + + assert_equal(num_warns, n_in_context) + + +def test_warn_len_equal_call_scenarios(): + # assert_warn_len_equal is called under + # varying circumstances depending on serial + # vs. parallel test scenarios; this test + # simply aims to probe both code paths and + # check that no assertion is uncaught + + # parallel scenario -- no warning issued yet + class mod: + pass + + mod_inst = mod() + + assert_warn_len_equal(mod=mod_inst, + n_in_context=0) + + # serial test scenario -- the __warningregistry__ + # attribute should be present + class mod: + def __init__(self): + self.__warningregistry__ = {'warning1':1, + 'warning2':2} + + mod_inst = mod() + assert_warn_len_equal(mod=mod_inst, + n_in_context=2) + + +def _get_fresh_mod(): + # Get this module, with warning registry empty + my_mod = sys.modules[__name__] + try: + my_mod.__warningregistry__.clear() + except AttributeError: + # will not have a __warningregistry__ unless warning has been + # raised in the module at some point + pass + return my_mod + + +def test_clear_and_catch_warnings(): + # Initial state of module, no warnings + my_mod = _get_fresh_mod() + assert_equal(getattr(my_mod, '__warningregistry__', {}), {}) + with clear_and_catch_warnings(modules=[my_mod]): + warnings.simplefilter('ignore') + warnings.warn('Some warning') + assert_equal(my_mod.__warningregistry__, {}) + # Without specified modules, don't clear warnings during context. + # catch_warnings doesn't make an entry for 'ignore'. + with clear_and_catch_warnings(): + warnings.simplefilter('ignore') + warnings.warn('Some warning') + assert_warn_len_equal(my_mod, 0) + + # Manually adding two warnings to the registry: + my_mod.__warningregistry__ = {'warning1': 1, + 'warning2': 2} + + # Confirm that specifying module keeps old warning, does not add new + with clear_and_catch_warnings(modules=[my_mod]): + warnings.simplefilter('ignore') + warnings.warn('Another warning') + assert_warn_len_equal(my_mod, 2) + + # Another warning, no module spec it clears up registry + with clear_and_catch_warnings(): + warnings.simplefilter('ignore') + warnings.warn('Another warning') + assert_warn_len_equal(my_mod, 0) + + +def test_suppress_warnings_module(): + # Initial state of module, no warnings + my_mod = _get_fresh_mod() + assert_equal(getattr(my_mod, '__warningregistry__', {}), {}) + + def warn_other_module(): + # Apply along axis is implemented in python; stacklevel=2 means + # we end up inside its module, not ours. 
+ def warn(arr): + warnings.warn("Some warning 2", stacklevel=2) + return arr + np.apply_along_axis(warn, 0, [0]) + + # Test module based warning suppression: + assert_warn_len_equal(my_mod, 0) + with suppress_warnings() as sup: + sup.record(UserWarning) + # suppress warning from other module (may have .pyc ending), + # if apply_along_axis is moved, had to be changed. + sup.filter(module=np.lib.shape_base) + warnings.warn("Some warning") + warn_other_module() + # Check that the suppression did test the file correctly (this module + # got filtered) + assert_equal(len(sup.log), 1) + assert_equal(sup.log[0].message.args[0], "Some warning") + assert_warn_len_equal(my_mod, 0) + sup = suppress_warnings() + # Will have to be changed if apply_along_axis is moved: + sup.filter(module=my_mod) + with sup: + warnings.warn('Some warning') + assert_warn_len_equal(my_mod, 0) + # And test repeat works: + sup.filter(module=my_mod) + with sup: + warnings.warn('Some warning') + assert_warn_len_equal(my_mod, 0) + + # Without specified modules + with suppress_warnings(): + warnings.simplefilter('ignore') + warnings.warn('Some warning') + assert_warn_len_equal(my_mod, 0) + + +def test_suppress_warnings_type(): + # Initial state of module, no warnings + my_mod = _get_fresh_mod() + assert_equal(getattr(my_mod, '__warningregistry__', {}), {}) + + # Test module based warning suppression: + with suppress_warnings() as sup: + sup.filter(UserWarning) + warnings.warn('Some warning') + assert_warn_len_equal(my_mod, 0) + sup = suppress_warnings() + sup.filter(UserWarning) + with sup: + warnings.warn('Some warning') + assert_warn_len_equal(my_mod, 0) + # And test repeat works: + sup.filter(module=my_mod) + with sup: + warnings.warn('Some warning') + assert_warn_len_equal(my_mod, 0) + + # Without specified modules + with suppress_warnings(): + warnings.simplefilter('ignore') + warnings.warn('Some warning') + assert_warn_len_equal(my_mod, 0) + + +def test_suppress_warnings_decorate_no_record(): + sup = suppress_warnings() + sup.filter(UserWarning) + + @sup + def warn(category): + warnings.warn('Some warning', category) + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + warn(UserWarning) # should be supppressed + warn(RuntimeWarning) + assert_equal(len(w), 1) + + +def test_suppress_warnings_record(): + sup = suppress_warnings() + log1 = sup.record() + + with sup: + log2 = sup.record(message='Some other warning 2') + sup.filter(message='Some warning') + warnings.warn('Some warning') + warnings.warn('Some other warning') + warnings.warn('Some other warning 2') + + assert_equal(len(sup.log), 2) + assert_equal(len(log1), 1) + assert_equal(len(log2),1) + assert_equal(log2[0].message.args[0], 'Some other warning 2') + + # Do it again, with the same context to see if some warnings survived: + with sup: + log2 = sup.record(message='Some other warning 2') + sup.filter(message='Some warning') + warnings.warn('Some warning') + warnings.warn('Some other warning') + warnings.warn('Some other warning 2') + + assert_equal(len(sup.log), 2) + assert_equal(len(log1), 1) + assert_equal(len(log2), 1) + assert_equal(log2[0].message.args[0], 'Some other warning 2') + + # Test nested: + with suppress_warnings() as sup: + sup.record() + with suppress_warnings() as sup2: + sup2.record(message='Some warning') + warnings.warn('Some warning') + warnings.warn('Some other warning') + assert_equal(len(sup2.log), 1) + assert_equal(len(sup.log), 1) + + +def test_suppress_warnings_forwarding(): + def warn_other_module(): 
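# --- Editor's sketch (not part of the vendored numpy diff above) -------------
# suppress_warnings can silence warnings by category or message and, via
# record(), hand back a log of the ones it swallowed, without touching any
# module's __warningregistry__. Assumes only the public numpy.testing API
# shown here.
import warnings
from numpy.testing import suppress_warnings

with suppress_warnings() as sup:
    log = sup.record(UserWarning, message='noisy')   # suppress and record these
    sup.filter(DeprecationWarning)                   # suppress these silently
    warnings.warn('noisy example', UserWarning)
    warnings.warn('old API', DeprecationWarning)

assert len(log) == 1
assert log[0].message.args[0] == 'noisy example'
# --- end of sketch; vendored diff resumes below -------------------------------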
+ # Apply along axis is implemented in python; stacklevel=2 means + # we end up inside its module, not ours. + def warn(arr): + warnings.warn("Some warning", stacklevel=2) + return arr + np.apply_along_axis(warn, 0, [0]) + + with suppress_warnings() as sup: + sup.record() + with suppress_warnings("always"): + for i in range(2): + warnings.warn("Some warning") + + assert_equal(len(sup.log), 2) + + with suppress_warnings() as sup: + sup.record() + with suppress_warnings("location"): + for i in range(2): + warnings.warn("Some warning") + warnings.warn("Some warning") + + assert_equal(len(sup.log), 2) + + with suppress_warnings() as sup: + sup.record() + with suppress_warnings("module"): + for i in range(2): + warnings.warn("Some warning") + warnings.warn("Some warning") + warn_other_module() + + assert_equal(len(sup.log), 2) + + with suppress_warnings() as sup: + sup.record() + with suppress_warnings("once"): + for i in range(2): + warnings.warn("Some warning") + warnings.warn("Some other warning") + warn_other_module() + + assert_equal(len(sup.log), 2) + + +def test_tempdir(): + with tempdir() as tdir: + fpath = os.path.join(tdir, 'tmp') + with open(fpath, 'w'): + pass + assert_(not os.path.isdir(tdir)) + + raised = False + try: + with tempdir() as tdir: + raise ValueError() + except ValueError: + raised = True + assert_(raised) + assert_(not os.path.isdir(tdir)) + + +def test_temppath(): + with temppath() as fpath: + with open(fpath, 'w'): + pass + assert_(not os.path.isfile(fpath)) + + raised = False + try: + with temppath() as fpath: + raise ValueError() + except ValueError: + raised = True + assert_(raised) + assert_(not os.path.isfile(fpath)) + + +class my_cacw(clear_and_catch_warnings): + + class_modules = (sys.modules[__name__],) + + +def test_clear_and_catch_warnings_inherit(): + # Test can subclass and add default modules + my_mod = _get_fresh_mod() + with my_cacw(): + warnings.simplefilter('ignore') + warnings.warn('Some warning') + assert_equal(my_mod.__warningregistry__, {}) + + +@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +class TestAssertNoGcCycles: + """ Test assert_no_gc_cycles """ + def test_passes(self): + def no_cycle(): + b = [] + b.append([]) + return b + + with assert_no_gc_cycles(): + no_cycle() + + assert_no_gc_cycles(no_cycle) + + def test_asserts(self): + def make_cycle(): + a = [] + a.append(a) + a.append(a) + return a + + with assert_raises(AssertionError): + with assert_no_gc_cycles(): + make_cycle() + + with assert_raises(AssertionError): + assert_no_gc_cycles(make_cycle) + + @pytest.mark.slow + def test_fails(self): + """ + Test that in cases where the garbage cannot be collected, we raise an + error, instead of hanging forever trying to clear it. + """ + + class ReferenceCycleInDel: + """ + An object that not only contains a reference cycle, but creates new + cycles whenever it's garbage-collected and its __del__ runs + """ + make_cycle = True + + def __init__(self): + self.cycle = self + + def __del__(self): + # break the current cycle so that `self` can be freed + self.cycle = None + + if ReferenceCycleInDel.make_cycle: + # but create a new one so that the garbage collector has more + # work to do. 
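# --- Editor's sketch (not part of the vendored numpy diff above) -------------
# tempdir() and temppath() hand out a throw-away directory or file path and
# remove it on exit, even when the body raises, which is exactly what
# test_tempdir and test_temppath verify. Assumes only the public
# numpy.testing API shown here.
import os
from numpy.testing import tempdir, temppath

with tempdir() as d:
    with open(os.path.join(d, 'scratch.txt'), 'w') as fh:
        fh.write('data')
assert not os.path.isdir(d)       # directory removed on exit

with temppath(suffix='.txt') as p:
    with open(p, 'w') as fh:
        fh.write('data')
assert not os.path.isfile(p)      # file removed on exit
# --- end of sketch; vendored diff resumes below -------------------------------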
+ ReferenceCycleInDel() + + try: + w = weakref.ref(ReferenceCycleInDel()) + try: + with assert_raises(RuntimeError): + # this will be unable to get a baseline empty garbage + assert_no_gc_cycles(lambda: None) + except AssertionError: + # the above test is only necessary if the GC actually tried to free + # our object anyway, which python 2.7 does not. + if w() is not None: + pytest.skip("GC does not call __del__ on cyclic objects") + raise + + finally: + # make sure that we stop creating reference cycles + ReferenceCycleInDel.make_cycle = False
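# --- Editor's sketch (not part of the vendored numpy diff above) -------------
# assert_no_gc_cycles accepts either a callable or no arguments (as a context
# manager) and fails when the wrapped code leaves new reference cycles for the
# garbage collector, the property TestAssertNoGcCycles exercises above.
# Assumes only the public numpy.testing API shown here.
from numpy.testing import assert_no_gc_cycles

def leaky():
    a = []
    a.append(a)                         # self-referencing list: a cycle

assert_no_gc_cycles(lambda: [1, 2, 3])  # nothing cyclic left behind: passes

try:
    assert_no_gc_cycles(leaky)          # cycle created: raises AssertionError
except AssertionError:
    pass
# --- end of sketch ------------------------------------------------------------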