diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ee3f7be5ce49f1fe19c13db5e1db9bafd5277dd9 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/_dask.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/_dask.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e7c3540e43f1d82586f2c204648d9fe85a15aee0 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/_dask.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/_memmapping_reducer.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/_memmapping_reducer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..829ba3f488aa775bc2816c2553e1e3f9d61841b9 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/_memmapping_reducer.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/_utils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ca3aa87253b898c399fa98477611dbed96098011 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/_utils.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/backports.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/backports.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..50093a05940b1ab3bfc390fc857a6d28cacb9880 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/backports.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/executor.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/executor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5b8937adf0afc464751c4f42c7091daf871784d7 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/executor.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/hashing.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/hashing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a3ad1407fdcb26174050b91d99e48f90f175db5c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/hashing.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/logger.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/logger.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d9c82f8cdd913ada826c0a6dc7785a57dee5aa64 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/logger.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/numpy_pickle_utils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/numpy_pickle_utils.cpython-310.pyc new file 
mode 100644 index 0000000000000000000000000000000000000000..5c70604e807f925e49af6c7fd9a5dc4c098af4d5 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/numpy_pickle_utils.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/parallel.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/parallel.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c270223617dad4454552d751fa5f936bd9f24f89 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/parallel.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/pool.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/pool.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5e3ab8e8c73b1ed803fc50c69c26900754ad3e40 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/pool.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/backports.py b/env-llmeval/lib/python3.10/site-packages/joblib/backports.py new file mode 100644 index 0000000000000000000000000000000000000000..3a14f107689e82b634da6794d30e658fdc000987 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/joblib/backports.py @@ -0,0 +1,177 @@ +""" +Backports of fixes for joblib dependencies +""" +import os +import re +import time + +from os.path import basename +from multiprocessing import util + + +class Version: + """Backport from deprecated distutils + + We maintain this backport to avoid introducing a new dependency on + `packaging`. + + We might rexplore this choice in the future if all major Python projects + introduce a dependency on packaging anyway. + """ + + def __init__(self, vstring=None): + if vstring: + self.parse(vstring) + + def __repr__(self): + return "%s ('%s')" % (self.__class__.__name__, str(self)) + + def __eq__(self, other): + c = self._cmp(other) + if c is NotImplemented: + return c + return c == 0 + + def __lt__(self, other): + c = self._cmp(other) + if c is NotImplemented: + return c + return c < 0 + + def __le__(self, other): + c = self._cmp(other) + if c is NotImplemented: + return c + return c <= 0 + + def __gt__(self, other): + c = self._cmp(other) + if c is NotImplemented: + return c + return c > 0 + + def __ge__(self, other): + c = self._cmp(other) + if c is NotImplemented: + return c + return c >= 0 + + +class LooseVersion(Version): + """Backport from deprecated distutils + + We maintain this backport to avoid introducing a new dependency on + `packaging`. + + We might rexplore this choice in the future if all major Python projects + introduce a dependency on packaging anyway. 
+ """ + + component_re = re.compile(r'(\d+ | [a-z]+ | \.)', re.VERBOSE) + + def __init__(self, vstring=None): + if vstring: + self.parse(vstring) + + def parse(self, vstring): + # I've given up on thinking I can reconstruct the version string + # from the parsed tuple -- so I just store the string here for + # use by __str__ + self.vstring = vstring + components = [x for x in self.component_re.split(vstring) + if x and x != '.'] + for i, obj in enumerate(components): + try: + components[i] = int(obj) + except ValueError: + pass + + self.version = components + + def __str__(self): + return self.vstring + + def __repr__(self): + return "LooseVersion ('%s')" % str(self) + + def _cmp(self, other): + if isinstance(other, str): + other = LooseVersion(other) + elif not isinstance(other, LooseVersion): + return NotImplemented + + if self.version == other.version: + return 0 + if self.version < other.version: + return -1 + if self.version > other.version: + return 1 + + +try: + import numpy as np + + def make_memmap(filename, dtype='uint8', mode='r+', offset=0, + shape=None, order='C', unlink_on_gc_collect=False): + """Custom memmap constructor compatible with numpy.memmap. + + This function: + - is a backport the numpy memmap offset fix (See + https://github.com/numpy/numpy/pull/8443 for more details. + The numpy fix is available starting numpy 1.13) + - adds ``unlink_on_gc_collect``, which specifies explicitly whether + the process re-constructing the memmap owns a reference to the + underlying file. If set to True, it adds a finalizer to the + newly-created memmap that sends a maybe_unlink request for the + memmaped file to resource_tracker. + """ + util.debug( + "[MEMMAP READ] creating a memmap (shape {}, filename {}, " + "pid {})".format(shape, basename(filename), os.getpid()) + ) + + mm = np.memmap(filename, dtype=dtype, mode=mode, offset=offset, + shape=shape, order=order) + if LooseVersion(np.__version__) < '1.13': + mm.offset = offset + if unlink_on_gc_collect: + from ._memmapping_reducer import add_maybe_unlink_finalizer + add_maybe_unlink_finalizer(mm) + return mm +except ImportError: + def make_memmap(filename, dtype='uint8', mode='r+', offset=0, + shape=None, order='C', unlink_on_gc_collect=False): + raise NotImplementedError( + "'joblib.backports.make_memmap' should not be used " + 'if numpy is not installed.') + + +if os.name == 'nt': + # https://github.com/joblib/joblib/issues/540 + access_denied_errors = (5, 13) + from os import replace + + def concurrency_safe_rename(src, dst): + """Renames ``src`` into ``dst`` overwriting ``dst`` if it exists. + + On Windows os.replace can yield permission errors if executed by two + different processes. 
+ """ + max_sleep_time = 1 + total_sleep_time = 0 + sleep_time = 0.001 + while total_sleep_time < max_sleep_time: + try: + replace(src, dst) + break + except Exception as exc: + if getattr(exc, 'winerror', None) in access_denied_errors: + time.sleep(sleep_time) + total_sleep_time += sleep_time + sleep_time *= 2 + else: + raise + else: + raise +else: + from os import replace as concurrency_safe_rename # noqa diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/externals/__init__.py b/env-llmeval/lib/python3.10/site-packages/joblib/externals/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/__init__.py b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5886d2a62092bdc9f444d7a22058d065de567818 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/__init__.py @@ -0,0 +1,44 @@ +r"""The :mod:`loky` module manages a pool of worker that can be re-used across time. +It provides a robust and dynamic implementation os the +:class:`ProcessPoolExecutor` and a function :func:`get_reusable_executor` which +hide the pool management under the hood. +""" +from concurrent.futures import ( + ALL_COMPLETED, + FIRST_COMPLETED, + FIRST_EXCEPTION, + CancelledError, + Executor, + TimeoutError, + as_completed, + wait, +) + +from ._base import Future +from .backend.context import cpu_count +from .backend.reduction import set_loky_pickler +from .reusable_executor import get_reusable_executor +from .cloudpickle_wrapper import wrap_non_picklable_objects +from .process_executor import BrokenProcessPool, ProcessPoolExecutor + + +__all__ = [ + "get_reusable_executor", + "cpu_count", + "wait", + "as_completed", + "Future", + "Executor", + "ProcessPoolExecutor", + "BrokenProcessPool", + "CancelledError", + "TimeoutError", + "FIRST_COMPLETED", + "FIRST_EXCEPTION", + "ALL_COMPLETED", + "wrap_non_picklable_objects", + "set_loky_pickler", +] + + +__version__ = "3.4.1" diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/cloudpickle_wrapper.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/cloudpickle_wrapper.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..85f9f87ad095a4a5a189db1051712d3a583f3481 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/cloudpickle_wrapper.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/reusable_executor.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/reusable_executor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cd00d521c095129f083e2622e9c019833cd13445 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/reusable_executor.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/_base.py b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/_base.py new file mode 100644 index 0000000000000000000000000000000000000000..da0abc1e7fa18363e6342a3b67410f1429e6fa10 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/_base.py @@ -0,0 +1,28 @@ 
+############################################################################### +# Modification of concurrent.futures.Future +# +# author: Thomas Moreau and Olivier Grisel +# +# adapted from concurrent/futures/_base.py (17/02/2017) +# * Do not use yield from +# * Use old super syntax +# +# Copyright 2009 Brian Quinlan. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +from concurrent.futures import Future as _BaseFuture +from concurrent.futures._base import LOGGER + + +# To make loky._base.Future instances awaitable by concurrent.futures.wait, +# derive our custom Future class from _BaseFuture. _invoke_callback is the only +# modification made to this class in loky. +# TODO investigate why using `concurrent.futures.Future` directly does not +# always work in our test suite. +class Future(_BaseFuture): + def _invoke_callbacks(self): + for callback in self._done_callbacks: + try: + callback(self) + except BaseException: + LOGGER.exception(f"exception calling callback for {self!r}") diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/_win_reduction.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/_win_reduction.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..77b13f042a64e73829ebd163667efd2384093b1a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/_win_reduction.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/context.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/context.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2d146090d219c8c6b67cdcb98d8e71cb601d24c9 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/context.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/fork_exec.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/fork_exec.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f77d5d5d21ef4cbc1b36d5f6e7def4bafc474ee7 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/fork_exec.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/popen_loky_win32.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/popen_loky_win32.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e7e241c2bb0fd2d0381b8d7f220172eba35d32c1 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/popen_loky_win32.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/queues.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/queues.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..21dc461d270858e264941b97645df2f54f2cff4f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/queues.cpython-310.pyc differ diff --git 
a/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/reduction.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/reduction.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..12c463eca522053cc1845f0d17e0b42d45fd2009 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/reduction.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/resource_tracker.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/resource_tracker.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a150212d6c24160c9103714663419d3ad967ed66 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/resource_tracker.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/spawn.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/spawn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5afd40fae1704462db07f17b387e2a827ba26c8b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/spawn.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/synchronize.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/synchronize.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..327fc18c5bf8b1f3fbcbc25d4c23e0c22c0641e4 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/synchronize.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/utils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a8856714b24f5928a35217acbcad4e26aafa9438 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/utils.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/cloudpickle_wrapper.py b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/cloudpickle_wrapper.py new file mode 100644 index 0000000000000000000000000000000000000000..099debcb711c6695f0570861293b198047bd6093 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/cloudpickle_wrapper.py @@ -0,0 +1,102 @@ +import inspect +from functools import partial +from joblib.externals.cloudpickle import dumps, loads + + +WRAP_CACHE = {} + + +class CloudpickledObjectWrapper: + def __init__(self, obj, keep_wrapper=False): + self._obj = obj + self._keep_wrapper = keep_wrapper + + def __reduce__(self): + _pickled_object = dumps(self._obj) + if not self._keep_wrapper: + return loads, (_pickled_object,) + + return _reconstruct_wrapper, (_pickled_object, self._keep_wrapper) + + def __getattr__(self, attr): + # Ensure that the wrapped object can be used seemlessly as the + # previous object. 
+ if attr not in ["_obj", "_keep_wrapper"]: + return getattr(self._obj, attr) + return getattr(self, attr) + + +# Make sure the wrapped object conserves the callable property +class CallableObjectWrapper(CloudpickledObjectWrapper): + def __call__(self, *args, **kwargs): + return self._obj(*args, **kwargs) + + +def _wrap_non_picklable_objects(obj, keep_wrapper): + if callable(obj): + return CallableObjectWrapper(obj, keep_wrapper=keep_wrapper) + return CloudpickledObjectWrapper(obj, keep_wrapper=keep_wrapper) + + +def _reconstruct_wrapper(_pickled_object, keep_wrapper): + obj = loads(_pickled_object) + return _wrap_non_picklable_objects(obj, keep_wrapper) + + +def _wrap_objects_when_needed(obj): + # Function to introspect an object and decide if it should be wrapped or + # not. + need_wrap = "__main__" in getattr(obj, "__module__", "") + if isinstance(obj, partial): + return partial( + _wrap_objects_when_needed(obj.func), + *[_wrap_objects_when_needed(a) for a in obj.args], + **{ + k: _wrap_objects_when_needed(v) + for k, v in obj.keywords.items() + } + ) + if callable(obj): + # Need wrap if the object is a function defined in a local scope of + # another function. + func_code = getattr(obj, "__code__", "") + need_wrap |= getattr(func_code, "co_flags", 0) & inspect.CO_NESTED + + # Need wrap if the obj is a lambda expression + func_name = getattr(obj, "__name__", "") + need_wrap |= "" in func_name + + if not need_wrap: + return obj + + wrapped_obj = WRAP_CACHE.get(obj) + if wrapped_obj is None: + wrapped_obj = _wrap_non_picklable_objects(obj, keep_wrapper=False) + WRAP_CACHE[obj] = wrapped_obj + return wrapped_obj + + +def wrap_non_picklable_objects(obj, keep_wrapper=True): + """Wrapper for non-picklable object to use cloudpickle to serialize them. + + Note that this wrapper tends to slow down the serialization process as it + is done with cloudpickle which is typically slower compared to pickle. The + proper way to solve serialization issues is to avoid defining functions and + objects in the main scripts and to implement __reduce__ functions for + complex classes. 
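
# Usage sketch for the wrapper above (assuming joblib is installed): a nested
# function cannot be serialized by the stdlib pickler, but once wrapped it
# round-trips through plain pickle because cloudpickle handles the payload.
import pickle
from joblib.externals.loky import wrap_non_picklable_objects

def make_adder(offset):
    def add(x):                        # defined in a local scope: not picklable
        return x + offset
    return wrap_non_picklable_objects(add, keep_wrapper=True)

add_three = make_adder(3)
restored = pickle.loads(pickle.dumps(add_three))
assert restored(4) == 7
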
+ """ + # If obj is a class, create a CloudpickledClassWrapper which instantiates + # the object internally and wrap it directly in a CloudpickledObjectWrapper + if inspect.isclass(obj): + + class CloudpickledClassWrapper(CloudpickledObjectWrapper): + def __init__(self, *args, **kwargs): + self._obj = obj(*args, **kwargs) + self._keep_wrapper = keep_wrapper + + CloudpickledClassWrapper.__name__ = obj.__name__ + return CloudpickledClassWrapper + + # If obj is an instance of a class, just wrap it in a regular + # CloudpickledObjectWrapper + return _wrap_non_picklable_objects(obj, keep_wrapper=keep_wrapper) diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/initializers.py b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/initializers.py new file mode 100644 index 0000000000000000000000000000000000000000..aea0e56c25d0d74e04788493058549a1399f8342 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/initializers.py @@ -0,0 +1,80 @@ +import warnings + + +def _viztracer_init(init_kwargs): + """Initialize viztracer's profiler in worker processes""" + from viztracer import VizTracer + + tracer = VizTracer(**init_kwargs) + tracer.register_exit() + tracer.start() + + +def _make_viztracer_initializer_and_initargs(): + try: + import viztracer + + tracer = viztracer.get_tracer() + if tracer is not None and getattr(tracer, "enable", False): + # Profiler is active: introspect its configuration to + # initialize the workers with the same configuration. + return _viztracer_init, (tracer.init_kwargs,) + except ImportError: + # viztracer is not installed: nothing to do + pass + except Exception as e: + # In case viztracer's API evolve, we do not want to crash loky but + # we want to know about it to be able to update loky. + warnings.warn(f"Unable to introspect viztracer state: {e}") + return None, () + + +class _ChainedInitializer: + """Compound worker initializer + + This is meant to be used in conjunction with _chain_initializers to + produce the necessary chained_args list to be passed to __call__. + """ + + def __init__(self, initializers): + self._initializers = initializers + + def __call__(self, *chained_args): + for initializer, args in zip(self._initializers, chained_args): + initializer(*args) + + +def _chain_initializers(initializer_and_args): + """Convenience helper to combine a sequence of initializers. + + If some initializers are None, they are filtered out. 
+ """ + filtered_initializers = [] + filtered_initargs = [] + for initializer, initargs in initializer_and_args: + if initializer is not None: + filtered_initializers.append(initializer) + filtered_initargs.append(initargs) + + if not filtered_initializers: + return None, () + elif len(filtered_initializers) == 1: + return filtered_initializers[0], filtered_initargs[0] + else: + return _ChainedInitializer(filtered_initializers), filtered_initargs + + +def _prepare_initializer(initializer, initargs): + if initializer is not None and not callable(initializer): + raise TypeError( + f"initializer must be a callable, got: {initializer!r}" + ) + + # Introspect runtime to determine if we need to propagate the viztracer + # profiler information to the workers: + return _chain_initializers( + [ + (initializer, initargs), + _make_viztracer_initializer_and_initargs(), + ] + ) diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/process_executor.py b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/process_executor.py new file mode 100644 index 0000000000000000000000000000000000000000..1e08cc21f21dad4e9df4f547add59a8660395043 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/process_executor.py @@ -0,0 +1,1314 @@ +############################################################################### +# Re-implementation of the ProcessPoolExecutor more robust to faults +# +# author: Thomas Moreau and Olivier Grisel +# +# adapted from concurrent/futures/process_pool_executor.py (17/02/2017) +# * Add an extra management thread to detect executor_manager_thread failures, +# * Improve the shutdown process to avoid deadlocks, +# * Add timeout for workers, +# * More robust pickling process. +# +# Copyright 2009 Brian Quinlan. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Implements ProcessPoolExecutor. + +The follow diagram and text describe the data-flow through the system: + +|======================= In-process =====================|== Out-of-process ==| + ++----------+ +----------+ +--------+ +-----------+ +---------+ +| | => | Work Ids | | | | Call Q | | Process | +| | +----------+ | | +-----------+ | Pool | +| | | ... | | | | ... | +---------+ +| | | 6 | => | | => | 5, call() | => | | +| | | 7 | | | | ... | | | +| Process | | ... | | Local | +-----------+ | Process | +| Pool | +----------+ | Worker | | #1..n | +| Executor | | Thread | | | +| | +----------- + | | +-----------+ | | +| | <=> | Work Items | <=> | | <= | Result Q | <= | | +| | +------------+ | | +-----------+ | | +| | | 6: call() | | | | ... | | | +| | | future | +--------+ | 4, result | | | +| | | ... | | 3, except | | | ++----------+ +------------+ +-----------+ +---------+ + +Executor.submit() called: +- creates a uniquely numbered _WorkItem and adds it to the "Work Items" dict +- adds the id of the _WorkItem to the "Work Ids" queue + +Local worker thread: +- reads work ids from the "Work Ids" queue and looks up the corresponding + WorkItem from the "Work Items" dict: if the work item has been cancelled then + it is simply removed from the dict, otherwise it is repackaged as a + _CallItem and put in the "Call Q". New _CallItems are put in the "Call Q" + until "Call Q" is full. NOTE: the size of the "Call Q" is kept small because + calls placed in the "Call Q" can no longer be cancelled with Future.cancel(). 
+- reads _ResultItems from "Result Q", updates the future stored in the + "Work Items" dict and deletes the dict entry + +Process #1..n: +- reads _CallItems from "Call Q", executes the calls, and puts the resulting + _ResultItems in "Result Q" +""" + + +__author__ = "Thomas Moreau (thomas.moreau.2010@gmail.com)" + + +import os +import gc +import sys +import queue +import struct +import weakref +import warnings +import itertools +import traceback +import threading +from time import time, sleep +import multiprocessing as mp +from functools import partial +from pickle import PicklingError +from concurrent.futures import Executor +from concurrent.futures._base import LOGGER +from concurrent.futures.process import BrokenProcessPool as _BPPException +from multiprocessing.connection import wait + +from ._base import Future +from .backend import get_context +from .backend.context import cpu_count, _MAX_WINDOWS_WORKERS +from .backend.queues import Queue, SimpleQueue +from .backend.reduction import set_loky_pickler, get_loky_pickler_name +from .backend.utils import kill_process_tree, get_exitcodes_terminated_worker +from .initializers import _prepare_initializer + + +# Mechanism to prevent infinite process spawning. When a worker of a +# ProcessPoolExecutor nested in MAX_DEPTH Executor tries to create a new +# Executor, a LokyRecursionError is raised +MAX_DEPTH = int(os.environ.get("LOKY_MAX_DEPTH", 10)) +_CURRENT_DEPTH = 0 + +# Minimum time interval between two consecutive memory leak protection checks. +_MEMORY_LEAK_CHECK_DELAY = 1.0 + +# Number of bytes of memory usage allowed over the reference process size. +_MAX_MEMORY_LEAK_SIZE = int(3e8) + + +try: + from psutil import Process + + _USE_PSUTIL = True + + def _get_memory_usage(pid, force_gc=False): + if force_gc: + gc.collect() + + mem_size = Process(pid).memory_info().rss + mp.util.debug(f"psutil return memory size: {mem_size}") + return mem_size + +except ImportError: + _USE_PSUTIL = False + + +class _ThreadWakeup: + def __init__(self): + self._closed = False + self._reader, self._writer = mp.Pipe(duplex=False) + + def close(self): + if not self._closed: + self._closed = True + self._writer.close() + self._reader.close() + + def wakeup(self): + if not self._closed: + self._writer.send_bytes(b"") + + def clear(self): + if not self._closed: + while self._reader.poll(): + self._reader.recv_bytes() + + +class _ExecutorFlags: + """necessary references to maintain executor states without preventing gc + + It permits to keep the information needed by executor_manager_thread + and crash_detection_thread to maintain the pool without preventing the + garbage collection of unreferenced executors. + """ + + def __init__(self, shutdown_lock): + + self.shutdown = False + self.broken = None + self.kill_workers = False + self.shutdown_lock = shutdown_lock + + def flag_as_shutting_down(self, kill_workers=None): + with self.shutdown_lock: + self.shutdown = True + if kill_workers is not None: + self.kill_workers = kill_workers + + def flag_as_broken(self, broken): + with self.shutdown_lock: + self.shutdown = True + self.broken = broken + + +# Prior to 3.9, executor_manager_thread is created as daemon thread. This means +# that it is not joined automatically when the interpreter is shutting down. +# To work around this problem, an exit handler is installed to tell the +# thread to exit when the interpreter is shutting down and then waits until +# it finishes. 
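
# Sketch of the pipe-based wake-up pattern used by _ThreadWakeup above: a
# manager thread blocks in multiprocessing.connection.wait() on several
# readers, and any other thread can unblock it by sending an empty message.
import multiprocessing as mp
from multiprocessing.connection import wait

reader, writer = mp.Pipe(duplex=False)

writer.send_bytes(b"")             # the "wakeup()" call from another thread
ready = wait([reader], timeout=1)
assert reader in ready

while reader.poll():               # the "clear()" step drains the pipe so the
    reader.recv_bytes()            # next wait() blocks again
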
The thread needs to be daemonized because the atexit hooks are +# called after all non daemonized threads are joined. +# +# Starting 3.9, there exists a specific atexit hook to be called before joining +# the threads so the executor_manager_thread does not need to be daemonized +# anymore. +# +# The atexit hooks are registered when starting the first ProcessPoolExecutor +# to avoid import having an effect on the interpreter. + +_global_shutdown = False +_global_shutdown_lock = threading.Lock() +_threads_wakeups = weakref.WeakKeyDictionary() + + +def _python_exit(): + global _global_shutdown + _global_shutdown = True + + # Materialize the list of items to avoid error due to iterating over + # changing size dictionary. + items = list(_threads_wakeups.items()) + if len(items) > 0: + mp.util.debug( + "Interpreter shutting down. Waking up {len(items)}" + f"executor_manager_thread:\n{items}" + ) + + # Wake up the executor_manager_thread's so they can detect the interpreter + # is shutting down and exit. + for _, (shutdown_lock, thread_wakeup) in items: + with shutdown_lock: + thread_wakeup.wakeup() + + # Collect the executor_manager_thread's to make sure we exit cleanly. + for thread, _ in items: + # This locks is to prevent situations where an executor is gc'ed in one + # thread while the atexit finalizer is running in another thread. This + # can happen when joblib is used in pypy for instance. + with _global_shutdown_lock: + thread.join() + + +# With the fork context, _thread_wakeups is propagated to children. +# Clear it after fork to avoid some situation that can cause some +# freeze when joining the workers. +mp.util.register_after_fork(_threads_wakeups, lambda obj: obj.clear()) + + +# Module variable to register the at_exit call +process_pool_executor_at_exit = None + +# Controls how many more calls than processes will be queued in the call queue. +# A smaller number will mean that processes spend more time idle waiting for +# work while a larger number will make Future.cancel() succeed less frequently +# (Futures in the call queue cannot be cancelled). 
+EXTRA_QUEUED_CALLS = 1 + + +class _RemoteTraceback(Exception): + """Embed stringification of remote traceback in local traceback""" + + def __init__(self, tb=None): + self.tb = f'\n"""\n{tb}"""' + + def __str__(self): + return self.tb + + +# Do not inherit from BaseException to mirror +# concurrent.futures.process._ExceptionWithTraceback +class _ExceptionWithTraceback: + def __init__(self, exc): + tb = getattr(exc, "__traceback__", None) + if tb is None: + _, _, tb = sys.exc_info() + tb = traceback.format_exception(type(exc), exc, tb) + tb = "".join(tb) + self.exc = exc + self.tb = tb + + def __reduce__(self): + return _rebuild_exc, (self.exc, self.tb) + + +def _rebuild_exc(exc, tb): + exc.__cause__ = _RemoteTraceback(tb) + return exc + + +class _WorkItem: + + __slots__ = ["future", "fn", "args", "kwargs"] + + def __init__(self, future, fn, args, kwargs): + self.future = future + self.fn = fn + self.args = args + self.kwargs = kwargs + + +class _ResultItem: + def __init__(self, work_id, exception=None, result=None): + self.work_id = work_id + self.exception = exception + self.result = result + + +class _CallItem: + def __init__(self, work_id, fn, args, kwargs): + self.work_id = work_id + self.fn = fn + self.args = args + self.kwargs = kwargs + + # Store the current loky_pickler so it is correctly set in the worker + self.loky_pickler = get_loky_pickler_name() + + def __call__(self): + set_loky_pickler(self.loky_pickler) + return self.fn(*self.args, **self.kwargs) + + def __repr__(self): + return ( + f"CallItem({self.work_id}, {self.fn}, {self.args}, {self.kwargs})" + ) + + +class _SafeQueue(Queue): + """Safe Queue set exception to the future object linked to a job""" + + def __init__( + self, + max_size=0, + ctx=None, + pending_work_items=None, + running_work_items=None, + thread_wakeup=None, + reducers=None, + ): + self.thread_wakeup = thread_wakeup + self.pending_work_items = pending_work_items + self.running_work_items = running_work_items + super().__init__(max_size, reducers=reducers, ctx=ctx) + + def _on_queue_feeder_error(self, e, obj): + if isinstance(obj, _CallItem): + # format traceback only works on python3 + if isinstance(e, struct.error): + raised_error = RuntimeError( + "The task could not be sent to the workers as it is too " + "large for `send_bytes`." + ) + else: + raised_error = PicklingError( + "Could not pickle the task to send it to the workers." + ) + tb = traceback.format_exception( + type(e), e, getattr(e, "__traceback__", None) + ) + raised_error.__cause__ = _RemoteTraceback("".join(tb)) + work_item = self.pending_work_items.pop(obj.work_id, None) + self.running_work_items.remove(obj.work_id) + # work_item can be None if another process terminated. In this + # case, the executor_manager_thread fails all work_items with + # BrokenProcessPool + if work_item is not None: + work_item.future.set_exception(raised_error) + del work_item + self.thread_wakeup.wakeup() + else: + super()._on_queue_feeder_error(e, obj) + + +def _get_chunks(chunksize, *iterables): + """Iterates over zip()ed iterables in chunks.""" + it = zip(*iterables) + while True: + chunk = tuple(itertools.islice(it, chunksize)) + if not chunk: + return + yield chunk + + +def _process_chunk(fn, chunk): + """Processes a chunk of an iterable passed to map. + + Runs the function passed to map() on a chunk of the + iterable passed to map. + + This function is run in a separate process. 
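
# Standalone sketch of the remote-traceback embedding above: the worker side
# formats its traceback as text, and the parent re-raises the original
# exception with that text attached as __cause__, so users see both the local
# and the remote stack.
import traceback

class RemoteTraceback(Exception):
    def __init__(self, tb):
        self.tb = f'\n"""\n{tb}"""'

    def __str__(self):
        return self.tb

def rebuild_exc(exc, tb):
    exc.__cause__ = RemoteTraceback(tb)
    return exc

try:
    1 / 0
except ZeroDivisionError as e:
    rebuilt = rebuild_exc(e, traceback.format_exc())

assert "ZeroDivisionError" in str(rebuilt.__cause__)
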
+ + """ + return [fn(*args) for args in chunk] + + +def _sendback_result(result_queue, work_id, result=None, exception=None): + """Safely send back the given result or exception""" + try: + result_queue.put( + _ResultItem(work_id, result=result, exception=exception) + ) + except BaseException as e: + exc = _ExceptionWithTraceback(e) + result_queue.put(_ResultItem(work_id, exception=exc)) + + +def _process_worker( + call_queue, + result_queue, + initializer, + initargs, + processes_management_lock, + timeout, + worker_exit_lock, + current_depth, +): + """Evaluates calls from call_queue and places the results in result_queue. + + This worker is run in a separate process. + + Args: + call_queue: A ctx.Queue of _CallItems that will be read and + evaluated by the worker. + result_queue: A ctx.Queue of _ResultItems that will written + to by the worker. + initializer: A callable initializer, or None + initargs: A tuple of args for the initializer + processes_management_lock: A ctx.Lock avoiding worker timeout while + some workers are being spawned. + timeout: maximum time to wait for a new item in the call_queue. If that + time is expired, the worker will shutdown. + worker_exit_lock: Lock to avoid flagging the executor as broken on + workers timeout. + current_depth: Nested parallelism level, to avoid infinite spawning. + """ + if initializer is not None: + try: + initializer(*initargs) + except BaseException: + LOGGER.critical("Exception in initializer:", exc_info=True) + # The parent will notice that the process stopped and + # mark the pool broken + return + + # set the global _CURRENT_DEPTH mechanism to limit recursive call + global _CURRENT_DEPTH + _CURRENT_DEPTH = current_depth + _process_reference_size = None + _last_memory_leak_check = None + pid = os.getpid() + + mp.util.debug(f"Worker started with timeout={timeout}") + while True: + try: + call_item = call_queue.get(block=True, timeout=timeout) + if call_item is None: + mp.util.info("Shutting down worker on sentinel") + except queue.Empty: + mp.util.info(f"Shutting down worker after timeout {timeout:0.3f}s") + if processes_management_lock.acquire(block=False): + processes_management_lock.release() + call_item = None + else: + mp.util.info("Could not acquire processes_management_lock") + continue + except BaseException: + previous_tb = traceback.format_exc() + try: + result_queue.put(_RemoteTraceback(previous_tb)) + except BaseException: + # If we cannot format correctly the exception, at least print + # the traceback. + print(previous_tb) + mp.util.debug("Exiting with code 1") + sys.exit(1) + if call_item is None: + # Notify queue management thread about worker shutdown + result_queue.put(pid) + is_clean = worker_exit_lock.acquire(True, timeout=30) + + # Early notify any loky executor running in this worker process + # (nested parallelism) that this process is about to shutdown to + # avoid a deadlock waiting undifinitely for the worker to finish. 
+ _python_exit() + + if is_clean: + mp.util.debug("Exited cleanly") + else: + mp.util.info("Main process did not release worker_exit") + return + try: + r = call_item() + except BaseException as e: + exc = _ExceptionWithTraceback(e) + result_queue.put(_ResultItem(call_item.work_id, exception=exc)) + else: + _sendback_result(result_queue, call_item.work_id, result=r) + del r + + # Free the resource as soon as possible, to avoid holding onto + # open files or shared memory that is not needed anymore + del call_item + + if _USE_PSUTIL: + if _process_reference_size is None: + # Make reference measurement after the first call + _process_reference_size = _get_memory_usage(pid, force_gc=True) + _last_memory_leak_check = time() + continue + if time() - _last_memory_leak_check > _MEMORY_LEAK_CHECK_DELAY: + mem_usage = _get_memory_usage(pid) + _last_memory_leak_check = time() + if mem_usage - _process_reference_size < _MAX_MEMORY_LEAK_SIZE: + # Memory usage stays within bounds: everything is fine. + continue + + # Check again memory usage; this time take the measurement + # after a forced garbage collection to break any reference + # cycles. + mem_usage = _get_memory_usage(pid, force_gc=True) + _last_memory_leak_check = time() + if mem_usage - _process_reference_size < _MAX_MEMORY_LEAK_SIZE: + # The GC managed to free the memory: everything is fine. + continue + + # The process is leaking memory: let the master process + # know that we need to start a new worker. + mp.util.info("Memory leak detected: shutting down worker") + result_queue.put(pid) + with worker_exit_lock: + mp.util.debug("Exit due to memory leak") + return + else: + # if psutil is not installed, trigger gc.collect events + # regularly to limit potential memory leaks due to reference cycles + if _last_memory_leak_check is None or ( + time() - _last_memory_leak_check > _MEMORY_LEAK_CHECK_DELAY + ): + gc.collect() + _last_memory_leak_check = time() + + +class _ExecutorManagerThread(threading.Thread): + """Manages the communication between this process and the worker processes. + + The manager is run in a local thread. + + Args: + executor: A reference to the ProcessPoolExecutor that owns + this thread. A weakref will be own by the manager as well as + references to internal objects used to introspect the state of + the executor. + """ + + def __init__(self, executor): + # Store references to necessary internals of the executor. + + # A _ThreadWakeup to allow waking up the executor_manager_thread from + # the main Thread and avoid deadlocks caused by permanently + # locked queues. + self.thread_wakeup = executor._executor_manager_thread_wakeup + self.shutdown_lock = executor._shutdown_lock + + # A weakref.ref to the ProcessPoolExecutor that owns this thread. Used + # to determine if the ProcessPoolExecutor has been garbage collected + # and that the manager can exit. + # When the executor gets garbage collected, the weakref callback + # will wake up the queue management thread so that it can terminate + # if there is no pending work item. + def weakref_cb( + _, + thread_wakeup=self.thread_wakeup, + shutdown_lock=self.shutdown_lock, + ): + if mp is not None: + # At this point, the multiprocessing module can already be + # garbage collected. We only log debug info when still + # possible. 
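
# Sketch of the memory-leak guard above (assuming psutil is installed): the
# worker records its RSS after the first task and asks to be recycled when
# usage grows past a fixed budget even after a forced gc.collect().
import gc
import os
import psutil

LEAK_BUDGET = int(3e8)   # bytes, mirroring _MAX_MEMORY_LEAK_SIZE

def rss(force_gc=False):
    if force_gc:
        gc.collect()
    return psutil.Process(os.getpid()).memory_info().rss

reference_size = rss(force_gc=True)

def worker_is_leaking():
    if rss() - reference_size < LEAK_BUDGET:
        return False    # within budget: nothing to do
    # Re-measure after a forced collection to rule out reference cycles.
    return rss(force_gc=True) - reference_size >= LEAK_BUDGET

print("recycle this worker:", worker_is_leaking())
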
+ mp.util.debug( + "Executor collected: triggering callback for" + " QueueManager wakeup" + ) + with shutdown_lock: + thread_wakeup.wakeup() + + self.executor_reference = weakref.ref(executor, weakref_cb) + + # The flags of the executor + self.executor_flags = executor._flags + + # A list of the ctx.Process instances used as workers. + self.processes = executor._processes + + # A ctx.Queue that will be filled with _CallItems derived from + # _WorkItems for processing by the process workers. + self.call_queue = executor._call_queue + + # A ctx.SimpleQueue of _ResultItems generated by the process workers. + self.result_queue = executor._result_queue + + # A queue.Queue of work ids e.g. Queue([5, 6, ...]). + self.work_ids_queue = executor._work_ids + + # A dict mapping work ids to _WorkItems e.g. + # {5: <_WorkItem...>, 6: <_WorkItem...>, ...} + self.pending_work_items = executor._pending_work_items + + # A list of the work_ids that are currently running + self.running_work_items = executor._running_work_items + + # A lock to avoid concurrent shutdown of workers on timeout and spawn + # of new processes or shut down + self.processes_management_lock = executor._processes_management_lock + + super().__init__(name="ExecutorManagerThread") + if sys.version_info < (3, 9): + self.daemon = True + + def run(self): + # Main loop for the executor manager thread. + + while True: + self.add_call_item_to_queue() + + result_item, is_broken, bpe = self.wait_result_broken_or_wakeup() + + if is_broken: + self.terminate_broken(bpe) + return + if result_item is not None: + self.process_result_item(result_item) + # Delete reference to result_item to avoid keeping references + # while waiting on new results. + del result_item + + if self.is_shutting_down(): + self.flag_executor_shutting_down() + + # Since no new work items can be added, it is safe to shutdown + # this thread if there are no pending work items. + if not self.pending_work_items: + self.join_executor_internals() + return + + def add_call_item_to_queue(self): + # Fills call_queue with _WorkItems from pending_work_items. + # This function never blocks. + while True: + if self.call_queue.full(): + return + try: + work_id = self.work_ids_queue.get(block=False) + except queue.Empty: + return + else: + work_item = self.pending_work_items[work_id] + + if work_item.future.set_running_or_notify_cancel(): + self.running_work_items += [work_id] + self.call_queue.put( + _CallItem( + work_id, + work_item.fn, + work_item.args, + work_item.kwargs, + ), + block=True, + ) + else: + del self.pending_work_items[work_id] + continue + + def wait_result_broken_or_wakeup(self): + # Wait for a result to be ready in the result_queue while checking + # that all worker processes are still running, or for a wake up + # signal send. The wake up signals come either from new tasks being + # submitted, from the executor being shutdown/gc-ed, or from the + # shutdown of the python interpreter. + result_reader = self.result_queue._reader + wakeup_reader = self.thread_wakeup._reader + readers = [result_reader, wakeup_reader] + worker_sentinels = [p.sentinel for p in list(self.processes.values())] + ready = wait(readers + worker_sentinels) + + bpe = None + is_broken = True + result_item = None + if result_reader in ready: + try: + result_item = result_reader.recv() + if isinstance(result_item, _RemoteTraceback): + bpe = BrokenProcessPool( + "A task has failed to un-serialize. Please ensure that" + " the arguments of the function are all picklable." 
+ ) + bpe.__cause__ = result_item + else: + is_broken = False + except BaseException as e: + bpe = BrokenProcessPool( + "A result has failed to un-serialize. Please ensure that " + "the objects returned by the function are always " + "picklable." + ) + tb = traceback.format_exception( + type(e), e, getattr(e, "__traceback__", None) + ) + bpe.__cause__ = _RemoteTraceback("".join(tb)) + + elif wakeup_reader in ready: + # This is simply a wake-up event that might either trigger putting + # more tasks in the queue or trigger the clean up of resources. + is_broken = False + else: + # A worker has terminated and we don't know why, set the state of + # the executor as broken + exit_codes = "" + if sys.platform != "win32": + # In Windows, introspecting terminated workers exitcodes seems + # unstable, therefore they are not appended in the exception + # message. + exit_codes = ( + "\nThe exit codes of the workers are " + f"{get_exitcodes_terminated_worker(self.processes)}" + ) + mp.util.debug( + "A worker unexpectedly terminated. Workers that " + "might have caused the breakage: " + + str( + { + p.name: p.exitcode + for p in list(self.processes.values()) + if p is not None and p.sentinel in ready + } + ) + ) + bpe = TerminatedWorkerError( + "A worker process managed by the executor was unexpectedly " + "terminated. This could be caused by a segmentation fault " + "while calling the function or by an excessive memory usage " + "causing the Operating System to kill the worker.\n" + f"{exit_codes}" + ) + + self.thread_wakeup.clear() + + return result_item, is_broken, bpe + + def process_result_item(self, result_item): + # Process the received a result_item. This can be either the PID of a + # worker that exited gracefully or a _ResultItem + + if isinstance(result_item, int): + # Clean shutdown of a worker using its PID, either on request + # by the executor.shutdown method or by the timeout of the worker + # itself: we should not mark the executor as broken. + with self.processes_management_lock: + p = self.processes.pop(result_item, None) + + # p can be None if the executor is concurrently shutting down. + if p is not None: + p._worker_exit_lock.release() + mp.util.debug( + f"joining {p.name} when processing {p.pid} as result_item" + ) + p.join() + del p + + # Make sure the executor have the right number of worker, even if a + # worker timeout while some jobs were submitted. If some work is + # pending or there is less processes than running items, we need to + # start a new Process and raise a warning. + n_pending = len(self.pending_work_items) + n_running = len(self.running_work_items) + if n_pending - n_running > 0 or n_running > len(self.processes): + executor = self.executor_reference() + if ( + executor is not None + and len(self.processes) < executor._max_workers + ): + warnings.warn( + "A worker stopped while some jobs were given to the " + "executor. This can be caused by a too short worker " + "timeout or by a memory leak.", + UserWarning, + ) + with executor._processes_management_lock: + executor._adjust_process_count() + executor = None + else: + # Received a _ResultItem so mark the future as completed. 
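
# Sketch of the breakage detection above: multiprocessing.connection.wait can
# multiplex the result pipe together with worker process sentinels, so the
# manager thread wakes up as soon as a worker dies without sending a result.
import multiprocessing as mp
import os
from multiprocessing.connection import wait

def crash():
    os._exit(1)          # die abruptly, without putting anything in a queue

if __name__ == "__main__":
    reader, writer = mp.Pipe(duplex=False)
    p = mp.Process(target=crash)
    p.start()
    ready = wait([reader, p.sentinel], timeout=5)
    assert p.sentinel in ready and reader not in ready
    p.join()
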
+ work_item = self.pending_work_items.pop(result_item.work_id, None) + # work_item can be None if another process terminated (see above) + if work_item is not None: + if result_item.exception: + work_item.future.set_exception(result_item.exception) + else: + work_item.future.set_result(result_item.result) + self.running_work_items.remove(result_item.work_id) + + def is_shutting_down(self): + # Check whether we should start shutting down the executor. + executor = self.executor_reference() + # No more work items can be added if: + # - The interpreter is shutting down OR + # - The executor that owns this thread is not broken AND + # * The executor that owns this worker has been collected OR + # * The executor that owns this worker has been shutdown. + # If the executor is broken, it should be detected in the next loop. + return _global_shutdown or ( + (executor is None or self.executor_flags.shutdown) + and not self.executor_flags.broken + ) + + def terminate_broken(self, bpe): + # Terminate the executor because it is in a broken state. The bpe + # argument can be used to display more information on the error that + # lead the executor into becoming broken. + + # Mark the process pool broken so that submits fail right now. + self.executor_flags.flag_as_broken(bpe) + + # Mark pending tasks as failed. + for work_item in self.pending_work_items.values(): + work_item.future.set_exception(bpe) + # Delete references to object. See issue16284 + del work_item + self.pending_work_items.clear() + + # Terminate remaining workers forcibly: the queues or their + # locks may be in a dirty state and block forever. + self.kill_workers(reason="broken executor") + + # clean up resources + self.join_executor_internals() + + def flag_executor_shutting_down(self): + # Flag the executor as shutting down and cancel remaining tasks if + # requested as early as possible if it is not gc-ed yet. + self.executor_flags.flag_as_shutting_down() + + # Cancel pending work items if requested. + if self.executor_flags.kill_workers: + while self.pending_work_items: + _, work_item = self.pending_work_items.popitem() + work_item.future.set_exception( + ShutdownExecutorError( + "The Executor was shutdown with `kill_workers=True` " + "before this job could complete." + ) + ) + del work_item + + # Kill the remaining worker forcibly to no waste time joining them + self.kill_workers(reason="executor shutting down") + + def kill_workers(self, reason=""): + # Terminate the remaining workers using SIGKILL. This function also + # terminates descendant workers of the children in case there is some + # nested parallelism. + while self.processes: + _, p = self.processes.popitem() + mp.util.debug(f"terminate process {p.name}, reason: {reason}") + try: + kill_process_tree(p) + except ProcessLookupError: # pragma: no cover + pass + + def shutdown_workers(self): + # shutdown all workers in self.processes + + # Create a list to avoid RuntimeError due to concurrent modification of + # processes. nb_children_alive is thus an upper bound. Also release the + # processes' _worker_exit_lock to accelerate the shutdown procedure, as + # there is no need for hand-shake here. + with self.processes_management_lock: + n_children_to_stop = 0 + for p in list(self.processes.values()): + mp.util.debug(f"releasing worker exit lock on {p.name}") + p._worker_exit_lock.release() + n_children_to_stop += 1 + + mp.util.debug(f"found {n_children_to_stop} processes to stop") + + # Send the right number of sentinels, to make sure all children are + # properly terminated. 
Do it with a mechanism that avoid hanging on + # Full queue when all workers have already been shutdown. + n_sentinels_sent = 0 + cooldown_time = 0.001 + while ( + n_sentinels_sent < n_children_to_stop + and self.get_n_children_alive() > 0 + ): + for _ in range(n_children_to_stop - n_sentinels_sent): + try: + self.call_queue.put_nowait(None) + n_sentinels_sent += 1 + except queue.Full as e: + if cooldown_time > 5.0: + mp.util.info( + "failed to send all sentinels and exit with error." + f"\ncall_queue size={self.call_queue._maxsize}; " + f" full is {self.call_queue.full()}; " + ) + raise e + mp.util.info( + "full call_queue prevented to send all sentinels at " + "once, waiting..." + ) + sleep(cooldown_time) + cooldown_time *= 1.2 + break + + mp.util.debug(f"sent {n_sentinels_sent} sentinels to the call queue") + + def join_executor_internals(self): + self.shutdown_workers() + + # Release the queue's resources as soon as possible. Flag the feeder + # thread for clean exit to avoid having the crash detection thread flag + # the Executor as broken during the shutdown. This is safe as either: + # * We don't need to communicate with the workers anymore + # * There is nothing left in the Queue buffer except None sentinels + mp.util.debug("closing call_queue") + self.call_queue.close() + self.call_queue.join_thread() + + # Closing result_queue + mp.util.debug("closing result_queue") + self.result_queue.close() + + mp.util.debug("closing thread_wakeup") + with self.shutdown_lock: + self.thread_wakeup.close() + + # If .join() is not called on the created processes then + # some ctx.Queue methods may deadlock on macOS. + with self.processes_management_lock: + mp.util.debug(f"joining {len(self.processes)} processes") + n_joined_processes = 0 + while True: + try: + pid, p = self.processes.popitem() + mp.util.debug(f"joining process {p.name} with pid {pid}") + p.join() + n_joined_processes += 1 + except KeyError: + break + + mp.util.debug( + "executor management thread clean shutdown of " + f"{n_joined_processes} workers" + ) + + def get_n_children_alive(self): + # This is an upper bound on the number of children alive. + with self.processes_management_lock: + return sum(p.is_alive() for p in list(self.processes.values())) + + +_system_limits_checked = False +_system_limited = None + + +def _check_system_limits(): + global _system_limits_checked, _system_limited + if _system_limits_checked and _system_limited: + raise NotImplementedError(_system_limited) + _system_limits_checked = True + try: + nsems_max = os.sysconf("SC_SEM_NSEMS_MAX") + except (AttributeError, ValueError): + # sysconf not available or setting not available + return + if nsems_max == -1: + # undetermined limit, assume that limit is determined + # by available memory only + return + if nsems_max >= 256: + # minimum number of semaphores available + # according to POSIX + return + _system_limited = ( + f"system provides too few semaphores ({nsems_max} available, " + "256 necessary)" + ) + raise NotImplementedError(_system_limited) + + +def _chain_from_iterable_of_lists(iterable): + """ + Specialized implementation of itertools.chain.from_iterable. + Each item in *iterable* should be a list. This function is + careful not to keep references to yielded objects. 
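
# Sketch of the semaphore-limit probe above: on platforms that expose
# SC_SEM_NSEMS_MAX through os.sysconf, a small value means the process pool
# cannot allocate the synchronization primitives it needs, while -1 or a
# missing setting is treated as "no hard limit".
import os

def semaphores_available(minimum=256):
    try:
        nsems_max = os.sysconf("SC_SEM_NSEMS_MAX")
    except (AttributeError, ValueError):
        return True          # setting not exposed: assume no hard limit
    return nsems_max == -1 or nsems_max >= minimum

print("process pool supported:", semaphores_available())
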
+ """ + for element in iterable: + element.reverse() + while element: + yield element.pop() + + +def _check_max_depth(context): + # Limit the maxmal recursion level + global _CURRENT_DEPTH + if context.get_start_method() == "fork" and _CURRENT_DEPTH > 0: + raise LokyRecursionError( + "Could not spawn extra nested processes at depth superior to " + "MAX_DEPTH=1. It is not possible to increase this limit when " + "using the 'fork' start method." + ) + + if 0 < MAX_DEPTH and _CURRENT_DEPTH + 1 > MAX_DEPTH: + raise LokyRecursionError( + "Could not spawn extra nested processes at depth superior to " + f"MAX_DEPTH={MAX_DEPTH}. If this is intendend, you can change " + "this limit with the LOKY_MAX_DEPTH environment variable." + ) + + +class LokyRecursionError(RuntimeError): + """A process tries to spawn too many levels of nested processes.""" + + +class BrokenProcessPool(_BPPException): + """ + Raised when the executor is broken while a future was in the running state. + The cause can an error raised when unpickling the task in the worker + process or when unpickling the result value in the parent process. It can + also be caused by a worker process being terminated unexpectedly. + """ + + +class TerminatedWorkerError(BrokenProcessPool): + """ + Raised when a process in a ProcessPoolExecutor terminated abruptly + while a future was in the running state. + """ + + +# Alias for backward compat (for code written for loky 1.1.4 and earlier). Do +# not use in new code. +BrokenExecutor = BrokenProcessPool + + +class ShutdownExecutorError(RuntimeError): + + """ + Raised when a ProcessPoolExecutor is shutdown while a future was in the + running or pending state. + """ + + +class ProcessPoolExecutor(Executor): + + _at_exit = None + + def __init__( + self, + max_workers=None, + job_reducers=None, + result_reducers=None, + timeout=None, + context=None, + initializer=None, + initargs=(), + env=None, + ): + """Initializes a new ProcessPoolExecutor instance. + + Args: + max_workers: int, optional (default: cpu_count()) + The maximum number of processes that can be used to execute the + given calls. If None or not given then as many worker processes + will be created as the number of CPUs the current process + can use. + job_reducers, result_reducers: dict(type: reducer_func) + Custom reducer for pickling the jobs and the results from the + Executor. If only `job_reducers` is provided, `result_reducer` + will use the same reducers + timeout: int, optional (default: None) + Idle workers exit after timeout seconds. If a new job is + submitted after the timeout, the executor will start enough + new Python processes to make sure the pool of workers is full. + context: A multiprocessing context to launch the workers. This + object should provide SimpleQueue, Queue and Process. + initializer: An callable used to initialize worker processes. + initargs: A tuple of arguments to pass to the initializer. + env: A dict of environment variable to overwrite in the child + process. The environment variables are set before any module is + loaded. Note that this only works with the loky context. + """ + _check_system_limits() + + if max_workers is None: + self._max_workers = cpu_count() + else: + if max_workers <= 0: + raise ValueError("max_workers must be greater than 0") + self._max_workers = max_workers + + if ( + sys.platform == "win32" + and self._max_workers > _MAX_WINDOWS_WORKERS + ): + warnings.warn( + f"On Windows, max_workers cannot exceed {_MAX_WINDOWS_WORKERS} " + "due to limitations of the operating system." 
+ ) + self._max_workers = _MAX_WINDOWS_WORKERS + + if context is None: + context = get_context() + self._context = context + self._env = env + + self._initializer, self._initargs = _prepare_initializer( + initializer, initargs + ) + _check_max_depth(self._context) + + if result_reducers is None: + result_reducers = job_reducers + + # Timeout + self._timeout = timeout + + # Management thread + self._executor_manager_thread = None + + # Map of pids to processes + self._processes = {} + + # Internal variables of the ProcessPoolExecutor + self._processes = {} + self._queue_count = 0 + self._pending_work_items = {} + self._running_work_items = [] + self._work_ids = queue.Queue() + self._processes_management_lock = self._context.Lock() + self._executor_manager_thread = None + self._shutdown_lock = threading.Lock() + + # _ThreadWakeup is a communication channel used to interrupt the wait + # of the main loop of executor_manager_thread from another thread (e.g. + # when calling executor.submit or executor.shutdown). We do not use the + # _result_queue to send wakeup signals to the executor_manager_thread + # as it could result in a deadlock if a worker process dies with the + # _result_queue write lock still acquired. + # + # _shutdown_lock must be locked to access _ThreadWakeup.wakeup. + self._executor_manager_thread_wakeup = _ThreadWakeup() + + # Flag to hold the state of the Executor. This permits to introspect + # the Executor state even once it has been garbage collected. + self._flags = _ExecutorFlags(self._shutdown_lock) + + # Finally setup the queues for interprocess communication + self._setup_queues(job_reducers, result_reducers) + + mp.util.debug("ProcessPoolExecutor is setup") + + def _setup_queues(self, job_reducers, result_reducers, queue_size=None): + # Make the call queue slightly larger than the number of processes to + # prevent the worker processes from idling. But don't make it too big + # because futures in the call queue cannot be cancelled. + if queue_size is None: + queue_size = 2 * self._max_workers + EXTRA_QUEUED_CALLS + self._call_queue = _SafeQueue( + max_size=queue_size, + pending_work_items=self._pending_work_items, + running_work_items=self._running_work_items, + thread_wakeup=self._executor_manager_thread_wakeup, + reducers=job_reducers, + ctx=self._context, + ) + # Killed worker processes can produce spurious "broken pipe" + # tracebacks in the queue's own worker thread. But we detect killed + # processes anyway, so silence the tracebacks. + self._call_queue._ignore_epipe = True + + self._result_queue = SimpleQueue( + reducers=result_reducers, ctx=self._context + ) + + def _start_executor_manager_thread(self): + if self._executor_manager_thread is None: + mp.util.debug("_start_executor_manager_thread called") + + # Start the processes so that their sentinels are known. + self._executor_manager_thread = _ExecutorManagerThread(self) + self._executor_manager_thread.start() + + # register this executor in a mechanism that ensures it will wakeup + # when the interpreter is exiting. + _threads_wakeups[self._executor_manager_thread] = ( + self._shutdown_lock, + self._executor_manager_thread_wakeup, + ) + + global process_pool_executor_at_exit + if process_pool_executor_at_exit is None: + # Ensure that the _python_exit function will be called before + # the multiprocessing.Queue._close finalizers which have an + # exitpriority of 10. 
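+ # On Python 3.9+ the hook is registered via threading._register_atexit; + # older versions fall back to mp.util.Finalize with exitpriority=20 so + # that it still runs before the Queue finalizers (exitpriority=10) + # mentioned above.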
+ + if sys.version_info < (3, 9): + process_pool_executor_at_exit = mp.util.Finalize( + None, _python_exit, exitpriority=20 + ) + else: + process_pool_executor_at_exit = threading._register_atexit( + _python_exit + ) + + def _adjust_process_count(self): + while len(self._processes) < self._max_workers: + worker_exit_lock = self._context.BoundedSemaphore(1) + args = ( + self._call_queue, + self._result_queue, + self._initializer, + self._initargs, + self._processes_management_lock, + self._timeout, + worker_exit_lock, + _CURRENT_DEPTH + 1, + ) + worker_exit_lock.acquire() + try: + # Try to spawn the process with some environment variable to + # overwrite but it only works with the loky context for now. + p = self._context.Process( + target=_process_worker, args=args, env=self._env + ) + except TypeError: + p = self._context.Process(target=_process_worker, args=args) + p._worker_exit_lock = worker_exit_lock + p.start() + self._processes[p.pid] = p + mp.util.debug( + f"Adjusted process count to {self._max_workers}: " + f"{[(p.name, pid) for pid, p in self._processes.items()]}" + ) + + def _ensure_executor_running(self): + """ensures all workers and management thread are running""" + with self._processes_management_lock: + if len(self._processes) != self._max_workers: + self._adjust_process_count() + self._start_executor_manager_thread() + + def submit(self, fn, *args, **kwargs): + with self._flags.shutdown_lock: + if self._flags.broken is not None: + raise self._flags.broken + if self._flags.shutdown: + raise ShutdownExecutorError( + "cannot schedule new futures after shutdown" + ) + + # Cannot submit a new calls once the interpreter is shutting down. + # This check avoids spawning new processes at exit. + if _global_shutdown: + raise RuntimeError( + "cannot schedule new futures after " "interpreter shutdown" + ) + + f = Future() + w = _WorkItem(f, fn, args, kwargs) + + self._pending_work_items[self._queue_count] = w + self._work_ids.put(self._queue_count) + self._queue_count += 1 + # Wake up queue management thread + self._executor_manager_thread_wakeup.wakeup() + + self._ensure_executor_running() + return f + + submit.__doc__ = Executor.submit.__doc__ + + def map(self, fn, *iterables, **kwargs): + """Returns an iterator equivalent to map(fn, iter). + + Args: + fn: A callable that will take as many arguments as there are + passed iterables. + timeout: The maximum number of seconds to wait. If None, then there + is no limit on the wait time. + chunksize: If greater than one, the iterables will be chopped into + chunks of size chunksize and submitted to the process pool. + If set to one, the items in the list will be sent one at a + time. + + Returns: + An iterator equivalent to: map(func, *iterables) but the calls may + be evaluated out-of-order. + + Raises: + TimeoutError: If the entire result iterator could not be generated + before the given timeout. + Exception: If fn(*args) raises for any values. 
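+ + Minimal usage sketch (for illustration only): + + with ProcessPoolExecutor(max_workers=2) as executor: + squares = list(executor.map(pow, range(4), [2] * 4, chunksize=2)) + # squares == [0, 1, 4, 9]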
+ """ + timeout = kwargs.get("timeout", None) + chunksize = kwargs.get("chunksize", 1) + if chunksize < 1: + raise ValueError("chunksize must be >= 1.") + + results = super().map( + partial(_process_chunk, fn), + _get_chunks(chunksize, *iterables), + timeout=timeout, + ) + return _chain_from_iterable_of_lists(results) + + def shutdown(self, wait=True, kill_workers=False): + mp.util.debug(f"shutting down executor {self}") + + self._flags.flag_as_shutting_down(kill_workers) + executor_manager_thread = self._executor_manager_thread + executor_manager_thread_wakeup = self._executor_manager_thread_wakeup + + if executor_manager_thread_wakeup is not None: + # Wake up queue management thread + with self._shutdown_lock: + self._executor_manager_thread_wakeup.wakeup() + + if executor_manager_thread is not None and wait: + # This locks avoids concurrent join if the interpreter + # is shutting down. + with _global_shutdown_lock: + executor_manager_thread.join() + _threads_wakeups.pop(executor_manager_thread, None) + + # To reduce the risk of opening too many files, remove references to + # objects that use file descriptors. + self._executor_manager_thread = None + self._executor_manager_thread_wakeup = None + self._call_queue = None + self._result_queue = None + self._processes_management_lock = None + + shutdown.__doc__ = Executor.shutdown.__doc__ diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/reusable_executor.py b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/reusable_executor.py new file mode 100644 index 0000000000000000000000000000000000000000..ad016fd389762a1c458200ffe7b310239da3a3f3 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/reusable_executor.py @@ -0,0 +1,285 @@ +############################################################################### +# Reusable ProcessPoolExecutor +# +# author: Thomas Moreau and Olivier Grisel +# +import time +import warnings +import threading +import multiprocessing as mp + +from .process_executor import ProcessPoolExecutor, EXTRA_QUEUED_CALLS +from .backend.context import cpu_count +from .backend import get_context + +__all__ = ["get_reusable_executor"] + +# Singleton executor and id management +_executor_lock = threading.RLock() +_next_executor_id = 0 +_executor = None +_executor_kwargs = None + + +def _get_next_executor_id(): + """Ensure that each successive executor instance has a unique, monotonic id. + + The purpose of this monotonic id is to help debug and test automated + instance creation. + """ + global _next_executor_id + with _executor_lock: + executor_id = _next_executor_id + _next_executor_id += 1 + return executor_id + + +def get_reusable_executor( + max_workers=None, + context=None, + timeout=10, + kill_workers=False, + reuse="auto", + job_reducers=None, + result_reducers=None, + initializer=None, + initargs=(), + env=None, +): + """Return the current ReusableExectutor instance. + + Start a new instance if it has not been started already or if the previous + instance was left in a broken state. + + If the previous instance does not have the requested number of workers, the + executor is dynamically resized to adjust the number of workers prior to + returning. + + Reusing a singleton instance spares the overhead of starting new worker + processes and importing common python packages each time. + + ``max_workers`` controls the maximum number of tasks that can be running in + parallel in worker processes. By default this is set to the number of + CPUs on the host. 
+ + Setting ``timeout`` (in seconds) makes idle workers automatically shut down + so as to release system resources. New workers are respawned upon submission + of new tasks so that ``max_workers`` are available to accept the newly + submitted tasks. Setting ``timeout`` to around 100 times the time required + to spawn new processes and import packages in them (on the order of 100ms) + ensures that the overhead of spawning workers is negligible. + + Setting ``kill_workers=True`` makes it possible to forcibly interrupt + previously spawned jobs to get a new instance of the reusable executor + with new constructor argument values. + + The ``job_reducers`` and ``result_reducers`` are used to customize the + pickling of tasks and results sent to the executor. + + When provided, the ``initializer`` is run first in newly spawned + processes with argument ``initargs``. + + The environment variables in the child processes are a copy of the values in + the main process. One can provide a dict ``{ENV: VAL}`` where ``ENV`` and + ``VAL`` are string literals to overwrite the environment variable ``ENV`` + in the child processes to the value ``VAL``. The environment variables are set + in the children before any module is loaded. This only works with the + ``loky`` context. + """ + _executor, _ = _ReusablePoolExecutor.get_reusable_executor( + max_workers=max_workers, + context=context, + timeout=timeout, + kill_workers=kill_workers, + reuse=reuse, + job_reducers=job_reducers, + result_reducers=result_reducers, + initializer=initializer, + initargs=initargs, + env=env, + ) + return _executor + + +class _ReusablePoolExecutor(ProcessPoolExecutor): + def __init__( + self, + submit_resize_lock, + max_workers=None, + context=None, + timeout=None, + executor_id=0, + job_reducers=None, + result_reducers=None, + initializer=None, + initargs=(), + env=None, + ): + super().__init__( + max_workers=max_workers, + context=context, + timeout=timeout, + job_reducers=job_reducers, + result_reducers=result_reducers, + initializer=initializer, + initargs=initargs, + env=env, + ) + self.executor_id = executor_id + self._submit_resize_lock = submit_resize_lock + + @classmethod + def get_reusable_executor( + cls, + max_workers=None, + context=None, + timeout=10, + kill_workers=False, + reuse="auto", + job_reducers=None, + result_reducers=None, + initializer=None, + initargs=(), + env=None, + ): + with _executor_lock: + global _executor, _executor_kwargs + executor = _executor + + if max_workers is None: + if reuse is True and executor is not None: + max_workers = executor._max_workers + else: + max_workers = cpu_count() + elif max_workers <= 0: + raise ValueError( + f"max_workers must be greater than 0, got {max_workers}." + ) + + if isinstance(context, str): + context = get_context(context) + if context is not None and context.get_start_method() == "fork": + raise ValueError( + "Cannot use reusable executor with the 'fork' context" + ) + + kwargs = dict( + context=context, + timeout=timeout, + job_reducers=job_reducers, + result_reducers=result_reducers, + initializer=initializer, + initargs=initargs, + env=env, + ) + if executor is None: + is_reused = False + mp.util.debug( + f"Create an executor with max_workers={max_workers}."
+ ) + executor_id = _get_next_executor_id() + _executor_kwargs = kwargs + _executor = executor = cls( + _executor_lock, + max_workers=max_workers, + executor_id=executor_id, + **kwargs, + ) + else: + if reuse == "auto": + reuse = kwargs == _executor_kwargs + if ( + executor._flags.broken + or executor._flags.shutdown + or not reuse + ): + if executor._flags.broken: + reason = "broken" + elif executor._flags.shutdown: + reason = "shutdown" + else: + reason = "arguments have changed" + mp.util.debug( + "Creating a new executor with max_workers=" + f"{max_workers} as the previous instance cannot be " + f"reused ({reason})." + ) + executor.shutdown(wait=True, kill_workers=kill_workers) + _executor = executor = _executor_kwargs = None + # Recursive call to build a new instance + return cls.get_reusable_executor( + max_workers=max_workers, **kwargs + ) + else: + mp.util.debug( + "Reusing existing executor with " + f"max_workers={executor._max_workers}." + ) + is_reused = True + executor._resize(max_workers) + + return executor, is_reused + + def submit(self, fn, *args, **kwargs): + with self._submit_resize_lock: + return super().submit(fn, *args, **kwargs) + + def _resize(self, max_workers): + with self._submit_resize_lock: + if max_workers is None: + raise ValueError("Trying to resize with max_workers=None") + elif max_workers == self._max_workers: + return + + if self._executor_manager_thread is None: + # If the executor_manager_thread has not been started, + # no processes have been spawned yet and we can just + # update _max_workers and return. + self._max_workers = max_workers + return + + self._wait_job_completion() + + # Some processes might have returned due to the timeout, so check how + # many children are still alive. Use the _processes_management_lock to + # ensure that no processes are spawned or time out during the resize. + with self._processes_management_lock: + processes = list(self._processes.values()) + nb_children_alive = sum(p.is_alive() for p in processes) + self._max_workers = max_workers + for _ in range(max_workers, nb_children_alive): + self._call_queue.put(None) + while ( + len(self._processes) > max_workers and not self._flags.broken + ): + time.sleep(1e-3) + + self._adjust_process_count() + processes = list(self._processes.values()) + while not all(p.is_alive() for p in processes): + time.sleep(1e-3) + + def _wait_job_completion(self): + """Wait for the cache to be empty before resizing the pool.""" + # Issue a warning to the user about the adverse effects of this usage.
+ if self._pending_work_items: + warnings.warn( + "Trying to resize an executor with running jobs: " + "waiting for jobs completion before resizing.", + UserWarning, + ) + mp.util.debug( + f"Executor {self.executor_id} waiting for jobs completion " + "before resizing" + ) + # Wait for the completion of the jobs + while self._pending_work_items: + time.sleep(1e-3) + + def _setup_queues(self, job_reducers, result_reducers): + # As this executor can be resized, use a large queue size to avoid + # underestimating capacity and introducing overhead + queue_size = 2 * cpu_count() + EXTRA_QUEUED_CALLS + super()._setup_queues( + job_reducers, result_reducers, queue_size=queue_size + ) diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/test/__init__.py b/env-llmeval/lib/python3.10/site-packages/joblib/test/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/test/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/joblib/test/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..18e828e928033cc26da531c89fbf86eb687942a9 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/joblib/test/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/test/__pycache__/test_backports.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/joblib/test/__pycache__/test_backports.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bc4166d1138902988eb99b65da32169e6ccd9dc6 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/joblib/test/__pycache__/test_backports.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/test/__pycache__/test_config.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/joblib/test/__pycache__/test_config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4611940fecc8aa3dac9e5af94782268f1c74b9d9 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/joblib/test/__pycache__/test_config.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/test/__pycache__/test_dask.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/joblib/test/__pycache__/test_dask.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7896469e61ae679cf83c65a637546b67cb644e60 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/joblib/test/__pycache__/test_dask.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/test/__pycache__/test_func_inspect.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/joblib/test/__pycache__/test_func_inspect.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..39b223c956b1905a4b57c9c3bcc1d7793e57a6f9 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/joblib/test/__pycache__/test_func_inspect.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/test/__pycache__/test_func_inspect_special_encoding.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/joblib/test/__pycache__/test_func_inspect_special_encoding.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8ded69cfa7b803e5521c37e942b8687670a2d62d Binary files /dev/null and 
b/env-llmeval/lib/python3.10/site-packages/joblib/test/__pycache__/test_func_inspect_special_encoding.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/test/__pycache__/test_logger.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/joblib/test/__pycache__/test_logger.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c9620ad6a068f6e29245de009721675bb75f32d7 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/joblib/test/__pycache__/test_logger.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/test/__pycache__/test_memory_async.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/joblib/test/__pycache__/test_memory_async.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a638d9ca3062849ba5a252c46049ab3e670be5e7 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/joblib/test/__pycache__/test_memory_async.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/test/__pycache__/test_module.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/joblib/test/__pycache__/test_module.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5bd8bf162968b39b2f7d66a456d04002031bc59c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/joblib/test/__pycache__/test_module.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/test/__pycache__/test_numpy_pickle.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/joblib/test/__pycache__/test_numpy_pickle.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..76b3515a949dd0e4498e270995999b34c95b7ec8 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/joblib/test/__pycache__/test_numpy_pickle.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/test/__pycache__/test_numpy_pickle_compat.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/joblib/test/__pycache__/test_numpy_pickle_compat.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d2c6c31cbe88a08c5adf38bcad8d5b2813290d10 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/joblib/test/__pycache__/test_numpy_pickle_compat.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/test/__pycache__/test_numpy_pickle_utils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/joblib/test/__pycache__/test_numpy_pickle_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1bf65066c305bb6666053ada58b16d4501203b1f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/joblib/test/__pycache__/test_numpy_pickle_utils.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/test/__pycache__/test_parallel.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/joblib/test/__pycache__/test_parallel.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..df7c5b055bf929a44c626cfe9d5a99044ffab562 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/joblib/test/__pycache__/test_parallel.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/test/__pycache__/test_store_backends.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/joblib/test/__pycache__/test_store_backends.cpython-310.pyc new file mode 
100644 index 0000000000000000000000000000000000000000..62aa1a1ea8800c7e91a45dfdb0f0c1e9fcf57849 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/joblib/test/__pycache__/test_store_backends.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/test/__pycache__/test_utils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/joblib/test/__pycache__/test_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cbe0987f75ca23cacc826075a47178f112a50756 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/joblib/test/__pycache__/test_utils.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/test/common.py b/env-llmeval/lib/python3.10/site-packages/joblib/test/common.py new file mode 100644 index 0000000000000000000000000000000000000000..b0ca0c6abd913bc37091ebae4bd6a0b64084d20f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/joblib/test/common.py @@ -0,0 +1,84 @@ +""" +Small utilities for testing. +""" +import os +import gc +import sys + +from joblib._multiprocessing_helpers import mp +from joblib.testing import SkipTest, skipif + +try: + import lz4 +except ImportError: + lz4 = None + +IS_PYPY = hasattr(sys, "pypy_version_info") + +# A decorator to run tests only when numpy is available +try: + import numpy as np + + def with_numpy(func): + """A decorator to skip tests requiring numpy.""" + return func + +except ImportError: + def with_numpy(func): + """A decorator to skip tests requiring numpy.""" + def my_func(): + raise SkipTest('Test requires numpy') + return my_func + np = None + +# TODO: Turn this back on after refactoring yield based tests in test_hashing +# with_numpy = skipif(not np, reason='Test requires numpy.') + +# we use memory_profiler library for memory consumption checks +try: + from memory_profiler import memory_usage + + def with_memory_profiler(func): + """A decorator to skip tests requiring memory_profiler.""" + return func + + def memory_used(func, *args, **kwargs): + """Compute memory usage when executing func.""" + gc.collect() + mem_use = memory_usage((func, args, kwargs), interval=.001) + return max(mem_use) - min(mem_use) + +except ImportError: + def with_memory_profiler(func): + """A decorator to skip tests requiring memory_profiler.""" + def dummy_func(): + raise SkipTest('Test requires memory_profiler.') + return dummy_func + + memory_usage = memory_used = None + + +def force_gc_pypy(): + # The gc in pypy can be delayed. Force it to test the behavior when it + # will eventually be collected. 
+ if IS_PYPY: + # Run gc.collect() twice to make sure the weakref is collected, as + # mentionned in the pypy doc: + # https://doc.pypy.org/en/latest/config/objspace.usemodules._weakref.html + import gc + gc.collect() + gc.collect() + + +with_multiprocessing = skipif( + mp is None, reason='Needs multiprocessing to run.') + + +with_dev_shm = skipif( + not os.path.exists('/dev/shm'), + reason='This test requires a large /dev/shm shared memory fs.') + +with_lz4 = skipif(lz4 is None, reason='Needs lz4 compression to run') + +without_lz4 = skipif( + lz4 is not None, reason='Needs lz4 not being installed to run') diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/test/data/create_numpy_pickle.py b/env-llmeval/lib/python3.10/site-packages/joblib/test/data/create_numpy_pickle.py new file mode 100644 index 0000000000000000000000000000000000000000..ba903d6cc2cd75879eed60ff31ecdf7ffe230d45 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/joblib/test/data/create_numpy_pickle.py @@ -0,0 +1,95 @@ +""" +This script is used to generate test data for joblib/test/test_numpy_pickle.py +""" + +import sys +import re + +# pytest needs to be able to import this module even when numpy is +# not installed +try: + import numpy as np +except ImportError: + np = None + +import joblib + + +def get_joblib_version(joblib_version=joblib.__version__): + """Normalize joblib version by removing suffix. + + >>> get_joblib_version('0.8.4') + '0.8.4' + >>> get_joblib_version('0.8.4b1') + '0.8.4' + >>> get_joblib_version('0.9.dev0') + '0.9' + """ + matches = [re.match(r'(\d+).*', each) + for each in joblib_version.split('.')] + return '.'.join([m.group(1) for m in matches if m is not None]) + + +def write_test_pickle(to_pickle, args): + kwargs = {} + compress = args.compress + method = args.method + joblib_version = get_joblib_version() + py_version = '{0[0]}{0[1]}'.format(sys.version_info) + numpy_version = ''.join(np.__version__.split('.')[:2]) + + # The game here is to generate the right filename according to the options. + body = '_compressed' if (compress and method == 'zlib') else '' + if compress: + if method == 'zlib': + kwargs['compress'] = True + extension = '.gz' + else: + kwargs['compress'] = (method, 3) + extension = '.pkl.{}'.format(method) + if args.cache_size: + kwargs['cache_size'] = 0 + body += '_cache_size' + else: + extension = '.pkl' + + pickle_filename = 'joblib_{}{}_pickle_py{}_np{}{}'.format( + joblib_version, body, py_version, numpy_version, extension) + + try: + joblib.dump(to_pickle, pickle_filename, **kwargs) + except Exception as e: + # With old python version (=< 3.3.), we can arrive there when + # dumping compressed pickle with LzmaFile. + print("Error: cannot generate file '{}' with arguments '{}'. 
" + "Error was: {}".format(pickle_filename, kwargs, e)) + else: + print("File '{}' generated successfully.".format(pickle_filename)) + + +if __name__ == '__main__': + import argparse + parser = argparse.ArgumentParser(description="Joblib pickle data " + "generator.") + parser.add_argument('--cache_size', action="store_true", + help="Force creation of companion numpy " + "files for pickled arrays.") + parser.add_argument('--compress', action="store_true", + help="Generate compress pickles.") + parser.add_argument('--method', type=str, default='zlib', + choices=['zlib', 'gzip', 'bz2', 'xz', 'lzma', 'lz4'], + help="Set compression method.") + # We need to be specific about dtypes in particular endianness + # because the pickles can be generated on one architecture and + # the tests run on another one. See + # https://github.com/joblib/joblib/issues/279. + to_pickle = [np.arange(5, dtype=np.dtype(' 0 + + +@with_numpy +@with_multiprocessing +def test_parallel_config_params_explicit_set(tmpdir): + with parallel_config(n_jobs=3, max_nbytes=1, temp_folder=tmpdir): + with Parallel(n_jobs=2, prefer="processes", max_nbytes='1M') as p: + assert isinstance(p._backend, LokyBackend) + assert p.n_jobs == 2 + + # Checks that memmapping is disabled + with raises(TypeError, match="Expected np.memmap instance"): + p(delayed(check_memmap)(a) for a in [np.random.random(10)] * 2) + + +@parametrize("param", ["prefer", "require"]) +def test_parallel_config_bad_params(param): + # Check that an error is raised when setting a wrong backend + # hint or constraint + with raises(ValueError, match=f"{param}=wrong is not a valid"): + with parallel_config(**{param: "wrong"}): + Parallel() + + +def test_parallel_config_constructor_params(): + # Check that an error is raised when backend is None + # but backend constructor params are given + with raises(ValueError, match="only supported when backend is not None"): + with parallel_config(inner_max_num_threads=1): + pass + + with raises(ValueError, match="only supported when backend is not None"): + with parallel_config(backend_param=1): + pass + + +def test_parallel_config_nested(): + # Check that nested configuration retrieves the info from the + # parent config and do not reset them. 
+ + with parallel_config(n_jobs=2): + p = Parallel() + assert isinstance(p._backend, BACKENDS[DEFAULT_BACKEND]) + assert p.n_jobs == 2 + + with parallel_config(backend='threading'): + with parallel_config(n_jobs=2): + p = Parallel() + assert isinstance(p._backend, ThreadingBackend) + assert p.n_jobs == 2 + + with parallel_config(verbose=100): + with parallel_config(n_jobs=2): + p = Parallel() + assert p.verbose == 100 + assert p.n_jobs == 2 + + +@with_numpy +@with_multiprocessing +@parametrize('backend', ['multiprocessing', 'threading', + MultiprocessingBackend(), ThreadingBackend()]) +@parametrize("context", [parallel_config, parallel_backend]) +def test_threadpool_limitation_in_child_context_error(context, backend): + + with raises(AssertionError, match=r"does not acc.*inner_max_num_threads"): + context(backend, inner_max_num_threads=1) + + +@parametrize("context", [parallel_config, parallel_backend]) +def test_parallel_n_jobs_none(context): + # Check that n_jobs=None is interpreted as "unset" in Parallel + # non regression test for #1473 + with context(backend="threading", n_jobs=2): + with Parallel(n_jobs=None) as p: + assert p.n_jobs == 2 + + with context(backend="threading"): + default_n_jobs = Parallel().n_jobs + with Parallel(n_jobs=None) as p: + assert p.n_jobs == default_n_jobs + + +@parametrize("context", [parallel_config, parallel_backend]) +def test_parallel_config_n_jobs_none(context): + # Check that n_jobs=None is interpreted as "explicitly set" in + # parallel_(config/backend) + # non regression test for #1473 + with context(backend="threading", n_jobs=2): + with context(backend="threading", n_jobs=None): + # n_jobs=None resets n_jobs to backend's default + with Parallel() as p: + assert p.n_jobs == 1 diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/test/test_dask.py b/env-llmeval/lib/python3.10/site-packages/joblib/test/test_dask.py new file mode 100644 index 0000000000000000000000000000000000000000..aebe65525fc55f4593e6c83b5bfd4ffc76d945a7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/joblib/test/test_dask.py @@ -0,0 +1,499 @@ +from __future__ import print_function, division, absolute_import +import os +import warnings + +import pytest +from random import random +from uuid import uuid4 +from time import sleep + +from .. import Parallel, delayed, parallel_config +from ..parallel import ThreadingBackend, AutoBatchingMixin +from .._dask import DaskDistributedBackend + +distributed = pytest.importorskip('distributed') +dask = pytest.importorskip('dask') + +# These imports need to be after the pytest.importorskip hence the noqa: E402 +from distributed import Client, LocalCluster, get_client # noqa: E402 +from distributed.metrics import time # noqa: E402 +# Note: pytest requires to manually import all fixtures used in the test +# and their dependencies. 
+from distributed.utils_test import cluster, inc, cleanup # noqa: E402, F401 + + +def noop(*args, **kwargs): + pass + + +def slow_raise_value_error(condition, duration=0.05): + sleep(duration) + if condition: + raise ValueError("condition evaluated to True") + + +def count_events(event_name, client): + worker_events = client.run(lambda dask_worker: dask_worker.log) + event_counts = {} + for w, events in worker_events.items(): + event_counts[w] = len([event for event in list(events) + if event[1] == event_name]) + return event_counts + + +def test_simple(loop): + with cluster() as (s, [a, b]): + with Client(s['address'], loop=loop) as client: # noqa: F841 + with parallel_config(backend='dask'): + seq = Parallel()(delayed(inc)(i) for i in range(10)) + assert seq == [inc(i) for i in range(10)] + + with pytest.raises(ValueError): + Parallel()(delayed(slow_raise_value_error)(i == 3) + for i in range(10)) + + seq = Parallel()(delayed(inc)(i) for i in range(10)) + assert seq == [inc(i) for i in range(10)] + + +def test_dask_backend_uses_autobatching(loop): + assert (DaskDistributedBackend.compute_batch_size + is AutoBatchingMixin.compute_batch_size) + + with cluster() as (s, [a, b]): + with Client(s['address'], loop=loop) as client: # noqa: F841 + with parallel_config(backend='dask'): + with Parallel() as parallel: + # The backend should be initialized with a default + # batch size of 1: + backend = parallel._backend + assert isinstance(backend, DaskDistributedBackend) + assert backend.parallel is parallel + assert backend._effective_batch_size == 1 + + # Launch many short tasks that should trigger + # auto-batching: + parallel( + delayed(lambda: None)() + for _ in range(int(1e4)) + ) + assert backend._effective_batch_size > 10 + + +def random2(): + return random() + + +def test_dont_assume_function_purity(loop): + with cluster() as (s, [a, b]): + with Client(s['address'], loop=loop) as client: # noqa: F841 + with parallel_config(backend='dask'): + x, y = Parallel()(delayed(random2)() for i in range(2)) + assert x != y + + +@pytest.mark.parametrize("mixed", [True, False]) +def test_dask_funcname(loop, mixed): + from joblib._dask import Batch + if not mixed: + tasks = [delayed(inc)(i) for i in range(4)] + batch_repr = 'batch_of_inc_4_calls' + else: + tasks = [ + delayed(abs)(i) if i % 2 else delayed(inc)(i) for i in range(4) + ] + batch_repr = 'mixed_batch_of_inc_4_calls' + + assert repr(Batch(tasks)) == batch_repr + + with cluster() as (s, [a, b]): + with Client(s['address'], loop=loop) as client: + with parallel_config(backend='dask'): + _ = Parallel(batch_size=2, pre_dispatch='all')(tasks) + + def f(dask_scheduler): + return list(dask_scheduler.transition_log) + batch_repr = batch_repr.replace('4', '2') + log = client.run_on_scheduler(f) + assert all('batch_of_inc' in tup[0] for tup in log) + + +def test_no_undesired_distributed_cache_hit(): + # Dask has a pickle cache for callables that are called many times. Because + # the dask backends used to wrap both the functions and the arguments + # under instances of the Batch callable class this caching mechanism could + # lead to bugs as described in: https://github.com/joblib/joblib/pull/1055 + # The joblib-dask backend has been refactored to avoid bundling the + # arguments as an attribute of the Batch instance to avoid this problem. + # This test serves as non-regression problem. + + # Use a large number of input arguments to give the AutoBatchingMixin + # enough tasks to kick-in. 
+ lists = [[] for _ in range(100)] + np = pytest.importorskip('numpy') + X = np.arange(int(1e6)) + + def isolated_operation(list_, data=None): + if data is not None: + np.testing.assert_array_equal(data, X) + list_.append(uuid4().hex) + return list_ + + cluster = LocalCluster(n_workers=1, threads_per_worker=2) + client = Client(cluster) + try: + with parallel_config(backend='dask'): + # dispatches joblib.parallel.BatchedCalls + res = Parallel()( + delayed(isolated_operation)(list_) for list_ in lists + ) + + # The original arguments should not have been mutated as the mutation + # happens in the dask worker process. + assert lists == [[] for _ in range(100)] + + # Here we did not pass any large numpy array as argument to + # isolated_operation so no scattering event should happen under the + # hood. + counts = count_events('receive-from-scatter', client) + assert sum(counts.values()) == 0 + assert all([len(r) == 1 for r in res]) + + with parallel_config(backend='dask'): + # Append a large array which will be scattered by dask, and + # dispatch joblib._dask.Batch + res = Parallel()( + delayed(isolated_operation)(list_, data=X) for list_ in lists + ) + + # This time, auto-scattering should have kicked it. + counts = count_events('receive-from-scatter', client) + assert sum(counts.values()) > 0 + assert all([len(r) == 1 for r in res]) + finally: + client.close(timeout=30) + cluster.close(timeout=30) + + +class CountSerialized(object): + def __init__(self, x): + self.x = x + self.count = 0 + + def __add__(self, other): + return self.x + getattr(other, 'x', other) + + __radd__ = __add__ + + def __reduce__(self): + self.count += 1 + return (CountSerialized, (self.x,)) + + +def add5(a, b, c, d=0, e=0): + return a + b + c + d + e + + +def test_manual_scatter(loop): + x = CountSerialized(1) + y = CountSerialized(2) + z = CountSerialized(3) + + with cluster() as (s, [a, b]): + with Client(s['address'], loop=loop) as client: # noqa: F841 + with parallel_config(backend='dask', scatter=[x, y]): + f = delayed(add5) + tasks = [f(x, y, z, d=4, e=5), + f(x, z, y, d=5, e=4), + f(y, x, z, d=x, e=5), + f(z, z, x, d=z, e=y)] + expected = [func(*args, **kwargs) + for func, args, kwargs in tasks] + results = Parallel()(tasks) + + # Scatter must take a list/tuple + with pytest.raises(TypeError): + with parallel_config(backend='dask', loop=loop, scatter=1): + pass + + assert results == expected + + # Scattered variables only serialized once + assert x.count == 1 + assert y.count == 1 + # Depending on the version of distributed, the unscattered z variable + # is either pickled 4 or 6 times, possibly because of the memoization + # of objects that appear several times in the arguments of a delayed + # task. + assert z.count in (4, 6) + + +# When the same IOLoop is used for multiple clients in a row, use +# loop_in_thread instead of loop to prevent the Client from closing it. See +# dask/distributed #4112 +def test_auto_scatter(loop_in_thread): + np = pytest.importorskip('numpy') + data1 = np.ones(int(1e4), dtype=np.uint8) + data2 = np.ones(int(1e4), dtype=np.uint8) + data_to_process = ([data1] * 3) + ([data2] * 3) + + with cluster() as (s, [a, b]): + with Client(s['address'], loop=loop_in_thread) as client: + with parallel_config(backend='dask'): + # Passing the same data as arg and kwarg triggers a single + # scatter operation whose result is reused. 
+ Parallel()(delayed(noop)(data, data, i, opt=data) + for i, data in enumerate(data_to_process)) + # By default large array are automatically scattered with + # broadcast=1 which means that one worker must directly receive + # the data from the scatter operation once. + counts = count_events('receive-from-scatter', client) + assert counts[a['address']] + counts[b['address']] == 2 + + with cluster() as (s, [a, b]): + with Client(s['address'], loop=loop_in_thread) as client: + with parallel_config(backend='dask'): + Parallel()(delayed(noop)(data1[:3], i) for i in range(5)) + # Small arrays are passed within the task definition without going + # through a scatter operation. + counts = count_events('receive-from-scatter', client) + assert counts[a['address']] == 0 + assert counts[b['address']] == 0 + + +@pytest.mark.parametrize("retry_no", list(range(2))) +def test_nested_scatter(loop, retry_no): + + np = pytest.importorskip('numpy') + + NUM_INNER_TASKS = 10 + NUM_OUTER_TASKS = 10 + + def my_sum(x, i, j): + return np.sum(x) + + def outer_function_joblib(array, i): + client = get_client() # noqa + with parallel_config(backend="dask"): + results = Parallel()( + delayed(my_sum)(array[j:], i, j) for j in range( + NUM_INNER_TASKS) + ) + return sum(results) + + with cluster() as (s, [a, b]): + with Client(s['address'], loop=loop) as _: + with parallel_config(backend="dask"): + my_array = np.ones(10000) + _ = Parallel()( + delayed(outer_function_joblib)( + my_array[i:], i) for i in range(NUM_OUTER_TASKS) + ) + + +def test_nested_backend_context_manager(loop_in_thread): + def get_nested_pids(): + pids = set(Parallel(n_jobs=2)(delayed(os.getpid)() for _ in range(2))) + pids |= set(Parallel(n_jobs=2)(delayed(os.getpid)() for _ in range(2))) + return pids + + with cluster() as (s, [a, b]): + with Client(s['address'], loop=loop_in_thread) as client: + with parallel_config(backend='dask'): + pid_groups = Parallel(n_jobs=2)( + delayed(get_nested_pids)() + for _ in range(10) + ) + for pid_group in pid_groups: + assert len(set(pid_group)) <= 2 + + # No deadlocks + with Client(s['address'], loop=loop_in_thread) as client: # noqa: F841 + with parallel_config(backend='dask'): + pid_groups = Parallel(n_jobs=2)( + delayed(get_nested_pids)() + for _ in range(10) + ) + for pid_group in pid_groups: + assert len(set(pid_group)) <= 2 + + +def test_nested_backend_context_manager_implicit_n_jobs(loop): + # Check that Parallel with no explicit n_jobs value automatically selects + # all the dask workers, including in nested calls. 
+ + def _backend_type(p): + return p._backend.__class__.__name__ + + def get_nested_implicit_n_jobs(): + with Parallel() as p: + return _backend_type(p), p.n_jobs + + with cluster() as (s, [a, b]): + with Client(s['address'], loop=loop) as client: # noqa: F841 + with parallel_config(backend='dask'): + with Parallel() as p: + assert _backend_type(p) == "DaskDistributedBackend" + assert p.n_jobs == -1 + all_nested_n_jobs = p( + delayed(get_nested_implicit_n_jobs)() + for _ in range(2) + ) + for backend_type, nested_n_jobs in all_nested_n_jobs: + assert backend_type == "DaskDistributedBackend" + assert nested_n_jobs == -1 + + +def test_errors(loop): + with pytest.raises(ValueError) as info: + with parallel_config(backend='dask'): + pass + + assert "create a dask client" in str(info.value).lower() + + +def test_correct_nested_backend(loop): + with cluster() as (s, [a, b]): + with Client(s['address'], loop=loop) as client: # noqa: F841 + # No requirement, should be us + with parallel_config(backend='dask'): + result = Parallel(n_jobs=2)( + delayed(outer)(nested_require=None) for _ in range(1)) + assert isinstance(result[0][0][0], DaskDistributedBackend) + + # Require threads, should be threading + with parallel_config(backend='dask'): + result = Parallel(n_jobs=2)( + delayed(outer)(nested_require='sharedmem') + for _ in range(1)) + assert isinstance(result[0][0][0], ThreadingBackend) + + +def outer(nested_require): + return Parallel(n_jobs=2, prefer='threads')( + delayed(middle)(nested_require) for _ in range(1) + ) + + +def middle(require): + return Parallel(n_jobs=2, require=require)( + delayed(inner)() for _ in range(1) + ) + + +def inner(): + return Parallel()._backend + + +def test_secede_with_no_processes(loop): + # https://github.com/dask/distributed/issues/1775 + with Client(loop=loop, processes=False, set_as_default=True): + with parallel_config(backend='dask'): + Parallel(n_jobs=4)(delayed(id)(i) for i in range(2)) + + +def _worker_address(_): + from distributed import get_worker + return get_worker().address + + +def test_dask_backend_keywords(loop): + with cluster() as (s, [a, b]): + with Client(s['address'], loop=loop) as client: # noqa: F841 + with parallel_config(backend='dask', workers=a['address']): + seq = Parallel()( + delayed(_worker_address)(i) for i in range(10)) + assert seq == [a['address']] * 10 + + with parallel_config(backend='dask', workers=b['address']): + seq = Parallel()( + delayed(_worker_address)(i) for i in range(10)) + assert seq == [b['address']] * 10 + + +def test_scheduler_tasks_cleanup(loop): + with Client(processes=False, loop=loop) as client: + with parallel_config(backend='dask'): + Parallel()(delayed(inc)(i) for i in range(10)) + + start = time() + while client.cluster.scheduler.tasks: + sleep(0.01) + assert time() < start + 5 + + assert not client.futures + + +@pytest.mark.parametrize("cluster_strategy", ["adaptive", "late_scaling"]) +@pytest.mark.skipif( + distributed.__version__ <= '2.1.1' and distributed.__version__ >= '1.28.0', + reason="distributed bug - https://github.com/dask/distributed/pull/2841") +def test_wait_for_workers(cluster_strategy): + cluster = LocalCluster(n_workers=0, processes=False, threads_per_worker=2) + client = Client(cluster) + if cluster_strategy == "adaptive": + cluster.adapt(minimum=0, maximum=2) + elif cluster_strategy == "late_scaling": + # Tell the cluster to start workers but this is a non-blocking call + # and new workers might take time to connect. 
In this case the Parallel + # call should wait for at least one worker to come up before starting + # to schedule work. + cluster.scale(2) + try: + with parallel_config(backend='dask'): + # The following should wait a bit for at least one worker to + # become available. + Parallel()(delayed(inc)(i) for i in range(10)) + finally: + client.close() + cluster.close() + + +def test_wait_for_workers_timeout(): + # Start a cluster with 0 worker: + cluster = LocalCluster(n_workers=0, processes=False, threads_per_worker=2) + client = Client(cluster) + try: + with parallel_config(backend='dask', wait_for_workers_timeout=0.1): + # Short timeout: DaskDistributedBackend + msg = "DaskDistributedBackend has no worker after 0.1 seconds." + with pytest.raises(TimeoutError, match=msg): + Parallel()(delayed(inc)(i) for i in range(10)) + + with parallel_config(backend='dask', wait_for_workers_timeout=0): + # No timeout: fallback to generic joblib failure: + msg = "DaskDistributedBackend has no active worker" + with pytest.raises(RuntimeError, match=msg): + Parallel()(delayed(inc)(i) for i in range(10)) + finally: + client.close() + cluster.close() + + +@pytest.mark.parametrize("backend", ["loky", "multiprocessing"]) +def test_joblib_warning_inside_dask_daemonic_worker(backend): + cluster = LocalCluster(n_workers=2) + client = Client(cluster) + try: + + def func_using_joblib_parallel(): + # Somehow trying to check the warning type here (e.g. with + # pytest.warns(UserWarning)) make the test hang. Work-around: + # return the warning record to the client and the warning check is + # done client-side. + with warnings.catch_warnings(record=True) as record: + Parallel(n_jobs=2, backend=backend)( + delayed(inc)(i) for i in range(10)) + + return record + + fut = client.submit(func_using_joblib_parallel) + record = fut.result() + + assert len(record) == 1 + warning = record[0].message + assert isinstance(warning, UserWarning) + assert "distributed.worker.daemon" in str(warning) + finally: + client.close(timeout=30) + cluster.close(timeout=30) diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/test/test_disk.py b/env-llmeval/lib/python3.10/site-packages/joblib/test/test_disk.py new file mode 100644 index 0000000000000000000000000000000000000000..b825a8b3a5c18a3114f34ed9d7c90cce62799085 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/joblib/test/test_disk.py @@ -0,0 +1,71 @@ +""" +Unit tests for the disk utilities. +""" + +# Authors: Gael Varoquaux +# Lars Buitinck +# Copyright (c) 2010 Gael Varoquaux +# License: BSD Style, 3 clauses. + +from __future__ import with_statement +import array +import os + +from joblib.disk import disk_used, memstr_to_bytes, mkdirp, rm_subdirs +from joblib.testing import parametrize, raises + +############################################################################### + + +def test_disk_used(tmpdir): + cachedir = tmpdir.strpath + # Not write a file that is 1M big in this directory, and check the + # size. The reason we use such a big file is that it makes us robust + # to errors due to block allocation. 
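+ # target_size below is expressed in kilobytes: with the usual 4-byte 'i' + # itemsize, the array holds 1024 * 1024 / 4 = 262144 integers, i.e. about + # 1 MiB once written to disk.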
+ a = array.array('i') + sizeof_i = a.itemsize + target_size = 1024 + n = int(target_size * 1024 / sizeof_i) + a = array.array('i', n * (1,)) + with open(os.path.join(cachedir, 'test'), 'wb') as output: + a.tofile(output) + assert disk_used(cachedir) >= target_size + assert disk_used(cachedir) < target_size + 12 + + +@parametrize('text,value', + [('80G', 80 * 1024 ** 3), + ('1.4M', int(1.4 * 1024 ** 2)), + ('120M', 120 * 1024 ** 2), + ('53K', 53 * 1024)]) +def test_memstr_to_bytes(text, value): + assert memstr_to_bytes(text) == value + + +@parametrize('text,exception,regex', + [('fooG', ValueError, r'Invalid literal for size.*fooG.*'), + ('1.4N', ValueError, r'Invalid literal for size.*1.4N.*')]) +def test_memstr_to_bytes_exception(text, exception, regex): + with raises(exception) as excinfo: + memstr_to_bytes(text) + assert excinfo.match(regex) + + +def test_mkdirp(tmpdir): + mkdirp(os.path.join(tmpdir.strpath, 'ham')) + mkdirp(os.path.join(tmpdir.strpath, 'ham')) + mkdirp(os.path.join(tmpdir.strpath, 'spam', 'spam')) + + # Not all OSErrors are ignored + with raises(OSError): + mkdirp('') + + +def test_rm_subdirs(tmpdir): + sub_path = os.path.join(tmpdir.strpath, "am", "stram") + full_path = os.path.join(sub_path, "gram") + mkdirp(os.path.join(full_path)) + + rm_subdirs(sub_path) + assert os.path.exists(sub_path) + assert not os.path.exists(full_path) diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/test/test_func_inspect.py b/env-llmeval/lib/python3.10/site-packages/joblib/test/test_func_inspect.py new file mode 100644 index 0000000000000000000000000000000000000000..dba237d48578e5d6386e67e80f3e6d31761108d6 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/joblib/test/test_func_inspect.py @@ -0,0 +1,310 @@ +""" +Test the func_inspect module. +""" + +# Author: Gael Varoquaux +# Copyright (c) 2009 Gael Varoquaux +# License: BSD Style, 3 clauses. + +import functools + +from joblib.func_inspect import filter_args, get_func_name, get_func_code +from joblib.func_inspect import _clean_win_chars, format_signature +from joblib.memory import Memory +from joblib.test.common import with_numpy +from joblib.testing import fixture, parametrize, raises + + +############################################################################### +# Module-level functions and fixture, for tests +def f(x, y=0): + pass + + +def g(x): + pass + + +def h(x, y=0, *args, **kwargs): + pass + + +def i(x=1): + pass + + +def j(x, y, **kwargs): + pass + + +def k(*args, **kwargs): + pass + + +def m1(x, *, y): + pass + + +def m2(x, *, y, z=3): + pass + + +@fixture(scope='module') +def cached_func(tmpdir_factory): + # Create a Memory object to test decorated functions. + # We should be careful not to call the decorated functions, so that + # cache directories are not created in the temp dir. 
+ cachedir = tmpdir_factory.mktemp("joblib_test_func_inspect") + mem = Memory(cachedir.strpath) + + @mem.cache + def cached_func_inner(x): + return x + + return cached_func_inner + + +class Klass(object): + + def f(self, x): + return x + + +############################################################################### +# Tests + +@parametrize('func,args,filtered_args', + [(f, [[], (1, )], {'x': 1, 'y': 0}), + (f, [['x'], (1, )], {'y': 0}), + (f, [['y'], (0, )], {'x': 0}), + (f, [['y'], (0, ), {'y': 1}], {'x': 0}), + (f, [['x', 'y'], (0, )], {}), + (f, [[], (0,), {'y': 1}], {'x': 0, 'y': 1}), + (f, [['y'], (), {'x': 2, 'y': 1}], {'x': 2}), + (g, [[], (), {'x': 1}], {'x': 1}), + (i, [[], (2, )], {'x': 2})]) +def test_filter_args(func, args, filtered_args): + assert filter_args(func, *args) == filtered_args + + +def test_filter_args_method(): + obj = Klass() + assert filter_args(obj.f, [], (1, )) == {'x': 1, 'self': obj} + + +@parametrize('func,args,filtered_args', + [(h, [[], (1, )], + {'x': 1, 'y': 0, '*': [], '**': {}}), + (h, [[], (1, 2, 3, 4)], + {'x': 1, 'y': 2, '*': [3, 4], '**': {}}), + (h, [[], (1, 25), {'ee': 2}], + {'x': 1, 'y': 25, '*': [], '**': {'ee': 2}}), + (h, [['*'], (1, 2, 25), {'ee': 2}], + {'x': 1, 'y': 2, '**': {'ee': 2}})]) +def test_filter_varargs(func, args, filtered_args): + assert filter_args(func, *args) == filtered_args + + +test_filter_kwargs_extra_params = [ + (m1, [[], (1,), {'y': 2}], {'x': 1, 'y': 2}), + (m2, [[], (1,), {'y': 2}], {'x': 1, 'y': 2, 'z': 3}) +] + + +@parametrize('func,args,filtered_args', + [(k, [[], (1, 2), {'ee': 2}], + {'*': [1, 2], '**': {'ee': 2}}), + (k, [[], (3, 4)], + {'*': [3, 4], '**': {}})] + + test_filter_kwargs_extra_params) +def test_filter_kwargs(func, args, filtered_args): + assert filter_args(func, *args) == filtered_args + + +def test_filter_args_2(): + assert (filter_args(j, [], (1, 2), {'ee': 2}) == + {'x': 1, 'y': 2, '**': {'ee': 2}}) + + ff = functools.partial(f, 1) + # filter_args has to special-case partial + assert filter_args(ff, [], (1, )) == {'*': [1], '**': {}} + assert filter_args(ff, ['y'], (1, )) == {'*': [1], '**': {}} + + +@parametrize('func,funcname', [(f, 'f'), (g, 'g'), + (cached_func, 'cached_func')]) +def test_func_name(func, funcname): + # Check that we are not confused by decoration + # here testcase 'cached_func' is the function itself + assert get_func_name(func)[1] == funcname + + +def test_func_name_on_inner_func(cached_func): + # Check that we are not confused by decoration + # here testcase 'cached_func' is the 'cached_func_inner' function + # returned by 'cached_func' fixture + assert get_func_name(cached_func)[1] == 'cached_func_inner' + + +def test_func_name_collision_on_inner_func(): + # Check that two functions defining and caching an inner function + # with the same do not cause (module, name) collision + def f(): + def inner_func(): + return # pragma: no cover + return get_func_name(inner_func) + + def g(): + def inner_func(): + return # pragma: no cover + return get_func_name(inner_func) + + module, name = f() + other_module, other_name = g() + + assert name == other_name + assert module != other_module + + +def test_func_inspect_errors(): + # Check that func_inspect is robust and will work on weird objects + assert get_func_name('a'.lower)[-1] == 'lower' + assert get_func_code('a'.lower)[1:] == (None, -1) + ff = lambda x: x # noqa: E731 + assert get_func_name(ff, win_characters=False)[-1] == '' + assert get_func_code(ff)[1] == __file__.replace('.pyc', '.py') + # Simulate a function 
defined in __main__ + ff.__module__ = '__main__' + assert get_func_name(ff, win_characters=False)[-1] == '' + assert get_func_code(ff)[1] == __file__.replace('.pyc', '.py') + + +def func_with_kwonly_args(a, b, *, kw1='kw1', kw2='kw2'): + pass + + +def func_with_signature(a: int, b: int) -> None: + pass + + +def test_filter_args_edge_cases(): + assert ( + filter_args(func_with_kwonly_args, [], (1, 2), + {'kw1': 3, 'kw2': 4}) == + {'a': 1, 'b': 2, 'kw1': 3, 'kw2': 4}) + + # filter_args doesn't care about keyword-only arguments so you + # can pass 'kw1' into *args without any problem + with raises(ValueError) as excinfo: + filter_args(func_with_kwonly_args, [], (1, 2, 3), {'kw2': 2}) + excinfo.match("Keyword-only parameter 'kw1' was passed as positional " + "parameter") + + assert ( + filter_args(func_with_kwonly_args, ['b', 'kw2'], (1, 2), + {'kw1': 3, 'kw2': 4}) == + {'a': 1, 'kw1': 3}) + + assert (filter_args(func_with_signature, ['b'], (1, 2)) == {'a': 1}) + + +def test_bound_methods(): + """ Make sure that calling the same method on two different instances + of the same class does resolv to different signatures. + """ + a = Klass() + b = Klass() + assert filter_args(a.f, [], (1, )) != filter_args(b.f, [], (1, )) + + +@parametrize('exception,regex,func,args', + [(ValueError, 'ignore_lst must be a list of parameters to ignore', + f, ['bar', (None, )]), + (ValueError, r'Ignore list: argument \'(.*)\' is not defined', + g, [['bar'], (None, )]), + (ValueError, 'Wrong number of arguments', + h, [[]])]) +def test_filter_args_error_msg(exception, regex, func, args): + """ Make sure that filter_args returns decent error messages, for the + sake of the user. + """ + with raises(exception) as excinfo: + filter_args(func, *args) + excinfo.match(regex) + + +def test_filter_args_no_kwargs_mutation(): + """None-regression test against 0.12.0 changes. + + https://github.com/joblib/joblib/pull/75 + + Make sure filter args doesn't mutate the kwargs dict that gets passed in. + """ + kwargs = {'x': 0} + filter_args(g, [], [], kwargs) + assert kwargs == {'x': 0} + + +def test_clean_win_chars(): + string = r'C:\foo\bar\main.py' + mangled_string = _clean_win_chars(string) + for char in ('\\', ':', '<', '>', '!'): + assert char not in mangled_string + + +@parametrize('func,args,kwargs,sgn_expected', + [(g, [list(range(5))], {}, 'g([0, 1, 2, 3, 4])'), + (k, [1, 2, (3, 4)], {'y': True}, 'k(1, 2, (3, 4), y=True)')]) +def test_format_signature(func, args, kwargs, sgn_expected): + # Test signature formatting. + path, sgn_result = format_signature(func, *args, **kwargs) + assert sgn_result == sgn_expected + + +def test_format_signature_long_arguments(): + shortening_threshold = 1500 + # shortening gets it down to 700 characters but there is the name + # of the function in the signature and a few additional things + # like dots for the ellipsis + shortening_target = 700 + 10 + + arg = 'a' * shortening_threshold + _, signature = format_signature(h, arg) + assert len(signature) < shortening_target + + nb_args = 5 + args = [arg for _ in range(nb_args)] + _, signature = format_signature(h, *args) + assert len(signature) < shortening_target * nb_args + + kwargs = {str(i): arg for i, arg in enumerate(args)} + _, signature = format_signature(h, **kwargs) + assert len(signature) < shortening_target * nb_args + + _, signature = format_signature(h, *args, **kwargs) + assert len(signature) < shortening_target * 2 * nb_args + + +@with_numpy +def test_format_signature_numpy(): + """ Test the format signature formatting with numpy. 
+ """ + + +def test_special_source_encoding(): + from joblib.test.test_func_inspect_special_encoding import big5_f + func_code, source_file, first_line = get_func_code(big5_f) + assert first_line == 5 + assert "def big5_f():" in func_code + assert "test_func_inspect_special_encoding" in source_file + + +def _get_code(): + from joblib.test.test_func_inspect_special_encoding import big5_f + return get_func_code(big5_f)[0] + + +def test_func_code_consistency(): + from joblib.parallel import Parallel, delayed + codes = Parallel(n_jobs=2)(delayed(_get_code)() for _ in range(5)) + assert len(set(codes)) == 1 diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/test/test_func_inspect_special_encoding.py b/env-llmeval/lib/python3.10/site-packages/joblib/test/test_func_inspect_special_encoding.py new file mode 100644 index 0000000000000000000000000000000000000000..6c41a59a6900ced36050bf357359c1164a11fdbe --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/joblib/test/test_func_inspect_special_encoding.py @@ -0,0 +1,9 @@ +# -*- coding: big5 -*- + + +# Some Traditional Chinese characters: ¤@¨Ç¤¤¤å¦r²Å +def big5_f(): + """¥Î©ó´ú¸Õªº¨ç¼Æ + """ + # µùÄÀ + return 0 diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/test/test_hashing.py b/env-llmeval/lib/python3.10/site-packages/joblib/test/test_hashing.py new file mode 100644 index 0000000000000000000000000000000000000000..85593d297f6e2387c58cad8d5bceba4d21b1c0aa --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/joblib/test/test_hashing.py @@ -0,0 +1,495 @@ +""" +Test the hashing module. +""" + +# Author: Gael Varoquaux +# Copyright (c) 2009 Gael Varoquaux +# License: BSD Style, 3 clauses. + +import time +import hashlib +import sys +import gc +import io +import collections +import itertools +import pickle +import random +from concurrent.futures import ProcessPoolExecutor +from decimal import Decimal + +from joblib.hashing import hash +from joblib.func_inspect import filter_args +from joblib.memory import Memory +from joblib.testing import raises, skipif, fixture, parametrize +from joblib.test.common import np, with_numpy + + +def unicode(s): + return s + + +############################################################################### +# Helper functions for the tests +def time_func(func, *args): + """ Time function func on *args. + """ + times = list() + for _ in range(3): + t1 = time.time() + func(*args) + times.append(time.time() - t1) + return min(times) + + +def relative_time(func1, func2, *args): + """ Return the relative time between func1 and func2 applied on + *args. + """ + time_func1 = time_func(func1, *args) + time_func2 = time_func(func2, *args) + relative_diff = 0.5 * (abs(time_func1 - time_func2) + / (time_func1 + time_func2)) + return relative_diff + + +class Klass(object): + + def f(self, x): + return x + + +class KlassWithCachedMethod(object): + + def __init__(self, cachedir): + mem = Memory(location=cachedir) + self.f = mem.cache(self.f) + + def f(self, x): + return x + + +############################################################################### +# Tests + +input_list = [1, 2, 1., 2., 1 + 1j, 2. + 1j, + 'a', 'b', + (1,), (1, 1,), [1, ], [1, 1, ], + {1: 1}, {1: 2}, {2: 1}, + None, + gc.collect, + [1, ].append, + # Next 2 sets have unorderable elements in python 3. + set(('a', 1)), + set(('a', 1, ('a', 1))), + # Next 2 dicts have unorderable type of keys in python 3. 
+ {'a': 1, 1: 2}, + {'a': 1, 1: 2, 'd': {'a': 1}}] + + +@parametrize('obj1', input_list) +@parametrize('obj2', input_list) +def test_trivial_hash(obj1, obj2): + """Smoke test hash on various types.""" + # Check that 2 objects have the same hash only if they are the same. + are_hashes_equal = hash(obj1) == hash(obj2) + are_objs_identical = obj1 is obj2 + assert are_hashes_equal == are_objs_identical + + +def test_hash_methods(): + # Check that hashing instance methods works + a = io.StringIO(unicode('a')) + assert hash(a.flush) == hash(a.flush) + a1 = collections.deque(range(10)) + a2 = collections.deque(range(9)) + assert hash(a1.extend) != hash(a2.extend) + + +@fixture(scope='function') +@with_numpy +def three_np_arrays(): + rnd = np.random.RandomState(0) + arr1 = rnd.random_sample((10, 10)) + arr2 = arr1.copy() + arr3 = arr2.copy() + arr3[0] += 1 + return arr1, arr2, arr3 + + +def test_hash_numpy_arrays(three_np_arrays): + arr1, arr2, arr3 = three_np_arrays + + for obj1, obj2 in itertools.product(three_np_arrays, repeat=2): + are_hashes_equal = hash(obj1) == hash(obj2) + are_arrays_equal = np.all(obj1 == obj2) + assert are_hashes_equal == are_arrays_equal + + assert hash(arr1) != hash(arr1.T) + + +def test_hash_numpy_dict_of_arrays(three_np_arrays): + arr1, arr2, arr3 = three_np_arrays + + d1 = {1: arr1, 2: arr2} + d2 = {1: arr2, 2: arr1} + d3 = {1: arr2, 2: arr3} + + assert hash(d1) == hash(d2) + assert hash(d1) != hash(d3) + + +@with_numpy +@parametrize('dtype', ['datetime64[s]', 'timedelta64[D]']) +def test_numpy_datetime_array(dtype): + # memoryview is not supported for some dtypes e.g. datetime64 + # see https://github.com/joblib/joblib/issues/188 for more details + a_hash = hash(np.arange(10)) + array = np.arange(0, 10, dtype=dtype) + assert hash(array) != a_hash + + +@with_numpy +def test_hash_numpy_noncontiguous(): + a = np.asarray(np.arange(6000).reshape((1000, 2, 3)), + order='F')[:, :1, :] + b = np.ascontiguousarray(a) + assert hash(a) != hash(b) + + c = np.asfortranarray(a) + assert hash(a) != hash(c) + + +@with_numpy +@parametrize('coerce_mmap', [True, False]) +def test_hash_memmap(tmpdir, coerce_mmap): + """Check that memmap and arrays hash identically if coerce_mmap is True.""" + filename = tmpdir.join('memmap_temp').strpath + try: + m = np.memmap(filename, shape=(10, 10), mode='w+') + a = np.asarray(m) + are_hashes_equal = (hash(a, coerce_mmap=coerce_mmap) == + hash(m, coerce_mmap=coerce_mmap)) + assert are_hashes_equal == coerce_mmap + finally: + if 'm' in locals(): + del m + # Force a garbage-collection cycle, to be certain that the + # object is delete, and we don't run in a problem under + # Windows with a file handle still open. 
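The coerce_mmap flag exercised just above controls whether a numpy.memmap is hashed as its own type or as a plain ndarray. A small standalone sketch of that behaviour, assuming numpy is available and using a throwaway temporary file:

import os
import tempfile
import numpy as np
from joblib.hashing import hash as joblib_hash

fname = os.path.join(tempfile.mkdtemp(), 'example.mmap')
m = np.memmap(fname, dtype=np.float64, shape=(10, 10), mode='w+')
a = np.asarray(m)                      # ndarray view over the same buffer

# By default the container type takes part in the digest, so the two differ...
assert joblib_hash(a) != joblib_hash(m)

# ...while coerce_mmap=True hashes the memmap as if it were a plain ndarray.
assert joblib_hash(a, coerce_mmap=True) == joblib_hash(m, coerce_mmap=True)

del m, a                               # release the file handle before cleanup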
+ gc.collect() + + +@with_numpy +@skipif(sys.platform == 'win32', reason='This test is not stable under windows' + ' for some reason') +def test_hash_numpy_performance(): + """ Check the performance of hashing numpy arrays: + + In [22]: a = np.random.random(1000000) + + In [23]: %timeit hashlib.md5(a).hexdigest() + 100 loops, best of 3: 20.7 ms per loop + + In [24]: %timeit hashlib.md5(pickle.dumps(a, protocol=2)).hexdigest() + 1 loops, best of 3: 73.1 ms per loop + + In [25]: %timeit hashlib.md5(cPickle.dumps(a, protocol=2)).hexdigest() + 10 loops, best of 3: 53.9 ms per loop + + In [26]: %timeit hash(a) + 100 loops, best of 3: 20.8 ms per loop + """ + rnd = np.random.RandomState(0) + a = rnd.random_sample(1000000) + + def md5_hash(x): + return hashlib.md5(memoryview(x)).hexdigest() + + relative_diff = relative_time(md5_hash, hash, a) + assert relative_diff < 0.3 + + # Check that hashing an tuple of 3 arrays takes approximately + # 3 times as much as hashing one array + time_hashlib = 3 * time_func(md5_hash, a) + time_hash = time_func(hash, (a, a, a)) + relative_diff = 0.5 * (abs(time_hash - time_hashlib) + / (time_hash + time_hashlib)) + assert relative_diff < 0.3 + + +def test_bound_methods_hash(): + """ Make sure that calling the same method on two different instances + of the same class does resolve to the same hashes. + """ + a = Klass() + b = Klass() + assert (hash(filter_args(a.f, [], (1, ))) == + hash(filter_args(b.f, [], (1, )))) + + +def test_bound_cached_methods_hash(tmpdir): + """ Make sure that calling the same _cached_ method on two different + instances of the same class does resolve to the same hashes. + """ + a = KlassWithCachedMethod(tmpdir.strpath) + b = KlassWithCachedMethod(tmpdir.strpath) + assert (hash(filter_args(a.f.func, [], (1, ))) == + hash(filter_args(b.f.func, [], (1, )))) + + +@with_numpy +def test_hash_object_dtype(): + """ Make sure that ndarrays with dtype `object' hash correctly.""" + + a = np.array([np.arange(i) for i in range(6)], dtype=object) + b = np.array([np.arange(i) for i in range(6)], dtype=object) + + assert hash(a) == hash(b) + + +@with_numpy +def test_numpy_scalar(): + # Numpy scalars are built from compiled functions, and lead to + # strange pickling paths explored, that can give hash collisions + a = np.float64(2.0) + b = np.float64(3.0) + assert hash(a) != hash(b) + + +def test_dict_hash(tmpdir): + # Check that dictionaries hash consistently, even though the ordering + # of the keys is not guaranteed + k = KlassWithCachedMethod(tmpdir.strpath) + + d = {'#s12069__c_maps.nii.gz': [33], + '#s12158__c_maps.nii.gz': [33], + '#s12258__c_maps.nii.gz': [33], + '#s12277__c_maps.nii.gz': [33], + '#s12300__c_maps.nii.gz': [33], + '#s12401__c_maps.nii.gz': [33], + '#s12430__c_maps.nii.gz': [33], + '#s13817__c_maps.nii.gz': [33], + '#s13903__c_maps.nii.gz': [33], + '#s13916__c_maps.nii.gz': [33], + '#s13981__c_maps.nii.gz': [33], + '#s13982__c_maps.nii.gz': [33], + '#s13983__c_maps.nii.gz': [33]} + + a = k.f(d) + b = k.f(a) + + assert hash(a) == hash(b) + + +def test_set_hash(tmpdir): + # Check that sets hash consistently, even though their ordering + # is not guaranteed + k = KlassWithCachedMethod(tmpdir.strpath) + + s = set(['#s12069__c_maps.nii.gz', + '#s12158__c_maps.nii.gz', + '#s12258__c_maps.nii.gz', + '#s12277__c_maps.nii.gz', + '#s12300__c_maps.nii.gz', + '#s12401__c_maps.nii.gz', + '#s12430__c_maps.nii.gz', + '#s13817__c_maps.nii.gz', + '#s13903__c_maps.nii.gz', + '#s13916__c_maps.nii.gz', + '#s13981__c_maps.nii.gz', + 
'#s13982__c_maps.nii.gz', + '#s13983__c_maps.nii.gz']) + + a = k.f(s) + b = k.f(a) + + assert hash(a) == hash(b) + + +def test_set_decimal_hash(): + # Check that sets containing decimals hash consistently, even though + # ordering is not guaranteed + assert (hash(set([Decimal(0), Decimal('NaN')])) == + hash(set([Decimal('NaN'), Decimal(0)]))) + + +def test_string(): + # Test that we obtain the same hash for object owning several strings, + # whatever the past of these strings (which are immutable in Python) + string = 'foo' + a = {string: 'bar'} + b = {string: 'bar'} + c = pickle.loads(pickle.dumps(b)) + assert hash([a, b]) == hash([a, c]) + + +@with_numpy +def test_numpy_dtype_pickling(): + # numpy dtype hashing is tricky to get right: see #231, #239, #251 #1080, + # #1082, and explanatory comments inside + # ``joblib.hashing.NumpyHasher.save``. + + # In this test, we make sure that the pickling of numpy dtypes is robust to + # object identity and object copy. + + dt1 = np.dtype('f4') + dt2 = np.dtype('f4') + + # simple dtypes objects are interned + assert dt1 is dt2 + assert hash(dt1) == hash(dt2) + + dt1_roundtripped = pickle.loads(pickle.dumps(dt1)) + assert dt1 is not dt1_roundtripped + assert hash(dt1) == hash(dt1_roundtripped) + + assert hash([dt1, dt1]) == hash([dt1_roundtripped, dt1_roundtripped]) + assert hash([dt1, dt1]) == hash([dt1, dt1_roundtripped]) + + complex_dt1 = np.dtype( + [('name', np.str_, 16), ('grades', np.float64, (2,))] + ) + complex_dt2 = np.dtype( + [('name', np.str_, 16), ('grades', np.float64, (2,))] + ) + + # complex dtypes objects are not interned + assert hash(complex_dt1) == hash(complex_dt2) + + complex_dt1_roundtripped = pickle.loads(pickle.dumps(complex_dt1)) + assert complex_dt1_roundtripped is not complex_dt1 + assert hash(complex_dt1) == hash(complex_dt1_roundtripped) + + assert hash([complex_dt1, complex_dt1]) == hash( + [complex_dt1_roundtripped, complex_dt1_roundtripped] + ) + assert hash([complex_dt1, complex_dt1]) == hash( + [complex_dt1_roundtripped, complex_dt1] + ) + + +@parametrize('to_hash,expected', + [('This is a string to hash', + '71b3f47df22cb19431d85d92d0b230b2'), + (u"C'est l\xe9t\xe9", + '2d8d189e9b2b0b2e384d93c868c0e576'), + ((123456, 54321, -98765), + 'e205227dd82250871fa25aa0ec690aa3'), + ([random.Random(42).random() for _ in range(5)], + 'a11ffad81f9682a7d901e6edc3d16c84'), + ({'abcde': 123, 'sadfas': [-9999, 2, 3]}, + 'aeda150553d4bb5c69f0e69d51b0e2ef')]) +def test_hashes_stay_the_same(to_hash, expected): + # We want to make sure that hashes don't change with joblib + # version. For end users, that would mean that they have to + # regenerate their cache from scratch, which potentially means + # lengthy recomputations. + # Expected results have been generated with joblib 0.9.2 + assert hash(to_hash) == expected + + +@with_numpy +def test_hashes_are_different_between_c_and_fortran_contiguous_arrays(): + # We want to be sure that the c-contiguous and f-contiguous versions of the + # same array produce 2 different hashes. 
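The comment above introduces the layout-sensitivity check that continues below: arrays with equal values but different memory layout are expected to hash differently, since dtype, shape and strides feed into the digest. A standalone sketch of the same contract:

import numpy as np
from joblib.hashing import hash as joblib_hash

arr_c = np.arange(12.0).reshape(3, 4)   # C-contiguous
arr_f = np.asfortranarray(arr_c)        # same values, Fortran-ordered

assert np.array_equal(arr_c, arr_f)
assert joblib_hash(arr_c) != joblib_hash(arr_f)    # layout changes the digest
assert joblib_hash(arr_c) != joblib_hash(arr_c.T)  # so does a transposed view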
+ rng = np.random.RandomState(0) + arr_c = rng.random_sample((10, 10)) + arr_f = np.asfortranarray(arr_c) + assert hash(arr_c) != hash(arr_f) + + +@with_numpy +def test_0d_array(): + hash(np.array(0)) + + +@with_numpy +def test_0d_and_1d_array_hashing_is_different(): + assert hash(np.array(0)) != hash(np.array([0])) + + +@with_numpy +def test_hashes_stay_the_same_with_numpy_objects(): + # Note: joblib used to test numpy objects hashing by comparing the produced + # hash of an object with some hard-coded target value to guarantee that + # hashing remains the same across joblib versions. However, since numpy + # 1.20 and joblib 1.0, joblib relies on potentially unstable implementation + # details of numpy to hash np.dtype objects, which makes the stability of + # hash values across different environments hard to guarantee and to test. + # As a result, hashing stability across joblib versions becomes best-effort + # only, and we only test the consistency within a single environment by + # making sure: + # - the hash of two copies of the same objects is the same + # - hashing some object in two different python processes produces the same + # value. This should be viewed as a proxy for testing hash consistency + # through time between Python sessions (provided no change in the + # environment was done between sessions). + + def create_objects_to_hash(): + rng = np.random.RandomState(42) + # Being explicit about dtypes in order to avoid + # architecture-related differences. Also using 'f4' rather than + # 'f8' for float arrays because 'f8' arrays generated by + # rng.random.randn don't seem to be bit-identical on 32bit and + # 64bit machines. + to_hash_list = [ + rng.randint(-1000, high=1000, size=50).astype(' +# Copyright (c) 2009 Gael Varoquaux +# License: BSD Style, 3 clauses. +import re + +from joblib.logger import PrintTime + + +def test_print_time(tmpdir, capsys): + # A simple smoke test for PrintTime. + logfile = tmpdir.join('test.log').strpath + print_time = PrintTime(logfile=logfile) + print_time('Foo') + # Create a second time, to smoke test log rotation. 
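For context on the PrintTime smoke test begun above, here is a hedged sketch of how the helper is used on its own; the output format is approximate and the log-rotation detail is inferred from the test's own comment:

import os
import tempfile
from joblib.logger import PrintTime

logfile = os.path.join(tempfile.mkdtemp(), 'timings.log')
print_time = PrintTime(logfile=logfile)
# ... run some work here ...
print_time('step 1 done')     # prints something like "step 1 done: 0.0s, 0.0min"

# Re-creating PrintTime with the same logfile rotates the previous log file.
print_time = PrintTime(logfile=logfile)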
+ print_time = PrintTime(logfile=logfile) + print_time('Foo') + # And a third time + print_time = PrintTime(logfile=logfile) + print_time('Foo') + + out_printed_text, err_printed_text = capsys.readouterr() + # Use regexps to be robust to time variations + match = r"Foo: 0\..s, 0\..min\nFoo: 0\..s, 0..min\nFoo: " + \ + r".\..s, 0..min\n" + if not re.match(match, err_printed_text): + raise AssertionError('Excepted %s, got %s' % + (match, err_printed_text)) diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/test/test_memmapping.py b/env-llmeval/lib/python3.10/site-packages/joblib/test/test_memmapping.py new file mode 100644 index 0000000000000000000000000000000000000000..42a297a9e445d38c0daa03bc7d78e4c4f1fd4571 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/joblib/test/test_memmapping.py @@ -0,0 +1,1191 @@ +import os +import mmap +import sys +import platform +import gc +import pickle +import itertools +from time import sleep +import subprocess +import threading +import faulthandler + +import pytest + +from joblib.test.common import with_numpy, np +from joblib.test.common import with_multiprocessing +from joblib.test.common import with_dev_shm +from joblib.testing import raises, parametrize, skipif +from joblib.backports import make_memmap +from joblib.parallel import Parallel, delayed + +from joblib.pool import MemmappingPool +from joblib.executor import _TestingMemmappingExecutor as TestExecutor +from joblib._memmapping_reducer import has_shareable_memory +from joblib._memmapping_reducer import ArrayMemmapForwardReducer +from joblib._memmapping_reducer import _strided_from_memmap +from joblib._memmapping_reducer import _get_temp_dir +from joblib._memmapping_reducer import _WeakArrayKeyMap +from joblib._memmapping_reducer import _get_backing_memmap +import joblib._memmapping_reducer as jmr + + +def setup_module(): + faulthandler.dump_traceback_later(timeout=300, exit=True) + + +def teardown_module(): + faulthandler.cancel_dump_traceback_later() + + +def check_memmap_and_send_back(array): + assert _get_backing_memmap(array) is not None + return array + + +def check_array(args): + """Dummy helper function to be executed in subprocesses + + Check that the provided array has the expected values in the provided + range. + + """ + data, position, expected = args + np.testing.assert_array_equal(data[position], expected) + + +def inplace_double(args): + """Dummy helper function to be executed in subprocesses + + + Check that the input array has the right values in the provided range + and perform an inplace modification to double the values in the range by + two. 
+ + """ + data, position, expected = args + assert data[position] == expected + data[position] *= 2 + np.testing.assert_array_equal(data[position], 2 * expected) + + +@with_numpy +@with_multiprocessing +def test_memmap_based_array_reducing(tmpdir): + """Check that it is possible to reduce a memmap backed array""" + assert_array_equal = np.testing.assert_array_equal + filename = tmpdir.join('test.mmap').strpath + + # Create a file larger than what will be used by a + buffer = np.memmap(filename, dtype=np.float64, shape=500, mode='w+') + + # Fill the original buffer with negative markers to detect over of + # underflow in case of test failures + buffer[:] = - 1.0 * np.arange(buffer.shape[0], dtype=buffer.dtype) + buffer.flush() + + # Memmap a 2D fortran array on a offsetted subsection of the previous + # buffer + a = np.memmap(filename, dtype=np.float64, shape=(3, 5, 4), + mode='r+', order='F', offset=4) + a[:] = np.arange(60).reshape(a.shape) + + # Build various views that share the buffer with the original memmap + + # b is an memmap sliced view on an memmap instance + b = a[1:-1, 2:-1, 2:4] + + # c and d are array views + c = np.asarray(b) + d = c.T + + # Array reducer with auto dumping disabled + reducer = ArrayMemmapForwardReducer(None, tmpdir.strpath, 'c', True) + + def reconstruct_array_or_memmap(x): + cons, args = reducer(x) + return cons(*args) + + # Reconstruct original memmap + a_reconstructed = reconstruct_array_or_memmap(a) + assert has_shareable_memory(a_reconstructed) + assert isinstance(a_reconstructed, np.memmap) + assert_array_equal(a_reconstructed, a) + + # Reconstruct strided memmap view + b_reconstructed = reconstruct_array_or_memmap(b) + assert has_shareable_memory(b_reconstructed) + assert_array_equal(b_reconstructed, b) + + # Reconstruct arrays views on memmap base + c_reconstructed = reconstruct_array_or_memmap(c) + assert not isinstance(c_reconstructed, np.memmap) + assert has_shareable_memory(c_reconstructed) + assert_array_equal(c_reconstructed, c) + + d_reconstructed = reconstruct_array_or_memmap(d) + assert not isinstance(d_reconstructed, np.memmap) + assert has_shareable_memory(d_reconstructed) + assert_array_equal(d_reconstructed, d) + + # Test graceful degradation on fake memmap instances with in-memory + # buffers + a3 = a * 3 + assert not has_shareable_memory(a3) + a3_reconstructed = reconstruct_array_or_memmap(a3) + assert not has_shareable_memory(a3_reconstructed) + assert not isinstance(a3_reconstructed, np.memmap) + assert_array_equal(a3_reconstructed, a * 3) + + # Test graceful degradation on arrays derived from fake memmap instances + b3 = np.asarray(a3) + assert not has_shareable_memory(b3) + + b3_reconstructed = reconstruct_array_or_memmap(b3) + assert isinstance(b3_reconstructed, np.ndarray) + assert not has_shareable_memory(b3_reconstructed) + assert_array_equal(b3_reconstructed, b3) + + +@with_multiprocessing +@skipif((sys.platform != "win32") or (), + reason="PermissionError only easily triggerable on Windows") +def test_resource_tracker_retries_when_permissionerror(tmpdir): + # Test resource_tracker retry mechanism when unlinking memmaps. See more + # thorough information in the ``unlink_file`` documentation of joblib. 
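The reducer test above works at the pickling layer; the user-facing way to obtain memmap-backed arrays that joblib can forward to workers by filename is joblib.dump followed by joblib.load with an mmap_mode. A sketch under that assumption (the path is illustrative):

import os
import tempfile
import numpy as np
import joblib
from joblib._memmapping_reducer import has_shareable_memory

path = os.path.join(tempfile.mkdtemp(), 'big_array.joblib')
joblib.dump(np.arange(1_000_000, dtype=np.float64), path)

arr = joblib.load(path, mmap_mode='r')   # np.memmap, the data stays on disk
assert isinstance(arr, np.memmap)
assert has_shareable_memory(arr)         # eligible for zero-copy forwarding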
+ filename = tmpdir.join('test.mmap').strpath + cmd = """if 1: + import os + import numpy as np + import time + from joblib.externals.loky.backend import resource_tracker + resource_tracker.VERBOSE = 1 + + # Start the resource tracker + resource_tracker.ensure_running() + time.sleep(1) + + # Create a file containing numpy data + memmap = np.memmap(r"{filename}", dtype=np.float64, shape=10, mode='w+') + memmap[:] = np.arange(10).astype(np.int8).data + memmap.flush() + assert os.path.exists(r"{filename}") + del memmap + + # Create a np.memmap backed by this file + memmap = np.memmap(r"{filename}", dtype=np.float64, shape=10, mode='w+') + resource_tracker.register(r"{filename}", "file") + + # Ask the resource_tracker to delete the file backing the np.memmap , this + # should raise PermissionError that the resource_tracker will log. + resource_tracker.maybe_unlink(r"{filename}", "file") + + # Wait for the resource_tracker to process the maybe_unlink before cleaning + # up the memmap + time.sleep(2) + """.format(filename=filename) + p = subprocess.Popen([sys.executable, '-c', cmd], stderr=subprocess.PIPE, + stdout=subprocess.PIPE) + p.wait() + out, err = p.communicate() + assert p.returncode == 0 + assert out == b'' + msg = 'tried to unlink {}, got PermissionError'.format(filename) + assert msg in err.decode() + + +@with_numpy +@with_multiprocessing +def test_high_dimension_memmap_array_reducing(tmpdir): + assert_array_equal = np.testing.assert_array_equal + + filename = tmpdir.join('test.mmap').strpath + + # Create a high dimensional memmap + a = np.memmap(filename, dtype=np.float64, shape=(100, 15, 15, 3), + mode='w+') + a[:] = np.arange(100 * 15 * 15 * 3).reshape(a.shape) + + # Create some slices/indices at various dimensions + b = a[0:10] + c = a[:, 5:10] + d = a[:, :, :, 0] + e = a[1:3:4] + + # Array reducer with auto dumping disabled + reducer = ArrayMemmapForwardReducer(None, tmpdir.strpath, 'c', True) + + def reconstruct_array_or_memmap(x): + cons, args = reducer(x) + return cons(*args) + + a_reconstructed = reconstruct_array_or_memmap(a) + assert has_shareable_memory(a_reconstructed) + assert isinstance(a_reconstructed, np.memmap) + assert_array_equal(a_reconstructed, a) + + b_reconstructed = reconstruct_array_or_memmap(b) + assert has_shareable_memory(b_reconstructed) + assert_array_equal(b_reconstructed, b) + + c_reconstructed = reconstruct_array_or_memmap(c) + assert has_shareable_memory(c_reconstructed) + assert_array_equal(c_reconstructed, c) + + d_reconstructed = reconstruct_array_or_memmap(d) + assert has_shareable_memory(d_reconstructed) + assert_array_equal(d_reconstructed, d) + + e_reconstructed = reconstruct_array_or_memmap(e) + assert has_shareable_memory(e_reconstructed) + assert_array_equal(e_reconstructed, e) + + +@with_numpy +def test__strided_from_memmap(tmpdir): + fname = tmpdir.join('test.mmap').strpath + size = 5 * mmap.ALLOCATIONGRANULARITY + offset = mmap.ALLOCATIONGRANULARITY + 1 + # This line creates the mmap file that is reused later + memmap_obj = np.memmap(fname, mode='w+', shape=size + offset) + # filename, dtype, mode, offset, order, shape, strides, total_buffer_len + memmap_obj = _strided_from_memmap(fname, dtype='uint8', mode='r', + offset=offset, order='C', shape=size, + strides=None, total_buffer_len=None, + unlink_on_gc_collect=False) + assert isinstance(memmap_obj, np.memmap) + assert memmap_obj.offset == offset + memmap_backed_obj = _strided_from_memmap( + fname, dtype='uint8', mode='r', offset=offset, order='C', + shape=(size // 2,), strides=(2,), 
total_buffer_len=size, + unlink_on_gc_collect=False + ) + assert _get_backing_memmap(memmap_backed_obj).offset == offset + + +@with_numpy +@with_multiprocessing +@parametrize("factory", [MemmappingPool, TestExecutor.get_memmapping_executor], + ids=["multiprocessing", "loky"]) +def test_pool_with_memmap(factory, tmpdir): + """Check that subprocess can access and update shared memory memmap""" + assert_array_equal = np.testing.assert_array_equal + + # Fork the subprocess before allocating the objects to be passed + pool_temp_folder = tmpdir.mkdir('pool').strpath + p = factory(10, max_nbytes=2, temp_folder=pool_temp_folder) + try: + filename = tmpdir.join('test.mmap').strpath + a = np.memmap(filename, dtype=np.float32, shape=(3, 5), mode='w+') + a.fill(1.0) + + p.map(inplace_double, [(a, (i, j), 1.0) + for i in range(a.shape[0]) + for j in range(a.shape[1])]) + + assert_array_equal(a, 2 * np.ones(a.shape)) + + # Open a copy-on-write view on the previous data + b = np.memmap(filename, dtype=np.float32, shape=(5, 3), mode='c') + + p.map(inplace_double, [(b, (i, j), 2.0) + for i in range(b.shape[0]) + for j in range(b.shape[1])]) + + # Passing memmap instances to the pool should not trigger the creation + # of new files on the FS + assert os.listdir(pool_temp_folder) == [] + + # the original data is untouched + assert_array_equal(a, 2 * np.ones(a.shape)) + assert_array_equal(b, 2 * np.ones(b.shape)) + + # readonly maps can be read but not updated + c = np.memmap(filename, dtype=np.float32, shape=(10,), mode='r', + offset=5 * 4) + + with raises(AssertionError): + p.map(check_array, [(c, i, 3.0) for i in range(c.shape[0])]) + + # depending on the version of numpy one can either get a RuntimeError + # or a ValueError + with raises((RuntimeError, ValueError)): + p.map(inplace_double, [(c, i, 2.0) for i in range(c.shape[0])]) + finally: + # Clean all filehandlers held by the pool + p.terminate() + del p + + +@with_numpy +@with_multiprocessing +@parametrize("factory", [MemmappingPool, TestExecutor.get_memmapping_executor], + ids=["multiprocessing", "loky"]) +def test_pool_with_memmap_array_view(factory, tmpdir): + """Check that subprocess can access and update shared memory array""" + assert_array_equal = np.testing.assert_array_equal + + # Fork the subprocess before allocating the objects to be passed + pool_temp_folder = tmpdir.mkdir('pool').strpath + p = factory(10, max_nbytes=2, temp_folder=pool_temp_folder) + try: + + filename = tmpdir.join('test.mmap').strpath + a = np.memmap(filename, dtype=np.float32, shape=(3, 5), mode='w+') + a.fill(1.0) + + # Create an ndarray view on the memmap instance + a_view = np.asarray(a) + assert not isinstance(a_view, np.memmap) + assert has_shareable_memory(a_view) + + p.map(inplace_double, [(a_view, (i, j), 1.0) + for i in range(a.shape[0]) + for j in range(a.shape[1])]) + + # Both a and the a_view have been updated + assert_array_equal(a, 2 * np.ones(a.shape)) + assert_array_equal(a_view, 2 * np.ones(a.shape)) + + # Passing memmap array view to the pool should not trigger the + # creation of new files on the FS + assert os.listdir(pool_temp_folder) == [] + + finally: + p.terminate() + del p + + +@with_numpy +@with_multiprocessing +@parametrize("backend", ["multiprocessing", "loky"]) +def test_permission_error_windows_reference_cycle(backend): + # Non regression test for: + # https://github.com/joblib/joblib/issues/806 + # + # The issue happens when trying to delete a memory mapped file that has + # not yet been closed by one of the worker processes. 
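The pool tests above check that workers can update a shared memmap in place. Below is a sketch of the same idea through the public Parallel API; it assumes a process backend that forwards memmap arguments by filename (the documented behaviour of the default backends), so treat it as illustrative rather than authoritative:

import os
import tempfile
import numpy as np
from joblib import Parallel, delayed

fname = os.path.join(tempfile.mkdtemp(), 'shared.mmap')
shared = np.memmap(fname, dtype=np.float64, shape=10, mode='w+')

def fill_slot(mm, i):
    mm[i] = i          # in-place write into the shared, file-backed buffer
    mm.flush()

Parallel(n_jobs=2)(delayed(fill_slot)(shared, i) for i in range(10))
print(np.asarray(shared))   # expected: [0. 1. 2. ... 9.]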
+ cmd = """if 1: + import numpy as np + from joblib import Parallel, delayed + + + data = np.random.rand(int(2e6)).reshape((int(1e6), 2)) + + # Build a complex cyclic reference that is likely to delay garbage + # collection of the memmapped array in the worker processes. + first_list = current_list = [data] + for i in range(10): + current_list = [current_list] + first_list.append(current_list) + + if __name__ == "__main__": + results = Parallel(n_jobs=2, backend="{b}")( + delayed(len)(current_list) for i in range(10)) + assert results == [1] * 10 + """.format(b=backend) + p = subprocess.Popen([sys.executable, '-c', cmd], stderr=subprocess.PIPE, + stdout=subprocess.PIPE) + p.wait() + out, err = p.communicate() + assert p.returncode == 0, out.decode() + "\n\n" + err.decode() + + +@with_numpy +@with_multiprocessing +@parametrize("backend", ["multiprocessing", "loky"]) +def test_permission_error_windows_memmap_sent_to_parent(backend): + # Second non-regression test for: + # https://github.com/joblib/joblib/issues/806 + # previously, child process would not convert temporary memmaps to numpy + # arrays when sending the data back to the parent process. This would lead + # to permission errors on windows when deleting joblib's temporary folder, + # as the memmaped files handles would still opened in the parent process. + cmd = '''if 1: + import os + import time + + import numpy as np + + from joblib import Parallel, delayed + from testutils import return_slice_of_data + + data = np.ones(int(2e6)) + + if __name__ == '__main__': + # warm-up call to launch the workers and start the resource_tracker + _ = Parallel(n_jobs=2, verbose=5, backend='{b}')( + delayed(id)(i) for i in range(20)) + + time.sleep(0.5) + + slice_of_data = Parallel(n_jobs=2, verbose=5, backend='{b}')( + delayed(return_slice_of_data)(data, 0, 20) for _ in range(10)) + '''.format(b=backend) + + for _ in range(3): + env = os.environ.copy() + env['PYTHONPATH'] = os.path.dirname(__file__) + p = subprocess.Popen([sys.executable, '-c', cmd], + stderr=subprocess.PIPE, + stdout=subprocess.PIPE, env=env) + p.wait() + out, err = p.communicate() + assert p.returncode == 0, err + assert out == b'' + if sys.version_info[:3] not in [(3, 8, 0), (3, 8, 1)]: + # In early versions of Python 3.8, a reference leak + # https://github.com/cloudpipe/cloudpickle/issues/327, holds + # references to pickled objects, generating race condition during + # cleanup finalizers of joblib and noisy resource_tracker outputs. + assert b'resource_tracker' not in err + + +@with_numpy +@with_multiprocessing +@parametrize("backend", ["multiprocessing", "loky"]) +def test_parallel_isolated_temp_folders(backend): + # Test that consecutive Parallel call use isolated subfolders, even + # for the loky backend that reuses its executor instance across calls. + array = np.arange(int(1e2)) + [filename_1] = Parallel(n_jobs=2, backend=backend, max_nbytes=10)( + delayed(getattr)(array, 'filename') for _ in range(1) + ) + [filename_2] = Parallel(n_jobs=2, backend=backend, max_nbytes=10)( + delayed(getattr)(array, 'filename') for _ in range(1) + ) + assert os.path.dirname(filename_2) != os.path.dirname(filename_1) + + +@with_numpy +@with_multiprocessing +@parametrize("backend", ["multiprocessing", "loky"]) +def test_managed_backend_reuse_temp_folder(backend): + # Test that calls to a managed parallel object reuse the same memmaps. 
+ array = np.arange(int(1e2)) + with Parallel(n_jobs=2, backend=backend, max_nbytes=10) as p: + [filename_1] = p( + delayed(getattr)(array, 'filename') for _ in range(1) + ) + [filename_2] = p( + delayed(getattr)(array, 'filename') for _ in range(1) + ) + assert os.path.dirname(filename_2) == os.path.dirname(filename_1) + + +@with_numpy +@with_multiprocessing +def test_memmapping_temp_folder_thread_safety(): + # Concurrent calls to Parallel with the loky backend will use the same + # executor, and thus the same reducers. Make sure that those reducers use + # different temporary folders depending on which Parallel objects called + # them, which is necessary to limit potential race conditions during the + # garbage collection of temporary memmaps. + array = np.arange(int(1e2)) + + temp_dirs_thread_1 = set() + temp_dirs_thread_2 = set() + + def concurrent_get_filename(array, temp_dirs): + with Parallel(backend='loky', n_jobs=2, max_nbytes=10) as p: + for i in range(10): + [filename] = p( + delayed(getattr)(array, 'filename') for _ in range(1) + ) + temp_dirs.add(os.path.dirname(filename)) + + t1 = threading.Thread( + target=concurrent_get_filename, args=(array, temp_dirs_thread_1) + ) + t2 = threading.Thread( + target=concurrent_get_filename, args=(array, temp_dirs_thread_2) + ) + + t1.start() + t2.start() + + t1.join() + t2.join() + + assert len(temp_dirs_thread_1) == 1 + assert len(temp_dirs_thread_2) == 1 + + assert temp_dirs_thread_1 != temp_dirs_thread_2 + + +@with_numpy +@with_multiprocessing +def test_multithreaded_parallel_termination_resource_tracker_silent(): + # test that concurrent termination attempts of a same executor does not + # emit any spurious error from the resource_tracker. We test various + # situations making 0, 1 or both parallel call sending a task that will + # make the worker (and thus the whole Parallel call) error out. + cmd = '''if 1: + import os + import numpy as np + from joblib import Parallel, delayed + from joblib.externals.loky.backend import resource_tracker + from concurrent.futures import ThreadPoolExecutor, wait + + resource_tracker.VERBOSE = 0 + + array = np.arange(int(1e2)) + + temp_dirs_thread_1 = set() + temp_dirs_thread_2 = set() + + + def raise_error(array): + raise ValueError + + + def parallel_get_filename(array, temp_dirs): + with Parallel(backend="loky", n_jobs=2, max_nbytes=10) as p: + for i in range(10): + [filename] = p( + delayed(getattr)(array, "filename") for _ in range(1) + ) + temp_dirs.add(os.path.dirname(filename)) + + + def parallel_raise(array, temp_dirs): + with Parallel(backend="loky", n_jobs=2, max_nbytes=10) as p: + for i in range(10): + [filename] = p( + delayed(raise_error)(array) for _ in range(1) + ) + temp_dirs.add(os.path.dirname(filename)) + + + executor = ThreadPoolExecutor(max_workers=2) + + # both function calls will use the same loky executor, but with a + # different Parallel object. 
+ future_1 = executor.submit({f1}, array, temp_dirs_thread_1) + future_2 = executor.submit({f2}, array, temp_dirs_thread_2) + + # Wait for both threads to terminate their backend + wait([future_1, future_2]) + + future_1.result() + future_2.result() + ''' + functions_and_returncodes = [ + ("parallel_get_filename", "parallel_get_filename", 0), + ("parallel_get_filename", "parallel_raise", 1), + ("parallel_raise", "parallel_raise", 1) + ] + + for f1, f2, returncode in functions_and_returncodes: + p = subprocess.Popen([sys.executable, '-c', cmd.format(f1=f1, f2=f2)], + stderr=subprocess.PIPE, stdout=subprocess.PIPE) + p.wait() + out, err = p.communicate() + assert p.returncode == returncode, out.decode() + assert b"resource_tracker" not in err, err.decode() + + +@with_numpy +@with_multiprocessing +@parametrize("backend", ["multiprocessing", "loky"]) +def test_many_parallel_calls_on_same_object(backend): + # After #966 got merged, consecutive Parallel objects were sharing temp + # folder, which would lead to race conditions happening during the + # temporary resources management with the resource_tracker. This is a + # non-regression test that makes sure that consecutive Parallel operations + # on the same object do not error out. + cmd = '''if 1: + import os + import time + + import numpy as np + + from joblib import Parallel, delayed + from testutils import return_slice_of_data + + data = np.ones(100) + + if __name__ == '__main__': + for i in range(5): + slice_of_data = Parallel( + n_jobs=2, max_nbytes=1, backend='{b}')( + delayed(return_slice_of_data)(data, 0, 20) + for _ in range(10) + ) + '''.format(b=backend) + env = os.environ.copy() + env['PYTHONPATH'] = os.path.dirname(__file__) + p = subprocess.Popen( + [sys.executable, '-c', cmd], + stderr=subprocess.PIPE, + stdout=subprocess.PIPE, + env=env, + ) + p.wait() + out, err = p.communicate() + assert p.returncode == 0, err + assert out == b'' + if sys.version_info[:3] not in [(3, 8, 0), (3, 8, 1)]: + # In early versions of Python 3.8, a reference leak + # https://github.com/cloudpipe/cloudpickle/issues/327, holds + # references to pickled objects, generating race condition during + # cleanup finalizers of joblib and noisy resource_tracker outputs. + assert b'resource_tracker' not in err + + +@with_numpy +@with_multiprocessing +@parametrize("backend", ["multiprocessing", "loky"]) +def test_memmap_returned_as_regular_array(backend): + data = np.ones(int(1e3)) + # Check that child processes send temporary memmaps back as numpy arrays. + [result] = Parallel(n_jobs=2, backend=backend, max_nbytes=100)( + delayed(check_memmap_and_send_back)(data) for _ in range(1)) + assert _get_backing_memmap(result) is None + + +@with_numpy +@with_multiprocessing +@parametrize("backend", ["multiprocessing", "loky"]) +def test_resource_tracker_silent_when_reference_cycles(backend): + # There is a variety of reasons that can make joblib with loky backend + # output noisy warnings when a reference cycle is preventing a memmap from + # being garbage collected. Especially, joblib's main process finalizer + # deletes the temporary folder if it was not done before, which can + # interact badly with the resource_tracker. We don't risk leaking any + # resources, but this will likely make joblib output a lot of low-level + # confusing messages. + # + # This test makes sure that the resource_tracker is silent when a reference + # has been collected concurrently on non-Windows platforms. 
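One property relied on above is that temporary memmaps never leak back to the caller: whatever a worker returns arrives in the parent as a plain in-memory array. A standalone sketch of that round trip, using the same private helper the tests use:

import numpy as np
from joblib import Parallel, delayed
from joblib._memmapping_reducer import _get_backing_memmap

data = np.ones(int(1e6))

def passthrough(a):
    return a            # inside the worker, `a` is memmap-backed

[result] = Parallel(n_jobs=2, max_nbytes=100)(
    delayed(passthrough)(data) for _ in range(1))

assert _get_backing_memmap(result) is None   # plain ndarray back in the parent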
+ # + # Note that the script in ``cmd`` is the exact same script as in + # test_permission_error_windows_reference_cycle. + if backend == "loky" and sys.platform.startswith('win'): + # XXX: on Windows, reference cycles can delay timely garbage collection + # and make it impossible to properly delete the temporary folder in the + # main process because of permission errors. + pytest.xfail( + "The temporary folder cannot be deleted on Windows in the " + "presence of a reference cycle" + ) + + cmd = """if 1: + import numpy as np + from joblib import Parallel, delayed + + + data = np.random.rand(int(2e6)).reshape((int(1e6), 2)) + + # Build a complex cyclic reference that is likely to delay garbage + # collection of the memmapped array in the worker processes. + first_list = current_list = [data] + for i in range(10): + current_list = [current_list] + first_list.append(current_list) + + if __name__ == "__main__": + results = Parallel(n_jobs=2, backend="{b}")( + delayed(len)(current_list) for i in range(10)) + assert results == [1] * 10 + """.format(b=backend) + p = subprocess.Popen([sys.executable, '-c', cmd], stderr=subprocess.PIPE, + stdout=subprocess.PIPE) + p.wait() + out, err = p.communicate() + out = out.decode() + err = err.decode() + assert p.returncode == 0, out + "\n\n" + err + assert "resource_tracker" not in err, err + + +@with_numpy +@with_multiprocessing +@parametrize("factory", [MemmappingPool, TestExecutor.get_memmapping_executor], + ids=["multiprocessing", "loky"]) +def test_memmapping_pool_for_large_arrays(factory, tmpdir): + """Check that large arrays are not copied in memory""" + + # Check that the tempfolder is empty + assert os.listdir(tmpdir.strpath) == [] + + # Build an array reducers that automatically dump large array content + # to filesystem backed memmap instances to avoid memory explosion + p = factory(3, max_nbytes=40, temp_folder=tmpdir.strpath, verbose=2) + try: + # The temporary folder for the pool is not provisioned in advance + assert os.listdir(tmpdir.strpath) == [] + assert not os.path.exists(p._temp_folder) + + small = np.ones(5, dtype=np.float32) + assert small.nbytes == 20 + p.map(check_array, [(small, i, 1.0) for i in range(small.shape[0])]) + + # Memory has been copied, the pool filesystem folder is unused + assert os.listdir(tmpdir.strpath) == [] + + # Try with a file larger than the memmap threshold of 40 bytes + large = np.ones(100, dtype=np.float64) + assert large.nbytes == 800 + p.map(check_array, [(large, i, 1.0) for i in range(large.shape[0])]) + + # The data has been dumped in a temp folder for subprocess to share it + # without per-child memory copies + assert os.path.isdir(p._temp_folder) + dumped_filenames = os.listdir(p._temp_folder) + assert len(dumped_filenames) == 1 + + # Check that memory mapping is not triggered for arrays with + # dtype='object' + objects = np.array(['abc'] * 100, dtype='object') + results = p.map(has_shareable_memory, [objects]) + assert not results[0] + + finally: + # check FS garbage upon pool termination + p.terminate() + for i in range(10): + sleep(.1) + if not os.path.exists(p._temp_folder): + break + else: # pragma: no cover + raise AssertionError( + 'temporary folder {} was not deleted'.format(p._temp_folder) + ) + del p + + +@with_numpy +@with_multiprocessing +@parametrize( + "backend", + [ + pytest.param( + "multiprocessing", + marks=pytest.mark.xfail( + reason='https://github.com/joblib/joblib/issues/1086' + ), + ), + "loky", + ] +) +def test_child_raises_parent_exits_cleanly(backend): + # When a task 
executed by a child process raises an error, the parent + # process's backend is notified, and calls abort_everything. + # In loky, abort_everything itself calls shutdown(kill_workers=True) which + # sends SIGKILL to the worker, preventing it from running the finalizers + # supposed to signal the resource_tracker when the worker is done using + # objects relying on a shared resource (e.g np.memmaps). Because this + # behavior is prone to : + # - cause a resource leak + # - make the resource tracker emit noisy resource warnings + # we explicitly test that, when the said situation occurs: + # - no resources are actually leaked + # - the temporary resources are deleted as soon as possible (typically, at + # the end of the failing Parallel call) + # - the resource_tracker does not emit any warnings. + cmd = """if 1: + import os + from pathlib import Path + from time import sleep + + import numpy as np + from joblib import Parallel, delayed + from testutils import print_filename_and_raise + + data = np.random.rand(1000) + + def get_temp_folder(parallel_obj, backend): + if "{b}" == "loky": + return Path(parallel_obj._backend._workers._temp_folder) + else: + return Path(parallel_obj._backend._pool._temp_folder) + + + if __name__ == "__main__": + try: + with Parallel(n_jobs=2, backend="{b}", max_nbytes=100) as p: + temp_folder = get_temp_folder(p, "{b}") + p(delayed(print_filename_and_raise)(data) + for i in range(1)) + except ValueError as e: + # the temporary folder should be deleted by the end of this + # call but apparently on some file systems, this takes + # some time to be visible. + # + # We attempt to write into the temporary folder to test for + # its existence and we wait for a maximum of 10 seconds. + for i in range(100): + try: + with open(temp_folder / "some_file.txt", "w") as f: + f.write("some content") + except FileNotFoundError: + # temp_folder has been deleted, all is fine + break + + # ... else, wait a bit and try again + sleep(.1) + else: + raise AssertionError( + str(temp_folder) + " was not deleted" + ) from e + """.format(b=backend) + env = os.environ.copy() + env['PYTHONPATH'] = os.path.dirname(__file__) + p = subprocess.Popen([sys.executable, '-c', cmd], stderr=subprocess.PIPE, + stdout=subprocess.PIPE, env=env) + p.wait() + out, err = p.communicate() + out, err = out.decode(), err.decode() + filename = out.split('\n')[0] + assert p.returncode == 0, err or out + assert err == '' # no resource_tracker warnings. 
+ assert not os.path.exists(filename) + + +@with_numpy +@with_multiprocessing +@parametrize("factory", [MemmappingPool, TestExecutor.get_memmapping_executor], + ids=["multiprocessing", "loky"]) +def test_memmapping_pool_for_large_arrays_disabled(factory, tmpdir): + """Check that large arrays memmapping can be disabled""" + # Set max_nbytes to None to disable the auto memmapping feature + p = factory(3, max_nbytes=None, temp_folder=tmpdir.strpath) + try: + + # Check that the tempfolder is empty + assert os.listdir(tmpdir.strpath) == [] + + # Try with a file largish than the memmap threshold of 40 bytes + large = np.ones(100, dtype=np.float64) + assert large.nbytes == 800 + p.map(check_array, [(large, i, 1.0) for i in range(large.shape[0])]) + + # Check that the tempfolder is still empty + assert os.listdir(tmpdir.strpath) == [] + + finally: + # Cleanup open file descriptors + p.terminate() + del p + + +@with_numpy +@with_multiprocessing +@with_dev_shm +@parametrize("factory", [MemmappingPool, TestExecutor.get_memmapping_executor], + ids=["multiprocessing", "loky"]) +def test_memmapping_on_large_enough_dev_shm(factory): + """Check that memmapping uses /dev/shm when possible""" + orig_size = jmr.SYSTEM_SHARED_MEM_FS_MIN_SIZE + try: + # Make joblib believe that it can use /dev/shm even when running on a + # CI container where the size of the /dev/shm is not very large (that + # is at least 32 MB instead of 2 GB by default). + jmr.SYSTEM_SHARED_MEM_FS_MIN_SIZE = int(32e6) + p = factory(3, max_nbytes=10) + try: + # Check that the pool has correctly detected the presence of the + # shared memory filesystem. + pool_temp_folder = p._temp_folder + folder_prefix = '/dev/shm/joblib_memmapping_folder_' + assert pool_temp_folder.startswith(folder_prefix) + assert os.path.exists(pool_temp_folder) + + # Try with a file larger than the memmap threshold of 10 bytes + a = np.ones(100, dtype=np.float64) + assert a.nbytes == 800 + p.map(id, [a] * 10) + # a should have been memmapped to the pool temp folder: the joblib + # pickling procedure generate one .pkl file: + assert len(os.listdir(pool_temp_folder)) == 1 + + # create a new array with content that is different from 'a' so + # that it is mapped to a different file in the temporary folder of + # the pool. + b = np.ones(100, dtype=np.float64) * 2 + assert b.nbytes == 800 + p.map(id, [b] * 10) + # A copy of both a and b are now stored in the shared memory folder + assert len(os.listdir(pool_temp_folder)) == 2 + finally: + # Cleanup open file descriptors + p.terminate() + del p + + for i in range(100): + # The temp folder is cleaned up upon pool termination + if not os.path.exists(pool_temp_folder): + break + sleep(.1) + else: # pragma: no cover + raise AssertionError('temporary folder of pool was not deleted') + finally: + jmr.SYSTEM_SHARED_MEM_FS_MIN_SIZE = orig_size + + +@with_numpy +@with_multiprocessing +@with_dev_shm +@parametrize("factory", [MemmappingPool, TestExecutor.get_memmapping_executor], + ids=["multiprocessing", "loky"]) +def test_memmapping_on_too_small_dev_shm(factory): + orig_size = jmr.SYSTEM_SHARED_MEM_FS_MIN_SIZE + try: + # Make joblib believe that it cannot use /dev/shm unless there is + # 42 exabytes of available shared memory in /dev/shm + jmr.SYSTEM_SHARED_MEM_FS_MIN_SIZE = int(42e18) + + p = factory(3, max_nbytes=10) + try: + # Check that the pool has correctly detected the presence of the + # shared memory filesystem. 
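These tests drive the memmapping thresholds through the low-level pool constructors; the same knobs are exposed on Parallel itself. A short sketch, with an illustrative temp_folder path:

import numpy as np
from joblib import Parallel, delayed

big = np.ones(int(1e6))

# Arguments larger than max_nbytes are dumped to disk-backed memmaps for the
# workers; temp_folder overrides where those files go (otherwise joblib
# prefers /dev/shm when it is large enough, then the system temp directory).
Parallel(n_jobs=2, max_nbytes='1M', temp_folder='/tmp')(
    delayed(np.sum)(big) for _ in range(4))

# max_nbytes=None disables auto-memmapping and pickles arguments by value.
Parallel(n_jobs=2, max_nbytes=None)(delayed(np.sum)(big) for _ in range(4))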
+ pool_temp_folder = p._temp_folder + assert not pool_temp_folder.startswith('/dev/shm') + finally: + # Cleanup open file descriptors + p.terminate() + del p + + # The temp folder is cleaned up upon pool termination + assert not os.path.exists(pool_temp_folder) + finally: + jmr.SYSTEM_SHARED_MEM_FS_MIN_SIZE = orig_size + + +@with_numpy +@with_multiprocessing +@parametrize("factory", [MemmappingPool, TestExecutor.get_memmapping_executor], + ids=["multiprocessing", "loky"]) +def test_memmapping_pool_for_large_arrays_in_return(factory, tmpdir): + """Check that large arrays are not copied in memory in return""" + assert_array_equal = np.testing.assert_array_equal + + # Build an array reducers that automatically dump large array content + # but check that the returned datastructure are regular arrays to avoid + # passing a memmap array pointing to a pool controlled temp folder that + # might be confusing to the user + + # The MemmappingPool user can always return numpy.memmap object explicitly + # to avoid memory copy + p = factory(3, max_nbytes=10, temp_folder=tmpdir.strpath) + try: + res = p.apply_async(np.ones, args=(1000,)) + large = res.get() + assert not has_shareable_memory(large) + assert_array_equal(large, np.ones(1000)) + finally: + p.terminate() + del p + + +def _worker_multiply(a, n_times): + """Multiplication function to be executed by subprocess""" + assert has_shareable_memory(a) + return a * n_times + + +@with_numpy +@with_multiprocessing +@parametrize("factory", [MemmappingPool, TestExecutor.get_memmapping_executor], + ids=["multiprocessing", "loky"]) +def test_workaround_against_bad_memmap_with_copied_buffers(factory, tmpdir): + """Check that memmaps with a bad buffer are returned as regular arrays + + Unary operations and ufuncs on memmap instances return a new memmap + instance with an in-memory buffer (probably a numpy bug). + """ + assert_array_equal = np.testing.assert_array_equal + + p = factory(3, max_nbytes=10, temp_folder=tmpdir.strpath) + try: + # Send a complex, large-ish view on a array that will be converted to + # a memmap in the worker process + a = np.asarray(np.arange(6000).reshape((1000, 2, 3)), + order='F')[:, :1, :] + + # Call a non-inplace multiply operation on the worker and memmap and + # send it back to the parent. + b = p.apply_async(_worker_multiply, args=(a, 3)).get() + assert not has_shareable_memory(b) + assert_array_equal(b, 3 * a) + finally: + p.terminate() + del p + + +def identity(arg): + return arg + + +@with_numpy +@with_multiprocessing +@parametrize( + "factory,retry_no", + list(itertools.product( + [MemmappingPool, TestExecutor.get_memmapping_executor], range(3))), + ids=['{}, {}'.format(x, y) for x, y in itertools.product( + ["multiprocessing", "loky"], map(str, range(3)))]) +def test_pool_memmap_with_big_offset(factory, retry_no, tmpdir): + # Test that numpy memmap offset is set correctly if greater than + # mmap.ALLOCATIONGRANULARITY, see + # https://github.com/joblib/joblib/issues/451 and + # https://github.com/numpy/numpy/pull/8443 for more details. 
+ fname = tmpdir.join('test.mmap').strpath + size = 5 * mmap.ALLOCATIONGRANULARITY + offset = mmap.ALLOCATIONGRANULARITY + 1 + obj = make_memmap(fname, mode='w+', shape=size, dtype='uint8', + offset=offset) + + p = factory(2, temp_folder=tmpdir.strpath) + result = p.apply_async(identity, args=(obj,)).get() + assert isinstance(result, np.memmap) + assert result.offset == offset + np.testing.assert_array_equal(obj, result) + p.terminate() + + +def test_pool_get_temp_dir(tmpdir): + pool_folder_name = 'test.tmpdir' + pool_folder, shared_mem = _get_temp_dir(pool_folder_name, tmpdir.strpath) + assert shared_mem is False + assert pool_folder == tmpdir.join('test.tmpdir').strpath + + pool_folder, shared_mem = _get_temp_dir(pool_folder_name, temp_folder=None) + if sys.platform.startswith('win'): + assert shared_mem is False + assert pool_folder.endswith(pool_folder_name) + + +def test_pool_get_temp_dir_no_statvfs(tmpdir, monkeypatch): + """Check that _get_temp_dir works when os.statvfs is not defined + + Regression test for #902 + """ + pool_folder_name = 'test.tmpdir' + import joblib._memmapping_reducer + if hasattr(joblib._memmapping_reducer.os, 'statvfs'): + # We are on Unix, since Windows doesn't have this function + monkeypatch.delattr(joblib._memmapping_reducer.os, 'statvfs') + + pool_folder, shared_mem = _get_temp_dir(pool_folder_name, temp_folder=None) + if sys.platform.startswith('win'): + assert shared_mem is False + assert pool_folder.endswith(pool_folder_name) + + +@with_numpy +@skipif(sys.platform == 'win32', reason='This test fails with a ' + 'PermissionError on Windows') +@parametrize("mmap_mode", ["r+", "w+"]) +def test_numpy_arrays_use_different_memory(mmap_mode): + def func(arr, value): + arr[:] = value + return arr + + arrays = [np.zeros((10, 10), dtype='float64') for i in range(10)] + + results = Parallel(mmap_mode=mmap_mode, max_nbytes=0, n_jobs=2)( + delayed(func)(arr, i) for i, arr in enumerate(arrays)) + + for i, arr in enumerate(results): + np.testing.assert_array_equal(arr, i) + + +@with_numpy +def test_weak_array_key_map(): + + def assert_empty_after_gc_collect(container, retries=100): + for i in range(retries): + if len(container) == 0: + return + gc.collect() + sleep(.1) + assert len(container) == 0 + + a = np.ones(42) + m = _WeakArrayKeyMap() + m.set(a, 'a') + assert m.get(a) == 'a' + + b = a + assert m.get(b) == 'a' + m.set(b, 'b') + assert m.get(a) == 'b' + + del a + gc.collect() + assert len(m._data) == 1 + assert m.get(b) == 'b' + + del b + assert_empty_after_gc_collect(m._data) + + c = np.ones(42) + m.set(c, 'c') + assert len(m._data) == 1 + assert m.get(c) == 'c' + + with raises(KeyError): + m.get(np.ones(42)) + + del c + assert_empty_after_gc_collect(m._data) + + # Check that creating and dropping numpy arrays with potentially the same + # object id will not cause the map to get confused. + def get_set_get_collect(m, i): + a = np.ones(42) + with raises(KeyError): + m.get(a) + m.set(a, i) + assert m.get(a) == i + return id(a) + + unique_ids = set([get_set_get_collect(m, i) for i in range(1000)]) + if platform.python_implementation() == 'CPython': + # On CPython (at least) the same id is often reused many times for the + # temporary arrays created under the local scope of the + # get_set_get_collect function without causing any spurious lookups / + # insertions in the map. Apparently on Python nogil, the id is not + # reused as often. 
+ max_len_unique_ids = 400 if getattr(sys.flags, 'nogil', False) else 100 + assert len(unique_ids) < max_len_unique_ids + + +def test_weak_array_key_map_no_pickling(): + m = _WeakArrayKeyMap() + with raises(pickle.PicklingError): + pickle.dumps(m) + + +@with_numpy +@with_multiprocessing +def test_direct_mmap(tmpdir): + testfile = str(tmpdir.join('arr.dat')) + a = np.arange(10, dtype='uint8') + a.tofile(testfile) + + def _read_array(): + with open(testfile) as fd: + mm = mmap.mmap(fd.fileno(), 0, access=mmap.ACCESS_READ, offset=0) + return np.ndarray((10,), dtype=np.uint8, buffer=mm, offset=0) + + def func(x): + return x**2 + + arr = _read_array() + + # this is expected to work and gives the reference + ref = Parallel(n_jobs=2)(delayed(func)(x) for x in [a]) + + # now test that it work with the mmap array + results = Parallel(n_jobs=2)(delayed(func)(x) for x in [arr]) + np.testing.assert_array_equal(results, ref) + + # also test with a mmap array read in the subprocess + def worker(): + return _read_array() + + results = Parallel(n_jobs=2)(delayed(worker)() for _ in range(1)) + np.testing.assert_array_equal(results[0], arr) diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/test/test_memory.py b/env-llmeval/lib/python3.10/site-packages/joblib/test/test_memory.py new file mode 100644 index 0000000000000000000000000000000000000000..120987b666bb84268457bdd4b843a7bae070832f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/joblib/test/test_memory.py @@ -0,0 +1,1493 @@ +""" +Test the memory module. +""" + +# Author: Gael Varoquaux +# Copyright (c) 2009 Gael Varoquaux +# License: BSD Style, 3 clauses. + +import functools +import gc +import logging +import shutil +import os +import os.path +import pathlib +import pickle +import sys +import time +import datetime +import textwrap + +import pytest + +from joblib.memory import Memory +from joblib.memory import expires_after +from joblib.memory import MemorizedFunc, NotMemorizedFunc +from joblib.memory import MemorizedResult, NotMemorizedResult +from joblib.memory import _FUNCTION_HASHES +from joblib.memory import register_store_backend, _STORE_BACKENDS +from joblib.memory import _build_func_identifier, _store_backend_factory +from joblib.memory import JobLibCollisionWarning +from joblib.parallel import Parallel, delayed +from joblib._store_backends import StoreBackendBase, FileSystemStoreBackend +from joblib.test.common import with_numpy, np +from joblib.test.common import with_multiprocessing +from joblib.testing import parametrize, raises, warns +from joblib.hashing import hash + + +############################################################################### +# Module-level variables for the tests +def f(x, y=1): + """ A module-level function for testing purposes. + """ + return x ** 2 + y + + +############################################################################### +# Helper function for the tests +def check_identity_lazy(func, accumulator, location): + """ Given a function and an accumulator (a list that grows every + time the function is called), check that the function can be + decorated by memory to be a lazy identity. + """ + # Call each function with several arguments, and check that it is + # evaluated only once per argument. 
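check_identity_lazy, whose definition continues below, encodes the basic contract of Memory.cache: one evaluation per distinct argument, replay from disk afterwards. A self-contained sketch of that contract:

import tempfile
from joblib import Memory

memory = Memory(location=tempfile.mkdtemp(), verbose=0)
calls = []

@memory.cache
def square(x):
    calls.append(x)
    return x ** 2

assert square(3) == 9
assert square(3) == 9          # second call is served from the on-disk cache
assert calls == [3]            # the function body ran only once

square.clear(warn=False)       # drop the cached results for this function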
+ memory = Memory(location=location, verbose=0) + func = memory.cache(func) + for i in range(3): + for _ in range(2): + assert func(i) == i + assert len(accumulator) == i + 1 + + +def corrupt_single_cache_item(memory): + single_cache_item, = memory.store_backend.get_items() + output_filename = os.path.join(single_cache_item.path, 'output.pkl') + with open(output_filename, 'w') as f: + f.write('garbage') + + +def monkeypatch_cached_func_warn(func, monkeypatch_fixture): + # Need monkeypatch because pytest does not + # capture stdlib logging output (see + # https://github.com/pytest-dev/pytest/issues/2079) + + recorded = [] + + def append_to_record(item): + recorded.append(item) + monkeypatch_fixture.setattr(func, 'warn', append_to_record) + return recorded + + +############################################################################### +# Tests +def test_memory_integration(tmpdir): + """ Simple test of memory lazy evaluation. + """ + accumulator = list() + + # Rmk: this function has the same name than a module-level function, + # thus it serves as a test to see that both are identified + # as different. + def f(arg): + accumulator.append(1) + return arg + + check_identity_lazy(f, accumulator, tmpdir.strpath) + + # Now test clearing + for compress in (False, True): + for mmap_mode in ('r', None): + memory = Memory(location=tmpdir.strpath, verbose=10, + mmap_mode=mmap_mode, compress=compress) + # First clear the cache directory, to check that our code can + # handle that + # NOTE: this line would raise an exception, as the database file is + # still open; we ignore the error since we want to test what + # happens if the directory disappears + shutil.rmtree(tmpdir.strpath, ignore_errors=True) + g = memory.cache(f) + g(1) + g.clear(warn=False) + current_accumulator = len(accumulator) + out = g(1) + + assert len(accumulator) == current_accumulator + 1 + # Also, check that Memory.eval works similarly + assert memory.eval(f, 1) == out + assert len(accumulator) == current_accumulator + 1 + + # Now do a smoke test with a function defined in __main__, as the name + # mangling rules are more complex + f.__module__ = '__main__' + memory = Memory(location=tmpdir.strpath, verbose=0) + memory.cache(f)(1) + + +@parametrize("call_before_reducing", [True, False]) +def test_parallel_call_cached_function_defined_in_jupyter( + tmpdir, call_before_reducing +): + # Calling an interactively defined memory.cache()'d function inside a + # Parallel call used to clear the existing cache related to the said + # function (https://github.com/joblib/joblib/issues/1035) + + # This tests checks that this is no longer the case. + + # TODO: test that the cache related to the function cache persists across + # ipython sessions (provided that no code change were made to the + # function's source)? + + # The first part of the test makes the necessary low-level calls to emulate + # the definition of a function in an jupyter notebook cell. Joblib has + # some custom code to treat functions defined specifically in jupyter + # notebooks/ipython session -- we want to test this code, which requires + # the emulation to be rigorous. 
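test_memory_integration above also checks Memory.eval, which behaves like memory.cache(func)(*args) and shares the same cache entries. A small sketch:

import tempfile
from joblib import Memory

memory = Memory(location=tempfile.mkdtemp(), verbose=0)

def expensive(x):
    return x ** 2

assert memory.eval(expensive, 4) == 16        # computes and stores the result
assert memory.cache(expensive)(4) == 16       # replayed from the same cache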
+ for session_no in [0, 1]: + ipython_cell_source = ''' + def f(x): + return x + ''' + + ipython_cell_id = ''.format(session_no) + + exec( + compile( + textwrap.dedent(ipython_cell_source), + filename=ipython_cell_id, + mode='exec' + ) + ) + # f is now accessible in the locals mapping - but for some unknown + # reason, f = locals()['f'] throws a KeyError at runtime, we need to + # bind locals()['f'] to a different name in the local namespace + aliased_f = locals()['f'] + aliased_f.__module__ = "__main__" + + # Preliminary sanity checks, and tests checking that joblib properly + # identified f as an interactive function defined in a jupyter notebook + assert aliased_f(1) == 1 + assert aliased_f.__code__.co_filename == ipython_cell_id + + memory = Memory(location=tmpdir.strpath, verbose=0) + cached_f = memory.cache(aliased_f) + + assert len(os.listdir(tmpdir / 'joblib')) == 1 + f_cache_relative_directory = os.listdir(tmpdir / 'joblib')[0] + assert 'ipython-input' in f_cache_relative_directory + + f_cache_directory = tmpdir / 'joblib' / f_cache_relative_directory + + if session_no == 0: + # The cache should be empty as cached_f has not been called yet. + assert os.listdir(f_cache_directory) == ['f'] + assert os.listdir(f_cache_directory / 'f') == [] + + if call_before_reducing: + cached_f(3) + # Two files were just created, func_code.py, and a folder + # containing the information (inputs hash/ouptput) of + # cached_f(3) + assert len(os.listdir(f_cache_directory / 'f')) == 2 + + # Now, testing #1035: when calling a cached function, joblib + # used to dynamically inspect the underlying function to + # extract its source code (to verify it matches the source code + # of the function as last inspected by joblib) -- however, + # source code introspection fails for dynamic functions sent to + # child processes - which would eventually make joblib clear + # the cache associated to f + res = Parallel(n_jobs=2)(delayed(cached_f)(i) for i in [1, 2]) + else: + # Submit the function to the joblib child processes, although + # the function has never been called in the parent yet. This + # triggers a specific code branch inside + # MemorizedFunc.__reduce__. + res = Parallel(n_jobs=2)(delayed(cached_f)(i) for i in [1, 2]) + assert len(os.listdir(f_cache_directory / 'f')) == 3 + + cached_f(3) + + # Making sure f's cache does not get cleared after the parallel + # calls, and contains ALL cached functions calls (f(1), f(2), f(3)) + # and 'func_code.py' + assert len(os.listdir(f_cache_directory / 'f')) == 4 + else: + # For the second session, there should be an already existing cache + assert len(os.listdir(f_cache_directory / 'f')) == 4 + + cached_f(3) + + # The previous cache should not be invalidated after calling the + # function in a new session + assert len(os.listdir(f_cache_directory / 'f')) == 4 + + +def test_no_memory(): + """ Test memory with location=None: no memoize """ + accumulator = list() + + def ff(arg): + accumulator.append(1) + return arg + + memory = Memory(location=None, verbose=0) + gg = memory.cache(ff) + for _ in range(4): + current_accumulator = len(accumulator) + gg(1) + assert len(accumulator) == current_accumulator + 1 + + +def test_memory_kwarg(tmpdir): + " Test memory with a function with keyword arguments." 
+ accumulator = list() + + def g(arg1=None, arg2=1): + accumulator.append(1) + return arg1 + + check_identity_lazy(g, accumulator, tmpdir.strpath) + + memory = Memory(location=tmpdir.strpath, verbose=0) + g = memory.cache(g) + # Smoke test with an explicit keyword argument: + assert g(arg1=30, arg2=2) == 30 + + +def test_memory_lambda(tmpdir): + " Test memory with a function with a lambda." + accumulator = list() + + def helper(x): + """ A helper function to define l as a lambda. + """ + accumulator.append(1) + return x + + check_identity_lazy(lambda x: helper(x), accumulator, tmpdir.strpath) + + +def test_memory_name_collision(tmpdir): + " Check that name collisions with functions will raise warnings" + memory = Memory(location=tmpdir.strpath, verbose=0) + + @memory.cache + def name_collision(x): + """ A first function called name_collision + """ + return x + + a = name_collision + + @memory.cache + def name_collision(x): + """ A second function called name_collision + """ + return x + + b = name_collision + + with warns(JobLibCollisionWarning) as warninfo: + a(1) + b(1) + + assert len(warninfo) == 1 + assert "collision" in str(warninfo[0].message) + + +def test_memory_warning_lambda_collisions(tmpdir): + # Check that multiple use of lambda will raise collisions + memory = Memory(location=tmpdir.strpath, verbose=0) + a = memory.cache(lambda x: x) + b = memory.cache(lambda x: x + 1) + + with warns(JobLibCollisionWarning) as warninfo: + assert a(0) == 0 + assert b(1) == 2 + assert a(1) == 1 + + # In recent Python versions, we can retrieve the code of lambdas, + # thus nothing is raised + assert len(warninfo) == 4 + + +def test_memory_warning_collision_detection(tmpdir): + # Check that collisions impossible to detect will raise appropriate + # warnings. + memory = Memory(location=tmpdir.strpath, verbose=0) + a1 = eval('lambda x: x') + a1 = memory.cache(a1) + b1 = eval('lambda x: x+1') + b1 = memory.cache(b1) + + with warns(JobLibCollisionWarning) as warninfo: + a1(1) + b1(1) + a1(0) + + assert len(warninfo) == 2 + assert "cannot detect" in str(warninfo[0].message).lower() + + +def test_memory_partial(tmpdir): + " Test memory with functools.partial." + accumulator = list() + + def func(x, y): + """ A helper function to define l as a lambda. + """ + accumulator.append(1) + return y + + import functools + function = functools.partial(func, 1) + + check_identity_lazy(function, accumulator, tmpdir.strpath) + + +def test_memory_eval(tmpdir): + " Smoke test memory with a function with a function defined in an eval." + memory = Memory(location=tmpdir.strpath, verbose=0) + + m = eval('lambda x: x') + mm = memory.cache(m) + + assert mm(1) == 1 + + +def count_and_append(x=[]): + """ A function with a side effect in its arguments. + + Return the length of its argument and append one element. + """ + len_x = len(x) + x.append(None) + return len_x + + +def test_argument_change(tmpdir): + """ Check that if a function has a side effect in its arguments, it + should use the hash of changing arguments. + """ + memory = Memory(location=tmpdir.strpath, verbose=0) + func = memory.cache(count_and_append) + # call the function for the first time, is should cache it with + # argument x=[] + assert func() == 0 + # the second time the argument is x=[None], which is not cached + # yet, so the functions should be called a second time + assert func() == 1 + + +@with_numpy +@parametrize('mmap_mode', [None, 'r']) +def test_memory_numpy(tmpdir, mmap_mode): + " Test memory with a function with numpy arrays." 
+ accumulator = list() + + def n(arg=None): + accumulator.append(1) + return arg + + memory = Memory(location=tmpdir.strpath, mmap_mode=mmap_mode, + verbose=0) + cached_n = memory.cache(n) + + rnd = np.random.RandomState(0) + for i in range(3): + a = rnd.random_sample((10, 10)) + for _ in range(3): + assert np.all(cached_n(a) == a) + assert len(accumulator) == i + 1 + + +@with_numpy +def test_memory_numpy_check_mmap_mode(tmpdir, monkeypatch): + """Check that mmap_mode is respected even at the first call""" + + memory = Memory(location=tmpdir.strpath, mmap_mode='r', verbose=0) + + @memory.cache() + def twice(a): + return a * 2 + + a = np.ones(3) + + b = twice(a) + c = twice(a) + + assert isinstance(c, np.memmap) + assert c.mode == 'r' + + assert isinstance(b, np.memmap) + assert b.mode == 'r' + + # Corrupts the file, Deleting b and c mmaps + # is necessary to be able edit the file + del b + del c + gc.collect() + corrupt_single_cache_item(memory) + + # Make sure that corrupting the file causes recomputation and that + # a warning is issued. + recorded_warnings = monkeypatch_cached_func_warn(twice, monkeypatch) + d = twice(a) + assert len(recorded_warnings) == 1 + exception_msg = 'Exception while loading results' + assert exception_msg in recorded_warnings[0] + # Asserts that the recomputation returns a mmap + assert isinstance(d, np.memmap) + assert d.mode == 'r' + + +def test_memory_exception(tmpdir): + """ Smoketest the exception handling of Memory. + """ + memory = Memory(location=tmpdir.strpath, verbose=0) + + class MyException(Exception): + pass + + @memory.cache + def h(exc=0): + if exc: + raise MyException + + # Call once, to initialise the cache + h() + + for _ in range(3): + # Call 3 times, to be sure that the Exception is always raised + with raises(MyException): + h(1) + + +def test_memory_ignore(tmpdir): + " Test the ignore feature of memory " + memory = Memory(location=tmpdir.strpath, verbose=0) + accumulator = list() + + @memory.cache(ignore=['y']) + def z(x, y=1): + accumulator.append(1) + + assert z.ignore == ['y'] + + z(0, y=1) + assert len(accumulator) == 1 + z(0, y=1) + assert len(accumulator) == 1 + z(0, y=2) + assert len(accumulator) == 1 + + +def test_memory_ignore_decorated(tmpdir): + " Test the ignore feature of memory on a decorated function " + memory = Memory(location=tmpdir.strpath, verbose=0) + accumulator = list() + + def decorate(f): + @functools.wraps(f) + def wrapped(*args, **kwargs): + return f(*args, **kwargs) + return wrapped + + @memory.cache(ignore=['y']) + @decorate + def z(x, y=1): + accumulator.append(1) + + assert z.ignore == ['y'] + + z(0, y=1) + assert len(accumulator) == 1 + z(0, y=1) + assert len(accumulator) == 1 + z(0, y=2) + assert len(accumulator) == 1 + + +def test_memory_args_as_kwargs(tmpdir): + """Non-regression test against 0.12.0 changes. + + https://github.com/joblib/joblib/pull/751 + """ + memory = Memory(location=tmpdir.strpath, verbose=0) + + @memory.cache + def plus_one(a): + return a + 1 + + # It's possible to call a positional arg as a kwarg. + assert plus_one(1) == 2 + assert plus_one(a=1) == 2 + + # However, a positional argument that joblib hadn't seen + # before would cause a failure if it was passed as a kwarg. 
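+    # The assertion below checks that this is no longer the case: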
+ assert plus_one(a=2) == 3 + + +@parametrize('ignore, verbose, mmap_mode', [(['x'], 100, 'r'), + ([], 10, None)]) +def test_partial_decoration(tmpdir, ignore, verbose, mmap_mode): + "Check cache may be called with kwargs before decorating" + memory = Memory(location=tmpdir.strpath, verbose=0) + + @memory.cache(ignore=ignore, verbose=verbose, mmap_mode=mmap_mode) + def z(x): + pass + + assert z.ignore == ignore + assert z._verbose == verbose + assert z.mmap_mode == mmap_mode + + +def test_func_dir(tmpdir): + # Test the creation of the memory cache directory for the function. + memory = Memory(location=tmpdir.strpath, verbose=0) + path = __name__.split('.') + path.append('f') + path = tmpdir.join('joblib', *path).strpath + + g = memory.cache(f) + # Test that the function directory is created on demand + func_id = _build_func_identifier(f) + location = os.path.join(g.store_backend.location, func_id) + assert location == path + assert os.path.exists(path) + assert memory.location == os.path.dirname(g.store_backend.location) + + # Test that the code is stored. + # For the following test to be robust to previous execution, we clear + # the in-memory store + _FUNCTION_HASHES.clear() + assert not g._check_previous_func_code() + assert os.path.exists(os.path.join(path, 'func_code.py')) + assert g._check_previous_func_code() + + # Test the robustness to failure of loading previous results. + args_id = g._get_args_id(1) + output_dir = os.path.join(g.store_backend.location, g.func_id, args_id) + a = g(1) + assert os.path.exists(output_dir) + os.remove(os.path.join(output_dir, 'output.pkl')) + assert a == g(1) + + +def test_persistence(tmpdir): + # Test the memorized functions can be pickled and restored. + memory = Memory(location=tmpdir.strpath, verbose=0) + g = memory.cache(f) + output = g(1) + + h = pickle.loads(pickle.dumps(g)) + + args_id = h._get_args_id(1) + output_dir = os.path.join(h.store_backend.location, h.func_id, args_id) + assert os.path.exists(output_dir) + assert output == h.store_backend.load_item([h.func_id, args_id]) + memory2 = pickle.loads(pickle.dumps(memory)) + assert memory.store_backend.location == memory2.store_backend.location + + # Smoke test that pickling a memory with location=None works + memory = Memory(location=None, verbose=0) + pickle.loads(pickle.dumps(memory)) + g = memory.cache(f) + gp = pickle.loads(pickle.dumps(g)) + gp(1) + + +def test_check_call_in_cache(tmpdir): + for func in (MemorizedFunc(f, tmpdir.strpath), + Memory(location=tmpdir.strpath, verbose=0).cache(f)): + result = func.check_call_in_cache(2) + assert not result + assert isinstance(result, bool) + assert func(2) == 5 + result = func.check_call_in_cache(2) + assert result + assert isinstance(result, bool) + func.clear() + + +def test_call_and_shelve(tmpdir): + # Test MemorizedFunc outputting a reference to cache. + + for func, Result in zip((MemorizedFunc(f, tmpdir.strpath), + NotMemorizedFunc(f), + Memory(location=tmpdir.strpath, + verbose=0).cache(f), + Memory(location=None).cache(f), + ), + (MemorizedResult, NotMemorizedResult, + MemorizedResult, NotMemorizedResult)): + assert func(2) == 5 + result = func.call_and_shelve(2) + assert isinstance(result, Result) + assert result.get() == 5 + + result.clear() + with raises(KeyError): + result.get() + result.clear() # Do nothing if there is no cache. 
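As an aside on the API exercised by test_call_and_shelve above, here is a minimal usage sketch of Memory.cache and call_and_shelve, assuming only that joblib is importable; the cache directory name is an arbitrary placeholder, not something prescribed by the test suite.

from joblib import Memory

memory = Memory(location="./joblib_cache", verbose=0)  # placeholder location

@memory.cache
def square(x):
    return x ** 2

# call_and_shelve computes (or reuses) the result and returns a
# MemorizedResult handle instead of the value itself.
shelved = square.call_and_shelve(3)
assert shelved.get() == 9  # loads the stored value on demand
shelved.clear()            # removes just this cached entry

The tests that follow rely on exactly this contract: the handle can be pickled, lazily read back, and cleared independently of the rest of the cache.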
+ + +def test_call_and_shelve_argument_hash(tmpdir): + # Verify that a warning is raised when accessing arguments_hash + # attribute from MemorizedResult + func = Memory(location=tmpdir.strpath, verbose=0).cache(f) + result = func.call_and_shelve(2) + assert isinstance(result, MemorizedResult) + with warns(DeprecationWarning) as w: + assert result.argument_hash == result.args_id + assert len(w) == 1 + assert "The 'argument_hash' attribute has been deprecated" \ + in str(w[-1].message) + + +def test_call_and_shelve_lazily_load_stored_result(tmpdir): + """Check call_and_shelve only load stored data if needed.""" + test_access_time_file = tmpdir.join('test_access') + test_access_time_file.write('test_access') + test_access_time = os.stat(test_access_time_file.strpath).st_atime + # check file system access time stats resolution is lower than test wait + # timings. + time.sleep(0.5) + assert test_access_time_file.read() == 'test_access' + + if test_access_time == os.stat(test_access_time_file.strpath).st_atime: + # Skip this test when access time cannot be retrieved with enough + # precision from the file system (e.g. NTFS on windows). + pytest.skip("filesystem does not support fine-grained access time " + "attribute") + + memory = Memory(location=tmpdir.strpath, verbose=0) + func = memory.cache(f) + args_id = func._get_args_id(2) + result_path = os.path.join(memory.store_backend.location, + func.func_id, args_id, 'output.pkl') + assert func(2) == 5 + first_access_time = os.stat(result_path).st_atime + time.sleep(1) + + # Should not access the stored data + result = func.call_and_shelve(2) + assert isinstance(result, MemorizedResult) + assert os.stat(result_path).st_atime == first_access_time + time.sleep(1) + + # Read the stored data => last access time is greater than first_access + assert result.get() == 5 + assert os.stat(result_path).st_atime > first_access_time + + +def test_memorized_pickling(tmpdir): + for func in (MemorizedFunc(f, tmpdir.strpath), NotMemorizedFunc(f)): + filename = tmpdir.join('pickling_test.dat').strpath + result = func.call_and_shelve(2) + with open(filename, 'wb') as fp: + pickle.dump(result, fp) + with open(filename, 'rb') as fp: + result2 = pickle.load(fp) + assert result2.get() == result.get() + os.remove(filename) + + +def test_memorized_repr(tmpdir): + func = MemorizedFunc(f, tmpdir.strpath) + result = func.call_and_shelve(2) + + func2 = MemorizedFunc(f, tmpdir.strpath) + result2 = func2.call_and_shelve(2) + assert result.get() == result2.get() + assert repr(func) == repr(func2) + + # Smoke test with NotMemorizedFunc + func = NotMemorizedFunc(f) + repr(func) + repr(func.call_and_shelve(2)) + + # Smoke test for message output (increase code coverage) + func = MemorizedFunc(f, tmpdir.strpath, verbose=11, timestamp=time.time()) + result = func.call_and_shelve(11) + result.get() + + func = MemorizedFunc(f, tmpdir.strpath, verbose=11) + result = func.call_and_shelve(11) + result.get() + + func = MemorizedFunc(f, tmpdir.strpath, verbose=5, timestamp=time.time()) + result = func.call_and_shelve(11) + result.get() + + func = MemorizedFunc(f, tmpdir.strpath, verbose=5) + result = func.call_and_shelve(11) + result.get() + + +def test_memory_file_modification(capsys, tmpdir, monkeypatch): + # Test that modifying a Python file after loading it does not lead to + # Recomputation + dir_name = tmpdir.mkdir('tmp_import').strpath + filename = os.path.join(dir_name, 'tmp_joblib_.py') + content = 'def f(x):\n print(x)\n return x\n' + with open(filename, 'w') as module_file: + 
module_file.write(content) + + # Load the module: + monkeypatch.syspath_prepend(dir_name) + import tmp_joblib_ as tmp + + memory = Memory(location=tmpdir.strpath, verbose=0) + f = memory.cache(tmp.f) + # First call f a few times + f(1) + f(2) + f(1) + + # Now modify the module where f is stored without modifying f + with open(filename, 'w') as module_file: + module_file.write('\n\n' + content) + + # And call f a couple more times + f(1) + f(1) + + # Flush the .pyc files + shutil.rmtree(dir_name) + os.mkdir(dir_name) + # Now modify the module where f is stored, modifying f + content = 'def f(x):\n print("x=%s" % x)\n return x\n' + with open(filename, 'w') as module_file: + module_file.write(content) + + # And call f more times prior to reloading: the cache should not be + # invalidated at this point as the active function definition has not + # changed in memory yet. + f(1) + f(1) + + # Now reload + sys.stdout.write('Reloading\n') + sys.modules.pop('tmp_joblib_') + import tmp_joblib_ as tmp + f = memory.cache(tmp.f) + + # And call f more times + f(1) + f(1) + + out, err = capsys.readouterr() + assert out == '1\n2\nReloading\nx=1\n' + + +def _function_to_cache(a, b): + # Just a place holder function to be mutated by tests + pass + + +def _sum(a, b): + return a + b + + +def _product(a, b): + return a * b + + +def test_memory_in_memory_function_code_change(tmpdir): + _function_to_cache.__code__ = _sum.__code__ + + memory = Memory(location=tmpdir.strpath, verbose=0) + f = memory.cache(_function_to_cache) + + assert f(1, 2) == 3 + assert f(1, 2) == 3 + + with warns(JobLibCollisionWarning): + # Check that inline function modification triggers a cache invalidation + _function_to_cache.__code__ = _product.__code__ + assert f(1, 2) == 2 + assert f(1, 2) == 2 + + +def test_clear_memory_with_none_location(): + memory = Memory(location=None) + memory.clear() + + +def func_with_kwonly_args(a, b, *, kw1='kw1', kw2='kw2'): + return a, b, kw1, kw2 + + +def func_with_signature(a: int, b: float) -> float: + return a + b + + +def test_memory_func_with_kwonly_args(tmpdir): + memory = Memory(location=tmpdir.strpath, verbose=0) + func_cached = memory.cache(func_with_kwonly_args) + + assert func_cached(1, 2, kw1=3) == (1, 2, 3, 'kw2') + + # Making sure that providing a keyword-only argument by + # position raises an exception + with raises(ValueError) as excinfo: + func_cached(1, 2, 3, kw2=4) + excinfo.match("Keyword-only parameter 'kw1' was passed as positional " + "parameter") + + # Keyword-only parameter passed by position with cached call + # should still raise ValueError + func_cached(1, 2, kw1=3, kw2=4) + + with raises(ValueError) as excinfo: + func_cached(1, 2, 3, kw2=4) + excinfo.match("Keyword-only parameter 'kw1' was passed as positional " + "parameter") + + # Test 'ignore' parameter + func_cached = memory.cache(func_with_kwonly_args, ignore=['kw2']) + assert func_cached(1, 2, kw1=3, kw2=4) == (1, 2, 3, 4) + assert func_cached(1, 2, kw1=3, kw2='ignored') == (1, 2, 3, 4) + + +def test_memory_func_with_signature(tmpdir): + memory = Memory(location=tmpdir.strpath, verbose=0) + func_cached = memory.cache(func_with_signature) + + assert func_cached(1, 2.) == 3. 
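Before the cache-size fixtures that follow, a minimal sketch of the `ignore` option demonstrated by test_memory_ignore and the keyword-only variant above; the function name and cache location are illustrative assumptions, not taken from the test suite.

from joblib import Memory

memory = Memory(location="./joblib_cache", verbose=0)  # placeholder location

# Arguments listed in `ignore` are left out of the cache key, so calls
# that differ only in those arguments map to the same cached entry.
@memory.cache(ignore=["verbose"])
def add_one(x, verbose=False):
    if verbose:
        print("computing", x)
    return x + 1

add_one(1, verbose=True)   # computed once and cached
add_one(1, verbose=False)  # cache hit: `verbose` does not affect the key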
+ + +def _setup_toy_cache(tmpdir, num_inputs=10): + memory = Memory(location=tmpdir.strpath, verbose=0) + + @memory.cache() + def get_1000_bytes(arg): + return 'a' * 1000 + + inputs = list(range(num_inputs)) + for arg in inputs: + get_1000_bytes(arg) + + func_id = _build_func_identifier(get_1000_bytes) + hash_dirnames = [get_1000_bytes._get_args_id(arg) + for arg in inputs] + + full_hashdirs = [os.path.join(get_1000_bytes.store_backend.location, + func_id, dirname) + for dirname in hash_dirnames] + return memory, full_hashdirs, get_1000_bytes + + +def test__get_items(tmpdir): + memory, expected_hash_dirs, _ = _setup_toy_cache(tmpdir) + items = memory.store_backend.get_items() + hash_dirs = [ci.path for ci in items] + assert set(hash_dirs) == set(expected_hash_dirs) + + def get_files_size(directory): + full_paths = [os.path.join(directory, fn) + for fn in os.listdir(directory)] + return sum(os.path.getsize(fp) for fp in full_paths) + + expected_hash_cache_sizes = [get_files_size(hash_dir) + for hash_dir in hash_dirs] + hash_cache_sizes = [ci.size for ci in items] + assert hash_cache_sizes == expected_hash_cache_sizes + + output_filenames = [os.path.join(hash_dir, 'output.pkl') + for hash_dir in hash_dirs] + + expected_last_accesses = [ + datetime.datetime.fromtimestamp(os.path.getatime(fn)) + for fn in output_filenames] + last_accesses = [ci.last_access for ci in items] + assert last_accesses == expected_last_accesses + + +def test__get_items_to_delete(tmpdir): + # test empty cache + memory, _, _ = _setup_toy_cache(tmpdir, num_inputs=0) + items_to_delete = memory.store_backend._get_items_to_delete('1K') + assert items_to_delete == [] + + memory, expected_hash_cachedirs, _ = _setup_toy_cache(tmpdir) + items = memory.store_backend.get_items() + # bytes_limit set to keep only one cache item (each hash cache + # folder is about 1000 bytes + metadata) + items_to_delete = memory.store_backend._get_items_to_delete('2K') + nb_hashes = len(expected_hash_cachedirs) + assert set.issubset(set(items_to_delete), set(items)) + assert len(items_to_delete) == nb_hashes - 1 + + # Sanity check bytes_limit=2048 is the same as bytes_limit='2K' + items_to_delete_2048b = memory.store_backend._get_items_to_delete(2048) + assert sorted(items_to_delete) == sorted(items_to_delete_2048b) + + # bytes_limit greater than the size of the cache + items_to_delete_empty = memory.store_backend._get_items_to_delete('1M') + assert items_to_delete_empty == [] + + # All the cache items need to be deleted + bytes_limit_too_small = 500 + items_to_delete_500b = memory.store_backend._get_items_to_delete( + bytes_limit_too_small + ) + assert set(items_to_delete_500b), set(items) + + # Test LRU property: surviving cache items should all have a more + # recent last_access that the ones that have been deleted + items_to_delete_6000b = memory.store_backend._get_items_to_delete(6000) + surviving_items = set(items).difference(items_to_delete_6000b) + + assert (max(ci.last_access for ci in items_to_delete_6000b) <= + min(ci.last_access for ci in surviving_items)) + + +def test_memory_reduce_size_bytes_limit(tmpdir): + memory, _, _ = _setup_toy_cache(tmpdir) + ref_cache_items = memory.store_backend.get_items() + + # By default memory.bytes_limit is None and reduce_size is a noop + memory.reduce_size() + cache_items = memory.store_backend.get_items() + assert sorted(ref_cache_items) == sorted(cache_items) + + # No cache items deleted if bytes_limit greater than the size of + # the cache + memory.reduce_size(bytes_limit='1M') + cache_items = 
memory.store_backend.get_items() + assert sorted(ref_cache_items) == sorted(cache_items) + + # bytes_limit is set so that only two cache items are kept + memory.reduce_size(bytes_limit='3K') + cache_items = memory.store_backend.get_items() + assert set.issubset(set(cache_items), set(ref_cache_items)) + assert len(cache_items) == 2 + + # bytes_limit set so that no cache item is kept + bytes_limit_too_small = 500 + memory.reduce_size(bytes_limit=bytes_limit_too_small) + cache_items = memory.store_backend.get_items() + assert cache_items == [] + + +def test_memory_reduce_size_items_limit(tmpdir): + memory, _, _ = _setup_toy_cache(tmpdir) + ref_cache_items = memory.store_backend.get_items() + + # By default reduce_size is a noop + memory.reduce_size() + cache_items = memory.store_backend.get_items() + assert sorted(ref_cache_items) == sorted(cache_items) + + # No cache items deleted if items_limit greater than the size of + # the cache + memory.reduce_size(items_limit=10) + cache_items = memory.store_backend.get_items() + assert sorted(ref_cache_items) == sorted(cache_items) + + # items_limit is set so that only two cache items are kept + memory.reduce_size(items_limit=2) + cache_items = memory.store_backend.get_items() + assert set.issubset(set(cache_items), set(ref_cache_items)) + assert len(cache_items) == 2 + + # item_limit set so that no cache item is kept + memory.reduce_size(items_limit=0) + cache_items = memory.store_backend.get_items() + assert cache_items == [] + + +def test_memory_reduce_size_age_limit(tmpdir): + import time + import datetime + memory, _, put_cache = _setup_toy_cache(tmpdir) + ref_cache_items = memory.store_backend.get_items() + + # By default reduce_size is a noop + memory.reduce_size() + cache_items = memory.store_backend.get_items() + assert sorted(ref_cache_items) == sorted(cache_items) + + # No cache items deleted if age_limit big. + memory.reduce_size(age_limit=datetime.timedelta(days=1)) + cache_items = memory.store_backend.get_items() + assert sorted(ref_cache_items) == sorted(cache_items) + + # age_limit is set so that only two cache items are kept + time.sleep(1) + put_cache(-1) + put_cache(-2) + memory.reduce_size(age_limit=datetime.timedelta(seconds=1)) + cache_items = memory.store_backend.get_items() + assert not set.issubset(set(cache_items), set(ref_cache_items)) + assert len(cache_items) == 2 + + # age_limit set so that no cache item is kept + memory.reduce_size(age_limit=datetime.timedelta(seconds=0)) + cache_items = memory.store_backend.get_items() + assert cache_items == [] + + +def test_memory_clear(tmpdir): + memory, _, g = _setup_toy_cache(tmpdir) + memory.clear() + + assert os.listdir(memory.store_backend.location) == [] + + # Check that the cache for functions hash is also reset. + assert not g._check_previous_func_code(stacklevel=4) + + +def fast_func_with_complex_output(): + complex_obj = ['a' * 1000] * 1000 + return complex_obj + + +def fast_func_with_conditional_complex_output(complex_output=True): + complex_obj = {str(i): i for i in range(int(1e5))} + return complex_obj if complex_output else 'simple output' + + +@with_multiprocessing +def test_cached_function_race_condition_when_persisting_output(tmpdir, capfd): + # Test race condition where multiple processes are writing into + # the same output.pkl. See + # https://github.com/joblib/joblib/issues/490 for more details. 
+ memory = Memory(location=tmpdir.strpath) + func_cached = memory.cache(fast_func_with_complex_output) + + Parallel(n_jobs=2)(delayed(func_cached)() for i in range(3)) + + stdout, stderr = capfd.readouterr() + + # Checking both stdout and stderr (ongoing PR #434 may change + # logging destination) to make sure there is no exception while + # loading the results + exception_msg = 'Exception while loading results' + assert exception_msg not in stdout + assert exception_msg not in stderr + + +@with_multiprocessing +def test_cached_function_race_condition_when_persisting_output_2(tmpdir, + capfd): + # Test race condition in first attempt at solving + # https://github.com/joblib/joblib/issues/490. The race condition + # was due to the delay between seeing the cache directory created + # (interpreted as the result being cached) and the output.pkl being + # pickled. + memory = Memory(location=tmpdir.strpath) + func_cached = memory.cache(fast_func_with_conditional_complex_output) + + Parallel(n_jobs=2)(delayed(func_cached)(True if i % 2 == 0 else False) + for i in range(3)) + + stdout, stderr = capfd.readouterr() + + # Checking both stdout and stderr (ongoing PR #434 may change + # logging destination) to make sure there is no exception while + # loading the results + exception_msg = 'Exception while loading results' + assert exception_msg not in stdout + assert exception_msg not in stderr + + +def test_memory_recomputes_after_an_error_while_loading_results( + tmpdir, monkeypatch): + memory = Memory(location=tmpdir.strpath) + + def func(arg): + # This makes sure that the timestamp returned by two calls of + # func are different. This is needed on Windows where + # time.time resolution may not be accurate enough + time.sleep(0.01) + return arg, time.time() + + cached_func = memory.cache(func) + input_arg = 'arg' + arg, timestamp = cached_func(input_arg) + + # Make sure the function is correctly cached + assert arg == input_arg + + # Corrupting output.pkl to make sure that an error happens when + # loading the cached result + corrupt_single_cache_item(memory) + + # Make sure that corrupting the file causes recomputation and that + # a warning is issued. 
+ recorded_warnings = monkeypatch_cached_func_warn(cached_func, monkeypatch) + recomputed_arg, recomputed_timestamp = cached_func(arg) + assert len(recorded_warnings) == 1 + exception_msg = 'Exception while loading results' + assert exception_msg in recorded_warnings[0] + assert recomputed_arg == arg + assert recomputed_timestamp > timestamp + + # Corrupting output.pkl to make sure that an error happens when + # loading the cached result + corrupt_single_cache_item(memory) + reference = cached_func.call_and_shelve(arg) + try: + reference.get() + raise AssertionError( + "It normally not possible to load a corrupted" + " MemorizedResult" + ) + except KeyError as e: + message = "is corrupted" + assert message in str(e.args) + + +class IncompleteStoreBackend(StoreBackendBase): + """This backend cannot be instantiated and should raise a TypeError.""" + pass + + +class DummyStoreBackend(StoreBackendBase): + """A dummy store backend that does nothing.""" + + def _open_item(self, *args, **kwargs): + """Open an item on store.""" + "Does nothing" + + def _item_exists(self, location): + """Check if an item location exists.""" + "Does nothing" + + def _move_item(self, src, dst): + """Move an item from src to dst in store.""" + "Does nothing" + + def create_location(self, location): + """Create location on store.""" + "Does nothing" + + def exists(self, obj): + """Check if an object exists in the store""" + return False + + def clear_location(self, obj): + """Clear object on store""" + "Does nothing" + + def get_items(self): + """Returns the whole list of items available in cache.""" + return [] + + def configure(self, location, *args, **kwargs): + """Configure the store""" + "Does nothing" + + +@parametrize("invalid_prefix", [None, dict(), list()]) +def test_register_invalid_store_backends_key(invalid_prefix): + # verify the right exceptions are raised when passing a wrong backend key. + with raises(ValueError) as excinfo: + register_store_backend(invalid_prefix, None) + excinfo.match(r'Store backend name should be a string*') + + +def test_register_invalid_store_backends_object(): + # verify the right exceptions are raised when passing a wrong backend + # object. + with raises(ValueError) as excinfo: + register_store_backend("fs", None) + excinfo.match(r'Store backend should inherit StoreBackendBase*') + + +def test_memory_default_store_backend(): + # test an unknown backend falls back into a FileSystemStoreBackend + with raises(TypeError) as excinfo: + Memory(location='/tmp/joblib', backend='unknown') + excinfo.match(r"Unknown location*") + + +def test_warning_on_unknown_location_type(): + class NonSupportedLocationClass: + pass + unsupported_location = NonSupportedLocationClass() + + with warns(UserWarning) as warninfo: + _store_backend_factory("local", location=unsupported_location) + + expected_mesage = ("Instantiating a backend using a " + "NonSupportedLocationClass as a location is not " + "supported by joblib") + assert expected_mesage in str(warninfo[0].message) + + +def test_instanciate_incomplete_store_backend(): + # Verify that registering an external incomplete store backend raises an + # exception when one tries to instantiate it. 
+ backend_name = "isb" + register_store_backend(backend_name, IncompleteStoreBackend) + assert (backend_name, IncompleteStoreBackend) in _STORE_BACKENDS.items() + with raises(TypeError) as excinfo: + _store_backend_factory(backend_name, "fake_location") + excinfo.match(r"Can't instantiate abstract class IncompleteStoreBackend " + "(without an implementation for|with) abstract methods*") + + +def test_dummy_store_backend(): + # Verify that registering an external store backend works. + + backend_name = "dsb" + register_store_backend(backend_name, DummyStoreBackend) + assert (backend_name, DummyStoreBackend) in _STORE_BACKENDS.items() + + backend_obj = _store_backend_factory(backend_name, "dummy_location") + assert isinstance(backend_obj, DummyStoreBackend) + + +def test_instanciate_store_backend_with_pathlib_path(): + # Instantiate a FileSystemStoreBackend using a pathlib.Path object + path = pathlib.Path("some_folder") + backend_obj = _store_backend_factory("local", path) + assert backend_obj.location == "some_folder" + + +def test_filesystem_store_backend_repr(tmpdir): + # Verify string representation of a filesystem store backend. + + repr_pattern = 'FileSystemStoreBackend(location="{location}")' + backend = FileSystemStoreBackend() + assert backend.location is None + + repr(backend) # Should not raise an exception + + assert str(backend) == repr_pattern.format(location=None) + + # backend location is passed explicitly via the configure method (called + # by the internal _store_backend_factory function) + backend.configure(tmpdir.strpath) + + assert str(backend) == repr_pattern.format(location=tmpdir.strpath) + + repr(backend) # Should not raise an exception + + +def test_memory_objects_repr(tmpdir): + # Verify printable reprs of MemorizedResult, MemorizedFunc and Memory. + + def my_func(a, b): + return a + b + + memory = Memory(location=tmpdir.strpath, verbose=0) + memorized_func = memory.cache(my_func) + + memorized_func_repr = 'MemorizedFunc(func={func}, location={location})' + + assert str(memorized_func) == memorized_func_repr.format( + func=my_func, + location=memory.store_backend.location) + + memorized_result = memorized_func.call_and_shelve(42, 42) + + memorized_result_repr = ('MemorizedResult(location="{location}", ' + 'func="{func}", args_id="{args_id}")') + + assert str(memorized_result) == memorized_result_repr.format( + location=memory.store_backend.location, + func=memorized_result.func_id, + args_id=memorized_result.args_id) + + assert str(memory) == 'Memory(location={location})'.format( + location=memory.store_backend.location) + + +def test_memorized_result_pickle(tmpdir): + # Verify a MemoryResult object can be pickled/depickled. 
Non regression + # test introduced following issue + # https://github.com/joblib/joblib/issues/747 + + memory = Memory(location=tmpdir.strpath) + + @memory.cache + def g(x): + return x**2 + + memorized_result = g.call_and_shelve(4) + memorized_result_pickle = pickle.dumps(memorized_result) + memorized_result_loads = pickle.loads(memorized_result_pickle) + + assert memorized_result.store_backend.location == \ + memorized_result_loads.store_backend.location + assert memorized_result.func == memorized_result_loads.func + assert memorized_result.args_id == memorized_result_loads.args_id + assert str(memorized_result) == str(memorized_result_loads) + + +def compare(left, right, ignored_attrs=None): + if ignored_attrs is None: + ignored_attrs = [] + + left_vars = vars(left) + right_vars = vars(right) + assert set(left_vars.keys()) == set(right_vars.keys()) + for attr in left_vars.keys(): + if attr in ignored_attrs: + continue + assert left_vars[attr] == right_vars[attr] + + +@pytest.mark.parametrize('memory_kwargs', + [{'compress': 3, 'verbose': 2}, + {'mmap_mode': 'r', 'verbose': 5, + 'backend_options': {'parameter': 'unused'}}]) +def test_memory_pickle_dump_load(tmpdir, memory_kwargs): + memory = Memory(location=tmpdir.strpath, **memory_kwargs) + + memory_reloaded = pickle.loads(pickle.dumps(memory)) + + # Compare Memory instance before and after pickle roundtrip + compare(memory.store_backend, memory_reloaded.store_backend) + compare(memory, memory_reloaded, + ignored_attrs=set(['store_backend', 'timestamp', '_func_code_id'])) + assert hash(memory) == hash(memory_reloaded) + + func_cached = memory.cache(f) + + func_cached_reloaded = pickle.loads(pickle.dumps(func_cached)) + + # Compare MemorizedFunc instance before/after pickle roundtrip + compare(func_cached.store_backend, func_cached_reloaded.store_backend) + compare(func_cached, func_cached_reloaded, + ignored_attrs=set(['store_backend', 'timestamp', '_func_code_id'])) + assert hash(func_cached) == hash(func_cached_reloaded) + + # Compare MemorizedResult instance before/after pickle roundtrip + memorized_result = func_cached.call_and_shelve(1) + memorized_result_reloaded = pickle.loads(pickle.dumps(memorized_result)) + + compare(memorized_result.store_backend, + memorized_result_reloaded.store_backend) + compare(memorized_result, memorized_result_reloaded, + ignored_attrs=set(['store_backend', 'timestamp', '_func_code_id'])) + assert hash(memorized_result) == hash(memorized_result_reloaded) + + +def test_info_log(tmpdir, caplog): + caplog.set_level(logging.INFO) + x = 3 + + memory = Memory(location=tmpdir.strpath, verbose=20) + + @memory.cache + def f(x): + return x ** 2 + + _ = f(x) + assert "Querying" in caplog.text + caplog.clear() + + memory = Memory(location=tmpdir.strpath, verbose=0) + + @memory.cache + def f(x): + return x ** 2 + + _ = f(x) + assert "Querying" not in caplog.text + caplog.clear() + + +def test_deprecated_bytes_limit(tmpdir): + from joblib import __version__ + if __version__ >= "1.5": + raise DeprecationWarning( + "Bytes limit is deprecated and should be removed by 1.4" + ) + with pytest.warns(DeprecationWarning, match="bytes_limit"): + _ = Memory(location=tmpdir.strpath, bytes_limit='1K') + + +class TestCacheValidationCallback: + "Tests on parameter `cache_validation_callback`" + + @pytest.fixture() + def memory(self, tmp_path): + mem = Memory(location=tmp_path) + yield mem + mem.clear() + + def foo(self, x, d, delay=None): + d["run"] = True + if delay is not None: + time.sleep(delay) + return x * 2 + + def 
test_invalid_cache_validation_callback(self, memory): + "Test invalid values for `cache_validation_callback" + match = "cache_validation_callback needs to be callable. Got True." + with pytest.raises(ValueError, match=match): + memory.cache(cache_validation_callback=True) + + @pytest.mark.parametrize("consider_cache_valid", [True, False]) + def test_constant_cache_validation_callback( + self, memory, consider_cache_valid + ): + "Test expiry of old results" + f = memory.cache( + self.foo, cache_validation_callback=lambda _: consider_cache_valid, + ignore=["d"] + ) + + d1, d2 = {"run": False}, {"run": False} + assert f(2, d1) == 4 + assert f(2, d2) == 4 + + assert d1["run"] + assert d2["run"] != consider_cache_valid + + def test_memory_only_cache_long_run(self, memory): + "Test cache validity based on run duration." + + def cache_validation_callback(metadata): + duration = metadata['duration'] + if duration > 0.1: + return True + + f = memory.cache( + self.foo, cache_validation_callback=cache_validation_callback, + ignore=["d"] + ) + + # Short run are not cached + d1, d2 = {"run": False}, {"run": False} + assert f(2, d1, delay=0) == 4 + assert f(2, d2, delay=0) == 4 + assert d1["run"] + assert d2["run"] + + # Longer run are cached + d1, d2 = {"run": False}, {"run": False} + assert f(2, d1, delay=0.2) == 4 + assert f(2, d2, delay=0.2) == 4 + assert d1["run"] + assert not d2["run"] + + def test_memory_expires_after(self, memory): + "Test expiry of old cached results" + + f = memory.cache( + self.foo, cache_validation_callback=expires_after(seconds=.3), + ignore=["d"] + ) + + d1, d2, d3 = {"run": False}, {"run": False}, {"run": False} + assert f(2, d1) == 4 + assert f(2, d2) == 4 + time.sleep(.5) + assert f(2, d3) == 4 + + assert d1["run"] + assert not d2["run"] + assert d3["run"] diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/test/test_memory_async.py b/env-llmeval/lib/python3.10/site-packages/joblib/test/test_memory_async.py new file mode 100644 index 0000000000000000000000000000000000000000..ecad0c926193a7ed84f669cface0023363f231ff --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/joblib/test/test_memory_async.py @@ -0,0 +1,149 @@ +import asyncio +import gc +import shutil + +import pytest + +from joblib.memory import (AsyncMemorizedFunc, AsyncNotMemorizedFunc, + MemorizedResult, Memory, NotMemorizedResult) +from joblib.test.common import np, with_numpy +from joblib.testing import raises + +from .test_memory import (corrupt_single_cache_item, + monkeypatch_cached_func_warn) + + +async def check_identity_lazy_async(func, accumulator, location): + """ Similar to check_identity_lazy_async for coroutine functions""" + memory = Memory(location=location, verbose=0) + func = memory.cache(func) + for i in range(3): + for _ in range(2): + value = await func(i) + assert value == i + assert len(accumulator) == i + 1 + + +@pytest.mark.asyncio +async def test_memory_integration_async(tmpdir): + accumulator = list() + + async def f(n): + await asyncio.sleep(0.1) + accumulator.append(1) + return n + + await check_identity_lazy_async(f, accumulator, tmpdir.strpath) + + # Now test clearing + for compress in (False, True): + for mmap_mode in ('r', None): + memory = Memory(location=tmpdir.strpath, verbose=10, + mmap_mode=mmap_mode, compress=compress) + # First clear the cache directory, to check that our code can + # handle that + # NOTE: this line would raise an exception, as the database + # file is still open; we ignore the error since we want to + # test what happens if the 
directory disappears + shutil.rmtree(tmpdir.strpath, ignore_errors=True) + g = memory.cache(f) + await g(1) + g.clear(warn=False) + current_accumulator = len(accumulator) + out = await g(1) + + assert len(accumulator) == current_accumulator + 1 + # Also, check that Memory.eval works similarly + evaled = await memory.eval(f, 1) + assert evaled == out + assert len(accumulator) == current_accumulator + 1 + + # Now do a smoke test with a function defined in __main__, as the name + # mangling rules are more complex + f.__module__ = '__main__' + memory = Memory(location=tmpdir.strpath, verbose=0) + await memory.cache(f)(1) + + +@pytest.mark.asyncio +async def test_no_memory_async(): + accumulator = list() + + async def ff(x): + await asyncio.sleep(0.1) + accumulator.append(1) + return x + + memory = Memory(location=None, verbose=0) + gg = memory.cache(ff) + for _ in range(4): + current_accumulator = len(accumulator) + await gg(1) + assert len(accumulator) == current_accumulator + 1 + + +@with_numpy +@pytest.mark.asyncio +async def test_memory_numpy_check_mmap_mode_async(tmpdir, monkeypatch): + """Check that mmap_mode is respected even at the first call""" + + memory = Memory(location=tmpdir.strpath, mmap_mode='r', verbose=0) + + @memory.cache() + async def twice(a): + return a * 2 + + a = np.ones(3) + b = await twice(a) + c = await twice(a) + + assert isinstance(c, np.memmap) + assert c.mode == 'r' + + assert isinstance(b, np.memmap) + assert b.mode == 'r' + + # Corrupts the file, Deleting b and c mmaps + # is necessary to be able edit the file + del b + del c + gc.collect() + corrupt_single_cache_item(memory) + + # Make sure that corrupting the file causes recomputation and that + # a warning is issued. + recorded_warnings = monkeypatch_cached_func_warn(twice, monkeypatch) + d = await twice(a) + assert len(recorded_warnings) == 1 + exception_msg = 'Exception while loading results' + assert exception_msg in recorded_warnings[0] + # Asserts that the recomputation returns a mmap + assert isinstance(d, np.memmap) + assert d.mode == 'r' + + +@pytest.mark.asyncio +async def test_call_and_shelve_async(tmpdir): + async def f(x, y=1): + await asyncio.sleep(0.1) + return x ** 2 + y + + # Test MemorizedFunc outputting a reference to cache. + for func, Result in zip((AsyncMemorizedFunc(f, tmpdir.strpath), + AsyncNotMemorizedFunc(f), + Memory(location=tmpdir.strpath, + verbose=0).cache(f), + Memory(location=None).cache(f), + ), + (MemorizedResult, NotMemorizedResult, + MemorizedResult, NotMemorizedResult, + )): + for _ in range(2): + result = await func.call_and_shelve(2) + assert isinstance(result, Result) + assert result.get() == 5 + + result.clear() + with raises(KeyError): + result.get() + result.clear() # Do nothing if there is no cache. diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/test/test_missing_multiprocessing.py b/env-llmeval/lib/python3.10/site-packages/joblib/test/test_missing_multiprocessing.py new file mode 100644 index 0000000000000000000000000000000000000000..251925ced5208b4aaf09d9aab305eb44c7102818 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/joblib/test/test_missing_multiprocessing.py @@ -0,0 +1,32 @@ +""" +Pyodide and other single-threaded Python builds will be missing the +_multiprocessing module. Test that joblib still works in this environment. +""" + +import os +import subprocess +import sys + + +def test_missing_multiprocessing(tmp_path): + """ + Test that import joblib works even if _multiprocessing is missing. 
+ + pytest has already imported everything from joblib. The most reasonable way + to test importing joblib with modified environment is to invoke a separate + Python process. This also ensures that we don't break other tests by + importing a bad `_multiprocessing` module. + """ + (tmp_path / "_multiprocessing.py").write_text( + 'raise ImportError("No _multiprocessing module!")' + ) + env = dict(os.environ) + # For subprocess, use current sys.path with our custom version of + # multiprocessing inserted. + env["PYTHONPATH"] = ":".join([str(tmp_path)] + sys.path) + subprocess.check_call( + [sys.executable, "-c", + "import joblib, math; " + "joblib.Parallel(n_jobs=1)(" + "joblib.delayed(math.sqrt)(i**2) for i in range(10))" + ], env=env) diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/test/test_module.py b/env-llmeval/lib/python3.10/site-packages/joblib/test/test_module.py new file mode 100644 index 0000000000000000000000000000000000000000..a2257a4142d79996f3d299cb820927ae48a05810 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/joblib/test/test_module.py @@ -0,0 +1,53 @@ +import sys +import joblib +from joblib.testing import check_subprocess_call +from joblib.test.common import with_multiprocessing + + +def test_version(): + assert hasattr(joblib, '__version__'), ( + "There are no __version__ argument on the joblib module") + + +@with_multiprocessing +def test_no_start_method_side_effect_on_import(): + # check that importing joblib does not implicitly set the global + # start_method for multiprocessing. + code = """if True: + import joblib + import multiprocessing as mp + # The following line would raise RuntimeError if the + # start_method is already set. + mp.set_start_method("loky") + """ + check_subprocess_call([sys.executable, '-c', code]) + + +@with_multiprocessing +def test_no_semaphore_tracker_on_import(): + # check that importing joblib does not implicitly spawn a resource tracker + # or a semaphore tracker + code = """if True: + import joblib + from multiprocessing import semaphore_tracker + # The following line would raise RuntimeError if the + # start_method is already set. + msg = "multiprocessing.semaphore_tracker has been spawned on import" + assert semaphore_tracker._semaphore_tracker._fd is None, msg""" + if sys.version_info >= (3, 8): + # semaphore_tracker was renamed in Python 3.8: + code = code.replace("semaphore_tracker", "resource_tracker") + check_subprocess_call([sys.executable, '-c', code]) + + +@with_multiprocessing +def test_no_resource_tracker_on_import(): + code = """if True: + import joblib + from joblib.externals.loky.backend import resource_tracker + # The following line would raise RuntimeError if the + # start_method is already set. 
+ msg = "loky.resource_tracker has been spawned on import" + assert resource_tracker._resource_tracker._fd is None, msg + """ + check_subprocess_call([sys.executable, '-c', code]) diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/test/test_numpy_pickle.py b/env-llmeval/lib/python3.10/site-packages/joblib/test/test_numpy_pickle.py new file mode 100644 index 0000000000000000000000000000000000000000..9fee585c79ad219d3a9f8cdc6a55655b50099c09 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/joblib/test/test_numpy_pickle.py @@ -0,0 +1,1159 @@ +"""Test the numpy pickler as a replacement of the standard pickler.""" + +import copy +import os +import random +import re +import io +import sys +import warnings +import gzip +import zlib +import bz2 +import pickle +import socket +from contextlib import closing +import mmap +from pathlib import Path + +try: + import lzma +except ImportError: + lzma = None + +import pytest + +from joblib.test.common import np, with_numpy, with_lz4, without_lz4 +from joblib.test.common import with_memory_profiler, memory_used +from joblib.testing import parametrize, raises, warns + +# numpy_pickle is not a drop-in replacement of pickle, as it takes +# filenames instead of open files as arguments. +from joblib import numpy_pickle, register_compressor +from joblib.test import data + +from joblib.numpy_pickle_utils import _IO_BUFFER_SIZE +from joblib.numpy_pickle_utils import _detect_compressor +from joblib.numpy_pickle_utils import _is_numpy_array_byte_order_mismatch +from joblib.numpy_pickle_utils import _ensure_native_byte_order +from joblib.compressor import (_COMPRESSORS, _LZ4_PREFIX, CompressorWrapper, + LZ4_NOT_INSTALLED_ERROR, BinaryZlibFile) + + +############################################################################### +# Define a list of standard types. +# Borrowed from dill, initial author: Micheal McKerns: +# http://dev.danse.us/trac/pathos/browser/dill/dill_test2.py + +typelist = [] + +# testing types +_none = None +typelist.append(_none) +_type = type +typelist.append(_type) +_bool = bool(1) +typelist.append(_bool) +_int = int(1) +typelist.append(_int) +_float = float(1) +typelist.append(_float) +_complex = complex(1) +typelist.append(_complex) +_string = str(1) +typelist.append(_string) +_tuple = () +typelist.append(_tuple) +_list = [] +typelist.append(_list) +_dict = {} +typelist.append(_dict) +_builtin = len +typelist.append(_builtin) + + +def _function(x): + yield x + + +class _class: + def _method(self): + pass + + +class _newclass(object): + def _method(self): + pass + + +typelist.append(_function) +typelist.append(_class) +typelist.append(_newclass) # +_instance = _class() +typelist.append(_instance) +_object = _newclass() +typelist.append(_object) # + + +############################################################################### +# Tests + +@parametrize('compress', [0, 1]) +@parametrize('member', typelist) +def test_standard_types(tmpdir, compress, member): + # Test pickling and saving with standard types. 
+ filename = tmpdir.join('test.pkl').strpath + numpy_pickle.dump(member, filename, compress=compress) + _member = numpy_pickle.load(filename) + # We compare the pickled instance to the reloaded one only if it + # can be compared to a copied one + if member == copy.deepcopy(member): + assert member == _member + + +def test_value_error(): + # Test inverting the input arguments to dump + with raises(ValueError): + numpy_pickle.dump('foo', dict()) + + +@parametrize('wrong_compress', [-1, 10, dict()]) +def test_compress_level_error(wrong_compress): + # Verify that passing an invalid compress argument raises an error. + exception_msg = ('Non valid compress level given: ' + '"{0}"'.format(wrong_compress)) + with raises(ValueError) as excinfo: + numpy_pickle.dump('dummy', 'foo', compress=wrong_compress) + excinfo.match(exception_msg) + + +@with_numpy +@parametrize('compress', [False, True, 0, 3, 'zlib']) +def test_numpy_persistence(tmpdir, compress): + filename = tmpdir.join('test.pkl').strpath + rnd = np.random.RandomState(0) + a = rnd.random_sample((10, 2)) + # We use 'a.T' to have a non C-contiguous array. + for index, obj in enumerate(((a,), (a.T,), (a, a), [a, a, a])): + filenames = numpy_pickle.dump(obj, filename, compress=compress) + + # All is cached in one file + assert len(filenames) == 1 + # Check that only one file was created + assert filenames[0] == filename + # Check that this file does exist + assert os.path.exists(filenames[0]) + + # Unpickle the object + obj_ = numpy_pickle.load(filename) + # Check that the items are indeed arrays + for item in obj_: + assert isinstance(item, np.ndarray) + # And finally, check that all the values are equal. + np.testing.assert_array_equal(np.array(obj), np.array(obj_)) + + # Now test with an array subclass + obj = np.memmap(filename + 'mmap', mode='w+', shape=4, dtype=np.float64) + filenames = numpy_pickle.dump(obj, filename, compress=compress) + # All is cached in one file + assert len(filenames) == 1 + + obj_ = numpy_pickle.load(filename) + if (type(obj) is not np.memmap and + hasattr(obj, '__array_prepare__')): + # We don't reconstruct memmaps + assert isinstance(obj_, type(obj)) + + np.testing.assert_array_equal(obj_, obj) + + # Test with an object containing multiple numpy arrays + obj = ComplexTestObject() + filenames = numpy_pickle.dump(obj, filename, compress=compress) + # All is cached in one file + assert len(filenames) == 1 + + obj_loaded = numpy_pickle.load(filename) + assert isinstance(obj_loaded, type(obj)) + np.testing.assert_array_equal(obj_loaded.array_float, obj.array_float) + np.testing.assert_array_equal(obj_loaded.array_int, obj.array_int) + np.testing.assert_array_equal(obj_loaded.array_obj, obj.array_obj) + + +@with_numpy +def test_numpy_persistence_bufferred_array_compression(tmpdir): + big_array = np.ones((_IO_BUFFER_SIZE + 100), dtype=np.uint8) + filename = tmpdir.join('test.pkl').strpath + numpy_pickle.dump(big_array, filename, compress=True) + arr_reloaded = numpy_pickle.load(filename) + + np.testing.assert_array_equal(big_array, arr_reloaded) + + +@with_numpy +def test_memmap_persistence(tmpdir): + rnd = np.random.RandomState(0) + a = rnd.random_sample(10) + filename = tmpdir.join('test1.pkl').strpath + numpy_pickle.dump(a, filename) + b = numpy_pickle.load(filename, mmap_mode='r') + + assert isinstance(b, np.memmap) + + # Test with an object containing multiple numpy arrays + filename = tmpdir.join('test2.pkl').strpath + obj = ComplexTestObject() + numpy_pickle.dump(obj, filename) + obj_loaded = 
numpy_pickle.load(filename, mmap_mode='r') + assert isinstance(obj_loaded, type(obj)) + assert isinstance(obj_loaded.array_float, np.memmap) + assert not obj_loaded.array_float.flags.writeable + assert isinstance(obj_loaded.array_int, np.memmap) + assert not obj_loaded.array_int.flags.writeable + # Memory map not allowed for numpy object arrays + assert not isinstance(obj_loaded.array_obj, np.memmap) + np.testing.assert_array_equal(obj_loaded.array_float, + obj.array_float) + np.testing.assert_array_equal(obj_loaded.array_int, + obj.array_int) + np.testing.assert_array_equal(obj_loaded.array_obj, + obj.array_obj) + + # Test we can write in memmapped arrays + obj_loaded = numpy_pickle.load(filename, mmap_mode='r+') + assert obj_loaded.array_float.flags.writeable + obj_loaded.array_float[0:10] = 10.0 + assert obj_loaded.array_int.flags.writeable + obj_loaded.array_int[0:10] = 10 + + obj_reloaded = numpy_pickle.load(filename, mmap_mode='r') + np.testing.assert_array_equal(obj_reloaded.array_float, + obj_loaded.array_float) + np.testing.assert_array_equal(obj_reloaded.array_int, + obj_loaded.array_int) + + # Test w+ mode is caught and the mode has switched to r+ + numpy_pickle.load(filename, mmap_mode='w+') + assert obj_loaded.array_int.flags.writeable + assert obj_loaded.array_int.mode == 'r+' + assert obj_loaded.array_float.flags.writeable + assert obj_loaded.array_float.mode == 'r+' + + +@with_numpy +def test_memmap_persistence_mixed_dtypes(tmpdir): + # loading datastructures that have sub-arrays with dtype=object + # should not prevent memmapping on fixed size dtype sub-arrays. + rnd = np.random.RandomState(0) + a = rnd.random_sample(10) + b = np.array([1, 'b'], dtype=object) + construct = (a, b) + filename = tmpdir.join('test.pkl').strpath + numpy_pickle.dump(construct, filename) + a_clone, b_clone = numpy_pickle.load(filename, mmap_mode='r') + + # the floating point array has been memory mapped + assert isinstance(a_clone, np.memmap) + + # the object-dtype array has been loaded in memory + assert not isinstance(b_clone, np.memmap) + + +@with_numpy +def test_masked_array_persistence(tmpdir): + # The special-case picker fails, because saving masked_array + # not implemented, but it just delegates to the standard pickler. + rnd = np.random.RandomState(0) + a = rnd.random_sample(10) + a = np.ma.masked_greater(a, 0.5) + filename = tmpdir.join('test.pkl').strpath + numpy_pickle.dump(a, filename) + b = numpy_pickle.load(filename, mmap_mode='r') + assert isinstance(b, np.ma.masked_array) + + +@with_numpy +def test_compress_mmap_mode_warning(tmpdir): + # Test the warning in case of compress + mmap_mode + rnd = np.random.RandomState(0) + a = rnd.random_sample(10) + this_filename = tmpdir.join('test.pkl').strpath + numpy_pickle.dump(a, this_filename, compress=1) + with warns(UserWarning) as warninfo: + numpy_pickle.load(this_filename, mmap_mode='r+') + debug_msg = "\n".join([str(w) for w in warninfo]) + warninfo = [w.message for w in warninfo] + assert len(warninfo) == 1, debug_msg + assert ( + str(warninfo[0]) == + 'mmap_mode "r+" is not compatible with compressed ' + f'file {this_filename}. "r+" flag will be ignored.' 
+ ) + + +@with_numpy +@parametrize('cache_size', [None, 0, 10]) +def test_cache_size_warning(tmpdir, cache_size): + # Check deprecation warning raised when cache size is not None + filename = tmpdir.join('test.pkl').strpath + rnd = np.random.RandomState(0) + a = rnd.random_sample((10, 2)) + + warnings.simplefilter("always") + with warnings.catch_warnings(record=True) as warninfo: + numpy_pickle.dump(a, filename, cache_size=cache_size) + expected_nb_warnings = 1 if cache_size is not None else 0 + assert len(warninfo) == expected_nb_warnings + for w in warninfo: + assert w.category == DeprecationWarning + assert (str(w.message) == + "Please do not set 'cache_size' in joblib.dump, this " + "parameter has no effect and will be removed. You " + "used 'cache_size={0}'".format(cache_size)) + + +@with_numpy +@with_memory_profiler +@parametrize('compress', [True, False]) +def test_memory_usage(tmpdir, compress): + # Verify memory stays within expected bounds. + filename = tmpdir.join('test.pkl').strpath + small_array = np.ones((10, 10)) + big_array = np.ones(shape=100 * int(1e6), dtype=np.uint8) + + for obj in (small_array, big_array): + size = obj.nbytes / 1e6 + obj_filename = filename + str(np.random.randint(0, 1000)) + mem_used = memory_used(numpy_pickle.dump, + obj, obj_filename, compress=compress) + + # The memory used to dump the object shouldn't exceed the buffer + # size used to write array chunks (16MB). + write_buf_size = _IO_BUFFER_SIZE + 16 * 1024 ** 2 / 1e6 + assert mem_used <= write_buf_size + + mem_used = memory_used(numpy_pickle.load, obj_filename) + # memory used should be less than array size + buffer size used to + # read the array chunk by chunk. + read_buf_size = 32 + _IO_BUFFER_SIZE # MiB + assert mem_used < size + read_buf_size + + +@with_numpy +def test_compressed_pickle_dump_and_load(tmpdir): + expected_list = [np.arange(5, dtype=np.dtype('i8')), + np.arange(5, dtype=np.dtype('f8')), + np.array([1, 'abc', {'a': 1, 'b': 2}], dtype='O'), + np.arange(256, dtype=np.uint8).tobytes(), + u"C'est l'\xe9t\xe9 !"] + + fname = tmpdir.join('temp.pkl.gz').strpath + + dumped_filenames = numpy_pickle.dump(expected_list, fname, compress=1) + assert len(dumped_filenames) == 1 + result_list = numpy_pickle.load(fname) + for result, expected in zip(result_list, expected_list): + if isinstance(expected, np.ndarray): + expected = _ensure_native_byte_order(expected) + assert result.dtype == expected.dtype + np.testing.assert_equal(result, expected) + else: + assert result == expected + + +def _check_pickle(filename, expected_list, mmap_mode=None): + """Helper function to test joblib pickle content. + + Note: currently only pickles containing an iterable are supported + by this function. 
+ """ + version_match = re.match(r'.+py(\d)(\d).+', filename) + py_version_used_for_writing = int(version_match.group(1)) + + py_version_to_default_pickle_protocol = {2: 2, 3: 3} + pickle_reading_protocol = py_version_to_default_pickle_protocol.get(3, 4) + pickle_writing_protocol = py_version_to_default_pickle_protocol.get( + py_version_used_for_writing, 4) + if pickle_reading_protocol >= pickle_writing_protocol: + try: + with warnings.catch_warnings(record=True) as warninfo: + warnings.simplefilter('always') + warnings.filterwarnings( + 'ignore', module='numpy', + message='The compiler package is deprecated') + result_list = numpy_pickle.load(filename, mmap_mode=mmap_mode) + filename_base = os.path.basename(filename) + expected_nb_deprecation_warnings = 1 if ( + "_0.9" in filename_base or "_0.8.4" in filename_base) else 0 + + expected_nb_user_warnings = 3 if ( + re.search("_0.1.+.pkl$", filename_base) and + mmap_mode is not None) else 0 + expected_nb_warnings = \ + expected_nb_deprecation_warnings + expected_nb_user_warnings + assert len(warninfo) == expected_nb_warnings + + deprecation_warnings = [ + w for w in warninfo if issubclass( + w.category, DeprecationWarning)] + user_warnings = [ + w for w in warninfo if issubclass( + w.category, UserWarning)] + for w in deprecation_warnings: + assert (str(w.message) == + "The file '{0}' has been generated with a joblib " + "version less than 0.10. Please regenerate this " + "pickle file.".format(filename)) + + for w in user_warnings: + escaped_filename = re.escape(filename) + assert re.search( + f"memmapped.+{escaped_filename}.+segmentation fault", + str(w.message)) + + for result, expected in zip(result_list, expected_list): + if isinstance(expected, np.ndarray): + expected = _ensure_native_byte_order(expected) + assert result.dtype == expected.dtype + np.testing.assert_equal(result, expected) + else: + assert result == expected + except Exception as exc: + # When trying to read with python 3 a pickle generated + # with python 2 we expect a user-friendly error + if py_version_used_for_writing == 2: + assert isinstance(exc, ValueError) + message = ('You may be trying to read with ' + 'python 3 a joblib pickle generated with python 2.') + assert message in str(exc) + elif filename.endswith('.lz4') and with_lz4.args[0]: + assert isinstance(exc, ValueError) + assert LZ4_NOT_INSTALLED_ERROR in str(exc) + else: + raise + else: + # Pickle protocol used for writing is too high. We expect a + # "unsupported pickle protocol" error message + try: + numpy_pickle.load(filename) + raise AssertionError('Numpy pickle loading should ' + 'have raised a ValueError exception') + except ValueError as e: + message = 'unsupported pickle protocol: {0}'.format( + pickle_writing_protocol) + assert message in str(e.args) + + +@with_numpy +def test_joblib_pickle_across_python_versions(): + # We need to be specific about dtypes in particular endianness + # because the pickles can be generated on one architecture and + # the tests run on another one. See + # https://github.com/joblib/joblib/issues/279. + expected_list = [np.arange(5, dtype=np.dtype('i8'), ('', '>f8')]), + np.arange(3, dtype=np.dtype('>i8')), + np.arange(3, dtype=np.dtype('>f8'))] + + # Verify the byteorder mismatch is correctly detected. 
+ for array in be_arrays: + if sys.byteorder == 'big': + assert not _is_numpy_array_byte_order_mismatch(array) + else: + assert _is_numpy_array_byte_order_mismatch(array) + converted = _ensure_native_byte_order(array) + if converted.dtype.fields: + for f in converted.dtype.fields.values(): + assert f[0].byteorder == '=' + else: + assert converted.dtype.byteorder == "=" + + # List of numpy arrays with little endian byteorder. + le_arrays = [np.array([(1, 2.0), (3, 4.0)], + dtype=[('', ' size + np.testing.assert_array_equal(obj, memmaps) + + +def test_register_compressor(tmpdir): + # Check that registering compressor file works. + compressor_name = 'test-name' + compressor_prefix = 'test-prefix' + + class BinaryCompressorTestFile(io.BufferedIOBase): + pass + + class BinaryCompressorTestWrapper(CompressorWrapper): + + def __init__(self): + CompressorWrapper.__init__(self, obj=BinaryCompressorTestFile, + prefix=compressor_prefix) + + register_compressor(compressor_name, BinaryCompressorTestWrapper()) + + assert (_COMPRESSORS[compressor_name].fileobj_factory == + BinaryCompressorTestFile) + assert _COMPRESSORS[compressor_name].prefix == compressor_prefix + + # Remove this dummy compressor file from extra compressors because other + # tests might fail because of this + _COMPRESSORS.pop(compressor_name) + + +@parametrize('invalid_name', [1, (), {}]) +def test_register_compressor_invalid_name(invalid_name): + # Test that registering an invalid compressor name is not allowed. + with raises(ValueError) as excinfo: + register_compressor(invalid_name, None) + excinfo.match("Compressor name should be a string") + + +def test_register_compressor_invalid_fileobj(): + # Test that registering an invalid file object is not allowed. + + class InvalidFileObject(): + pass + + class InvalidFileObjectWrapper(CompressorWrapper): + def __init__(self): + CompressorWrapper.__init__(self, obj=InvalidFileObject, + prefix=b'prefix') + + with raises(ValueError) as excinfo: + register_compressor('invalid', InvalidFileObjectWrapper()) + + excinfo.match("Compressor 'fileobj_factory' attribute should implement " + "the file object interface") + + +class AnotherZlibCompressorWrapper(CompressorWrapper): + + def __init__(self): + CompressorWrapper.__init__(self, obj=BinaryZlibFile, prefix=b'prefix') + + +class StandardLibGzipCompressorWrapper(CompressorWrapper): + + def __init__(self): + CompressorWrapper.__init__(self, obj=gzip.GzipFile, prefix=b'prefix') + + +def test_register_compressor_already_registered(): + # Test registration of existing compressor files. + compressor_name = 'test-name' + + # register a test compressor + register_compressor(compressor_name, AnotherZlibCompressorWrapper()) + + with raises(ValueError) as excinfo: + register_compressor(compressor_name, + StandardLibGzipCompressorWrapper()) + excinfo.match("Compressor '{}' already registered." + .format(compressor_name)) + + register_compressor(compressor_name, StandardLibGzipCompressorWrapper(), + force=True) + + assert compressor_name in _COMPRESSORS + assert _COMPRESSORS[compressor_name].fileobj_factory == gzip.GzipFile + + # Remove this dummy compressor file from extra compressors because other + # tests might fail because of this + _COMPRESSORS.pop(compressor_name) + + +@with_lz4 +def test_lz4_compression(tmpdir): + # Check that lz4 can be used when dependency is available.
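# Hedged sketch of the registration pattern tested above: a CompressorWrapper
# exposes a binary file-object factory plus the magic-number prefix used to
# detect the format at load time.  The 'my-gzip' name is invented here, and it
# is assumed that dump() accepts any registered compressor name.
import gzip
import joblib
from joblib import register_compressor
from joblib.compressor import CompressorWrapper

class MyGzipWrapper(CompressorWrapper):
    def __init__(self):
        # gzip.GzipFile implements the file-object interface required by
        # register_compressor; b'\x1f\x8b' is the standard gzip magic number.
        CompressorWrapper.__init__(self, obj=gzip.GzipFile, prefix=b'\x1f\x8b')

register_compressor('my-gzip', MyGzipWrapper(), force=True)
joblib.dump([1, 2, 3], '/tmp/data.pkl', compress='my-gzip')
assert joblib.load('/tmp/data.pkl') == [1, 2, 3]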
+ import lz4.frame + compressor = 'lz4' + assert compressor in _COMPRESSORS + assert _COMPRESSORS[compressor].fileobj_factory == lz4.frame.LZ4FrameFile + + fname = tmpdir.join('test.pkl').strpath + data = 'test data' + numpy_pickle.dump(data, fname, compress=compressor) + + with open(fname, 'rb') as f: + assert f.read(len(_LZ4_PREFIX)) == _LZ4_PREFIX + assert numpy_pickle.load(fname) == data + + # Test that LZ4 is applied based on file extension + numpy_pickle.dump(data, fname + '.lz4') + with open(fname, 'rb') as f: + assert f.read(len(_LZ4_PREFIX)) == _LZ4_PREFIX + assert numpy_pickle.load(fname) == data + + +@without_lz4 +def test_lz4_compression_without_lz4(tmpdir): + # Check that lz4 cannot be used when dependency is not available. + fname = tmpdir.join('test.nolz4').strpath + data = 'test data' + msg = LZ4_NOT_INSTALLED_ERROR + with raises(ValueError) as excinfo: + numpy_pickle.dump(data, fname, compress='lz4') + excinfo.match(msg) + + with raises(ValueError) as excinfo: + numpy_pickle.dump(data, fname + '.lz4') + excinfo.match(msg) + + +protocols = [pickle.DEFAULT_PROTOCOL] +if pickle.HIGHEST_PROTOCOL != pickle.DEFAULT_PROTOCOL: + protocols.append(pickle.HIGHEST_PROTOCOL) + + +@with_numpy +@parametrize('protocol', protocols) +def test_memmap_alignment_padding(tmpdir, protocol): + # Test that memmaped arrays returned by numpy.load are correctly aligned + fname = tmpdir.join('test.mmap').strpath + + a = np.random.randn(2) + numpy_pickle.dump(a, fname, protocol=protocol) + memmap = numpy_pickle.load(fname, mmap_mode='r') + assert isinstance(memmap, np.memmap) + np.testing.assert_array_equal(a, memmap) + assert ( + memmap.ctypes.data % numpy_pickle.NUMPY_ARRAY_ALIGNMENT_BYTES == 0) + assert memmap.flags.aligned + + array_list = [ + np.random.randn(2), np.random.randn(2), + np.random.randn(2), np.random.randn(2) + ] + + # On Windows OSError 22 if reusing the same path for memmap ... + fname = tmpdir.join('test1.mmap').strpath + numpy_pickle.dump(array_list, fname, protocol=protocol) + l_reloaded = numpy_pickle.load(fname, mmap_mode='r') + + for idx, memmap in enumerate(l_reloaded): + assert isinstance(memmap, np.memmap) + np.testing.assert_array_equal(array_list[idx], memmap) + assert ( + memmap.ctypes.data % numpy_pickle.NUMPY_ARRAY_ALIGNMENT_BYTES == 0) + assert memmap.flags.aligned + + array_dict = { + 'a0': np.arange(2, dtype=np.uint8), + 'a1': np.arange(3, dtype=np.uint8), + 'a2': np.arange(5, dtype=np.uint8), + 'a3': np.arange(7, dtype=np.uint8), + 'a4': np.arange(11, dtype=np.uint8), + 'a5': np.arange(13, dtype=np.uint8), + 'a6': np.arange(17, dtype=np.uint8), + 'a7': np.arange(19, dtype=np.uint8), + 'a8': np.arange(23, dtype=np.uint8), + } + + # On Windows OSError 22 if reusing the same path for memmap ... 
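# Small sketch mirroring the alignment test above: reloading with
# mmap_mode='r' yields read-only np.memmap views whose data pointer respects
# the pickler's alignment constant.  The file path is arbitrary.
import numpy as np
from joblib import numpy_pickle

a = np.random.randn(16)
numpy_pickle.dump(a, '/tmp/aligned.pkl')
m = numpy_pickle.load('/tmp/aligned.pkl', mmap_mode='r')
assert isinstance(m, np.memmap)
assert not m.flags.writeable
assert m.ctypes.data % numpy_pickle.NUMPY_ARRAY_ALIGNMENT_BYTES == 0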
+ fname = tmpdir.join('test2.mmap').strpath + numpy_pickle.dump(array_dict, fname, protocol=protocol) + d_reloaded = numpy_pickle.load(fname, mmap_mode='r') + + for key, memmap in d_reloaded.items(): + assert isinstance(memmap, np.memmap) + np.testing.assert_array_equal(array_dict[key], memmap) + assert ( + memmap.ctypes.data % numpy_pickle.NUMPY_ARRAY_ALIGNMENT_BYTES == 0) + assert memmap.flags.aligned diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/test/test_numpy_pickle_compat.py b/env-llmeval/lib/python3.10/site-packages/joblib/test/test_numpy_pickle_compat.py new file mode 100644 index 0000000000000000000000000000000000000000..9e95393b19e979593c7037d0d4fb740e47131dde --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/joblib/test/test_numpy_pickle_compat.py @@ -0,0 +1,16 @@ +"""Test the old numpy pickler, compatibility version.""" + +# numpy_pickle is not a drop-in replacement of pickle, as it takes +# filenames instead of open files as arguments. +from joblib import numpy_pickle_compat + + +def test_z_file(tmpdir): + # Test saving and loading data with Zfiles. + filename = tmpdir.join('test.pkl').strpath + data = numpy_pickle_compat.asbytes('Foo, \n Bar, baz, \n\nfoobar') + with open(filename, 'wb') as f: + numpy_pickle_compat.write_zfile(f, data) + with open(filename, 'rb') as f: + data_read = numpy_pickle_compat.read_zfile(f) + assert data == data_read diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/test/test_numpy_pickle_utils.py b/env-llmeval/lib/python3.10/site-packages/joblib/test/test_numpy_pickle_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..5c414a2227cbf6094dc594f6712139d4fd397a9d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/joblib/test/test_numpy_pickle_utils.py @@ -0,0 +1,9 @@ +from joblib.compressor import BinaryZlibFile +from joblib.testing import parametrize + + +@parametrize('filename', ['test', u'test']) # testing str and unicode names +def test_binary_zlib_file(tmpdir, filename): + """Testing creation of files depending on the type of the filenames.""" + binary_file = BinaryZlibFile(tmpdir.join(filename).strpath, mode='wb') + binary_file.close() diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/test/test_parallel.py b/env-llmeval/lib/python3.10/site-packages/joblib/test/test_parallel.py new file mode 100644 index 0000000000000000000000000000000000000000..21f06faa01c3ed93b80accad494e6a0482cc2e72 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/joblib/test/test_parallel.py @@ -0,0 +1,2034 @@ +""" +Test the parallel module. +""" + +# Author: Gael Varoquaux +# Copyright (c) 2010-2011 Gael Varoquaux +# License: BSD Style, 3 clauses. 
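# The canonical pattern the test_parallel.py module added above revolves
# around: delayed(func)(*args) captures the call lazily and Parallel runs the
# resulting tasks with the selected backend and n_jobs.
from math import sqrt
from joblib import Parallel, delayed

results = Parallel(n_jobs=2)(delayed(sqrt)(i ** 2) for i in range(10))
assert results == [float(i) for i in range(10)]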
+ +import os +import sys +import time +import mmap +import weakref +import warnings +import threading +from traceback import format_exception +from math import sqrt +from time import sleep +from pickle import PicklingError +from contextlib import nullcontext +from multiprocessing import TimeoutError +import pytest + +import joblib +from joblib import parallel +from joblib import dump, load + +from joblib._multiprocessing_helpers import mp + +from joblib.test.common import np, with_numpy +from joblib.test.common import with_multiprocessing +from joblib.test.common import IS_PYPY, force_gc_pypy +from joblib.testing import (parametrize, raises, check_subprocess_call, + skipif, warns) + +if mp is not None: + # Loky is not available if multiprocessing is not + from joblib.externals.loky import get_reusable_executor + +from queue import Queue + +try: + import posix +except ImportError: + posix = None + +try: + from ._openmp_test_helper.parallel_sum import parallel_sum +except ImportError: + parallel_sum = None + +try: + import distributed +except ImportError: + distributed = None + +from joblib._parallel_backends import SequentialBackend +from joblib._parallel_backends import ThreadingBackend +from joblib._parallel_backends import MultiprocessingBackend +from joblib._parallel_backends import ParallelBackendBase +from joblib._parallel_backends import LokyBackend + +from joblib.parallel import Parallel, delayed +from joblib.parallel import parallel_config +from joblib.parallel import parallel_backend +from joblib.parallel import register_parallel_backend +from joblib.parallel import effective_n_jobs, cpu_count + +from joblib.parallel import mp, BACKENDS, DEFAULT_BACKEND + + +RETURN_GENERATOR_BACKENDS = BACKENDS.copy() +RETURN_GENERATOR_BACKENDS.pop("multiprocessing", None) + +ALL_VALID_BACKENDS = [None] + sorted(BACKENDS.keys()) +# Add instances of backend classes deriving from ParallelBackendBase +ALL_VALID_BACKENDS += [BACKENDS[backend_str]() for backend_str in BACKENDS] +if mp is None: + PROCESS_BACKENDS = [] +else: + PROCESS_BACKENDS = ['multiprocessing', 'loky'] +PARALLEL_BACKENDS = PROCESS_BACKENDS + ['threading'] + +if hasattr(mp, 'get_context'): + # Custom multiprocessing context in Python 3.4+ + ALL_VALID_BACKENDS.append(mp.get_context('spawn')) + +DefaultBackend = BACKENDS[DEFAULT_BACKEND] + + +def get_workers(backend): + return getattr(backend, '_pool', getattr(backend, '_workers', None)) + + +def division(x, y): + return x / y + + +def square(x): + return x ** 2 + + +class MyExceptionWithFinickyInit(Exception): + """An exception class with non trivial __init__ + """ + def __init__(self, a, b, c, d): + pass + + +def exception_raiser(x, custom_exception=False): + if x == 7: + raise (MyExceptionWithFinickyInit('a', 'b', 'c', 'd') + if custom_exception else ValueError) + return x + + +def interrupt_raiser(x): + time.sleep(.05) + raise KeyboardInterrupt + + +def f(x, y=0, z=0): + """ A module-level function so that it can be spawn with + multiprocessing. 
+ """ + return x ** 2 + y + z + + +def _active_backend_type(): + return type(parallel.get_active_backend()[0]) + + +def parallel_func(inner_n_jobs, backend): + return Parallel(n_jobs=inner_n_jobs, backend=backend)( + delayed(square)(i) for i in range(3)) + + +############################################################################### +def test_cpu_count(): + assert cpu_count() > 0 + + +def test_effective_n_jobs(): + assert effective_n_jobs() > 0 + + +@parametrize("context", [parallel_config, parallel_backend]) +@pytest.mark.parametrize( + "backend_n_jobs, expected_n_jobs", + [(3, 3), (-1, effective_n_jobs(n_jobs=-1)), (None, 1)], + ids=["positive-int", "negative-int", "None"] +) +@with_multiprocessing +def test_effective_n_jobs_None(context, backend_n_jobs, expected_n_jobs): + # check the number of effective jobs when `n_jobs=None` + # non-regression test for https://github.com/joblib/joblib/issues/984 + with context("threading", n_jobs=backend_n_jobs): + # when using a backend, the default of number jobs will be the one set + # in the backend + assert effective_n_jobs(n_jobs=None) == expected_n_jobs + # without any backend, None will default to a single job + assert effective_n_jobs(n_jobs=None) == 1 + + +############################################################################### +# Test parallel + +@parametrize('backend', ALL_VALID_BACKENDS) +@parametrize('n_jobs', [1, 2, -1, -2]) +@parametrize('verbose', [2, 11, 100]) +def test_simple_parallel(backend, n_jobs, verbose): + assert ([square(x) for x in range(5)] == + Parallel(n_jobs=n_jobs, backend=backend, + verbose=verbose)( + delayed(square)(x) for x in range(5))) + + +@parametrize('backend', ALL_VALID_BACKENDS) +def test_main_thread_renamed_no_warning(backend, monkeypatch): + # Check that no default backend relies on the name of the main thread: + # https://github.com/joblib/joblib/issues/180#issuecomment-253266247 + # Some programs use a different name for the main thread. This is the case + # for uWSGI apps for instance. + monkeypatch.setattr(target=threading.current_thread(), name='name', + value='some_new_name_for_the_main_thread') + + with warnings.catch_warnings(record=True) as warninfo: + results = Parallel(n_jobs=2, backend=backend)( + delayed(square)(x) for x in range(3)) + assert results == [0, 1, 4] + + # Due to the default parameters of LokyBackend, there is a chance that + # warninfo catches Warnings from worker timeouts. We remove it if it exists + warninfo = [w for w in warninfo if "worker timeout" not in str(w.message)] + + # The multiprocessing backend will raise a warning when detecting that is + # started from the non-main thread. Let's check that there is no false + # positive because of the name change. 
+ assert len(warninfo) == 0 + + +def _assert_warning_nested(backend, inner_n_jobs, expected): + with warnings.catch_warnings(record=True) as warninfo: + warnings.simplefilter("always") + parallel_func(backend=backend, inner_n_jobs=inner_n_jobs) + + warninfo = [w.message for w in warninfo] + if expected: + if warninfo: + warnings_are_correct = all( + 'backed parallel loops cannot' in each.args[0] + for each in warninfo + ) + # With Python nogil, when the outer backend is threading, we might + # see more that one warning + warnings_have_the_right_length = ( + len(warninfo) >= 1 if getattr(sys.flags, 'nogil', False) + else len(warninfo) == 1) + return warnings_are_correct and warnings_have_the_right_length + + return False + else: + assert not warninfo + return True + + +@with_multiprocessing +@parametrize('parent_backend,child_backend,expected', [ + ('loky', 'multiprocessing', True), + ('loky', 'loky', False), + ('multiprocessing', 'multiprocessing', True), + ('multiprocessing', 'loky', True), + ('threading', 'multiprocessing', True), + ('threading', 'loky', True), +]) +def test_nested_parallel_warnings(parent_backend, child_backend, expected): + + # no warnings if inner_n_jobs=1 + Parallel(n_jobs=2, backend=parent_backend)( + delayed(_assert_warning_nested)( + backend=child_backend, inner_n_jobs=1, + expected=False) + for _ in range(5)) + + # warnings if inner_n_jobs != 1 and expected + res = Parallel(n_jobs=2, backend=parent_backend)( + delayed(_assert_warning_nested)( + backend=child_backend, inner_n_jobs=2, + expected=expected) + for _ in range(5)) + + # warning handling is not thread safe. One thread might see multiple + # warning or no warning at all. + if parent_backend == "threading": + if IS_PYPY and not any(res): + # Related to joblib#1426, should be removed once it is solved. 
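# Sketch of the nesting shape these warning tests probe: a Parallel call
# issued from inside another Parallel call.  Only the structure is shown here,
# with the threading backend at both levels; which parent/child combinations
# emit the "backed parallel loops cannot be nested" warning is what the
# parametrization above pins down.
from joblib import Parallel, delayed

def inner(x):
    return Parallel(n_jobs=2, backend='threading')(
        delayed(pow)(x, p) for p in range(3))

outer = Parallel(n_jobs=2, backend='threading')(
    delayed(inner)(x) for x in range(4))
assert outer[2] == [1, 2, 4]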
+ pytest.xfail(reason="This test often fails in PyPy.") + assert any(res) + else: + assert all(res) + + +@with_multiprocessing +@parametrize('backend', ['loky', 'multiprocessing', 'threading']) +def test_background_thread_parallelism(backend): + is_run_parallel = [False] + + def background_thread(is_run_parallel): + with warnings.catch_warnings(record=True) as warninfo: + Parallel(n_jobs=2)( + delayed(sleep)(.1) for _ in range(4)) + print(len(warninfo)) + is_run_parallel[0] = len(warninfo) == 0 + + t = threading.Thread(target=background_thread, args=(is_run_parallel,)) + t.start() + t.join() + assert is_run_parallel[0] + + +def nested_loop(backend): + Parallel(n_jobs=2, backend=backend)( + delayed(square)(.01) for _ in range(2)) + + +@parametrize('child_backend', BACKENDS) +@parametrize('parent_backend', BACKENDS) +def test_nested_loop(parent_backend, child_backend): + Parallel(n_jobs=2, backend=parent_backend)( + delayed(nested_loop)(child_backend) for _ in range(2)) + + +def raise_exception(backend): + raise ValueError + + +@with_multiprocessing +def test_nested_loop_with_exception_with_loky(): + with raises(ValueError): + with Parallel(n_jobs=2, backend="loky") as parallel: + parallel([delayed(nested_loop)("loky"), + delayed(raise_exception)("loky")]) + + +def test_mutate_input_with_threads(): + """Input is mutable when using the threading backend""" + q = Queue(maxsize=5) + Parallel(n_jobs=2, backend="threading")( + delayed(q.put)(1) for _ in range(5)) + assert q.full() + + +@parametrize('n_jobs', [1, 2, 3]) +def test_parallel_kwargs(n_jobs): + """Check the keyword argument processing of pmap.""" + lst = range(10) + assert ([f(x, y=1) for x in lst] == + Parallel(n_jobs=n_jobs)(delayed(f)(x, y=1) for x in lst)) + + +@parametrize('backend', PARALLEL_BACKENDS) +def test_parallel_as_context_manager(backend): + lst = range(10) + expected = [f(x, y=1) for x in lst] + + with Parallel(n_jobs=4, backend=backend) as p: + # Internally a pool instance has been eagerly created and is managed + # via the context manager protocol + managed_backend = p._backend + + # We make call with the managed parallel object several times inside + # the managed block: + assert expected == p(delayed(f)(x, y=1) for x in lst) + assert expected == p(delayed(f)(x, y=1) for x in lst) + + # Those calls have all used the same pool instance: + if mp is not None: + assert get_workers(managed_backend) is get_workers(p._backend) + + # As soon as we exit the context manager block, the pool is terminated and + # no longer referenced from the parallel object: + if mp is not None: + assert get_workers(p._backend) is None + + # It's still possible to use the parallel instance in non-managed mode: + assert expected == p(delayed(f)(x, y=1) for x in lst) + if mp is not None: + assert get_workers(p._backend) is None + + +@with_multiprocessing +def test_parallel_pickling(): + """ Check that pmap captures the errors when it is passed an object + that cannot be pickled. 
+ """ + class UnpicklableObject(object): + def __reduce__(self): + raise RuntimeError('123') + + with raises(PicklingError, match=r"the task to send"): + Parallel(n_jobs=2, backend='loky')(delayed(id)( + UnpicklableObject()) for _ in range(10)) + + +@parametrize('backend', PARALLEL_BACKENDS) +def test_parallel_timeout_success(backend): + # Check that timeout isn't thrown when function is fast enough + assert len(Parallel(n_jobs=2, backend=backend, timeout=30)( + delayed(sleep)(0.001) for x in range(10))) == 10 + + +@with_multiprocessing +@parametrize('backend', PARALLEL_BACKENDS) +def test_parallel_timeout_fail(backend): + # Check that timeout properly fails when function is too slow + with raises(TimeoutError): + Parallel(n_jobs=2, backend=backend, timeout=0.01)( + delayed(sleep)(10) for x in range(10)) + + +@with_multiprocessing +@parametrize('backend', PROCESS_BACKENDS) +def test_error_capture(backend): + # Check that error are captured, and that correct exceptions + # are raised. + if mp is not None: + with raises(ZeroDivisionError): + Parallel(n_jobs=2, backend=backend)( + [delayed(division)(x, y) + for x, y in zip((0, 1), (1, 0))]) + + with raises(KeyboardInterrupt): + Parallel(n_jobs=2, backend=backend)( + [delayed(interrupt_raiser)(x) for x in (1, 0)]) + + # Try again with the context manager API + with Parallel(n_jobs=2, backend=backend) as parallel: + assert get_workers(parallel._backend) is not None + original_workers = get_workers(parallel._backend) + + with raises(ZeroDivisionError): + parallel([delayed(division)(x, y) + for x, y in zip((0, 1), (1, 0))]) + + # The managed pool should still be available and be in a working + # state despite the previously raised (and caught) exception + assert get_workers(parallel._backend) is not None + + # The pool should have been interrupted and restarted: + assert get_workers(parallel._backend) is not original_workers + + assert ([f(x, y=1) for x in range(10)] == + parallel(delayed(f)(x, y=1) for x in range(10))) + + original_workers = get_workers(parallel._backend) + with raises(KeyboardInterrupt): + parallel([delayed(interrupt_raiser)(x) for x in (1, 0)]) + + # The pool should still be available despite the exception + assert get_workers(parallel._backend) is not None + + # The pool should have been interrupted and restarted: + assert get_workers(parallel._backend) is not original_workers + + assert ([f(x, y=1) for x in range(10)] == + parallel(delayed(f)(x, y=1) for x in range(10))), ( + parallel._iterating, parallel.n_completed_tasks, + parallel.n_dispatched_tasks, parallel._aborting + ) + + # Check that the inner pool has been terminated when exiting the + # context manager + assert get_workers(parallel._backend) is None + else: + with raises(KeyboardInterrupt): + Parallel(n_jobs=2)( + [delayed(interrupt_raiser)(x) for x in (1, 0)]) + + # wrapped exceptions should inherit from the class of the original + # exception to make it easy to catch them + with raises(ZeroDivisionError): + Parallel(n_jobs=2)( + [delayed(division)(x, y) for x, y in zip((0, 1), (1, 0))]) + + with raises(MyExceptionWithFinickyInit): + Parallel(n_jobs=2, verbose=0)( + (delayed(exception_raiser)(i, custom_exception=True) + for i in range(30))) + + +@with_multiprocessing +@parametrize('backend', BACKENDS) +def test_error_in_task_iterator(backend): + + def my_generator(raise_at=0): + for i in range(20): + if i == raise_at: + raise ValueError("Iterator Raising Error") + yield i + + with Parallel(n_jobs=2, backend=backend) as p: + # The error is raised in the 
pre-dispatch phase + with raises(ValueError, match="Iterator Raising Error"): + p(delayed(square)(i) for i in my_generator(raise_at=0)) + + # The error is raised when dispatching a new task after the + # pre-dispatch (likely to happen in a different thread) + with raises(ValueError, match="Iterator Raising Error"): + p(delayed(square)(i) for i in my_generator(raise_at=5)) + + # Same, but raises long after the pre-dispatch phase + with raises(ValueError, match="Iterator Raising Error"): + p(delayed(square)(i) for i in my_generator(raise_at=19)) + + +def consumer(queue, item): + queue.append('Consumed %s' % item) + + +@parametrize('backend', BACKENDS) +@parametrize('batch_size, expected_queue', + [(1, ['Produced 0', 'Consumed 0', + 'Produced 1', 'Consumed 1', + 'Produced 2', 'Consumed 2', + 'Produced 3', 'Consumed 3', + 'Produced 4', 'Consumed 4', + 'Produced 5', 'Consumed 5']), + (4, [ # First Batch + 'Produced 0', 'Produced 1', 'Produced 2', 'Produced 3', + 'Consumed 0', 'Consumed 1', 'Consumed 2', 'Consumed 3', + # Second batch + 'Produced 4', 'Produced 5', 'Consumed 4', 'Consumed 5'])]) +def test_dispatch_one_job(backend, batch_size, expected_queue): + """ Test that with only one job, Parallel does act as a iterator. + """ + queue = list() + + def producer(): + for i in range(6): + queue.append('Produced %i' % i) + yield i + + Parallel(n_jobs=1, batch_size=batch_size, backend=backend)( + delayed(consumer)(queue, x) for x in producer()) + assert queue == expected_queue + assert len(queue) == 12 + + +@with_multiprocessing +@parametrize('backend', PARALLEL_BACKENDS) +def test_dispatch_multiprocessing(backend): + """ Check that using pre_dispatch Parallel does indeed dispatch items + lazily. + """ + manager = mp.Manager() + queue = manager.list() + + def producer(): + for i in range(6): + queue.append('Produced %i' % i) + yield i + + Parallel(n_jobs=2, batch_size=1, pre_dispatch=3, backend=backend)( + delayed(consumer)(queue, 'any') for _ in producer()) + + queue_contents = list(queue) + assert queue_contents[0] == 'Produced 0' + + # Only 3 tasks are pre-dispatched out of 6. The 4th task is dispatched only + # after any of the first 3 jobs have completed. + first_consumption_index = queue_contents[:4].index('Consumed any') + assert first_consumption_index > -1 + + produced_3_index = queue_contents.index('Produced 3') # 4th task produced + assert produced_3_index > first_consumption_index + + assert len(queue) == 12 + + +def test_batching_auto_threading(): + # batching='auto' with the threading backend leaves the effective batch + # size to 1 (no batching) as it has been found to never be beneficial with + # this low-overhead backend. + + with Parallel(n_jobs=2, batch_size='auto', backend='threading') as p: + p(delayed(id)(i) for i in range(5000)) # many very fast tasks + assert p._backend.compute_batch_size() == 1 + + +@with_multiprocessing +@parametrize('backend', PROCESS_BACKENDS) +def test_batching_auto_subprocesses(backend): + with Parallel(n_jobs=2, batch_size='auto', backend=backend) as p: + p(delayed(id)(i) for i in range(5000)) # many very fast tasks + + # It should be strictly larger than 1 but as we don't want heisen + # failures on clogged CI worker environment be safe and only check that + # it's a strictly positive number. 
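# Sketch of the dispatch knobs the tests above exercise: pre_dispatch bounds
# how many tasks are pulled from the (possibly lazy) input iterator ahead of
# the workers, and batch_size groups tasks into one unit of work ('auto' lets
# joblib tune it; the threading backend keeps it at 1).
from joblib import Parallel, delayed

def produce():
    for i in range(100):
        yield i  # could be expensive to materialise eagerly

out = Parallel(n_jobs=2, backend='threading',
               pre_dispatch='2 * n_jobs', batch_size='auto')(
    delayed(bin)(i) for i in produce())
assert out[3] == '0b11'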
+ assert p._backend.compute_batch_size() > 0 + + +def test_exception_dispatch(): + """Make sure that exception raised during dispatch are indeed captured""" + with raises(ValueError): + Parallel(n_jobs=2, pre_dispatch=16, verbose=0)( + delayed(exception_raiser)(i) for i in range(30)) + + +def nested_function_inner(i): + Parallel(n_jobs=2)( + delayed(exception_raiser)(j) for j in range(30)) + + +def nested_function_outer(i): + Parallel(n_jobs=2)( + delayed(nested_function_inner)(j) for j in range(30)) + + +@with_multiprocessing +@parametrize('backend', PARALLEL_BACKENDS) +@pytest.mark.xfail(reason="https://github.com/joblib/loky/pull/255") +def test_nested_exception_dispatch(backend): + """Ensure errors for nested joblib cases gets propagated + + We rely on the Python 3 built-in __cause__ system that already + report this kind of information to the user. + """ + with raises(ValueError) as excinfo: + Parallel(n_jobs=2, backend=backend)( + delayed(nested_function_outer)(i) for i in range(30)) + + # Check that important information such as function names are visible + # in the final error message reported to the user + report_lines = format_exception(excinfo.type, excinfo.value, excinfo.tb) + report = "".join(report_lines) + assert 'nested_function_outer' in report + assert 'nested_function_inner' in report + assert 'exception_raiser' in report + + assert type(excinfo.value) is ValueError + + +class FakeParallelBackend(SequentialBackend): + """Pretends to run concurrently while running sequentially.""" + + def configure(self, n_jobs=1, parallel=None, **backend_args): + self.n_jobs = self.effective_n_jobs(n_jobs) + self.parallel = parallel + return n_jobs + + def effective_n_jobs(self, n_jobs=1): + if n_jobs < 0: + n_jobs = max(mp.cpu_count() + 1 + n_jobs, 1) + return n_jobs + + +def test_invalid_backend(): + with raises(ValueError, match="Invalid backend:"): + Parallel(backend='unit-testing') + + with raises(ValueError, match="Invalid backend:"): + with parallel_config(backend='unit-testing'): + pass + + with raises(ValueError, match="Invalid backend:"): + with parallel_config(backend='unit-testing'): + pass + + +@parametrize('backend', ALL_VALID_BACKENDS) +def test_invalid_njobs(backend): + with raises(ValueError) as excinfo: + Parallel(n_jobs=0, backend=backend)._initialize_backend() + assert "n_jobs == 0 in Parallel has no meaning" in str(excinfo.value) + + with raises(ValueError) as excinfo: + Parallel(n_jobs=0.5, backend=backend)._initialize_backend() + assert "n_jobs == 0 in Parallel has no meaning" in str(excinfo.value) + + with raises(ValueError) as excinfo: + Parallel(n_jobs="2.3", backend=backend)._initialize_backend() + assert "n_jobs could not be converted to int" in str(excinfo.value) + + with raises(ValueError) as excinfo: + Parallel(n_jobs="invalid_str", backend=backend)._initialize_backend() + assert "n_jobs could not be converted to int" in str(excinfo.value) + + +@with_multiprocessing +@parametrize('backend', PARALLEL_BACKENDS) +@parametrize('n_jobs', ['2', 2.3, 2]) +def test_njobs_converted_to_int(backend, n_jobs): + p = Parallel(n_jobs=n_jobs, backend=backend) + assert p._effective_n_jobs() == 2 + + res = p(delayed(square)(i) for i in range(10)) + assert all(r == square(i) for i, r in enumerate(res)) + + +def test_register_parallel_backend(): + try: + register_parallel_backend("test_backend", FakeParallelBackend) + assert "test_backend" in BACKENDS + assert BACKENDS["test_backend"] == FakeParallelBackend + finally: + del BACKENDS["test_backend"] + + +def 
test_overwrite_default_backend(): + assert _active_backend_type() == DefaultBackend + try: + register_parallel_backend("threading", BACKENDS["threading"], + make_default=True) + assert _active_backend_type() == ThreadingBackend + finally: + # Restore the global default manually + parallel.DEFAULT_BACKEND = DEFAULT_BACKEND + assert _active_backend_type() == DefaultBackend + + +@skipif(mp is not None, reason="Only without multiprocessing") +def test_backend_no_multiprocessing(): + with warns(UserWarning, + match="joblib backend '.*' is not available on.*"): + Parallel(backend='loky')(delayed(square)(i) for i in range(3)) + + # The below should now work without problems + with parallel_config(backend='loky'): + Parallel()(delayed(square)(i) for i in range(3)) + + +def check_backend_context_manager(context, backend_name): + with context(backend_name, n_jobs=3): + active_backend, active_n_jobs = parallel.get_active_backend() + assert active_n_jobs == 3 + assert effective_n_jobs(3) == 3 + p = Parallel() + assert p.n_jobs == 3 + if backend_name == 'multiprocessing': + assert type(active_backend) is MultiprocessingBackend + assert type(p._backend) is MultiprocessingBackend + elif backend_name == 'loky': + assert type(active_backend) is LokyBackend + assert type(p._backend) is LokyBackend + elif backend_name == 'threading': + assert type(active_backend) is ThreadingBackend + assert type(p._backend) is ThreadingBackend + elif backend_name.startswith('test_'): + assert type(active_backend) is FakeParallelBackend + assert type(p._backend) is FakeParallelBackend + + +all_backends_for_context_manager = PARALLEL_BACKENDS[:] +all_backends_for_context_manager.extend( + ['test_backend_%d' % i for i in range(3)] +) + + +@with_multiprocessing +@parametrize('backend', all_backends_for_context_manager) +@parametrize('context', [parallel_backend, parallel_config]) +def test_backend_context_manager(monkeypatch, backend, context): + if backend not in BACKENDS: + monkeypatch.setitem(BACKENDS, backend, FakeParallelBackend) + + assert _active_backend_type() == DefaultBackend + # check that it is possible to switch parallel backends sequentially + check_backend_context_manager(context, backend) + + # The default backend is restored + assert _active_backend_type() == DefaultBackend + + # Check that context manager switching is thread safe: + Parallel(n_jobs=2, backend='threading')( + delayed(check_backend_context_manager)(context, b) + for b in all_backends_for_context_manager if not b) + + # The default backend is again restored + assert _active_backend_type() == DefaultBackend + + +class ParameterizedParallelBackend(SequentialBackend): + """Pretends to run concurrently while running sequentially.""" + + def __init__(self, param=None): + if param is None: + raise ValueError('param should not be None') + self.param = param + + +@parametrize("context", [parallel_config, parallel_backend]) +def test_parameterized_backend_context_manager(monkeypatch, context): + monkeypatch.setitem(BACKENDS, 'param_backend', + ParameterizedParallelBackend) + assert _active_backend_type() == DefaultBackend + + with context('param_backend', param=42, n_jobs=3): + active_backend, active_n_jobs = parallel.get_active_backend() + assert type(active_backend) is ParameterizedParallelBackend + assert active_backend.param == 42 + assert active_n_jobs == 3 + p = Parallel() + assert p.n_jobs == 3 + assert p._backend is active_backend + results = p(delayed(sqrt)(i) for i in range(5)) + assert results == [sqrt(i) for i in range(5)] + + # The default
backend is again restored + assert _active_backend_type() == DefaultBackend + + +@parametrize("context", [parallel_config, parallel_backend]) +def test_directly_parameterized_backend_context_manager(context): + assert _active_backend_type() == DefaultBackend + + # Check that it's possible to pass a backend instance directly, + # without registration + with context(ParameterizedParallelBackend(param=43), n_jobs=5): + active_backend, active_n_jobs = parallel.get_active_backend() + assert type(active_backend) is ParameterizedParallelBackend + assert active_backend.param == 43 + assert active_n_jobs == 5 + p = Parallel() + assert p.n_jobs == 5 + assert p._backend is active_backend + results = p(delayed(sqrt)(i) for i in range(5)) + assert results == [sqrt(i) for i in range(5)] + + # The default backend is again restored + assert _active_backend_type() == DefaultBackend + + +def sleep_and_return_pid(): + sleep(.1) + return os.getpid() + + +def get_nested_pids(): + assert _active_backend_type() == ThreadingBackend + # Assert that the nested backend does not change the default number of + # jobs used in Parallel + assert Parallel()._effective_n_jobs() == 1 + + # Assert that the tasks are running only on one process + return Parallel(n_jobs=2)(delayed(sleep_and_return_pid)() + for _ in range(2)) + + +class MyBackend(joblib._parallel_backends.LokyBackend): + """Backend to test backward compatibility with older backends""" + def get_nested_backend(self, ): + # Older backends only return a backend, without n_jobs indications. + return super(MyBackend, self).get_nested_backend()[0] + + +register_parallel_backend('back_compat_backend', MyBackend) + + +@with_multiprocessing +@parametrize('backend', ['threading', 'loky', 'multiprocessing', + 'back_compat_backend']) +@parametrize("context", [parallel_config, parallel_backend]) +def test_nested_backend_context_manager(context, backend): + # Check that by default, nested parallel calls will always use the + # ThreadingBackend + + with context(backend): + pid_groups = Parallel(n_jobs=2)( + delayed(get_nested_pids)() + for _ in range(10) + ) + for pid_group in pid_groups: + assert len(set(pid_group)) == 1 + + +@with_multiprocessing +@parametrize('n_jobs', [2, -1, None]) +@parametrize('backend', PARALLEL_BACKENDS) +@parametrize("context", [parallel_config, parallel_backend]) +def test_nested_backend_in_sequential(backend, n_jobs, context): + # Check that by default, nested parallel calls will always use the + # ThreadingBackend + + def check_nested_backend(expected_backend_type, expected_n_job): + # Assert that the sequential backend at top level, does not change the + # backend for nested calls. 
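# Sketch of the backend-selection context managers tested above: a bare
# Parallel() inside the block inherits the backend and n_jobs from the
# enclosing parallel_config / parallel_backend context, which is how library
# code can stay agnostic about the caller's choice of backend.
from math import sqrt
from joblib import Parallel, delayed
from joblib.parallel import parallel_config

def library_helper(values):
    # deliberately does not hard-code a backend or n_jobs
    return Parallel()(delayed(sqrt)(v) for v in values)

with parallel_config(backend='threading', n_jobs=2):
    assert library_helper([0, 1, 4]) == [0.0, 1.0, 2.0]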
+ assert _active_backend_type() == BACKENDS[expected_backend_type] + + # Assert that the nested backend in SequentialBackend does not change + # the default number of jobs used in Parallel + expected_n_job = effective_n_jobs(expected_n_job) + assert Parallel()._effective_n_jobs() == expected_n_job + + Parallel(n_jobs=1)( + delayed(check_nested_backend)(DEFAULT_BACKEND, 1) + for _ in range(10) + ) + + with context(backend, n_jobs=n_jobs): + Parallel(n_jobs=1)( + delayed(check_nested_backend)(backend, n_jobs) + for _ in range(10) + ) + + +def check_nesting_level(context, inner_backend, expected_level): + with context(inner_backend) as ctx: + if context is parallel_config: + backend = ctx["backend"] + if context is parallel_backend: + backend = ctx[0] + assert backend.nesting_level == expected_level + + +@with_multiprocessing +@parametrize('outer_backend', PARALLEL_BACKENDS) +@parametrize('inner_backend', PARALLEL_BACKENDS) +@parametrize("context", [parallel_config, parallel_backend]) +def test_backend_nesting_level(context, outer_backend, inner_backend): + # Check that the nesting level for the backend is correctly set + check_nesting_level(context, outer_backend, 0) + + Parallel(n_jobs=2, backend=outer_backend)( + delayed(check_nesting_level)(context, inner_backend, 1) + for _ in range(10) + ) + + with context(inner_backend, n_jobs=2): + Parallel()(delayed(check_nesting_level)(context, inner_backend, 1) + for _ in range(10)) + + +@with_multiprocessing +@parametrize("context", [parallel_config, parallel_backend]) +@parametrize('with_retrieve_callback', [True, False]) +def test_retrieval_context(context, with_retrieve_callback): + import contextlib + + class MyBackend(ThreadingBackend): + i = 0 + supports_retrieve_callback = with_retrieve_callback + + @contextlib.contextmanager + def retrieval_context(self): + self.i += 1 + yield + + register_parallel_backend("retrieval", MyBackend) + + def nested_call(n): + return Parallel(n_jobs=2)(delayed(id)(i) for i in range(n)) + + with context("retrieval") as ctx: + Parallel(n_jobs=2)( + delayed(nested_call)(i) + for i in range(5) + ) + if context is parallel_config: + assert ctx["backend"].i == 1 + if context is parallel_backend: + assert ctx[0].i == 1 + + +############################################################################### +# Test helpers + +@parametrize('batch_size', [0, -1, 1.42]) +def test_invalid_batch_size(batch_size): + with raises(ValueError): + Parallel(batch_size=batch_size) + + +@parametrize('n_tasks, n_jobs, pre_dispatch, batch_size', + [(2, 2, 'all', 'auto'), + (2, 2, 'n_jobs', 'auto'), + (10, 2, 'n_jobs', 'auto'), + (517, 2, 'n_jobs', 'auto'), + (10, 2, 'n_jobs', 'auto'), + (10, 4, 'n_jobs', 'auto'), + (200, 12, 'n_jobs', 'auto'), + (25, 12, '2 * n_jobs', 1), + (250, 12, 'all', 1), + (250, 12, '2 * n_jobs', 7), + (200, 12, '2 * n_jobs', 'auto')]) +def test_dispatch_race_condition(n_tasks, n_jobs, pre_dispatch, batch_size): + # Check that using (async-)dispatch does not yield a race condition on the + # iterable generator that is not thread-safe natively. 
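# Hedged sketch of the custom-backend mechanism several tests above rely on
# (FakeParallelBackend, the retrieval backend): subclass an existing backend,
# register it under a new name, then select it by that name.  The
# 'counting_threads' name and the batch counter are invented for illustration.
from joblib import Parallel, delayed
from joblib.parallel import register_parallel_backend, parallel_config
from joblib._parallel_backends import ThreadingBackend

class CountingThreadingBackend(ThreadingBackend):
    """ThreadingBackend that counts the batches submitted to it."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.n_batches = 0

    def apply_async(self, func, callback=None):
        self.n_batches += 1
        return super().apply_async(func, callback=callback)

register_parallel_backend('counting_threads', CountingThreadingBackend)

with parallel_config('counting_threads', n_jobs=2):
    assert Parallel()(delayed(len)('abc') for _ in range(8)) == [3] * 8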
+ # This is a non-regression test for the "Pool seems closed" class of error + params = {'n_jobs': n_jobs, 'pre_dispatch': pre_dispatch, + 'batch_size': batch_size} + expected = [square(i) for i in range(n_tasks)] + results = Parallel(**params)(delayed(square)(i) for i in range(n_tasks)) + assert results == expected + + +@with_multiprocessing +def test_default_mp_context(): + mp_start_method = mp.get_start_method() + p = Parallel(n_jobs=2, backend='multiprocessing') + context = p._backend_args.get('context') + start_method = context.get_start_method() + assert start_method == mp_start_method + + +@with_numpy +@with_multiprocessing +@parametrize('backend', PROCESS_BACKENDS) +def test_no_blas_crash_or_freeze_with_subprocesses(backend): + if backend == 'multiprocessing': + # Use the spawn backend that is both robust and available on all + # platforms + backend = mp.get_context('spawn') + + # Check that on recent Python version, the 'spawn' start method can make + # it possible to use multiprocessing in conjunction of any BLAS + # implementation that happens to be used by numpy with causing a freeze or + # a crash + rng = np.random.RandomState(42) + + # call BLAS DGEMM to force the initialization of the internal thread-pool + # in the main process + a = rng.randn(1000, 1000) + np.dot(a, a.T) + + # check that the internal BLAS thread-pool is not in an inconsistent state + # in the worker processes managed by multiprocessing + Parallel(n_jobs=2, backend=backend)( + delayed(np.dot)(a, a.T) for i in range(2)) + + +UNPICKLABLE_CALLABLE_SCRIPT_TEMPLATE_NO_MAIN = """\ +from joblib import Parallel, delayed + +def square(x): + return x ** 2 + +backend = "{}" +if backend == "spawn": + from multiprocessing import get_context + backend = get_context(backend) + +print(Parallel(n_jobs=2, backend=backend)( + delayed(square)(i) for i in range(5))) +""" + + +@with_multiprocessing +@parametrize('backend', PROCESS_BACKENDS) +def test_parallel_with_interactively_defined_functions(backend): + # When using the "-c" flag, interactive functions defined in __main__ + # should work with any backend. + if backend == "multiprocessing" and mp.get_start_method() != "fork": + pytest.skip("Require fork start method to use interactively defined " + "functions with multiprocessing.") + code = UNPICKLABLE_CALLABLE_SCRIPT_TEMPLATE_NO_MAIN.format(backend) + check_subprocess_call( + [sys.executable, '-c', code], timeout=10, + stdout_regex=r'\[0, 1, 4, 9, 16\]') + + +UNPICKLABLE_CALLABLE_SCRIPT_TEMPLATE_MAIN = """\ +import sys +# Make sure that joblib is importable in the subprocess launching this +# script. 
This is needed in case we run the tests from the joblib root +# folder without having installed joblib +sys.path.insert(0, {joblib_root_folder!r}) + +from joblib import Parallel, delayed + +def run(f, x): + return f(x) + +{define_func} + +if __name__ == "__main__": + backend = "{backend}" + if backend == "spawn": + from multiprocessing import get_context + backend = get_context(backend) + + callable_position = "{callable_position}" + if callable_position == "delayed": + print(Parallel(n_jobs=2, backend=backend)( + delayed(square)(i) for i in range(5))) + elif callable_position == "args": + print(Parallel(n_jobs=2, backend=backend)( + delayed(run)(square, i) for i in range(5))) + else: + print(Parallel(n_jobs=2, backend=backend)( + delayed(run)(f=square, x=i) for i in range(5))) +""" + +SQUARE_MAIN = """\ +def square(x): + return x ** 2 +""" +SQUARE_LOCAL = """\ +def gen_square(): + def square(x): + return x ** 2 + return square +square = gen_square() +""" +SQUARE_LAMBDA = """\ +square = lambda x: x ** 2 +""" + + +@with_multiprocessing +@parametrize('backend', PROCESS_BACKENDS + ([] if mp is None else ['spawn'])) +@parametrize('define_func', [SQUARE_MAIN, SQUARE_LOCAL, SQUARE_LAMBDA]) +@parametrize('callable_position', ['delayed', 'args', 'kwargs']) +def test_parallel_with_unpicklable_functions_in_args( + backend, define_func, callable_position, tmpdir): + if backend in ['multiprocessing', 'spawn'] and ( + define_func != SQUARE_MAIN or sys.platform == "win32"): + pytest.skip("Not picklable with pickle") + code = UNPICKLABLE_CALLABLE_SCRIPT_TEMPLATE_MAIN.format( + define_func=define_func, backend=backend, + callable_position=callable_position, + joblib_root_folder=os.path.dirname(os.path.dirname(joblib.__file__))) + code_file = tmpdir.join("unpicklable_func_script.py") + code_file.write(code) + check_subprocess_call( + [sys.executable, code_file.strpath], timeout=10, + stdout_regex=r'\[0, 1, 4, 9, 16\]') + + +INTERACTIVE_DEFINED_FUNCTION_AND_CLASS_SCRIPT_CONTENT = """\ +import sys +import faulthandler +# Make sure that joblib is importable in the subprocess launching this +# script. This is needed in case we run the tests from the joblib root +# folder without having installed joblib +sys.path.insert(0, {joblib_root_folder!r}) + +from joblib import Parallel, delayed +from functools import partial + +class MyClass: + '''Class defined in the __main__ namespace''' + def __init__(self, value): + self.value = value + + +def square(x, ignored=None, ignored2=None): + '''Function defined in the __main__ namespace''' + return x.value ** 2 + + +square2 = partial(square, ignored2='something') + +# Here, we do not need the `if __name__ == "__main__":` safeguard when +# using the default `loky` backend (even on Windows). + +# To make debugging easier +faulthandler.dump_traceback_later(30, exit=True) + +# The following baroque function call is meant to check that joblib +# introspection rightfully uses cloudpickle instead of the (faster) pickle +# module of the standard library when necessary. In particular cloudpickle is +# necessary for functions and instances of classes interactively defined in the +# __main__ module. 
+ +print(Parallel(backend="loky", n_jobs=2)( + delayed(square2)(MyClass(i), ignored=[dict(a=MyClass(1))]) + for i in range(5) +)) +""".format(joblib_root_folder=os.path.dirname( + os.path.dirname(joblib.__file__))) + + +@with_multiprocessing +def test_parallel_with_interactively_defined_functions_loky(tmpdir): + # loky accepts interactive functions defined in __main__ and does not + # require if __name__ == '__main__' even when the __main__ module is + # defined by the result of the execution of a filesystem script. + script = tmpdir.join('joblib_interactively_defined_function.py') + script.write(INTERACTIVE_DEFINED_FUNCTION_AND_CLASS_SCRIPT_CONTENT) + check_subprocess_call( + [sys.executable, script.strpath], + stdout_regex=r'\[0, 1, 4, 9, 16\]', + timeout=None, # rely on faulthandler to kill the process + ) + + +INTERACTIVELY_DEFINED_SUBCLASS_WITH_METHOD_SCRIPT_CONTENT = """\ +import sys +# Make sure that joblib is importable in the subprocess launching this +# script. This is needed in case we run the tests from the joblib root +# folder without having installed joblib +sys.path.insert(0, {joblib_root_folder!r}) + +from joblib import Parallel, delayed, hash +import multiprocessing as mp +mp.util.log_to_stderr(5) + +class MyList(list): + '''MyList is interactively defined by MyList.append is a built-in''' + def __hash__(self): + # XXX: workaround limitation in cloudpickle + return hash(self).__hash__() + +l = MyList() + +print(Parallel(backend="loky", n_jobs=2)( + delayed(l.append)(i) for i in range(3) +)) +""".format(joblib_root_folder=os.path.dirname( + os.path.dirname(joblib.__file__))) + + +@with_multiprocessing +def test_parallel_with_interactively_defined_bound_method_loky(tmpdir): + script = tmpdir.join('joblib_interactive_bound_method_script.py') + script.write(INTERACTIVELY_DEFINED_SUBCLASS_WITH_METHOD_SCRIPT_CONTENT) + check_subprocess_call([sys.executable, script.strpath], + stdout_regex=r'\[None, None, None\]', + stderr_regex=r'LokyProcess', + timeout=15) + + +def test_parallel_with_exhausted_iterator(): + exhausted_iterator = iter([]) + assert Parallel(n_jobs=2)(exhausted_iterator) == [] + + +def _cleanup_worker(): + """Helper function to force gc in each worker.""" + force_gc_pypy() + time.sleep(.1) + + +def check_memmap(a): + if not isinstance(a, np.memmap): + raise TypeError('Expected np.memmap instance, got %r', + type(a)) + return a.copy() # return a regular array instead of a memmap + + +@with_numpy +@with_multiprocessing +@parametrize('backend', PROCESS_BACKENDS) +def test_auto_memmap_on_arrays_from_generator(backend): + # Non-regression test for a problem with a bad interaction between the + # GC collecting arrays recently created during iteration inside the + # parallel dispatch loop and the auto-memmap feature of Parallel. + # See: https://github.com/joblib/joblib/pull/294 + def generate_arrays(n): + for i in range(n): + yield np.ones(10, dtype=np.float32) * i + # Use max_nbytes=1 to force the use of memory-mapping even for small + # arrays + results = Parallel(n_jobs=2, max_nbytes=1, backend=backend)( + delayed(check_memmap)(a) for a in generate_arrays(100)) + for result, expected in zip(results, generate_arrays(len(results))): + np.testing.assert_array_equal(expected, result) + + # Second call to force loky to adapt the executor by growing the number + # of worker processes. This is a non-regression test for: + # https://github.com/joblib/joblib/issues/629. 
+ results = Parallel(n_jobs=4, max_nbytes=1, backend=backend)( + delayed(check_memmap)(a) for a in generate_arrays(100)) + for result, expected in zip(results, generate_arrays(len(results))): + np.testing.assert_array_equal(expected, result) + + +def identity(arg): + return arg + + +@with_numpy +@with_multiprocessing +def test_memmap_with_big_offset(tmpdir): + fname = tmpdir.join('test.mmap').strpath + size = mmap.ALLOCATIONGRANULARITY + obj = [np.zeros(size, dtype='uint8'), np.ones(size, dtype='uint8')] + dump(obj, fname) + memmap = load(fname, mmap_mode='r') + result, = Parallel(n_jobs=2)(delayed(identity)(memmap) for _ in [0]) + assert isinstance(memmap[1], np.memmap) + assert memmap[1].offset > size + np.testing.assert_array_equal(obj, result) + + +def test_warning_about_timeout_not_supported_by_backend(): + with warnings.catch_warnings(record=True) as warninfo: + Parallel(n_jobs=1, timeout=1)(delayed(square)(i) for i in range(50)) + assert len(warninfo) == 1 + w = warninfo[0] + assert isinstance(w.message, UserWarning) + assert str(w.message) == ( + "The backend class 'SequentialBackend' does not support timeout. " + "You have set 'timeout=1' in Parallel but the 'timeout' parameter " + "will not be used.") + + +def set_list_value(input_list, index, value): + input_list[index] = value + return value + + +@pytest.mark.parametrize('n_jobs', [1, 2, 4]) +def test_parallel_return_order_with_return_as_generator_parameter(n_jobs): + # This test inserts values in a list in some expected order + # in sequential computing, and then checks that this order has been + # respected by Parallel output generator. + input_list = [0] * 5 + result = Parallel(n_jobs=n_jobs, return_as="generator", + backend='threading')( + delayed(set_list_value)(input_list, i, i) for i in range(5)) + + # Ensure that all the tasks are completed before checking the result + result = list(result) + + assert all(v == r for v, r in zip(input_list, result)) + + +def _sqrt_with_delay(e, delay): + if delay: + sleep(30) + return sqrt(e) + + +def _test_parallel_unordered_generator_returns_fastest_first(backend, n_jobs): + # This test submits 10 tasks, but the second task is super slow. This test + # checks that the 9 other tasks return before the slow task is done, when + # `return_as` parameter is set to `'generator_unordered'` + result = Parallel(n_jobs=n_jobs, return_as="generator_unordered", + backend=backend)( + delayed(_sqrt_with_delay)(i**2, (i == 1)) for i in range(10)) + + quickly_returned = sorted(next(result) for _ in range(9)) + + expected_quickly_returned = [0] + list(range(2, 10)) + + assert all( + v == r for v, r in zip(expected_quickly_returned, quickly_returned) + ) + + del result + force_gc_pypy() + + +@pytest.mark.parametrize('n_jobs', [2, 4]) +# NB: for this test to work, the backend must be allowed to process tasks +# concurrently, so at least two jobs with a non-sequential backend are +# mandatory. 
+@with_multiprocessing +@parametrize('backend', set(RETURN_GENERATOR_BACKENDS) - {"sequential"}) +def test_parallel_unordered_generator_returns_fastest_first(backend, n_jobs): + _test_parallel_unordered_generator_returns_fastest_first(backend, n_jobs) + + +@pytest.mark.parametrize('n_jobs', [2, -1]) +@parametrize("context", [parallel_config, parallel_backend]) +@skipif(distributed is None, reason='This test requires dask') +def test_parallel_unordered_generator_returns_fastest_first_with_dask( + n_jobs, context +): + with distributed.Client( + n_workers=2, threads_per_worker=2 + ), context("dask"): + _test_parallel_unordered_generator_returns_fastest_first(None, n_jobs) + + +@parametrize('backend', ALL_VALID_BACKENDS) +@parametrize('n_jobs', [1, 2, -2, -1]) +def test_abort_backend(n_jobs, backend): + delays = ["a"] + [10] * 100 + with raises(TypeError): + t_start = time.time() + Parallel(n_jobs=n_jobs, backend=backend)( + delayed(time.sleep)(i) for i in delays) + dt = time.time() - t_start + assert dt < 20 + + +def get_large_object(arg): + result = np.ones(int(5 * 1e5), dtype=bool) + result[0] = False + return result + + +def _test_deadlock_with_generator(backend, return_as, n_jobs): + # Non-regression test for a race condition in the backends when the pickler + # is delayed by a large object. + with Parallel(n_jobs=n_jobs, backend=backend, + return_as=return_as) as parallel: + result = parallel(delayed(get_large_object)(i) for i in range(10)) + next(result) + next(result) + del result + # The gc in pypy can be delayed. Force it to make sure this test does + # not cause timeout on the CI. + force_gc_pypy() + + +@with_numpy +@parametrize('backend', RETURN_GENERATOR_BACKENDS) +@parametrize('return_as', ["generator", "generator_unordered"]) +@parametrize('n_jobs', [1, 2, -2, -1]) +def test_deadlock_with_generator(backend, return_as, n_jobs): + _test_deadlock_with_generator(backend, return_as, n_jobs) + + +@with_numpy +@pytest.mark.parametrize('n_jobs', [2, -1]) +@parametrize('return_as', ["generator", "generator_unordered"]) +@parametrize("context", [parallel_config, parallel_backend]) +@skipif(distributed is None, reason='This test requires dask') +def test_deadlock_with_generator_and_dask(context, return_as, n_jobs): + with distributed.Client( + n_workers=2, threads_per_worker=2 + ), context("dask"): + _test_deadlock_with_generator(None, return_as, n_jobs) + + +@parametrize('backend', RETURN_GENERATOR_BACKENDS) +@parametrize('return_as', ["generator", "generator_unordered"]) +@parametrize('n_jobs', [1, 2, -2, -1]) +def test_multiple_generator_call(backend, return_as, n_jobs): + # Non-regression test that ensures the dispatch of the tasks starts + # immediately when Parallel.__call__ is called. This test relies on the + # assumption that only one generator can be submitted at a time. + with raises(RuntimeError, + match="This Parallel instance is already running"): + parallel = Parallel(n_jobs, backend=backend, return_as=return_as) + g = parallel(delayed(sleep)(1) for _ in range(10)) # noqa: F841 + t_start = time.time() + gen2 = parallel(delayed(id)(i) for i in range(100)) # noqa: F841 + + # Make sure that the error is raised quickly + assert time.time() - t_start < 2, ( + "The error should be raised immediatly when submitting a new task " + "but it took more than 2s." + ) + + del g + # The gc in pypy can be delayed. Force it to make sure this test does not + # cause timeout on the CI. 
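# --- Editor's annotation (illustrative sketch, not part of the patch) -------
# The generator tests above rely on `return_as="generator"`, which makes
# Parallel yield results as they complete rather than building a full list,
# and on the rule that a single Parallel instance runs one such call at a
# time.  A minimal example, assuming a joblib version recent enough to
# support the `return_as` parameter:
from math import sqrt

from joblib import Parallel, delayed

if __name__ == "__main__":
    with Parallel(n_jobs=2, return_as="generator") as parallel:
        results = parallel(delayed(sqrt)(i ** 2) for i in range(5))
        print(next(results))  # 0.0 -- tasks were dispatched eagerly
        print(list(results))  # drain the rest; submitting a second call on
        # the same instance before draining raises RuntimeError, as the
        # tests above check
# ---------------------------------------------------------------------------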
+ force_gc_pypy() + + +@parametrize('backend', RETURN_GENERATOR_BACKENDS) +@parametrize('return_as', ["generator", "generator_unordered"]) +@parametrize('n_jobs', [1, 2, -2, -1]) +def test_multiple_generator_call_managed(backend, return_as, n_jobs): + # Non-regression test that ensures the dispatch of the tasks starts + # immediately when Parallel.__call__ is called. This test relies on the + # assumption that only one generator can be submitted at a time. + with Parallel(n_jobs, backend=backend, + return_as=return_as) as parallel: + g = parallel(delayed(sleep)(10) for _ in range(10)) # noqa: F841 + t_start = time.time() + with raises(RuntimeError, + match="This Parallel instance is already running"): + g2 = parallel(delayed(id)(i) for i in range(100)) # noqa: F841 + + # Make sure that the error is raised quickly + assert time.time() - t_start < 2, ( + "The error should be raised immediatly when submitting a new task " + "but it took more than 2s." + ) + + # The gc in pypy can be delayed. Force it to make sure this test does not + # cause timeout on the CI. + del g + force_gc_pypy() + + +@parametrize('backend', RETURN_GENERATOR_BACKENDS) +@parametrize('return_as_1', ["generator", "generator_unordered"]) +@parametrize('return_as_2', ["generator", "generator_unordered"]) +@parametrize('n_jobs', [1, 2, -2, -1]) +def test_multiple_generator_call_separated( + backend, return_as_1, return_as_2, n_jobs +): + # Check that for separated Parallel, both tasks are correctly returned. + g = Parallel(n_jobs, backend=backend, return_as=return_as_1)( + delayed(sqrt)(i ** 2) for i in range(10) + ) + g2 = Parallel(n_jobs, backend=backend, return_as=return_as_2)( + delayed(sqrt)(i ** 2) for i in range(10, 20) + ) + + if return_as_1 == "generator_unordered": + g = sorted(g) + + if return_as_2 == "generator_unordered": + g2 = sorted(g2) + + assert all(res == i for res, i in zip(g, range(10))) + assert all(res == i for res, i in zip(g2, range(10, 20))) + + +@parametrize('backend, error', [ + ('loky', True), + ('threading', False), + ('sequential', False), +]) +@parametrize('return_as_1', ["generator", "generator_unordered"]) +@parametrize('return_as_2', ["generator", "generator_unordered"]) +def test_multiple_generator_call_separated_gc( + backend, return_as_1, return_as_2, error +): + + if (backend == 'loky') and (mp is None): + pytest.skip("Requires multiprocessing") + + # Check that in loky, only one call can be run at a time with + # a single executor. + parallel = Parallel(2, backend=backend, return_as=return_as_1) + g = parallel(delayed(sleep)(10) for i in range(10)) + g_wr = weakref.finalize(g, lambda: print("Generator collected")) + ctx = ( + raises(RuntimeError, match="The executor underlying Parallel") + if error else nullcontext() + ) + with ctx: + # For loky, this call will raise an error as the gc of the previous + # generator will shutdown the shared executor. + # For the other backends, as the worker pools are not shared between + # the two calls, this should proceed correctly. + t_start = time.time() + g = Parallel(2, backend=backend, return_as=return_as_2)( + delayed(sqrt)(i ** 2) for i in range(10, 20) + ) + + # The gc in pypy can be delayed. Force it to test the behavior when it + # will eventually be collected. 
+ force_gc_pypy() + + if return_as_2 == "generator_unordered": + g = sorted(g) + + assert all(res == i for res, i in zip(g, range(10, 20))) + + assert time.time() - t_start < 5 + + # Make sure that the computation are stopped for the gc'ed generator + retry = 0 + while g_wr.alive and retry < 3: + retry += 1 + time.sleep(.5) + assert time.time() - t_start < 5 + + if parallel._effective_n_jobs() != 1: + # check that the first parallel object is aborting (the final _aborted + # state might be delayed). + assert parallel._aborting + + +@with_numpy +@with_multiprocessing +@parametrize('backend', PROCESS_BACKENDS) +def test_memmapping_leaks(backend, tmpdir): + # Non-regression test for memmapping backends. Ensure that the data + # does not stay too long in memory + tmpdir = tmpdir.strpath + + # Use max_nbytes=1 to force the use of memory-mapping even for small + # arrays + with Parallel(n_jobs=2, max_nbytes=1, backend=backend, + temp_folder=tmpdir) as p: + p(delayed(check_memmap)(a) for a in [np.random.random(10)] * 2) + + # The memmap folder should not be clean in the context scope + assert len(os.listdir(tmpdir)) > 0 + + # Cleaning of the memmap folder is triggered by the garbage + # collection. With pypy the garbage collection has been observed to be + # delayed, sometimes up until the shutdown of the interpreter. This + # cleanup job executed in the worker ensures that it's triggered + # immediately. + p(delayed(_cleanup_worker)() for _ in range(2)) + + # Make sure that the shared memory is cleaned at the end when we exit + # the context + for _ in range(100): + if not os.listdir(tmpdir): + break + sleep(.1) + else: + raise AssertionError('temporary directory of Parallel was not removed') + + # Make sure that the shared memory is cleaned at the end of a call + p = Parallel(n_jobs=2, max_nbytes=1, backend=backend) + p(delayed(check_memmap)(a) for a in [np.random.random(10)] * 2) + p(delayed(_cleanup_worker)() for _ in range(2)) + + for _ in range(100): + if not os.listdir(tmpdir): + break + sleep(.1) + else: + raise AssertionError('temporary directory of Parallel was not removed') + + +@parametrize('backend', + ([None, 'threading'] if mp is None + else [None, 'loky', 'threading']) + ) +def test_lambda_expression(backend): + # cloudpickle is used to pickle delayed callables + results = Parallel(n_jobs=2, backend=backend)( + delayed(lambda x: x ** 2)(i) for i in range(10)) + assert results == [i ** 2 for i in range(10)] + + +@with_multiprocessing +@parametrize('backend', PROCESS_BACKENDS) +def test_backend_batch_statistics_reset(backend): + """Test that a parallel backend correctly resets its batch statistics.""" + n_jobs = 2 + n_inputs = 500 + task_time = 2. 
/ n_inputs + + p = Parallel(verbose=10, n_jobs=n_jobs, backend=backend) + p(delayed(time.sleep)(task_time) for i in range(n_inputs)) + assert (p._backend._effective_batch_size == + p._backend._DEFAULT_EFFECTIVE_BATCH_SIZE) + assert (p._backend._smoothed_batch_duration == + p._backend._DEFAULT_SMOOTHED_BATCH_DURATION) + + p(delayed(time.sleep)(task_time) for i in range(n_inputs)) + assert (p._backend._effective_batch_size == + p._backend._DEFAULT_EFFECTIVE_BATCH_SIZE) + assert (p._backend._smoothed_batch_duration == + p._backend._DEFAULT_SMOOTHED_BATCH_DURATION) + + +@with_multiprocessing +@parametrize("context", [parallel_config, parallel_backend]) +def test_backend_hinting_and_constraints(context): + for n_jobs in [1, 2, -1]: + assert type(Parallel(n_jobs=n_jobs)._backend) == DefaultBackend + + p = Parallel(n_jobs=n_jobs, prefer='threads') + assert type(p._backend) is ThreadingBackend + + p = Parallel(n_jobs=n_jobs, prefer='processes') + assert type(p._backend) is DefaultBackend + + p = Parallel(n_jobs=n_jobs, require='sharedmem') + assert type(p._backend) is ThreadingBackend + + # Explicit backend selection can override backend hinting although it + # is useless to pass a hint when selecting a backend. + p = Parallel(n_jobs=2, backend='loky', prefer='threads') + assert type(p._backend) is LokyBackend + + with context('loky', n_jobs=2): + # Explicit backend selection by the user with the context manager + # should be respected when combined with backend hints only. + p = Parallel(prefer='threads') + assert type(p._backend) is LokyBackend + assert p.n_jobs == 2 + + with context('loky', n_jobs=2): + # Locally hard-coded n_jobs value is respected. + p = Parallel(n_jobs=3, prefer='threads') + assert type(p._backend) is LokyBackend + assert p.n_jobs == 3 + + with context('loky', n_jobs=2): + # Explicit backend selection by the user with the context manager + # should be ignored when the Parallel call has hard constraints. + # In this case, the default backend that supports shared mem is + # used an the default number of processes is used. 
+ p = Parallel(require='sharedmem') + assert type(p._backend) is ThreadingBackend + assert p.n_jobs == 1 + + with context('loky', n_jobs=2): + p = Parallel(n_jobs=3, require='sharedmem') + assert type(p._backend) is ThreadingBackend + assert p.n_jobs == 3 + + +@parametrize("context", [parallel_config, parallel_backend]) +def test_backend_hinting_and_constraints_with_custom_backends( + capsys, context +): + # Custom backends can declare that they use threads and have shared memory + # semantics: + class MyCustomThreadingBackend(ParallelBackendBase): + supports_sharedmem = True + use_threads = True + + def apply_async(self): + pass + + def effective_n_jobs(self, n_jobs): + return n_jobs + + with context(MyCustomThreadingBackend()): + p = Parallel(n_jobs=2, prefer='processes') # ignored + assert type(p._backend) is MyCustomThreadingBackend + + p = Parallel(n_jobs=2, require='sharedmem') + assert type(p._backend) is MyCustomThreadingBackend + + class MyCustomProcessingBackend(ParallelBackendBase): + supports_sharedmem = False + use_threads = False + + def apply_async(self): + pass + + def effective_n_jobs(self, n_jobs): + return n_jobs + + with context(MyCustomProcessingBackend()): + p = Parallel(n_jobs=2, prefer='processes') + assert type(p._backend) is MyCustomProcessingBackend + + out, err = capsys.readouterr() + assert out == "" + assert err == "" + + p = Parallel(n_jobs=2, require='sharedmem', verbose=10) + assert type(p._backend) is ThreadingBackend + + out, err = capsys.readouterr() + expected = ("Using ThreadingBackend as joblib backend " + "instead of MyCustomProcessingBackend as the latter " + "does not provide shared memory semantics.") + assert out.strip() == expected + assert err == "" + + with raises(ValueError): + Parallel(backend=MyCustomProcessingBackend(), require='sharedmem') + + +def test_invalid_backend_hinting_and_constraints(): + with raises(ValueError): + Parallel(prefer='invalid') + + with raises(ValueError): + Parallel(require='invalid') + + with raises(ValueError): + # It is inconsistent to prefer process-based parallelism while + # requiring shared memory semantics. + Parallel(prefer='processes', require='sharedmem') + + if mp is not None: + # It is inconsistent to ask explicitly for a process-based + # parallelism while requiring shared memory semantics. 
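# --- Editor's annotation (illustrative sketch, not part of the patch) -------
# The hinting tests above distinguish soft hints (`prefer`) from hard
# constraints (`require`) and from explicit backend selection.  A hedged
# sketch of the resulting backend choice, peeking at the private `_backend`
# attribute exactly as the tests do; assumes joblib's parallel_config
# context manager is available.
from joblib import Parallel, parallel_config

if __name__ == "__main__":
    # A soft hint alone selects a thread-based backend.
    print(type(Parallel(n_jobs=2, prefer="threads")._backend).__name__)

    with parallel_config(backend="loky", n_jobs=2):
        # An explicit backend from the surrounding context wins over the hint.
        print(type(Parallel(prefer="threads")._backend).__name__)
        # A hard shared-memory requirement still forces a thread-based
        # backend, overriding the context.
        print(type(Parallel(require="sharedmem")._backend).__name__)
# ---------------------------------------------------------------------------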
+ with raises(ValueError): + Parallel(backend='loky', require='sharedmem') + with raises(ValueError): + Parallel(backend='multiprocessing', require='sharedmem') + + +def _recursive_backend_info(limit=3, **kwargs): + """Perform nested parallel calls and introspect the backend on the way""" + + with Parallel(n_jobs=2) as p: + this_level = [(type(p._backend).__name__, p._backend.nesting_level)] + if limit == 0: + return this_level + results = p(delayed(_recursive_backend_info)(limit=limit - 1, **kwargs) + for i in range(1)) + return this_level + results[0] + + +@with_multiprocessing +@parametrize('backend', ['loky', 'threading']) +@parametrize("context", [parallel_config, parallel_backend]) +def test_nested_parallelism_limit(context, backend): + with context(backend, n_jobs=2): + backend_types_and_levels = _recursive_backend_info() + + if cpu_count() == 1: + second_level_backend_type = 'SequentialBackend' + max_level = 1 + else: + second_level_backend_type = 'ThreadingBackend' + max_level = 2 + + top_level_backend_type = backend.title() + 'Backend' + expected_types_and_levels = [ + (top_level_backend_type, 0), + (second_level_backend_type, 1), + ('SequentialBackend', max_level), + ('SequentialBackend', max_level) + ] + assert backend_types_and_levels == expected_types_and_levels + + +@with_numpy +@parametrize("context", [parallel_config, parallel_backend]) +@skipif(distributed is None, reason='This test requires dask') +def test_nested_parallelism_with_dask(context): + with distributed.Client(n_workers=2, threads_per_worker=2): + # 10 MB of data as argument to trigger implicit scattering + data = np.ones(int(1e7), dtype=np.uint8) + for i in range(2): + with context('dask'): + backend_types_and_levels = _recursive_backend_info(data=data) + assert len(backend_types_and_levels) == 4 + assert all(name == 'DaskDistributedBackend' + for name, _ in backend_types_and_levels) + + # No argument + with context('dask'): + backend_types_and_levels = _recursive_backend_info() + assert len(backend_types_and_levels) == 4 + assert all(name == 'DaskDistributedBackend' + for name, _ in backend_types_and_levels) + + +def _recursive_parallel(nesting_limit=None): + """A horrible function that does recursive parallel calls""" + return Parallel()(delayed(_recursive_parallel)() for i in range(2)) + + +@pytest.mark.no_cover +@parametrize("context", [parallel_config, parallel_backend]) +@parametrize( + 'backend', (['threading'] if mp is None else ['loky', 'threading']) +) +def test_thread_bomb_mitigation(context, backend): + # Test that recursive parallelism raises a recursion rather than + # saturating the operating system resources by creating a unbounded number + # of threads. + with context(backend, n_jobs=2): + with raises(BaseException) as excinfo: + _recursive_parallel() + exc = excinfo.value + if backend == "loky": + # Local import because loky may not be importable for lack of + # multiprocessing + from joblib.externals.loky.process_executor import TerminatedWorkerError # noqa + if isinstance(exc, (TerminatedWorkerError, PicklingError)): + # The recursion exception can itself cause an error when + # pickling it to be send back to the parent process. In this + # case the worker crashes but the original traceback is still + # printed on stderr. This could be improved but does not seem + # simple to do and this is not critical for users (as long + # as there is no process or thread bomb happening). 
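# --- Editor's annotation (illustrative sketch, not part of the patch) -------
# The nesting tests above check that joblib downgrades nested Parallel calls
# to cheaper backends instead of spawning workers recursively.  A rough
# illustration of the introspection they perform (backend type and nesting
# level seen inside a worker); exact backend names depend on the available
# CPU count, as the tests note.
from joblib import Parallel, delayed


def inspect_nested_backend():
    with Parallel(n_jobs=2) as p:
        # Inside a worker this is a thread-based or sequential backend,
        # never another pool of processes.
        return type(p._backend).__name__, p._backend.nesting_level


if __name__ == "__main__":
    with Parallel(n_jobs=2, backend="loky") as outer:
        print(outer(delayed(inspect_nested_backend)() for _ in range(2)))
# ---------------------------------------------------------------------------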
+ pytest.xfail("Loky worker crash when serializing RecursionError") + + assert isinstance(exc, RecursionError) + + +def _run_parallel_sum(): + env_vars = {} + for var in ['OMP_NUM_THREADS', 'OPENBLAS_NUM_THREADS', 'MKL_NUM_THREADS', + 'VECLIB_MAXIMUM_THREADS', 'NUMEXPR_NUM_THREADS', + 'NUMBA_NUM_THREADS', 'ENABLE_IPC']: + env_vars[var] = os.environ.get(var) + return env_vars, parallel_sum(100) + + +@parametrize("backend", ([None, 'loky'] if mp is not None else [None])) +@skipif(parallel_sum is None, reason="Need OpenMP helper compiled") +def test_parallel_thread_limit(backend): + results = Parallel(n_jobs=2, backend=backend)( + delayed(_run_parallel_sum)() for _ in range(2) + ) + expected_num_threads = max(cpu_count() // 2, 1) + for worker_env_vars, omp_num_threads in results: + assert omp_num_threads == expected_num_threads + for name, value in worker_env_vars.items(): + if name.endswith("_THREADS"): + assert value == str(expected_num_threads) + else: + assert name == "ENABLE_IPC" + assert value == "1" + + +@parametrize("context", [parallel_config, parallel_backend]) +@skipif(distributed is not None, reason='This test requires dask') +def test_dask_backend_when_dask_not_installed(context): + with raises(ValueError, match='Please install dask'): + context('dask') + + +@parametrize("context", [parallel_config, parallel_backend]) +def test_zero_worker_backend(context): + # joblib.Parallel should reject with an explicit error message parallel + # backends that have no worker. + class ZeroWorkerBackend(ThreadingBackend): + def configure(self, *args, **kwargs): + return 0 + + def apply_async(self, func, callback=None): # pragma: no cover + raise TimeoutError("No worker available") + + def effective_n_jobs(self, n_jobs): # pragma: no cover + return 0 + + expected_msg = "ZeroWorkerBackend has no active worker" + with context(ZeroWorkerBackend()): + with pytest.raises(RuntimeError, match=expected_msg): + Parallel(n_jobs=2)(delayed(id)(i) for i in range(2)) + + +def test_globals_update_at_each_parallel_call(): + # This is a non-regression test related to joblib issues #836 and #833. + # Cloudpickle versions between 0.5.4 and 0.7 introduced a bug where global + # variables changes in a parent process between two calls to + # joblib.Parallel would not be propagated into the workers. + global MY_GLOBAL_VARIABLE + MY_GLOBAL_VARIABLE = "original value" + + def check_globals(): + global MY_GLOBAL_VARIABLE + return MY_GLOBAL_VARIABLE + + assert check_globals() == "original value" + + workers_global_variable = Parallel(n_jobs=2)( + delayed(check_globals)() for i in range(2)) + assert set(workers_global_variable) == {"original value"} + + # Change the value of MY_GLOBAL_VARIABLE, and make sure this change gets + # propagated into the workers environment + MY_GLOBAL_VARIABLE = "changed value" + assert check_globals() == "changed value" + + workers_global_variable = Parallel(n_jobs=2)( + delayed(check_globals)() for i in range(2)) + assert set(workers_global_variable) == {"changed value"} + + +############################################################################## +# Test environment variable in child env, in particular for limiting +# the maximal number of threads in C-library threadpools. +# + +def _check_numpy_threadpool_limits(): + import numpy as np + # Let's call BLAS on a Matrix Matrix multiplication with dimensions large + # enough to ensure that the threadpool managed by the underlying BLAS + # implementation is actually used so as to force its initialization. 
+ a = np.random.randn(100, 100) + np.dot(a, a) + from threadpoolctl import threadpool_info + return threadpool_info() + + +def _parent_max_num_threads_for(child_module, parent_info): + for parent_module in parent_info: + if parent_module['filepath'] == child_module['filepath']: + return parent_module['num_threads'] + raise ValueError("An unexpected module was loaded in child:\n{}" + .format(child_module)) + + +def check_child_num_threads(workers_info, parent_info, num_threads): + # Check that the number of threads reported in workers_info is consistent + # with the expectation. We need to be careful to handle the cases where + # the requested number of threads is below max_num_thread for the library. + for child_threadpool_info in workers_info: + for child_module in child_threadpool_info: + parent_max_num_threads = _parent_max_num_threads_for( + child_module, parent_info) + expected = {min(num_threads, parent_max_num_threads), num_threads} + assert child_module['num_threads'] in expected + + +@with_numpy +@with_multiprocessing +@parametrize('n_jobs', [2, 4, -2, -1]) +def test_threadpool_limitation_in_child_loky(n_jobs): + # Check that the protection against oversubscription in workers is working + # using threadpoolctl functionalities. + + # Skip this test if numpy is not linked to a BLAS library + parent_info = _check_numpy_threadpool_limits() + if len(parent_info) == 0: + pytest.skip(reason="Need a version of numpy linked to BLAS") + + workers_threadpool_infos = Parallel(backend="loky", n_jobs=n_jobs)( + delayed(_check_numpy_threadpool_limits)() for i in range(2)) + + n_jobs = effective_n_jobs(n_jobs) + expected_child_num_threads = max(cpu_count() // n_jobs, 1) + + check_child_num_threads(workers_threadpool_infos, parent_info, + expected_child_num_threads) + + +@with_numpy +@with_multiprocessing +@parametrize('inner_max_num_threads', [1, 2, 4, None]) +@parametrize('n_jobs', [2, -1]) +@parametrize("context", [parallel_config, parallel_backend]) +def test_threadpool_limitation_in_child_context( + context, n_jobs, inner_max_num_threads +): + # Check that the protection against oversubscription in workers is working + # using threadpoolctl functionalities. + + # Skip this test if numpy is not linked to a BLAS library + parent_info = _check_numpy_threadpool_limits() + if len(parent_info) == 0: + pytest.skip(reason="Need a version of numpy linked to BLAS") + + with context('loky', inner_max_num_threads=inner_max_num_threads): + workers_threadpool_infos = Parallel(n_jobs=n_jobs)( + delayed(_check_numpy_threadpool_limits)() for i in range(2)) + + n_jobs = effective_n_jobs(n_jobs) + if inner_max_num_threads is None: + expected_child_num_threads = max(cpu_count() // n_jobs, 1) + else: + expected_child_num_threads = inner_max_num_threads + + check_child_num_threads(workers_threadpool_infos, parent_info, + expected_child_num_threads) + + +@with_multiprocessing +@parametrize('n_jobs', [2, -1]) +@parametrize('var_name', ["OPENBLAS_NUM_THREADS", + "MKL_NUM_THREADS", + "OMP_NUM_THREADS"]) +@parametrize("context", [parallel_config, parallel_backend]) +def test_threadpool_limitation_in_child_override(context, n_jobs, var_name): + # Check that environment variables set by the user on the main process + # always have the priority. + + # Clean up the existing executor because we change the environment of the + # parent at runtime and it is not detected in loky intentionally. 
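# --- Editor's annotation (illustrative sketch, not part of the patch) -------
# The oversubscription tests above rely on joblib capping the BLAS/OpenMP
# threadpools of its loky workers.  A hedged example of the explicit cap,
# assuming threadpoolctl is installed and numpy is linked to a BLAS it can
# introspect:
from joblib import Parallel, delayed, parallel_config


def worker_threadpool_sizes():
    import numpy as np
    from threadpoolctl import threadpool_info
    # Touch BLAS so its threadpool is initialized in this worker, then
    # report the limits that the worker actually sees.
    np.dot(np.ones((2, 2)), np.ones((2, 2)))
    return [(m.get("user_api"), m["num_threads"]) for m in threadpool_info()]


if __name__ == "__main__":
    # Cap every worker's inner threadpools at one thread each.
    with parallel_config(backend="loky", inner_max_num_threads=1):
        print(Parallel(n_jobs=2)(
            delayed(worker_threadpool_sizes)() for _ in range(2)))
# ---------------------------------------------------------------------------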
+ get_reusable_executor(reuse=True).shutdown() + + def _get_env(var_name): + return os.environ.get(var_name) + + original_var_value = os.environ.get(var_name) + try: + os.environ[var_name] = "4" + # Skip this test if numpy is not linked to a BLAS library + results = Parallel(n_jobs=n_jobs)( + delayed(_get_env)(var_name) for i in range(2)) + assert results == ["4", "4"] + + with context('loky', inner_max_num_threads=1): + results = Parallel(n_jobs=n_jobs)( + delayed(_get_env)(var_name) for i in range(2)) + assert results == ["1", "1"] + + finally: + if original_var_value is None: + del os.environ[var_name] + else: + os.environ[var_name] = original_var_value + + +@with_multiprocessing +@parametrize('n_jobs', [2, 4, -1]) +def test_loky_reuse_workers(n_jobs): + # Non-regression test for issue #967 where the workers are not reused when + # calling multiple Parallel loops. + + def parallel_call(n_jobs): + x = range(10) + Parallel(n_jobs=n_jobs)(delayed(sum)(x) for i in range(10)) + + # Run a parallel loop and get the workers used for computations + parallel_call(n_jobs) + first_executor = get_reusable_executor(reuse=True) + + # Ensure that the workers are reused for the next calls, as the executor is + # not restarted. + for _ in range(10): + parallel_call(n_jobs) + executor = get_reusable_executor(reuse=True) + assert executor == first_executor diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/test/test_store_backends.py b/env-llmeval/lib/python3.10/site-packages/joblib/test/test_store_backends.py new file mode 100644 index 0000000000000000000000000000000000000000..e5db16757ea45fd6761e1b8cfd35e5f5a920752f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/joblib/test/test_store_backends.py @@ -0,0 +1,94 @@ + +try: + # Python 2.7: use the C pickle to speed up + # test_concurrency_safe_write which pickles big python objects + import cPickle as cpickle +except ImportError: + import pickle as cpickle +import functools +from pickle import PicklingError +import time + +import pytest + +from joblib.testing import parametrize, timeout +from joblib.test.common import with_multiprocessing +from joblib.backports import concurrency_safe_rename +from joblib import Parallel, delayed +from joblib._store_backends import ( + concurrency_safe_write, + FileSystemStoreBackend, + CacheWarning, +) + + +def write_func(output, filename): + with open(filename, 'wb') as f: + cpickle.dump(output, f) + + +def load_func(expected, filename): + for i in range(10): + try: + with open(filename, 'rb') as f: + reloaded = cpickle.load(f) + break + except (OSError, IOError): + # On Windows you can have WindowsError ([Error 5] Access + # is denied or [Error 13] Permission denied) when reading the file, + # probably because a writer process has a lock on the file + time.sleep(0.1) + else: + raise + assert expected == reloaded + + +def concurrency_safe_write_rename(to_write, filename, write_func): + temporary_filename = concurrency_safe_write(to_write, + filename, write_func) + concurrency_safe_rename(temporary_filename, filename) + + +@timeout(0) # No timeout as this test can be long +@with_multiprocessing +@parametrize('backend', ['multiprocessing', 'loky', 'threading']) +def test_concurrency_safe_write(tmpdir, backend): + # Add one item to cache + filename = tmpdir.join('test.pkl').strpath + + obj = {str(i): i for i in range(int(1e5))} + funcs = [functools.partial(concurrency_safe_write_rename, + write_func=write_func) + if i % 3 != 2 else load_func for i in range(12)] + Parallel(n_jobs=2, backend=backend)( + 
delayed(func)(obj, filename) for func in funcs) + + +def test_warning_on_dump_failure(tmpdir): + # Check that a warning is raised when the dump fails for any reason but + # a PicklingError. + class UnpicklableObject(object): + def __reduce__(self): + raise RuntimeError("some exception") + + backend = FileSystemStoreBackend() + backend.location = tmpdir.join('test_warning_on_pickling_error').strpath + backend.compress = None + + with pytest.warns(CacheWarning, match="some exception"): + backend.dump_item("testpath", UnpicklableObject()) + + +def test_warning_on_pickling_error(tmpdir): + # This is separate from test_warning_on_dump_failure because in the + # future we will turn this into an exception. + class UnpicklableObject(object): + def __reduce__(self): + raise PicklingError("not picklable") + + backend = FileSystemStoreBackend() + backend.location = tmpdir.join('test_warning_on_pickling_error').strpath + backend.compress = None + + with pytest.warns(FutureWarning, match="not picklable"): + backend.dump_item("testpath", UnpicklableObject()) diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/test/test_testing.py b/env-llmeval/lib/python3.10/site-packages/joblib/test/test_testing.py new file mode 100644 index 0000000000000000000000000000000000000000..e8095aa67040ce868849b89927b325895b5d8e34 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/joblib/test/test_testing.py @@ -0,0 +1,75 @@ +import sys +import re + +from joblib.testing import raises, check_subprocess_call + + +def test_check_subprocess_call(): + code = '\n'.join(['result = 1 + 2 * 3', + 'print(result)', + 'my_list = [1, 2, 3]', + 'print(my_list)']) + + check_subprocess_call([sys.executable, '-c', code]) + + # Now checking stdout with a regex + check_subprocess_call([sys.executable, '-c', code], + # Regex needed for platform-specific line endings + stdout_regex=r'7\s{1,2}\[1, 2, 3\]') + + +def test_check_subprocess_call_non_matching_regex(): + code = '42' + non_matching_pattern = '_no_way_this_matches_anything_' + + with raises(ValueError) as excinfo: + check_subprocess_call([sys.executable, '-c', code], + stdout_regex=non_matching_pattern) + excinfo.match('Unexpected stdout.+{}'.format(non_matching_pattern)) + + +def test_check_subprocess_call_wrong_command(): + wrong_command = '_a_command_that_does_not_exist_' + with raises(OSError): + check_subprocess_call([wrong_command]) + + +def test_check_subprocess_call_non_zero_return_code(): + code_with_non_zero_exit = '\n'.join([ + 'import sys', + 'print("writing on stdout")', + 'sys.stderr.write("writing on stderr")', + 'sys.exit(123)']) + + pattern = re.compile('Non-zero return code: 123.+' + 'Stdout:\nwriting on stdout.+' + 'Stderr:\nwriting on stderr', re.DOTALL) + + with raises(ValueError) as excinfo: + check_subprocess_call([sys.executable, '-c', code_with_non_zero_exit]) + excinfo.match(pattern) + + +def test_check_subprocess_call_timeout(): + code_timing_out = '\n'.join([ + 'import time', + 'import sys', + 'print("before sleep on stdout")', + 'sys.stdout.flush()', + 'sys.stderr.write("before sleep on stderr")', + 'sys.stderr.flush()', + # We need to sleep for at least 2 * timeout seconds in case the SIGKILL + # is triggered. 
+ 'time.sleep(10)', + 'print("process should have be killed before")', + 'sys.stdout.flush()']) + + pattern = re.compile('Non-zero return code:.+' + 'Stdout:\nbefore sleep on stdout\\s+' + 'Stderr:\nbefore sleep on stderr', + re.DOTALL) + + with raises(ValueError) as excinfo: + check_subprocess_call([sys.executable, '-c', code_timing_out], + timeout=1) + excinfo.match(pattern) diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/test/test_utils.py b/env-llmeval/lib/python3.10/site-packages/joblib/test/test_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..4999a212c462bdb6c10e9e08fdfba74d03b05294 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/joblib/test/test_utils.py @@ -0,0 +1,27 @@ +import pytest + +from joblib._utils import eval_expr + + +@pytest.mark.parametrize( + "expr", + ["exec('import os')", "print(1)", "import os", "1+1; import os", "1^1"], +) +def test_eval_expr_invalid(expr): + with pytest.raises( + ValueError, match="is not a valid or supported arithmetic" + ): + eval_expr(expr) + + +@pytest.mark.parametrize( + "expr, result", + [ + ("2*6", 12), + ("2**6", 64), + ("1 + 2*3**(4) / (6 + -7)", -161.0), + ("(20 // 3) % 5", 1), + ], +) +def test_eval_expr_valid(expr, result): + assert eval_expr(expr) == result diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/test/testutils.py b/env-llmeval/lib/python3.10/site-packages/joblib/test/testutils.py new file mode 100644 index 0000000000000000000000000000000000000000..20ec8c1ba0a50da6be9fce3686ac1950b1a55ab5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/joblib/test/testutils.py @@ -0,0 +1,8 @@ +def return_slice_of_data(arr, start_idx, end_idx): + return arr[start_idx:end_idx] + + +def print_filename_and_raise(arr): + from joblib._memmapping_reducer import _get_backing_memmap + print(_get_backing_memmap(arr).filename) + raise ValueError
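# --- Editor's annotation (illustrative sketch, not part of the patch) -------
# The store-backend test earlier in this patch exercises joblib's
# write-to-a-temporary-file-then-rename pattern, which keeps concurrent
# readers from ever observing a partially written cache item.  A
# self-contained sketch of the same idea without joblib internals; the
# helper name and file names below are illustrative only.
import os
import pickle
import tempfile


def atomic_pickle_dump(obj, filename):
    # Write into a temporary file in the same directory, then replace the
    # target in a single step so readers see either the old or the new file.
    dir_name = os.path.dirname(os.path.abspath(filename))
    fd, tmp_path = tempfile.mkstemp(dir=dir_name, suffix=".tmp")
    try:
        with os.fdopen(fd, "wb") as f:
            pickle.dump(obj, f)
        os.replace(tmp_path, filename)
    except BaseException:
        if os.path.exists(tmp_path):
            os.remove(tmp_path)
        raise


if __name__ == "__main__":
    atomic_pickle_dump({"answer": 42}, "demo.pkl")
    with open("demo.pkl", "rb") as f:
        print(pickle.load(f))
# ---------------------------------------------------------------------------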