diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3994fd6e3096008027390be8b060b3dbaad0816f
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/_cloudpickle_wrapper.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/_cloudpickle_wrapper.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6190627aad90cb0afab0fdc3ba7bbb3cab2f639a
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/_cloudpickle_wrapper.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/_dask.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/_dask.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..67d09e01ede3e23217946269f7564e1a52e8e621
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/_dask.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/_memmapping_reducer.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/_memmapping_reducer.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6348c40a0781c7c52377afc3fc52780e029e41ad
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/_memmapping_reducer.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/_multiprocessing_helpers.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/_multiprocessing_helpers.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3b07491d932d1344cc746609eea2e60c51e2173c
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/_multiprocessing_helpers.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/_parallel_backends.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/_parallel_backends.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b4c447a12ce4e8ec1b046ad9e30d0f88a37a2d1a
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/_parallel_backends.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/_store_backends.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/_store_backends.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f5e6236abe3dae5643fa19ea44eae25db4a5e9b8
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/_store_backends.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/_utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..92c4f611b5261cc02fd2d85fa066dedf17656cfe
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/_utils.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/backports.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/backports.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7975c5880e5b3bcd1be0648756d4eb0e39862ab0
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/backports.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/compressor.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/compressor.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0c8e39bdba6103ddd203f6581691ab82675aa7e7
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/compressor.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/disk.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/disk.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3ece9b052b84e58e09316b2a16d4e05aef06d748
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/disk.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/executor.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/executor.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..92fd4c7f21346f28d13db447ce7b43eb13a35de0
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/executor.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/func_inspect.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/func_inspect.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bdc6bbbccd619e295adfd4472c3a0929787262c3
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/func_inspect.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/hashing.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/hashing.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cd3ff005a90b100c302e49104209909493f35faf
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/hashing.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/logger.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/logger.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..170bf02c5ed279fc51626e14af3a4aef9a6c8819
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/logger.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/memory.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/memory.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..22ba5435ad28beb08c25c60218b72418fa28431d
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/memory.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/numpy_pickle.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/numpy_pickle.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9c3f32e26e8fe174a8ba8ff1ffb1d67302874ace
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/numpy_pickle.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/numpy_pickle_compat.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/numpy_pickle_compat.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cb138073a1323a980dc68aebe82415bcae44cb3c
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/numpy_pickle_compat.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/numpy_pickle_utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/numpy_pickle_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f499f7c7a72a888f936a3628fa0d4309b65280c4
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/numpy_pickle_utils.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/parallel.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/parallel.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9dd689d41446cb09b34ec449ddb2d9d4c7ad4e49
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/parallel.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/pool.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/pool.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d3d7236dfff2b5854f3c9407b3a025f4c55b9686
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/pool.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/testing.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/testing.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2cbe9b7302bc22b065f9946bc3988c0145f9abb9
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/testing.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/_cloudpickle_wrapper.py b/llmeval-env/lib/python3.10/site-packages/joblib/_cloudpickle_wrapper.py
new file mode 100644
index 0000000000000000000000000000000000000000..daf899d04ff513e1ce9c9c41871adcfd72c8fcf7
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/joblib/_cloudpickle_wrapper.py
@@ -0,0 +1,19 @@
+"""
+Small shim of loky's cloudpickle_wrapper to avoid failure when
+multiprocessing is not available.
+""" + + +from ._multiprocessing_helpers import mp + + +def _my_wrap_non_picklable_objects(obj, keep_wrapper=True): + return obj + + +if mp is not None: + from .externals.loky import wrap_non_picklable_objects +else: + wrap_non_picklable_objects = _my_wrap_non_picklable_objects + +__all__ = ["wrap_non_picklable_objects"] diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/_memmapping_reducer.py b/llmeval-env/lib/python3.10/site-packages/joblib/_memmapping_reducer.py new file mode 100644 index 0000000000000000000000000000000000000000..13f5c4a17ef0794dc965ca3e3a3ef216125e2946 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/_memmapping_reducer.py @@ -0,0 +1,657 @@ +""" +Reducer using memory mapping for numpy arrays +""" +# Author: Thomas Moreau +# Copyright: 2017, Thomas Moreau +# License: BSD 3 clause + +from mmap import mmap +import errno +import os +import stat +import threading +import atexit +import tempfile +import time +import warnings +import weakref +from uuid import uuid4 +from multiprocessing import util + +from pickle import whichmodule, loads, dumps, HIGHEST_PROTOCOL, PicklingError + +try: + WindowsError +except NameError: + WindowsError = type(None) + +try: + import numpy as np + from numpy.lib.stride_tricks import as_strided +except ImportError: + np = None + +from .numpy_pickle import dump, load, load_temporary_memmap +from .backports import make_memmap +from .disk import delete_folder +from .externals.loky.backend import resource_tracker + +# Some system have a ramdisk mounted by default, we can use it instead of /tmp +# as the default folder to dump big arrays to share with subprocesses. +SYSTEM_SHARED_MEM_FS = '/dev/shm' + +# Minimal number of bytes available on SYSTEM_SHARED_MEM_FS to consider using +# it as the default folder to dump big arrays to share with subprocesses. +SYSTEM_SHARED_MEM_FS_MIN_SIZE = int(2e9) + +# Folder and file permissions to chmod temporary files generated by the +# memmapping pool. Only the owner of the Python process can access the +# temporary files and folder. +FOLDER_PERMISSIONS = stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR +FILE_PERMISSIONS = stat.S_IRUSR | stat.S_IWUSR + +# Set used in joblib workers, referencing the filenames of temporary memmaps +# created by joblib to speed up data communication. In child processes, we add +# a finalizer to these memmaps that sends a maybe_unlink call to the +# resource_tracker, in order to free main memory as fast as possible. +JOBLIB_MMAPS = set() + + +def _log_and_unlink(filename): + from .externals.loky.backend.resource_tracker import _resource_tracker + util.debug( + "[FINALIZER CALL] object mapping to {} about to be deleted," + " decrementing the refcount of the file (pid: {})".format( + os.path.basename(filename), os.getpid())) + _resource_tracker.maybe_unlink(filename, "file") + + +def add_maybe_unlink_finalizer(memmap): + util.debug( + "[FINALIZER ADD] adding finalizer to {} (id {}, filename {}, pid {})" + "".format(type(memmap), id(memmap), os.path.basename(memmap.filename), + os.getpid())) + weakref.finalize(memmap, _log_and_unlink, memmap.filename) + + +def unlink_file(filename): + """Wrapper around os.unlink with a retry mechanism. + + The retry mechanism has been implemented primarily to overcome a race + condition happening during the finalizer of a np.memmap: when a process + holding the last reference to a mmap-backed np.memmap/np.array is about to + delete this array (and close the reference), it sends a maybe_unlink + request to the resource_tracker. 
This request can be processed faster than + it takes for the last reference of the memmap to be closed, yielding (on + Windows) a PermissionError in the resource_tracker loop. + """ + NUM_RETRIES = 10 + for retry_no in range(1, NUM_RETRIES + 1): + try: + os.unlink(filename) + break + except PermissionError: + util.debug( + '[ResourceTracker] tried to unlink {}, got ' + 'PermissionError'.format(filename) + ) + if retry_no == NUM_RETRIES: + raise + else: + time.sleep(.2) + except FileNotFoundError: + # In case of a race condition when deleting the temporary folder, + # avoid noisy FileNotFoundError exception in the resource tracker. + pass + + +resource_tracker._CLEANUP_FUNCS['file'] = unlink_file + + +class _WeakArrayKeyMap: + """A variant of weakref.WeakKeyDictionary for unhashable numpy arrays. + + This datastructure will be used with numpy arrays as obj keys, therefore we + do not use the __get__ / __set__ methods to avoid any conflict with the + numpy fancy indexing syntax. + """ + + def __init__(self): + self._data = {} + + def get(self, obj): + ref, val = self._data[id(obj)] + if ref() is not obj: + # In case of race condition with on_destroy: could never be + # triggered by the joblib tests with CPython. + raise KeyError(obj) + return val + + def set(self, obj, value): + key = id(obj) + try: + ref, _ = self._data[key] + if ref() is not obj: + # In case of race condition with on_destroy: could never be + # triggered by the joblib tests with CPython. + raise KeyError(obj) + except KeyError: + # Insert the new entry in the mapping along with a weakref + # callback to automatically delete the entry from the mapping + # as soon as the object used as key is garbage collected. + def on_destroy(_): + del self._data[key] + ref = weakref.ref(obj, on_destroy) + self._data[key] = ref, value + + def __getstate__(self): + raise PicklingError("_WeakArrayKeyMap is not pickleable") + + +############################################################################### +# Support for efficient transient pickling of numpy data structures + + +def _get_backing_memmap(a): + """Recursively look up the original np.memmap instance base if any.""" + b = getattr(a, 'base', None) + if b is None: + # TODO: check scipy sparse datastructure if scipy is installed + # a nor its descendants do not have a memmap base + return None + + elif isinstance(b, mmap): + # a is already a real memmap instance. + return a + + else: + # Recursive exploration of the base ancestry + return _get_backing_memmap(b) + + +def _get_temp_dir(pool_folder_name, temp_folder=None): + """Get the full path to a subfolder inside the temporary folder. + + Parameters + ---------- + pool_folder_name : str + Sub-folder name used for the serialization of a pool instance. + + temp_folder: str, optional + Folder to be used by the pool for memmapping large arrays + for sharing memory with worker processes. If None, this will try in + order: + + - a folder pointed by the JOBLIB_TEMP_FOLDER environment + variable, + - /dev/shm if the folder exists and is writable: this is a + RAMdisk filesystem available by default on modern Linux + distributions, + - the default system temporary folder that can be + overridden with TMP, TMPDIR or TEMP environment + variables, typically /tmp under Unix operating systems. + + Returns + ------- + pool_folder : str + full path to the temporary folder + use_shared_mem : bool + whether the temporary folder is written to the system shared memory + folder or some other temporary folder. 
+ """ + use_shared_mem = False + if temp_folder is None: + temp_folder = os.environ.get('JOBLIB_TEMP_FOLDER', None) + if temp_folder is None: + if os.path.exists(SYSTEM_SHARED_MEM_FS) and hasattr(os, 'statvfs'): + try: + shm_stats = os.statvfs(SYSTEM_SHARED_MEM_FS) + available_nbytes = shm_stats.f_bsize * shm_stats.f_bavail + if available_nbytes > SYSTEM_SHARED_MEM_FS_MIN_SIZE: + # Try to see if we have write access to the shared mem + # folder only if it is reasonably large (that is 2GB or + # more). + temp_folder = SYSTEM_SHARED_MEM_FS + pool_folder = os.path.join(temp_folder, pool_folder_name) + if not os.path.exists(pool_folder): + os.makedirs(pool_folder) + use_shared_mem = True + except (IOError, OSError): + # Missing rights in the /dev/shm partition, fallback to regular + # temp folder. + temp_folder = None + if temp_folder is None: + # Fallback to the default tmp folder, typically /tmp + temp_folder = tempfile.gettempdir() + temp_folder = os.path.abspath(os.path.expanduser(temp_folder)) + pool_folder = os.path.join(temp_folder, pool_folder_name) + return pool_folder, use_shared_mem + + +def has_shareable_memory(a): + """Return True if a is backed by some mmap buffer directly or not.""" + return _get_backing_memmap(a) is not None + + +def _strided_from_memmap(filename, dtype, mode, offset, order, shape, strides, + total_buffer_len, unlink_on_gc_collect): + """Reconstruct an array view on a memory mapped file.""" + if mode == 'w+': + # Do not zero the original data when unpickling + mode = 'r+' + + if strides is None: + # Simple, contiguous memmap + return make_memmap( + filename, dtype=dtype, shape=shape, mode=mode, offset=offset, + order=order, unlink_on_gc_collect=unlink_on_gc_collect + ) + else: + # For non-contiguous data, memmap the total enclosing buffer and then + # extract the non-contiguous view with the stride-tricks API + base = make_memmap( + filename, dtype=dtype, shape=total_buffer_len, offset=offset, + mode=mode, order=order, unlink_on_gc_collect=unlink_on_gc_collect + ) + return as_strided(base, shape=shape, strides=strides) + + +def _reduce_memmap_backed(a, m): + """Pickling reduction for memmap backed arrays. + + a is expected to be an instance of np.ndarray (or np.memmap) + m is expected to be an instance of np.memmap on the top of the ``base`` + attribute ancestry of a. ``m.base`` should be the real python mmap object. + """ + # offset that comes from the striding differences between a and m + util.debug('[MEMMAP REDUCE] reducing a memmap-backed array ' + '(shape, {}, pid: {})'.format(a.shape, os.getpid())) + try: + from numpy.lib.array_utils import byte_bounds + except (ModuleNotFoundError, ImportError): + # Backward-compat for numpy < 2.0 + from numpy import byte_bounds + a_start, a_end = byte_bounds(a) + m_start = byte_bounds(m)[0] + offset = a_start - m_start + + # offset from the backing memmap + offset += m.offset + + if m.flags['F_CONTIGUOUS']: + order = 'F' + else: + # The backing memmap buffer is necessarily contiguous hence C if not + # Fortran + order = 'C' + + if a.flags['F_CONTIGUOUS'] or a.flags['C_CONTIGUOUS']: + # If the array is a contiguous view, no need to pass the strides + strides = None + total_buffer_len = None + else: + # Compute the total number of items to map from which the strided + # view will be extracted. 
+ strides = a.strides + total_buffer_len = (a_end - a_start) // a.itemsize + + return (_strided_from_memmap, + (m.filename, a.dtype, m.mode, offset, order, a.shape, strides, + total_buffer_len, False)) + + +def reduce_array_memmap_backward(a): + """reduce a np.array or a np.memmap from a child process""" + m = _get_backing_memmap(a) + if isinstance(m, np.memmap) and m.filename not in JOBLIB_MMAPS: + # if a is backed by a memmaped file, reconstruct a using the + # memmaped file. + return _reduce_memmap_backed(a, m) + else: + # a is either a regular (not memmap-backed) numpy array, or an array + # backed by a shared temporary file created by joblib. In the latter + # case, in order to limit the lifespan of these temporary files, we + # serialize the memmap as a regular numpy array, and decref the + # file backing the memmap (done implicitly in a previously registered + # finalizer, see ``unlink_on_gc_collect`` for more details) + return ( + loads, (dumps(np.asarray(a), protocol=HIGHEST_PROTOCOL), ) + ) + + +class ArrayMemmapForwardReducer(object): + """Reducer callable to dump large arrays to memmap files. + + Parameters + ---------- + max_nbytes: int + Threshold to trigger memmapping of large arrays to files created + a folder. + temp_folder_resolver: callable + An callable in charge of resolving a temporary folder name where files + for backing memmapped arrays are created. + mmap_mode: 'r', 'r+' or 'c' + Mode for the created memmap datastructure. See the documentation of + numpy.memmap for more details. Note: 'w+' is coerced to 'r+' + automatically to avoid zeroing the data on unpickling. + verbose: int, optional, 0 by default + If verbose > 0, memmap creations are logged. + If verbose > 1, both memmap creations, reuse and array pickling are + logged. + prewarm: bool, optional, False by default. + Force a read on newly memmapped array to make sure that OS pre-cache it + memory. This can be useful to avoid concurrent disk access when the + same data array is passed to different worker processes. + """ + + def __init__(self, max_nbytes, temp_folder_resolver, mmap_mode, + unlink_on_gc_collect, verbose=0, prewarm=True): + self._max_nbytes = max_nbytes + self._temp_folder_resolver = temp_folder_resolver + self._mmap_mode = mmap_mode + self.verbose = int(verbose) + if prewarm == "auto": + self._prewarm = not self._temp_folder.startswith( + SYSTEM_SHARED_MEM_FS + ) + else: + self._prewarm = prewarm + self._prewarm = prewarm + self._memmaped_arrays = _WeakArrayKeyMap() + self._temporary_memmaped_filenames = set() + self._unlink_on_gc_collect = unlink_on_gc_collect + + @property + def _temp_folder(self): + return self._temp_folder_resolver() + + def __reduce__(self): + # The ArrayMemmapForwardReducer is passed to the children processes: it + # needs to be pickled but the _WeakArrayKeyMap need to be skipped as + # it's only guaranteed to be consistent with the parent process memory + # garbage collection. + # Although this reducer is pickled, it is not needed in its destination + # process (child processes), as we only use this reducer to send + # memmaps from the parent process to the children processes. For this + # reason, we can afford skipping the resolver, (which would otherwise + # be unpicklable), and pass it as None instead. 
+ args = (self._max_nbytes, None, self._mmap_mode, + self._unlink_on_gc_collect) + kwargs = { + 'verbose': self.verbose, + 'prewarm': self._prewarm, + } + return ArrayMemmapForwardReducer, args, kwargs + + def __call__(self, a): + m = _get_backing_memmap(a) + if m is not None and isinstance(m, np.memmap): + # a is already backed by a memmap file, let's reuse it directly + return _reduce_memmap_backed(a, m) + + if (not a.dtype.hasobject and self._max_nbytes is not None and + a.nbytes > self._max_nbytes): + # check that the folder exists (lazily create the pool temp folder + # if required) + try: + os.makedirs(self._temp_folder) + os.chmod(self._temp_folder, FOLDER_PERMISSIONS) + except OSError as e: + if e.errno != errno.EEXIST: + raise e + + try: + basename = self._memmaped_arrays.get(a) + except KeyError: + # Generate a new unique random filename. The process and thread + # ids are only useful for debugging purpose and to make it + # easier to cleanup orphaned files in case of hard process + # kill (e.g. by "kill -9" or segfault). + basename = "{}-{}-{}.pkl".format( + os.getpid(), id(threading.current_thread()), uuid4().hex) + self._memmaped_arrays.set(a, basename) + filename = os.path.join(self._temp_folder, basename) + + # In case the same array with the same content is passed several + # times to the pool subprocess children, serialize it only once + + is_new_memmap = filename not in self._temporary_memmaped_filenames + + # add the memmap to the list of temporary memmaps created by joblib + self._temporary_memmaped_filenames.add(filename) + + if self._unlink_on_gc_collect: + # Bump reference count of the memmap by 1 to account for + # shared usage of the memmap by a child process. The + # corresponding decref call will be executed upon calling + # resource_tracker.maybe_unlink, registered as a finalizer in + # the child. + # the incref/decref calls here are only possible when the child + # and the parent share the same resource_tracker. It is not the + # case for the multiprocessing backend, but it does not matter + # because unlinking a memmap from a child process is only + # useful to control the memory usage of long-lasting child + # processes, while the multiprocessing-based pools terminate + # their workers at the end of a map() call. + resource_tracker.register(filename, "file") + + if is_new_memmap: + # Incref each temporary memmap created by joblib one extra + # time. This means that these memmaps will only be deleted + # once an extra maybe_unlink() is called, which is done once + # all the jobs have completed (or been canceled) in the + # Parallel._terminate_backend() method. + resource_tracker.register(filename, "file") + + if not os.path.exists(filename): + util.debug( + "[ARRAY DUMP] Pickling new array (shape={}, dtype={}) " + "creating a new memmap at {}".format( + a.shape, a.dtype, filename)) + for dumped_filename in dump(a, filename): + os.chmod(dumped_filename, FILE_PERMISSIONS) + + if self._prewarm: + # Warm up the data by accessing it. This operation ensures + # that the disk access required to create the memmapping + # file are performed in the reducing process and avoids + # concurrent memmap creation in multiple children + # processes. 
+ load(filename, mmap_mode=self._mmap_mode).max() + + else: + util.debug( + "[ARRAY DUMP] Pickling known array (shape={}, dtype={}) " + "reusing memmap file: {}".format( + a.shape, a.dtype, os.path.basename(filename))) + + # The worker process will use joblib.load to memmap the data + return ( + (load_temporary_memmap, (filename, self._mmap_mode, + self._unlink_on_gc_collect)) + ) + else: + # do not convert a into memmap, let pickler do its usual copy with + # the default system pickler + util.debug( + '[ARRAY DUMP] Pickling array (NO MEMMAPPING) (shape={}, ' + ' dtype={}).'.format(a.shape, a.dtype)) + return (loads, (dumps(a, protocol=HIGHEST_PROTOCOL),)) + + +def get_memmapping_reducers( + forward_reducers=None, backward_reducers=None, + temp_folder_resolver=None, max_nbytes=1e6, mmap_mode='r', verbose=0, + prewarm=False, unlink_on_gc_collect=True, **kwargs): + """Construct a pair of memmapping reducer linked to a tmpdir. + + This function manage the creation and the clean up of the temporary folders + underlying the memory maps and should be use to get the reducers necessary + to construct joblib pool or executor. + """ + if forward_reducers is None: + forward_reducers = dict() + if backward_reducers is None: + backward_reducers = dict() + + if np is not None: + # Register smart numpy.ndarray reducers that detects memmap backed + # arrays and that is also able to dump to memmap large in-memory + # arrays over the max_nbytes threshold + forward_reduce_ndarray = ArrayMemmapForwardReducer( + max_nbytes, temp_folder_resolver, mmap_mode, unlink_on_gc_collect, + verbose, prewarm=prewarm) + forward_reducers[np.ndarray] = forward_reduce_ndarray + forward_reducers[np.memmap] = forward_reduce_ndarray + + # Communication from child process to the parent process always + # pickles in-memory numpy.ndarray without dumping them as memmap + # to avoid confusing the caller and make it tricky to collect the + # temporary folder + backward_reducers[np.ndarray] = reduce_array_memmap_backward + backward_reducers[np.memmap] = reduce_array_memmap_backward + + return forward_reducers, backward_reducers + + +class TemporaryResourcesManager(object): + """Stateful object able to manage temporary folder and pickles + + It exposes: + - a per-context folder name resolving API that memmap-based reducers will + rely on to know where to pickle the temporary memmaps + - a temporary file/folder management API that internally uses the + resource_tracker. + """ + + def __init__(self, temp_folder_root=None, context_id=None): + self._current_temp_folder = None + self._temp_folder_root = temp_folder_root + self._use_shared_mem = None + self._cached_temp_folders = dict() + self._id = uuid4().hex + self._finalizers = {} + if context_id is None: + # It would be safer to not assign a default context id (less silent + # bugs), but doing this while maintaining backward compatibility + # with the previous, context-unaware version get_memmaping_executor + # exposes too many low-level details. + context_id = uuid4().hex + self.set_current_context(context_id) + + def set_current_context(self, context_id): + self._current_context_id = context_id + self.register_new_context(context_id) + + def register_new_context(self, context_id): + # Prepare a sub-folder name specific to a context (usually a unique id + # generated by each instance of the Parallel class). Do not create in + # advance to spare FS write access if no array is to be dumped). 
+ if context_id in self._cached_temp_folders: + return + else: + # During its lifecycle, one Parallel object can have several + # executors associated to it (for instance, if a loky worker raises + # an exception, joblib shutdowns the executor and instantly + # recreates a new one before raising the error - see + # ``ensure_ready``. Because we don't want two executors tied to + # the same Parallel object (and thus the same context id) to + # register/use/delete the same folder, we also add an id specific + # to the current Manager (and thus specific to its associated + # executor) to the folder name. + new_folder_name = ( + "joblib_memmapping_folder_{}_{}_{}".format( + os.getpid(), self._id, context_id) + ) + new_folder_path, _ = _get_temp_dir( + new_folder_name, self._temp_folder_root + ) + self.register_folder_finalizer(new_folder_path, context_id) + self._cached_temp_folders[context_id] = new_folder_path + + def resolve_temp_folder_name(self): + """Return a folder name specific to the currently activated context""" + return self._cached_temp_folders[self._current_context_id] + + # resource management API + + def register_folder_finalizer(self, pool_subfolder, context_id): + # Register the garbage collector at program exit in case caller forgets + # to call terminate explicitly: note we do not pass any reference to + # ensure that this callback won't prevent garbage collection of + # parallel instance and related file handler resources such as POSIX + # semaphores and pipes + pool_module_name = whichmodule(delete_folder, 'delete_folder') + resource_tracker.register(pool_subfolder, "folder") + + def _cleanup(): + # In some cases the Python runtime seems to set delete_folder to + # None just before exiting when accessing the delete_folder + # function from the closure namespace. So instead we reimport + # the delete_folder function explicitly. + # https://github.com/joblib/joblib/issues/328 + # We cannot just use from 'joblib.pool import delete_folder' + # because joblib should only use relative imports to allow + # easy vendoring. + delete_folder = __import__( + pool_module_name, fromlist=['delete_folder'] + ).delete_folder + try: + delete_folder(pool_subfolder, allow_non_empty=True) + resource_tracker.unregister(pool_subfolder, "folder") + except OSError: + warnings.warn("Failed to delete temporary folder: {}" + .format(pool_subfolder)) + + self._finalizers[context_id] = atexit.register(_cleanup) + + def _clean_temporary_resources(self, context_id=None, force=False, + allow_non_empty=False): + """Clean temporary resources created by a process-based pool""" + if context_id is None: + # Iterates over a copy of the cache keys to avoid Error due to + # iterating over a changing size dictionary. + for context_id in list(self._cached_temp_folders): + self._clean_temporary_resources( + context_id, force=force, allow_non_empty=allow_non_empty + ) + else: + temp_folder = self._cached_temp_folders.get(context_id) + if temp_folder and os.path.exists(temp_folder): + for filename in os.listdir(temp_folder): + if force: + # Some workers have failed and the ref counted might + # be off. The workers should have shut down by this + # time so forcefully clean up the files. + resource_tracker.unregister( + os.path.join(temp_folder, filename), "file" + ) + else: + resource_tracker.maybe_unlink( + os.path.join(temp_folder, filename), "file" + ) + + # When forcing clean-up, try to delete the folder even if some + # files are still in it. 
Otherwise, try to delete the folder + allow_non_empty |= force + + # Clean up the folder if possible, either if it is empty or + # if none of the files in it are in used and allow_non_empty. + try: + delete_folder( + temp_folder, allow_non_empty=allow_non_empty + ) + # Forget the folder once it has been deleted + self._cached_temp_folders.pop(context_id, None) + resource_tracker.unregister(temp_folder, "folder") + + # Also cancel the finalizers that gets triggered at gc. + finalizer = self._finalizers.pop(context_id, None) + if finalizer is not None: + atexit.unregister(finalizer) + + except OSError: + # Temporary folder cannot be deleted right now. + # This folder will be cleaned up by an atexit + # finalizer registered by the memmapping_reducer. + pass diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/_multiprocessing_helpers.py b/llmeval-env/lib/python3.10/site-packages/joblib/_multiprocessing_helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..bde4bc1905311cdc4cd337e9b72e3b24f50a3ed5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/_multiprocessing_helpers.py @@ -0,0 +1,53 @@ +"""Helper module to factorize the conditional multiprocessing import logic + +We use a distinct module to simplify import statements and avoid introducing +circular dependencies (for instance for the assert_spawning name). +""" +import os +import warnings + + +# Obtain possible configuration from the environment, assuming 1 (on) +# by default, upon 0 set to None. Should instructively fail if some non +# 0/1 value is set. +mp = int(os.environ.get('JOBLIB_MULTIPROCESSING', 1)) or None +if mp: + try: + import multiprocessing as mp + import _multiprocessing # noqa + except ImportError: + mp = None + +# 2nd stage: validate that locking is available on the system and +# issue a warning if not +if mp is not None: + try: + # try to create a named semaphore using SemLock to make sure they are + # available on this platform. We use the low level object + # _multiprocessing.SemLock to avoid spawning a resource tracker on + # Unix system or changing the default backend. + import tempfile + from _multiprocessing import SemLock + + _rand = tempfile._RandomNameSequence() + for i in range(100): + try: + name = '/joblib-{}-{}' .format( + os.getpid(), next(_rand)) + _sem = SemLock(0, 0, 1, name=name, unlink=True) + del _sem # cleanup + break + except FileExistsError as e: # pragma: no cover + if i >= 99: + raise FileExistsError( + 'cannot find name for semaphore') from e + except (FileExistsError, AttributeError, ImportError, OSError) as e: + mp = None + warnings.warn('%s. joblib will operate in serial mode' % (e,)) + + +# 3rd stage: backward compat for the assert_spawning helper +if mp is not None: + from multiprocessing.context import assert_spawning +else: + assert_spawning = None diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/_parallel_backends.py b/llmeval-env/lib/python3.10/site-packages/joblib/_parallel_backends.py new file mode 100644 index 0000000000000000000000000000000000000000..8201c96bcf61b5c6e61821e7073a0babf6510268 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/_parallel_backends.py @@ -0,0 +1,649 @@ +""" +Backends for embarrassingly parallel code. 
+""" + +import gc +import os +import warnings +import threading +import contextlib +from abc import ABCMeta, abstractmethod + +from ._utils import ( + _TracebackCapturingWrapper, + _retrieve_traceback_capturing_wrapped_call +) + +from ._multiprocessing_helpers import mp + +if mp is not None: + from .pool import MemmappingPool + from multiprocessing.pool import ThreadPool + from .executor import get_memmapping_executor + + # Import loky only if multiprocessing is present + from .externals.loky import process_executor, cpu_count + from .externals.loky.process_executor import ShutdownExecutorError + + +class ParallelBackendBase(metaclass=ABCMeta): + """Helper abc which defines all methods a ParallelBackend must implement""" + + supports_inner_max_num_threads = False + supports_retrieve_callback = False + default_n_jobs = 1 + + @property + def supports_return_generator(self): + return self.supports_retrieve_callback + + @property + def supports_timeout(self): + return self.supports_retrieve_callback + + nesting_level = None + + def __init__(self, nesting_level=None, inner_max_num_threads=None, + **kwargs): + super().__init__(**kwargs) + self.nesting_level = nesting_level + self.inner_max_num_threads = inner_max_num_threads + + MAX_NUM_THREADS_VARS = [ + 'OMP_NUM_THREADS', 'OPENBLAS_NUM_THREADS', 'MKL_NUM_THREADS', + 'BLIS_NUM_THREADS', 'VECLIB_MAXIMUM_THREADS', 'NUMBA_NUM_THREADS', + 'NUMEXPR_NUM_THREADS', + ] + + TBB_ENABLE_IPC_VAR = "ENABLE_IPC" + + @abstractmethod + def effective_n_jobs(self, n_jobs): + """Determine the number of jobs that can actually run in parallel + + n_jobs is the number of workers requested by the callers. Passing + n_jobs=-1 means requesting all available workers for instance matching + the number of CPU cores on the worker host(s). + + This method should return a guesstimate of the number of workers that + can actually perform work concurrently. The primary use case is to make + it possible for the caller to know in how many chunks to slice the + work. + + In general working on larger data chunks is more efficient (less + scheduling overhead and better use of CPU cache prefetching heuristics) + as long as all the workers have enough work to do. + """ + + @abstractmethod + def apply_async(self, func, callback=None): + """Schedule a func to be run""" + + def retrieve_result_callback(self, out): + """Called within the callback function passed in apply_async. + + The argument of this function is the argument given to a callback in + the considered backend. It is supposed to return the outcome of a task + if it succeeded or raise the exception if it failed. + """ + + def configure(self, n_jobs=1, parallel=None, prefer=None, require=None, + **backend_args): + """Reconfigure the backend and return the number of workers. + + This makes it possible to reuse an existing backend instance for + successive independent calls to Parallel with different parameters. 
+ """ + self.parallel = parallel + return self.effective_n_jobs(n_jobs) + + def start_call(self): + """Call-back method called at the beginning of a Parallel call""" + + def stop_call(self): + """Call-back method called at the end of a Parallel call""" + + def terminate(self): + """Shutdown the workers and free the shared memory.""" + + def compute_batch_size(self): + """Determine the optimal batch size""" + return 1 + + def batch_completed(self, batch_size, duration): + """Callback indicate how long it took to run a batch""" + + def get_exceptions(self): + """List of exception types to be captured.""" + return [] + + def abort_everything(self, ensure_ready=True): + """Abort any running tasks + + This is called when an exception has been raised when executing a task + and all the remaining tasks will be ignored and can therefore be + aborted to spare computation resources. + + If ensure_ready is True, the backend should be left in an operating + state as future tasks might be re-submitted via that same backend + instance. + + If ensure_ready is False, the implementer of this method can decide + to leave the backend in a closed / terminated state as no new task + are expected to be submitted to this backend. + + Setting ensure_ready to False is an optimization that can be leveraged + when aborting tasks via killing processes from a local process pool + managed by the backend it-self: if we expect no new tasks, there is no + point in re-creating new workers. + """ + # Does nothing by default: to be overridden in subclasses when + # canceling tasks is possible. + pass + + def get_nested_backend(self): + """Backend instance to be used by nested Parallel calls. + + By default a thread-based backend is used for the first level of + nesting. Beyond, switch to sequential backend to avoid spawning too + many threads on the host. + """ + nesting_level = getattr(self, 'nesting_level', 0) + 1 + if nesting_level > 1: + return SequentialBackend(nesting_level=nesting_level), None + else: + return ThreadingBackend(nesting_level=nesting_level), None + + @contextlib.contextmanager + def retrieval_context(self): + """Context manager to manage an execution context. + + Calls to Parallel.retrieve will be made inside this context. + + By default, this does nothing. It may be useful for subclasses to + handle nested parallelism. In particular, it may be required to avoid + deadlocks if a backend manages a fixed number of workers, when those + workers may be asked to do nested Parallel calls. Without + 'retrieval_context' this could lead to deadlock, as all the workers + managed by the backend may be "busy" waiting for the nested parallel + calls to finish, but the backend has no free workers to execute those + tasks. + """ + yield + + def _prepare_worker_env(self, n_jobs): + """Return environment variables limiting threadpools in external libs. + + This function return a dict containing environment variables to pass + when creating a pool of process. These environment variables limit the + number of threads to `n_threads` for OpenMP, MKL, Accelerated and + OpenBLAS libraries in the child processes. + """ + explicit_n_threads = self.inner_max_num_threads + default_n_threads = max(cpu_count() // n_jobs, 1) + + # Set the inner environment variables to self.inner_max_num_threads if + # it is given. Else, default to cpu_count // n_jobs unless the variable + # is already present in the parent process environment. 
+ env = {} + for var in self.MAX_NUM_THREADS_VARS: + if explicit_n_threads is None: + var_value = os.environ.get(var, default_n_threads) + else: + var_value = explicit_n_threads + + env[var] = str(var_value) + + if self.TBB_ENABLE_IPC_VAR not in os.environ: + # To avoid over-subscription when using TBB, let the TBB schedulers + # use Inter Process Communication to coordinate: + env[self.TBB_ENABLE_IPC_VAR] = "1" + return env + + @staticmethod + def in_main_thread(): + return isinstance(threading.current_thread(), threading._MainThread) + + +class SequentialBackend(ParallelBackendBase): + """A ParallelBackend which will execute all batches sequentially. + + Does not use/create any threading objects, and hence has minimal + overhead. Used when n_jobs == 1. + """ + + uses_threads = True + supports_timeout = False + supports_retrieve_callback = False + supports_sharedmem = True + + def effective_n_jobs(self, n_jobs): + """Determine the number of jobs which are going to run in parallel""" + if n_jobs == 0: + raise ValueError('n_jobs == 0 in Parallel has no meaning') + return 1 + + def apply_async(self, func, callback=None): + """Schedule a func to be run""" + raise RuntimeError("Should never be called for SequentialBackend.") + + def retrieve_result_callback(self, out): + raise RuntimeError("Should never be called for SequentialBackend.") + + def get_nested_backend(self): + # import is not top level to avoid cyclic import errors. + from .parallel import get_active_backend + + # SequentialBackend should neither change the nesting level, the + # default backend or the number of jobs. Just return the current one. + return get_active_backend() + + +class PoolManagerMixin(object): + """A helper class for managing pool of workers.""" + + _pool = None + + def effective_n_jobs(self, n_jobs): + """Determine the number of jobs which are going to run in parallel""" + if n_jobs == 0: + raise ValueError('n_jobs == 0 in Parallel has no meaning') + elif mp is None or n_jobs is None: + # multiprocessing is not available or disabled, fallback + # to sequential mode + return 1 + elif n_jobs < 0: + n_jobs = max(cpu_count() + 1 + n_jobs, 1) + return n_jobs + + def terminate(self): + """Shutdown the process or thread pool""" + if self._pool is not None: + self._pool.close() + self._pool.terminate() # terminate does a join() + self._pool = None + + def _get_pool(self): + """Used by apply_async to make it possible to implement lazy init""" + return self._pool + + def apply_async(self, func, callback=None): + """Schedule a func to be run""" + # Here, we need a wrapper to avoid crashes on KeyboardInterruptErrors. + # We also call the callback on error, to make sure the pool does not + # wait on crashed jobs. + return self._get_pool().apply_async( + _TracebackCapturingWrapper(func), (), + callback=callback, error_callback=callback + ) + + def retrieve_result_callback(self, out): + """Mimic concurrent.futures results, raising an error if needed.""" + return _retrieve_traceback_capturing_wrapped_call(out) + + def abort_everything(self, ensure_ready=True): + """Shutdown the pool and restart a new one with the same parameters""" + self.terminate() + if ensure_ready: + self.configure(n_jobs=self.parallel.n_jobs, parallel=self.parallel, + **self.parallel._backend_args) + + +class AutoBatchingMixin(object): + """A helper class for automagically batching jobs.""" + + # In seconds, should be big enough to hide multiprocessing dispatching + # overhead. 
+ # This settings was found by running benchmarks/bench_auto_batching.py + # with various parameters on various platforms. + MIN_IDEAL_BATCH_DURATION = .2 + + # Should not be too high to avoid stragglers: long jobs running alone + # on a single worker while other workers have no work to process any more. + MAX_IDEAL_BATCH_DURATION = 2 + + # Batching counters default values + _DEFAULT_EFFECTIVE_BATCH_SIZE = 1 + _DEFAULT_SMOOTHED_BATCH_DURATION = 0.0 + + def __init__(self, **kwargs): + super().__init__(**kwargs) + self._effective_batch_size = self._DEFAULT_EFFECTIVE_BATCH_SIZE + self._smoothed_batch_duration = self._DEFAULT_SMOOTHED_BATCH_DURATION + + def compute_batch_size(self): + """Determine the optimal batch size""" + old_batch_size = self._effective_batch_size + batch_duration = self._smoothed_batch_duration + if (batch_duration > 0 and + batch_duration < self.MIN_IDEAL_BATCH_DURATION): + # The current batch size is too small: the duration of the + # processing of a batch of task is not large enough to hide + # the scheduling overhead. + ideal_batch_size = int(old_batch_size * + self.MIN_IDEAL_BATCH_DURATION / + batch_duration) + # Multiply by two to limit oscilations between min and max. + ideal_batch_size *= 2 + + # dont increase the batch size too fast to limit huge batch sizes + # potentially leading to starving worker + batch_size = min(2 * old_batch_size, ideal_batch_size) + + batch_size = max(batch_size, 1) + + self._effective_batch_size = batch_size + if self.parallel.verbose >= 10: + self.parallel._print( + f"Batch computation too fast ({batch_duration}s.) " + f"Setting batch_size={batch_size}." + ) + elif (batch_duration > self.MAX_IDEAL_BATCH_DURATION and + old_batch_size >= 2): + # The current batch size is too big. If we schedule overly long + # running batches some CPUs might wait with nothing left to do + # while a couple of CPUs a left processing a few long running + # batches. Better reduce the batch size a bit to limit the + # likelihood of scheduling such stragglers. + + # decrease the batch size quickly to limit potential starving + ideal_batch_size = int( + old_batch_size * self.MIN_IDEAL_BATCH_DURATION / batch_duration + ) + # Multiply by two to limit oscilations between min and max. + batch_size = max(2 * ideal_batch_size, 1) + self._effective_batch_size = batch_size + if self.parallel.verbose >= 10: + self.parallel._print( + f"Batch computation too slow ({batch_duration}s.) " + f"Setting batch_size={batch_size}." + ) + else: + # No batch size adjustment + batch_size = old_batch_size + + if batch_size != old_batch_size: + # Reset estimation of the smoothed mean batch duration: this + # estimate is updated in the multiprocessing apply_async + # CallBack as long as the batch_size is constant. Therefore + # we need to reset the estimate whenever we re-tune the batch + # size. + self._smoothed_batch_duration = \ + self._DEFAULT_SMOOTHED_BATCH_DURATION + + return batch_size + + def batch_completed(self, batch_size, duration): + """Callback indicate how long it took to run a batch""" + if batch_size == self._effective_batch_size: + # Update the smoothed streaming estimate of the duration of a batch + # from dispatch to completion + old_duration = self._smoothed_batch_duration + if old_duration == self._DEFAULT_SMOOTHED_BATCH_DURATION: + # First record of duration for this batch size after the last + # reset. + new_duration = duration + else: + # Update the exponentially weighted average of the duration of + # batch for the current effective size. 
+ new_duration = 0.8 * old_duration + 0.2 * duration + self._smoothed_batch_duration = new_duration + + def reset_batch_stats(self): + """Reset batch statistics to default values. + + This avoids interferences with future jobs. + """ + self._effective_batch_size = self._DEFAULT_EFFECTIVE_BATCH_SIZE + self._smoothed_batch_duration = self._DEFAULT_SMOOTHED_BATCH_DURATION + + +class ThreadingBackend(PoolManagerMixin, ParallelBackendBase): + """A ParallelBackend which will use a thread pool to execute batches in. + + This is a low-overhead backend but it suffers from the Python Global + Interpreter Lock if the called function relies a lot on Python objects. + Mostly useful when the execution bottleneck is a compiled extension that + explicitly releases the GIL (for instance a Cython loop wrapped in a "with + nogil" block or an expensive call to a library such as NumPy). + + The actual thread pool is lazily initialized: the actual thread pool + construction is delayed to the first call to apply_async. + + ThreadingBackend is used as the default backend for nested calls. + """ + + supports_retrieve_callback = True + uses_threads = True + supports_sharedmem = True + + def configure(self, n_jobs=1, parallel=None, **backend_args): + """Build a process or thread pool and return the number of workers""" + n_jobs = self.effective_n_jobs(n_jobs) + if n_jobs == 1: + # Avoid unnecessary overhead and use sequential backend instead. + raise FallbackToBackend( + SequentialBackend(nesting_level=self.nesting_level)) + self.parallel = parallel + self._n_jobs = n_jobs + return n_jobs + + def _get_pool(self): + """Lazily initialize the thread pool + + The actual pool of worker threads is only initialized at the first + call to apply_async. + """ + if self._pool is None: + self._pool = ThreadPool(self._n_jobs) + return self._pool + + +class MultiprocessingBackend(PoolManagerMixin, AutoBatchingMixin, + ParallelBackendBase): + """A ParallelBackend which will use a multiprocessing.Pool. + + Will introduce some communication and memory overhead when exchanging + input and output data with the with the worker Python processes. + However, does not suffer from the Python Global Interpreter Lock. + """ + + supports_retrieve_callback = True + supports_return_generator = False + + def effective_n_jobs(self, n_jobs): + """Determine the number of jobs which are going to run in parallel. + + This also checks if we are attempting to create a nested parallel + loop. + """ + if mp is None: + return 1 + + if mp.current_process().daemon: + # Daemonic processes cannot have children + if n_jobs != 1: + if inside_dask_worker(): + msg = ( + "Inside a Dask worker with daemon=True, " + "setting n_jobs=1.\nPossible work-arounds:\n" + "- dask.config.set(" + "{'distributed.worker.daemon': False})" + "- set the environment variable " + "DASK_DISTRIBUTED__WORKER__DAEMON=False\n" + "before creating your Dask cluster." 
+ ) + else: + msg = ( + 'Multiprocessing-backed parallel loops ' + 'cannot be nested, setting n_jobs=1' + ) + warnings.warn(msg, stacklevel=3) + return 1 + + if process_executor._CURRENT_DEPTH > 0: + # Mixing loky and multiprocessing in nested loop is not supported + if n_jobs != 1: + warnings.warn( + 'Multiprocessing-backed parallel loops cannot be nested,' + ' below loky, setting n_jobs=1', + stacklevel=3) + return 1 + + elif not (self.in_main_thread() or self.nesting_level == 0): + # Prevent posix fork inside in non-main posix threads + if n_jobs != 1: + warnings.warn( + 'Multiprocessing-backed parallel loops cannot be nested' + ' below threads, setting n_jobs=1', + stacklevel=3) + return 1 + + return super(MultiprocessingBackend, self).effective_n_jobs(n_jobs) + + def configure(self, n_jobs=1, parallel=None, prefer=None, require=None, + **memmappingpool_args): + """Build a process or thread pool and return the number of workers""" + n_jobs = self.effective_n_jobs(n_jobs) + if n_jobs == 1: + raise FallbackToBackend( + SequentialBackend(nesting_level=self.nesting_level)) + + # Make sure to free as much memory as possible before forking + gc.collect() + self._pool = MemmappingPool(n_jobs, **memmappingpool_args) + self.parallel = parallel + return n_jobs + + def terminate(self): + """Shutdown the process or thread pool""" + super(MultiprocessingBackend, self).terminate() + self.reset_batch_stats() + + +class LokyBackend(AutoBatchingMixin, ParallelBackendBase): + """Managing pool of workers with loky instead of multiprocessing.""" + + supports_retrieve_callback = True + supports_inner_max_num_threads = True + + def configure(self, n_jobs=1, parallel=None, prefer=None, require=None, + idle_worker_timeout=300, **memmappingexecutor_args): + """Build a process executor and return the number of workers""" + n_jobs = self.effective_n_jobs(n_jobs) + if n_jobs == 1: + raise FallbackToBackend( + SequentialBackend(nesting_level=self.nesting_level)) + + self._workers = get_memmapping_executor( + n_jobs, timeout=idle_worker_timeout, + env=self._prepare_worker_env(n_jobs=n_jobs), + context_id=parallel._id, **memmappingexecutor_args) + self.parallel = parallel + return n_jobs + + def effective_n_jobs(self, n_jobs): + """Determine the number of jobs which are going to run in parallel""" + if n_jobs == 0: + raise ValueError('n_jobs == 0 in Parallel has no meaning') + elif mp is None or n_jobs is None: + # multiprocessing is not available or disabled, fallback + # to sequential mode + return 1 + elif mp.current_process().daemon: + # Daemonic processes cannot have children + if n_jobs != 1: + if inside_dask_worker(): + msg = ( + "Inside a Dask worker with daemon=True, " + "setting n_jobs=1.\nPossible work-arounds:\n" + "- dask.config.set(" + "{'distributed.worker.daemon': False})\n" + "- set the environment variable " + "DASK_DISTRIBUTED__WORKER__DAEMON=False\n" + "before creating your Dask cluster." 
+ ) + else: + msg = ( + 'Loky-backed parallel loops cannot be called in a' + ' multiprocessing, setting n_jobs=1' + ) + warnings.warn(msg, stacklevel=3) + + return 1 + elif not (self.in_main_thread() or self.nesting_level == 0): + # Prevent posix fork inside in non-main posix threads + if n_jobs != 1: + warnings.warn( + 'Loky-backed parallel loops cannot be nested below ' + 'threads, setting n_jobs=1', + stacklevel=3) + return 1 + elif n_jobs < 0: + n_jobs = max(cpu_count() + 1 + n_jobs, 1) + return n_jobs + + def apply_async(self, func, callback=None): + """Schedule a func to be run""" + future = self._workers.submit(func) + if callback is not None: + future.add_done_callback(callback) + return future + + def retrieve_result_callback(self, out): + try: + return out.result() + except ShutdownExecutorError: + raise RuntimeError( + "The executor underlying Parallel has been shutdown. " + "This is likely due to the garbage collection of a previous " + "generator from a call to Parallel with return_as='generator'." + " Make sure the generator is not garbage collected when " + "submitting a new job or that it is first properly exhausted." + ) + + def terminate(self): + if self._workers is not None: + # Don't terminate the workers as we want to reuse them in later + # calls, but cleanup the temporary resources that the Parallel call + # created. This 'hack' requires a private, low-level operation. + self._workers._temp_folder_manager._clean_temporary_resources( + context_id=self.parallel._id, force=False + ) + self._workers = None + + self.reset_batch_stats() + + def abort_everything(self, ensure_ready=True): + """Shutdown the workers and restart a new one with the same parameters + """ + self._workers.terminate(kill_workers=True) + self._workers = None + + if ensure_ready: + self.configure(n_jobs=self.parallel.n_jobs, parallel=self.parallel) + + +class FallbackToBackend(Exception): + """Raised when configuration should fallback to another backend""" + + def __init__(self, backend): + self.backend = backend + + +def inside_dask_worker(): + """Check whether the current function is executed inside a Dask worker. + """ + # This function can not be in joblib._dask because there would be a + # circular import: + # _dask imports _parallel_backend that imports _dask ... 
+ try: + from distributed import get_worker + except ImportError: + return False + + try: + get_worker() + return True + except ValueError: + return False diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/_utils.py b/llmeval-env/lib/python3.10/site-packages/joblib/_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..0b7cc64ee51695fe18e2fc8a819696e0246b54f8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/_utils.py @@ -0,0 +1,83 @@ +# Adapted from https://stackoverflow.com/a/9558001/2536294 + +import ast +from dataclasses import dataclass +import operator as op + + +from ._multiprocessing_helpers import mp + +if mp is not None: + from .externals.loky.process_executor import _ExceptionWithTraceback + + +# supported operators +operators = { + ast.Add: op.add, + ast.Sub: op.sub, + ast.Mult: op.mul, + ast.Div: op.truediv, + ast.FloorDiv: op.floordiv, + ast.Mod: op.mod, + ast.Pow: op.pow, + ast.USub: op.neg, +} + + +def eval_expr(expr): + """ + >>> eval_expr('2*6') + 12 + >>> eval_expr('2**6') + 64 + >>> eval_expr('1 + 2*3**(4) / (6 + -7)') + -161.0 + """ + try: + return eval_(ast.parse(expr, mode="eval").body) + except (TypeError, SyntaxError, KeyError) as e: + raise ValueError( + f"{expr!r} is not a valid or supported arithmetic expression." + ) from e + + +def eval_(node): + if isinstance(node, ast.Constant): # + return node.value + elif isinstance(node, ast.BinOp): # + return operators[type(node.op)](eval_(node.left), eval_(node.right)) + elif isinstance(node, ast.UnaryOp): # e.g., -1 + return operators[type(node.op)](eval_(node.operand)) + else: + raise TypeError(node) + + +@dataclass(frozen=True) +class _Sentinel: + """A sentinel to mark a parameter as not explicitly set""" + default_value: object + + def __repr__(self): + return f"default({self.default_value!r})" + + +class _TracebackCapturingWrapper: + """Protect function call and return error with traceback.""" + + def __init__(self, func): + self.func = func + + def __call__(self, **kwargs): + try: + return self.func(**kwargs) + except BaseException as e: + return _ExceptionWithTraceback(e) + + +def _retrieve_traceback_capturing_wrapped_call(out): + if isinstance(out, _ExceptionWithTraceback): + rebuild, args = out.__reduce__() + out = rebuild(*args) + if isinstance(out, BaseException): + raise out + return out diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/backports.py b/llmeval-env/lib/python3.10/site-packages/joblib/backports.py new file mode 100644 index 0000000000000000000000000000000000000000..3a14f107689e82b634da6794d30e658fdc000987 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/backports.py @@ -0,0 +1,177 @@ +""" +Backports of fixes for joblib dependencies +""" +import os +import re +import time + +from os.path import basename +from multiprocessing import util + + +class Version: + """Backport from deprecated distutils + + We maintain this backport to avoid introducing a new dependency on + `packaging`. + + We might rexplore this choice in the future if all major Python projects + introduce a dependency on packaging anyway. 
+ """ + + def __init__(self, vstring=None): + if vstring: + self.parse(vstring) + + def __repr__(self): + return "%s ('%s')" % (self.__class__.__name__, str(self)) + + def __eq__(self, other): + c = self._cmp(other) + if c is NotImplemented: + return c + return c == 0 + + def __lt__(self, other): + c = self._cmp(other) + if c is NotImplemented: + return c + return c < 0 + + def __le__(self, other): + c = self._cmp(other) + if c is NotImplemented: + return c + return c <= 0 + + def __gt__(self, other): + c = self._cmp(other) + if c is NotImplemented: + return c + return c > 0 + + def __ge__(self, other): + c = self._cmp(other) + if c is NotImplemented: + return c + return c >= 0 + + +class LooseVersion(Version): + """Backport from deprecated distutils + + We maintain this backport to avoid introducing a new dependency on + `packaging`. + + We might rexplore this choice in the future if all major Python projects + introduce a dependency on packaging anyway. + """ + + component_re = re.compile(r'(\d+ | [a-z]+ | \.)', re.VERBOSE) + + def __init__(self, vstring=None): + if vstring: + self.parse(vstring) + + def parse(self, vstring): + # I've given up on thinking I can reconstruct the version string + # from the parsed tuple -- so I just store the string here for + # use by __str__ + self.vstring = vstring + components = [x for x in self.component_re.split(vstring) + if x and x != '.'] + for i, obj in enumerate(components): + try: + components[i] = int(obj) + except ValueError: + pass + + self.version = components + + def __str__(self): + return self.vstring + + def __repr__(self): + return "LooseVersion ('%s')" % str(self) + + def _cmp(self, other): + if isinstance(other, str): + other = LooseVersion(other) + elif not isinstance(other, LooseVersion): + return NotImplemented + + if self.version == other.version: + return 0 + if self.version < other.version: + return -1 + if self.version > other.version: + return 1 + + +try: + import numpy as np + + def make_memmap(filename, dtype='uint8', mode='r+', offset=0, + shape=None, order='C', unlink_on_gc_collect=False): + """Custom memmap constructor compatible with numpy.memmap. + + This function: + - is a backport the numpy memmap offset fix (See + https://github.com/numpy/numpy/pull/8443 for more details. + The numpy fix is available starting numpy 1.13) + - adds ``unlink_on_gc_collect``, which specifies explicitly whether + the process re-constructing the memmap owns a reference to the + underlying file. If set to True, it adds a finalizer to the + newly-created memmap that sends a maybe_unlink request for the + memmaped file to resource_tracker. 
+ """ + util.debug( + "[MEMMAP READ] creating a memmap (shape {}, filename {}, " + "pid {})".format(shape, basename(filename), os.getpid()) + ) + + mm = np.memmap(filename, dtype=dtype, mode=mode, offset=offset, + shape=shape, order=order) + if LooseVersion(np.__version__) < '1.13': + mm.offset = offset + if unlink_on_gc_collect: + from ._memmapping_reducer import add_maybe_unlink_finalizer + add_maybe_unlink_finalizer(mm) + return mm +except ImportError: + def make_memmap(filename, dtype='uint8', mode='r+', offset=0, + shape=None, order='C', unlink_on_gc_collect=False): + raise NotImplementedError( + "'joblib.backports.make_memmap' should not be used " + 'if numpy is not installed.') + + +if os.name == 'nt': + # https://github.com/joblib/joblib/issues/540 + access_denied_errors = (5, 13) + from os import replace + + def concurrency_safe_rename(src, dst): + """Renames ``src`` into ``dst`` overwriting ``dst`` if it exists. + + On Windows os.replace can yield permission errors if executed by two + different processes. + """ + max_sleep_time = 1 + total_sleep_time = 0 + sleep_time = 0.001 + while total_sleep_time < max_sleep_time: + try: + replace(src, dst) + break + except Exception as exc: + if getattr(exc, 'winerror', None) in access_denied_errors: + time.sleep(sleep_time) + total_sleep_time += sleep_time + sleep_time *= 2 + else: + raise + else: + raise +else: + from os import replace as concurrency_safe_rename # noqa diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/externals/__init__.py b/llmeval-env/lib/python3.10/site-packages/joblib/externals/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/__init__.py b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5886d2a62092bdc9f444d7a22058d065de567818 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/__init__.py @@ -0,0 +1,44 @@ +r"""The :mod:`loky` module manages a pool of worker that can be re-used across time. +It provides a robust and dynamic implementation os the +:class:`ProcessPoolExecutor` and a function :func:`get_reusable_executor` which +hide the pool management under the hood. 
+""" +from concurrent.futures import ( + ALL_COMPLETED, + FIRST_COMPLETED, + FIRST_EXCEPTION, + CancelledError, + Executor, + TimeoutError, + as_completed, + wait, +) + +from ._base import Future +from .backend.context import cpu_count +from .backend.reduction import set_loky_pickler +from .reusable_executor import get_reusable_executor +from .cloudpickle_wrapper import wrap_non_picklable_objects +from .process_executor import BrokenProcessPool, ProcessPoolExecutor + + +__all__ = [ + "get_reusable_executor", + "cpu_count", + "wait", + "as_completed", + "Future", + "Executor", + "ProcessPoolExecutor", + "BrokenProcessPool", + "CancelledError", + "TimeoutError", + "FIRST_COMPLETED", + "FIRST_EXCEPTION", + "ALL_COMPLETED", + "wrap_non_picklable_objects", + "set_loky_pickler", +] + + +__version__ = "3.4.1" diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/_base.py b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/_base.py new file mode 100644 index 0000000000000000000000000000000000000000..da0abc1e7fa18363e6342a3b67410f1429e6fa10 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/_base.py @@ -0,0 +1,28 @@ +############################################################################### +# Modification of concurrent.futures.Future +# +# author: Thomas Moreau and Olivier Grisel +# +# adapted from concurrent/futures/_base.py (17/02/2017) +# * Do not use yield from +# * Use old super syntax +# +# Copyright 2009 Brian Quinlan. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +from concurrent.futures import Future as _BaseFuture +from concurrent.futures._base import LOGGER + + +# To make loky._base.Future instances awaitable by concurrent.futures.wait, +# derive our custom Future class from _BaseFuture. _invoke_callback is the only +# modification made to this class in loky. +# TODO investigate why using `concurrent.futures.Future` directly does not +# always work in our test suite. 
+class Future(_BaseFuture): + def _invoke_callbacks(self): + for callback in self._done_callbacks: + try: + callback(self) + except BaseException: + LOGGER.exception(f"exception calling callback for {self!r}") diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/_posix_reduction.py b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/_posix_reduction.py new file mode 100644 index 0000000000000000000000000000000000000000..4b800ec07ff26af38174097a194e24413bf6fc2d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/_posix_reduction.py @@ -0,0 +1,67 @@ +############################################################################### +# Extra reducers for Unix based system and connections objects +# +# author: Thomas Moreau and Olivier Grisel +# +# adapted from multiprocessing/reduction.py (17/02/2017) +# * Add adapted reduction for LokyProcesses and socket/Connection +# +import os +import socket +import _socket +from multiprocessing.connection import Connection +from multiprocessing.context import get_spawning_popen + +from .reduction import register + +HAVE_SEND_HANDLE = ( + hasattr(socket, "CMSG_LEN") + and hasattr(socket, "SCM_RIGHTS") + and hasattr(socket.socket, "sendmsg") +) + + +def _mk_inheritable(fd): + os.set_inheritable(fd, True) + return fd + + +def DupFd(fd): + """Return a wrapper for an fd.""" + popen_obj = get_spawning_popen() + if popen_obj is not None: + return popen_obj.DupFd(popen_obj.duplicate_for_child(fd)) + elif HAVE_SEND_HANDLE: + from multiprocessing import resource_sharer + + return resource_sharer.DupFd(fd) + else: + raise TypeError( + "Cannot pickle connection object. This object can only be " + "passed when spawning a new process" + ) + + +def _reduce_socket(s): + df = DupFd(s.fileno()) + return _rebuild_socket, (df, s.family, s.type, s.proto) + + +def _rebuild_socket(df, family, type, proto): + fd = df.detach() + return socket.fromfd(fd, family, type, proto) + + +def rebuild_connection(df, readable, writable): + fd = df.detach() + return Connection(fd, readable, writable) + + +def reduce_connection(conn): + df = DupFd(conn.fileno()) + return rebuild_connection, (df, conn.readable, conn.writable) + + +register(socket.socket, _reduce_socket) +register(_socket.socket, _reduce_socket) +register(Connection, reduce_connection) diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/_win_reduction.py b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/_win_reduction.py new file mode 100644 index 0000000000000000000000000000000000000000..506d0ecba7c8951ddeaa05b48eb1bdadc8d5ff46 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/_win_reduction.py @@ -0,0 +1,18 @@ +############################################################################### +# Extra reducers for Windows system and connections objects +# +# author: Thomas Moreau and Olivier Grisel +# +# adapted from multiprocessing/reduction.py (17/02/2017) +# * Add adapted reduction for LokyProcesses and socket/PipeConnection +# +import socket +from multiprocessing import connection +from multiprocessing.reduction import _reduce_socket + +from .reduction import register + +# register reduction for win32 communication objects +register(socket.socket, _reduce_socket) +register(connection.Connection, connection.reduce_connection) +register(connection.PipeConnection, connection.reduce_pipe_connection) diff --git 
a/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/fork_exec.py b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/fork_exec.py new file mode 100644 index 0000000000000000000000000000000000000000..2353c42f51a6e6c558ce70e35e1b7405e22d70ed --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/fork_exec.py @@ -0,0 +1,43 @@ +############################################################################### +# Launch a subprocess using forkexec and make sure only the needed fd are +# shared in the two process. +# +# author: Thomas Moreau and Olivier Grisel +# +import os +import sys + + +def close_fds(keep_fds): # pragma: no cover + """Close all the file descriptors except those in keep_fds.""" + + # Make sure to keep stdout and stderr open for logging purpose + keep_fds = {*keep_fds, 1, 2} + + # We try to retrieve all the open fds + try: + open_fds = {int(fd) for fd in os.listdir("/proc/self/fd")} + except FileNotFoundError: + import resource + + max_nfds = resource.getrlimit(resource.RLIMIT_NOFILE)[0] + open_fds = {*range(max_nfds)} + + for i in open_fds - keep_fds: + try: + os.close(i) + except OSError: + pass + + +def fork_exec(cmd, keep_fds, env=None): + # copy the environment variables to set in the child process + env = env or {} + child_env = {**os.environ, **env} + + pid = os.fork() + if pid == 0: # pragma: no cover + close_fds(keep_fds) + os.execve(sys.executable, cmd, child_env) + else: + return pid diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/popen_loky_posix.py b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/popen_loky_posix.py new file mode 100644 index 0000000000000000000000000000000000000000..74395be0757f0a07ef92a7b0efe1e1ea4ecdac77 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/popen_loky_posix.py @@ -0,0 +1,193 @@ +############################################################################### +# Popen for LokyProcess. +# +# author: Thomas Moreau and Olivier Grisel +# +import os +import sys +import signal +import pickle +from io import BytesIO +from multiprocessing import util, process +from multiprocessing.connection import wait +from multiprocessing.context import set_spawning_popen + +from . import reduction, resource_tracker, spawn + + +__all__ = ["Popen"] + + +# +# Wrapper for an fd used while launching a process +# + + +class _DupFd: + def __init__(self, fd): + self.fd = reduction._mk_inheritable(fd) + + def detach(self): + return self.fd + + +# +# Start child process using subprocess.Popen +# + + +class Popen: + method = "loky" + DupFd = _DupFd + + def __init__(self, process_obj): + sys.stdout.flush() + sys.stderr.flush() + self.returncode = None + self._fds = [] + self._launch(process_obj) + + def duplicate_for_child(self, fd): + self._fds.append(fd) + return reduction._mk_inheritable(fd) + + def poll(self, flag=os.WNOHANG): + if self.returncode is None: + while True: + try: + pid, sts = os.waitpid(self.pid, flag) + except OSError: + # Child process not yet created. 
See #1731717 + # e.errno == errno.ECHILD == 10 + return None + else: + break + if pid == self.pid: + if os.WIFSIGNALED(sts): + self.returncode = -os.WTERMSIG(sts) + else: + assert os.WIFEXITED(sts) + self.returncode = os.WEXITSTATUS(sts) + return self.returncode + + def wait(self, timeout=None): + if self.returncode is None: + if timeout is not None: + if not wait([self.sentinel], timeout): + return None + # This shouldn't block if wait() returned successfully. + return self.poll(os.WNOHANG if timeout == 0.0 else 0) + return self.returncode + + def terminate(self): + if self.returncode is None: + try: + os.kill(self.pid, signal.SIGTERM) + except ProcessLookupError: + pass + except OSError: + if self.wait(timeout=0.1) is None: + raise + + def _launch(self, process_obj): + + tracker_fd = resource_tracker._resource_tracker.getfd() + + fp = BytesIO() + set_spawning_popen(self) + try: + prep_data = spawn.get_preparation_data( + process_obj._name, + getattr(process_obj, "init_main_module", True), + ) + reduction.dump(prep_data, fp) + reduction.dump(process_obj, fp) + + finally: + set_spawning_popen(None) + + try: + parent_r, child_w = os.pipe() + child_r, parent_w = os.pipe() + # for fd in self._fds: + # _mk_inheritable(fd) + + cmd_python = [sys.executable] + cmd_python += ["-m", self.__module__] + cmd_python += ["--process-name", str(process_obj.name)] + cmd_python += ["--pipe", str(reduction._mk_inheritable(child_r))] + reduction._mk_inheritable(child_w) + reduction._mk_inheritable(tracker_fd) + self._fds += [child_r, child_w, tracker_fd] + if sys.version_info >= (3, 8) and os.name == "posix": + mp_tracker_fd = prep_data["mp_tracker_args"]["fd"] + self.duplicate_for_child(mp_tracker_fd) + + from .fork_exec import fork_exec + + pid = fork_exec(cmd_python, self._fds, env=process_obj.env) + util.debug( + f"launched python with pid {pid} and cmd:\n{cmd_python}" + ) + self.sentinel = parent_r + + method = "getbuffer" + if not hasattr(fp, method): + method = "getvalue" + with os.fdopen(parent_w, "wb") as f: + f.write(getattr(fp, method)()) + self.pid = pid + finally: + if parent_r is not None: + util.Finalize(self, os.close, (parent_r,)) + for fd in (child_r, child_w): + if fd is not None: + os.close(fd) + + @staticmethod + def thread_is_spawning(): + return True + + +if __name__ == "__main__": + import argparse + + parser = argparse.ArgumentParser("Command line parser") + parser.add_argument( + "--pipe", type=int, required=True, help="File handle for the pipe" + ) + parser.add_argument( + "--process-name", + type=str, + default=None, + help="Identifier for debugging purpose", + ) + + args = parser.parse_args() + + info = {} + exitcode = 1 + try: + with os.fdopen(args.pipe, "rb") as from_parent: + process.current_process()._inheriting = True + try: + prep_data = pickle.load(from_parent) + spawn.prepare(prep_data) + process_obj = pickle.load(from_parent) + finally: + del process.current_process()._inheriting + + exitcode = process_obj._bootstrap() + except Exception: + print("\n\n" + "-" * 80) + print(f"{args.process_name} failed with traceback: ") + print("-" * 80) + import traceback + + print(traceback.format_exc()) + print("\n" + "-" * 80) + finally: + if from_parent is not None: + from_parent.close() + + sys.exit(exitcode) diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/process.py b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/process.py new file mode 100644 index 
0000000000000000000000000000000000000000..356255094b7647be8de6998a8752dd7807b25e10 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/process.py @@ -0,0 +1,85 @@ +############################################################################### +# LokyProcess implementation +# +# authors: Thomas Moreau and Olivier Grisel +# +# based on multiprocessing/process.py (17/02/2017) +# +import sys +from multiprocessing.context import assert_spawning +from multiprocessing.process import BaseProcess + + +class LokyProcess(BaseProcess): + _start_method = "loky" + + def __init__( + self, + group=None, + target=None, + name=None, + args=(), + kwargs={}, + daemon=None, + init_main_module=False, + env=None, + ): + super().__init__( + group=group, + target=target, + name=name, + args=args, + kwargs=kwargs, + daemon=daemon, + ) + self.env = {} if env is None else env + self.authkey = self.authkey + self.init_main_module = init_main_module + + @staticmethod + def _Popen(process_obj): + if sys.platform == "win32": + from .popen_loky_win32 import Popen + else: + from .popen_loky_posix import Popen + return Popen(process_obj) + + +class LokyInitMainProcess(LokyProcess): + _start_method = "loky_init_main" + + def __init__( + self, + group=None, + target=None, + name=None, + args=(), + kwargs={}, + daemon=None, + ): + super().__init__( + group=group, + target=target, + name=name, + args=args, + kwargs=kwargs, + daemon=daemon, + init_main_module=True, + ) + + +# +# We subclass bytes to avoid accidental transmission of auth keys over network +# + + +class AuthenticationKey(bytes): + def __reduce__(self): + try: + assert_spawning(self) + except RuntimeError: + raise TypeError( + "Pickling an AuthenticationKey object is " + "disallowed for security reasons" + ) + return AuthenticationKey, (bytes(self),) diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/resource_tracker.py b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/resource_tracker.py new file mode 100644 index 0000000000000000000000000000000000000000..25204a7a729d4d5f295070cd050c17a4ed9d49b7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/resource_tracker.py @@ -0,0 +1,378 @@ +############################################################################### +# Server process to keep track of unlinked resources, like folders and +# semaphores and clean them. +# +# author: Thomas Moreau +# +# adapted from multiprocessing/semaphore_tracker.py (17/02/2017) +# * include custom spawnv_passfds to start the process +# * add some VERBOSE logging +# +# TODO: multiprocessing.resource_tracker was contributed to Python 3.8 so +# once loky drops support for Python 3.7 it might be possible to stop +# maintaining this loky-specific fork. As a consequence, it might also be +# possible to stop maintaining the loky.backend.synchronize fork of +# multiprocessing.synchronize. + +# +# On Unix we run a server process which keeps track of unlinked +# resources. The server ignores SIGINT and SIGTERM and reads from a +# pipe. The resource_tracker implements a reference counting scheme: each time +# a Python process anticipates the shared usage of a resource by another +# process, it signals the resource_tracker of this shared usage, and in return, +# the resource_tracker increments the resource's reference count by 1. 
+# Similarly, when access to a resource is closed by a Python process, the +# process notifies the resource_tracker by asking it to decrement the +# resource's reference count by 1. When the reference count drops to 0, the +# resource_tracker attempts to clean up the underlying resource. + +# Finally, every other process connected to the resource tracker has a copy of +# the writable end of the pipe used to communicate with it, so the resource +# tracker gets EOF when all other processes have exited. Then the +# resource_tracker process unlinks any remaining leaked resources (with +# reference count above 0) + +# For semaphores, this is important because the system only supports a limited +# number of named semaphores, and they will not be automatically removed till +# the next reboot. Without this resource tracker process, "killall python" +# would probably leave unlinked semaphores. + +# Note that this behavior differs from CPython's resource_tracker, which only +# implements list of shared resources, and not a proper refcounting scheme. +# Also, CPython's resource tracker will only attempt to cleanup those shared +# resources once all procsses connected to the resouce tracker have exited. + + +import os +import shutil +import sys +import signal +import warnings +import threading +from _multiprocessing import sem_unlink +from multiprocessing import util + +from . import spawn + +if sys.platform == "win32": + import _winapi + import msvcrt + from multiprocessing.reduction import duplicate + + +__all__ = ["ensure_running", "register", "unregister"] + +_HAVE_SIGMASK = hasattr(signal, "pthread_sigmask") +_IGNORED_SIGNALS = (signal.SIGINT, signal.SIGTERM) + +_CLEANUP_FUNCS = {"folder": shutil.rmtree, "file": os.unlink} + +if os.name == "posix": + _CLEANUP_FUNCS["semlock"] = sem_unlink + + +VERBOSE = False + + +class ResourceTracker: + def __init__(self): + self._lock = threading.Lock() + self._fd = None + self._pid = None + + def getfd(self): + self.ensure_running() + return self._fd + + def ensure_running(self): + """Make sure that resource tracker process is running. + + This can be run from any process. Usually a child process will use + the resource created by its parent.""" + with self._lock: + if self._fd is not None: + # resource tracker was launched before, is it still running? + if self._check_alive(): + # => still alive + return + # => dead, launch it again + os.close(self._fd) + if os.name == "posix": + try: + # At this point, the resource_tracker process has been + # killed or crashed. Let's remove the process entry + # from the process table to avoid zombie processes. + os.waitpid(self._pid, 0) + except OSError: + # The process was terminated or is a child from an + # ancestor of the current process. + pass + self._fd = None + self._pid = None + + warnings.warn( + "resource_tracker: process died unexpectedly, " + "relaunching. Some folders/sempahores might " + "leak." + ) + + fds_to_pass = [] + try: + fds_to_pass.append(sys.stderr.fileno()) + except Exception: + pass + + r, w = os.pipe() + if sys.platform == "win32": + _r = duplicate(msvcrt.get_osfhandle(r), inheritable=True) + os.close(r) + r = _r + + cmd = f"from {main.__module__} import main; main({r}, {VERBOSE})" + try: + fds_to_pass.append(r) + # process will out live us, so no need to wait on pid + exe = spawn.get_executable() + args = [exe, *util._args_from_interpreter_flags(), "-c", cmd] + util.debug(f"launching resource tracker: {args}") + # bpo-33613: Register a signal mask that will block the + # signals. 
This signal mask will be inherited by the child + # that is going to be spawned and will protect the child from a + # race condition that can make the child die before it + # registers signal handlers for SIGINT and SIGTERM. The mask is + # unregistered after spawning the child. + try: + if _HAVE_SIGMASK: + signal.pthread_sigmask( + signal.SIG_BLOCK, _IGNORED_SIGNALS + ) + pid = spawnv_passfds(exe, args, fds_to_pass) + finally: + if _HAVE_SIGMASK: + signal.pthread_sigmask( + signal.SIG_UNBLOCK, _IGNORED_SIGNALS + ) + except BaseException: + os.close(w) + raise + else: + self._fd = w + self._pid = pid + finally: + if sys.platform == "win32": + _winapi.CloseHandle(r) + else: + os.close(r) + + def _check_alive(self): + """Check for the existence of the resource tracker process.""" + try: + self._send("PROBE", "", "") + except BrokenPipeError: + return False + else: + return True + + def register(self, name, rtype): + """Register a named resource, and increment its refcount.""" + self.ensure_running() + self._send("REGISTER", name, rtype) + + def unregister(self, name, rtype): + """Unregister a named resource with resource tracker.""" + self.ensure_running() + self._send("UNREGISTER", name, rtype) + + def maybe_unlink(self, name, rtype): + """Decrement the refcount of a resource, and delete it if it hits 0""" + self.ensure_running() + self._send("MAYBE_UNLINK", name, rtype) + + def _send(self, cmd, name, rtype): + if len(name) > 512: + # posix guarantees that writes to a pipe of less than PIPE_BUF + # bytes are atomic, and that PIPE_BUF >= 512 + raise ValueError("name too long") + msg = f"{cmd}:{name}:{rtype}\n".encode("ascii") + nbytes = os.write(self._fd, msg) + assert nbytes == len(msg) + + +_resource_tracker = ResourceTracker() +ensure_running = _resource_tracker.ensure_running +register = _resource_tracker.register +maybe_unlink = _resource_tracker.maybe_unlink +unregister = _resource_tracker.unregister +getfd = _resource_tracker.getfd + + +def main(fd, verbose=0): + """Run resource tracker.""" + # protect the process from ^C and "killall python" etc + if verbose: + util.log_to_stderr(level=util.DEBUG) + + signal.signal(signal.SIGINT, signal.SIG_IGN) + signal.signal(signal.SIGTERM, signal.SIG_IGN) + + if _HAVE_SIGMASK: + signal.pthread_sigmask(signal.SIG_UNBLOCK, _IGNORED_SIGNALS) + + for f in (sys.stdin, sys.stdout): + try: + f.close() + except Exception: + pass + + if verbose: + util.debug("Main resource tracker is running") + + registry = {rtype: {} for rtype in _CLEANUP_FUNCS.keys()} + try: + # keep track of registered/unregistered resources + if sys.platform == "win32": + fd = msvcrt.open_osfhandle(fd, os.O_RDONLY) + with open(fd, "rb") as f: + while True: + line = f.readline() + if line == b"": # EOF + break + try: + splitted = line.strip().decode("ascii").split(":") + # name can potentially contain separator symbols (for + # instance folders on Windows) + cmd, name, rtype = ( + splitted[0], + ":".join(splitted[1:-1]), + splitted[-1], + ) + + if cmd == "PROBE": + continue + + if rtype not in _CLEANUP_FUNCS: + raise ValueError( + f"Cannot register {name} for automatic cleanup: " + f"unknown resource type ({rtype}). 
Resource type " + "should be one of the following: " + f"{list(_CLEANUP_FUNCS.keys())}" + ) + + if cmd == "REGISTER": + if name not in registry[rtype]: + registry[rtype][name] = 1 + else: + registry[rtype][name] += 1 + + if verbose: + util.debug( + "[ResourceTracker] incremented refcount of " + f"{rtype} {name} " + f"(current {registry[rtype][name]})" + ) + elif cmd == "UNREGISTER": + del registry[rtype][name] + if verbose: + util.debug( + f"[ResourceTracker] unregister {name} {rtype}: " + f"registry({len(registry)})" + ) + elif cmd == "MAYBE_UNLINK": + registry[rtype][name] -= 1 + if verbose: + util.debug( + "[ResourceTracker] decremented refcount of " + f"{rtype} {name} " + f"(current {registry[rtype][name]})" + ) + + if registry[rtype][name] == 0: + del registry[rtype][name] + try: + if verbose: + util.debug( + f"[ResourceTracker] unlink {name}" + ) + _CLEANUP_FUNCS[rtype](name) + except Exception as e: + warnings.warn( + f"resource_tracker: {name}: {e!r}" + ) + + else: + raise RuntimeError(f"unrecognized command {cmd!r}") + except BaseException: + try: + sys.excepthook(*sys.exc_info()) + except BaseException: + pass + finally: + # all processes have terminated; cleanup any remaining resources + def _unlink_resources(rtype_registry, rtype): + if rtype_registry: + try: + warnings.warn( + "resource_tracker: There appear to be " + f"{len(rtype_registry)} leaked {rtype} objects to " + "clean up at shutdown" + ) + except Exception: + pass + for name in rtype_registry: + # For some reason the process which created and registered this + # resource has failed to unregister it. Presumably it has + # died. We therefore clean it up. + try: + _CLEANUP_FUNCS[rtype](name) + if verbose: + util.debug(f"[ResourceTracker] unlink {name}") + except Exception as e: + warnings.warn(f"resource_tracker: {name}: {e!r}") + + for rtype, rtype_registry in registry.items(): + if rtype == "folder": + continue + else: + _unlink_resources(rtype_registry, rtype) + + # The default cleanup routine for folders deletes everything inside + # those folders recursively, which can include other resources tracked + # by the resource tracker). To limit the risk of the resource tracker + # attempting to delete twice a resource (once as part of a tracked + # folder, and once as a resource), we delete the folders after all + # other resource types. 
+ if "folder" in registry: + _unlink_resources(registry["folder"], "folder") + + if verbose: + util.debug("resource tracker shut down") + + +# +# Start a program with only specified fds kept open +# + + +def spawnv_passfds(path, args, passfds): + passfds = sorted(passfds) + if sys.platform != "win32": + errpipe_read, errpipe_write = os.pipe() + try: + from .reduction import _mk_inheritable + from .fork_exec import fork_exec + + _pass = [_mk_inheritable(fd) for fd in passfds] + return fork_exec(args, _pass) + finally: + os.close(errpipe_read) + os.close(errpipe_write) + else: + cmd = " ".join(f'"{x}"' for x in args) + try: + _, ht, pid, _ = _winapi.CreateProcess( + path, cmd, None, None, True, 0, None, None, None + ) + _winapi.CloseHandle(ht) + except BaseException: + pass + return pid diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/spawn.py b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/spawn.py new file mode 100644 index 0000000000000000000000000000000000000000..d011c398035f4e013ef36615a56e3bf0d8519d07 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/spawn.py @@ -0,0 +1,250 @@ +############################################################################### +# Prepares and processes the data to setup the new process environment +# +# author: Thomas Moreau and Olivier Grisel +# +# adapted from multiprocessing/spawn.py (17/02/2017) +# * Improve logging data +# +import os +import sys +import runpy +import textwrap +import types +from multiprocessing import process, util + + +if sys.platform != "win32": + WINEXE = False + WINSERVICE = False +else: + import msvcrt + from multiprocessing.reduction import duplicate + + WINEXE = sys.platform == "win32" and getattr(sys, "frozen", False) + WINSERVICE = sys.executable.lower().endswith("pythonservice.exe") + +if WINSERVICE: + _python_exe = os.path.join(sys.exec_prefix, "python.exe") +else: + _python_exe = sys.executable + + +def get_executable(): + return _python_exe + + +def _check_not_importing_main(): + if getattr(process.current_process(), "_inheriting", False): + raise RuntimeError( + textwrap.dedent( + """\ + An attempt has been made to start a new process before the + current process has finished its bootstrapping phase. + + This probably means that you are not using fork to start your + child processes and you have forgotten to use the proper idiom + in the main module: + + if __name__ == '__main__': + freeze_support() + ... 
+ + The "freeze_support()" line can be omitted if the program + is not going to be frozen to produce an executable.""" + ) + ) + + +def get_preparation_data(name, init_main_module=True): + """Return info about parent needed by child to unpickle process object.""" + _check_not_importing_main() + d = dict( + log_to_stderr=util._log_to_stderr, + authkey=bytes(process.current_process().authkey), + name=name, + sys_argv=sys.argv, + orig_dir=process.ORIGINAL_DIR, + dir=os.getcwd(), + ) + + # Send sys_path and make sure the current directory will not be changed + d["sys_path"] = [p if p != "" else process.ORIGINAL_DIR for p in sys.path] + + # Make sure to pass the information if the multiprocessing logger is active + if util._logger is not None: + d["log_level"] = util._logger.getEffectiveLevel() + if util._logger.handlers: + h = util._logger.handlers[0] + d["log_fmt"] = h.formatter._fmt + + # Tell the child how to communicate with the resource_tracker + from .resource_tracker import _resource_tracker + + _resource_tracker.ensure_running() + d["tracker_args"] = {"pid": _resource_tracker._pid} + if sys.platform == "win32": + d["tracker_args"]["fh"] = msvcrt.get_osfhandle(_resource_tracker._fd) + else: + d["tracker_args"]["fd"] = _resource_tracker._fd + + if sys.version_info >= (3, 8) and os.name == "posix": + # joblib/loky#242: allow loky processes to retrieve the resource + # tracker of their parent in case the child processes depickles + # shared_memory objects, that are still tracked by multiprocessing's + # resource_tracker by default. + # XXX: this is a workaround that may be error prone: in the future, it + # would be better to have loky subclass multiprocessing's shared_memory + # to force registration of shared_memory segments via loky's + # resource_tracker. 
+ from multiprocessing.resource_tracker import ( + _resource_tracker as mp_resource_tracker, + ) + + # multiprocessing's resource_tracker must be running before loky + # process is created (othewise the child won't be able to use it if it + # is created later on) + mp_resource_tracker.ensure_running() + d["mp_tracker_args"] = { + "fd": mp_resource_tracker._fd, + "pid": mp_resource_tracker._pid, + } + + # Figure out whether to initialise main in the subprocess as a module + # or through direct execution (or to leave it alone entirely) + if init_main_module: + main_module = sys.modules["__main__"] + try: + main_mod_name = getattr(main_module.__spec__, "name", None) + except BaseException: + main_mod_name = None + if main_mod_name is not None: + d["init_main_from_name"] = main_mod_name + elif sys.platform != "win32" or (not WINEXE and not WINSERVICE): + main_path = getattr(main_module, "__file__", None) + if main_path is not None: + if ( + not os.path.isabs(main_path) + and process.ORIGINAL_DIR is not None + ): + main_path = os.path.join(process.ORIGINAL_DIR, main_path) + d["init_main_from_path"] = os.path.normpath(main_path) + + return d + + +# +# Prepare current process +# +old_main_modules = [] + + +def prepare(data, parent_sentinel=None): + """Try to get current process ready to unpickle process object.""" + if "name" in data: + process.current_process().name = data["name"] + + if "authkey" in data: + process.current_process().authkey = data["authkey"] + + if "log_to_stderr" in data and data["log_to_stderr"]: + util.log_to_stderr() + + if "log_level" in data: + util.get_logger().setLevel(data["log_level"]) + + if "log_fmt" in data: + import logging + + util.get_logger().handlers[0].setFormatter( + logging.Formatter(data["log_fmt"]) + ) + + if "sys_path" in data: + sys.path = data["sys_path"] + + if "sys_argv" in data: + sys.argv = data["sys_argv"] + + if "dir" in data: + os.chdir(data["dir"]) + + if "orig_dir" in data: + process.ORIGINAL_DIR = data["orig_dir"] + + if "mp_tracker_args" in data: + from multiprocessing.resource_tracker import ( + _resource_tracker as mp_resource_tracker, + ) + + mp_resource_tracker._fd = data["mp_tracker_args"]["fd"] + mp_resource_tracker._pid = data["mp_tracker_args"]["pid"] + if "tracker_args" in data: + from .resource_tracker import _resource_tracker + + _resource_tracker._pid = data["tracker_args"]["pid"] + if sys.platform == "win32": + handle = data["tracker_args"]["fh"] + handle = duplicate(handle, source_process=parent_sentinel) + _resource_tracker._fd = msvcrt.open_osfhandle(handle, os.O_RDONLY) + else: + _resource_tracker._fd = data["tracker_args"]["fd"] + + if "init_main_from_name" in data: + _fixup_main_from_name(data["init_main_from_name"]) + elif "init_main_from_path" in data: + _fixup_main_from_path(data["init_main_from_path"]) + + +# Multiprocessing module helpers to fix up the main module in +# spawned subprocesses +def _fixup_main_from_name(mod_name): + # __main__.py files for packages, directories, zip archives, etc, run + # their "main only" code unconditionally, so we don't even try to + # populate anything in __main__, nor do we make any changes to + # __main__ attributes + current_main = sys.modules["__main__"] + if mod_name == "__main__" or mod_name.endswith(".__main__"): + return + + # If this process was forked, __main__ may already be populated + if getattr(current_main.__spec__, "name", None) == mod_name: + return + + # Otherwise, __main__ may contain some non-main code where we need to + # support unpickling it properly. 
We rerun it as __mp_main__ and make + # the normal __main__ an alias to that + old_main_modules.append(current_main) + main_module = types.ModuleType("__mp_main__") + main_content = runpy.run_module( + mod_name, run_name="__mp_main__", alter_sys=True + ) + main_module.__dict__.update(main_content) + sys.modules["__main__"] = sys.modules["__mp_main__"] = main_module + + +def _fixup_main_from_path(main_path): + # If this process was forked, __main__ may already be populated + current_main = sys.modules["__main__"] + + # Unfortunately, the main ipython launch script historically had no + # "if __name__ == '__main__'" guard, so we work around that + # by treating it like a __main__.py file + # See https://github.com/ipython/ipython/issues/4698 + main_name = os.path.splitext(os.path.basename(main_path))[0] + if main_name == "ipython": + return + + # Otherwise, if __file__ already has the setting we expect, + # there's nothing more to do + if getattr(current_main, "__file__", None) == main_path: + return + + # If the parent process has sent a path through rather than a module + # name we assume it is an executable script that may contain + # non-main code that needs to be executed + old_main_modules.append(current_main) + main_module = types.ModuleType("__mp_main__") + main_content = runpy.run_path(main_path, run_name="__mp_main__") + main_module.__dict__.update(main_content) + sys.modules["__main__"] = sys.modules["__mp_main__"] = main_module diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/synchronize.py b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/synchronize.py new file mode 100644 index 0000000000000000000000000000000000000000..18db3e34db979240b4a4a943ea6931db3091321d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/synchronize.py @@ -0,0 +1,409 @@ +############################################################################### +# Synchronization primitives based on our SemLock implementation +# +# author: Thomas Moreau and Olivier Grisel +# +# adapted from multiprocessing/synchronize.py (17/02/2017) +# * Remove ctx argument for compatibility reason +# * Registers a cleanup function with the loky resource_tracker to remove the +# semaphore when the process dies instead. +# +# TODO: investigate which Python version is required to be able to use +# multiprocessing.resource_tracker and therefore multiprocessing.synchronize +# instead of a loky-specific fork. + +import os +import sys +import tempfile +import threading +import _multiprocessing +from time import time as _time +from multiprocessing import process, util +from multiprocessing.context import assert_spawning + +from . import resource_tracker + +__all__ = [ + "Lock", + "RLock", + "Semaphore", + "BoundedSemaphore", + "Condition", + "Event", +] +# Try to import the mp.synchronize module cleanly, if it fails +# raise ImportError for platforms lacking a working sem_open implementation. +# See issue 3770 +try: + from _multiprocessing import SemLock as _SemLock + from _multiprocessing import sem_unlink +except ImportError: + raise ImportError( + "This platform lacks a functioning sem_open" + " implementation, therefore, the required" + " synchronization primitives needed will not" + " function, see issue 3770." 
+ ) + +# +# Constants +# + +RECURSIVE_MUTEX, SEMAPHORE = range(2) +SEM_VALUE_MAX = _multiprocessing.SemLock.SEM_VALUE_MAX + + +# +# Base class for semaphores and mutexes; wraps `_multiprocessing.SemLock` +# + + +class SemLock: + + _rand = tempfile._RandomNameSequence() + + def __init__(self, kind, value, maxvalue, name=None): + # unlink_now is only used on win32 or when we are using fork. + unlink_now = False + if name is None: + # Try to find an unused name for the SemLock instance. + for _ in range(100): + try: + self._semlock = _SemLock( + kind, value, maxvalue, SemLock._make_name(), unlink_now + ) + except FileExistsError: # pragma: no cover + pass + else: + break + else: # pragma: no cover + raise FileExistsError("cannot find name for semaphore") + else: + self._semlock = _SemLock(kind, value, maxvalue, name, unlink_now) + self.name = name + util.debug( + f"created semlock with handle {self._semlock.handle} and name " + f'"{self.name}"' + ) + + self._make_methods() + + def _after_fork(obj): + obj._semlock._after_fork() + + util.register_after_fork(self, _after_fork) + + # When the object is garbage collected or the + # process shuts down we unlink the semaphore name + resource_tracker.register(self._semlock.name, "semlock") + util.Finalize( + self, SemLock._cleanup, (self._semlock.name,), exitpriority=0 + ) + + @staticmethod + def _cleanup(name): + try: + sem_unlink(name) + except FileNotFoundError: + # Already unlinked, possibly by user code: ignore and make sure to + # unregister the semaphore from the resource tracker. + pass + finally: + resource_tracker.unregister(name, "semlock") + + def _make_methods(self): + self.acquire = self._semlock.acquire + self.release = self._semlock.release + + def __enter__(self): + return self._semlock.acquire() + + def __exit__(self, *args): + return self._semlock.release() + + def __getstate__(self): + assert_spawning(self) + sl = self._semlock + h = sl.handle + return (h, sl.kind, sl.maxvalue, sl.name) + + def __setstate__(self, state): + self._semlock = _SemLock._rebuild(*state) + util.debug( + f'recreated blocker with handle {state[0]!r} and name "{state[3]}"' + ) + self._make_methods() + + @staticmethod + def _make_name(): + # OSX does not support long names for semaphores + return f"/loky-{os.getpid()}-{next(SemLock._rand)}" + + +# +# Semaphore +# + + +class Semaphore(SemLock): + def __init__(self, value=1): + SemLock.__init__(self, SEMAPHORE, value, SEM_VALUE_MAX) + + def get_value(self): + if sys.platform == "darwin": + raise NotImplementedError("OSX does not implement sem_getvalue") + return self._semlock._get_value() + + def __repr__(self): + try: + value = self._semlock._get_value() + except Exception: + value = "unknown" + return f"<{self.__class__.__name__}(value={value})>" + + +# +# Bounded semaphore +# + + +class BoundedSemaphore(Semaphore): + def __init__(self, value=1): + SemLock.__init__(self, SEMAPHORE, value, value) + + def __repr__(self): + try: + value = self._semlock._get_value() + except Exception: + value = "unknown" + return ( + f"<{self.__class__.__name__}(value={value}, " + f"maxvalue={self._semlock.maxvalue})>" + ) + + +# +# Non-recursive lock +# + + +class Lock(SemLock): + def __init__(self): + super().__init__(SEMAPHORE, 1, 1) + + def __repr__(self): + try: + if self._semlock._is_mine(): + name = process.current_process().name + if threading.current_thread().name != "MainThread": + name = f"{name}|{threading.current_thread().name}" + elif self._semlock._get_value() == 1: + name = "None" + elif self._semlock._count() > 
0: + name = "SomeOtherThread" + else: + name = "SomeOtherProcess" + except Exception: + name = "unknown" + return f"<{self.__class__.__name__}(owner={name})>" + + +# +# Recursive lock +# + + +class RLock(SemLock): + def __init__(self): + super().__init__(RECURSIVE_MUTEX, 1, 1) + + def __repr__(self): + try: + if self._semlock._is_mine(): + name = process.current_process().name + if threading.current_thread().name != "MainThread": + name = f"{name}|{threading.current_thread().name}" + count = self._semlock._count() + elif self._semlock._get_value() == 1: + name, count = "None", 0 + elif self._semlock._count() > 0: + name, count = "SomeOtherThread", "nonzero" + else: + name, count = "SomeOtherProcess", "nonzero" + except Exception: + name, count = "unknown", "unknown" + return f"<{self.__class__.__name__}({name}, {count})>" + + +# +# Condition variable +# + + +class Condition: + def __init__(self, lock=None): + self._lock = lock or RLock() + self._sleeping_count = Semaphore(0) + self._woken_count = Semaphore(0) + self._wait_semaphore = Semaphore(0) + self._make_methods() + + def __getstate__(self): + assert_spawning(self) + return ( + self._lock, + self._sleeping_count, + self._woken_count, + self._wait_semaphore, + ) + + def __setstate__(self, state): + ( + self._lock, + self._sleeping_count, + self._woken_count, + self._wait_semaphore, + ) = state + self._make_methods() + + def __enter__(self): + return self._lock.__enter__() + + def __exit__(self, *args): + return self._lock.__exit__(*args) + + def _make_methods(self): + self.acquire = self._lock.acquire + self.release = self._lock.release + + def __repr__(self): + try: + num_waiters = ( + self._sleeping_count._semlock._get_value() + - self._woken_count._semlock._get_value() + ) + except Exception: + num_waiters = "unknown" + return f"<{self.__class__.__name__}({self._lock}, {num_waiters})>" + + def wait(self, timeout=None): + assert ( + self._lock._semlock._is_mine() + ), "must acquire() condition before using wait()" + + # indicate that this thread is going to sleep + self._sleeping_count.release() + + # release lock + count = self._lock._semlock._count() + for _ in range(count): + self._lock.release() + + try: + # wait for notification or timeout + return self._wait_semaphore.acquire(True, timeout) + finally: + # indicate that this thread has woken + self._woken_count.release() + + # reacquire lock + for _ in range(count): + self._lock.acquire() + + def notify(self): + assert self._lock._semlock._is_mine(), "lock is not owned" + assert not self._wait_semaphore.acquire(False) + + # to take account of timeouts since last notify() we subtract + # woken_count from sleeping_count and rezero woken_count + while self._woken_count.acquire(False): + res = self._sleeping_count.acquire(False) + assert res + + if self._sleeping_count.acquire(False): # try grabbing a sleeper + self._wait_semaphore.release() # wake up one sleeper + self._woken_count.acquire() # wait for the sleeper to wake + + # rezero _wait_semaphore in case a timeout just happened + self._wait_semaphore.acquire(False) + + def notify_all(self): + assert self._lock._semlock._is_mine(), "lock is not owned" + assert not self._wait_semaphore.acquire(False) + + # to take account of timeouts since last notify*() we subtract + # woken_count from sleeping_count and rezero woken_count + while self._woken_count.acquire(False): + res = self._sleeping_count.acquire(False) + assert res + + sleepers = 0 + while self._sleeping_count.acquire(False): + self._wait_semaphore.release() # wake up one 
sleeper + sleepers += 1 + + if sleepers: + for _ in range(sleepers): + self._woken_count.acquire() # wait for a sleeper to wake + + # rezero wait_semaphore in case some timeouts just happened + while self._wait_semaphore.acquire(False): + pass + + def wait_for(self, predicate, timeout=None): + result = predicate() + if result: + return result + if timeout is not None: + endtime = _time() + timeout + else: + endtime = None + waittime = None + while not result: + if endtime is not None: + waittime = endtime - _time() + if waittime <= 0: + break + self.wait(waittime) + result = predicate() + return result + + +# +# Event +# + + +class Event: + def __init__(self): + self._cond = Condition(Lock()) + self._flag = Semaphore(0) + + def is_set(self): + with self._cond: + if self._flag.acquire(False): + self._flag.release() + return True + return False + + def set(self): + with self._cond: + self._flag.acquire(False) + self._flag.release() + self._cond.notify_all() + + def clear(self): + with self._cond: + self._flag.acquire(False) + + def wait(self, timeout=None): + with self._cond: + if self._flag.acquire(False): + self._flag.release() + else: + self._cond.wait(timeout) + + if self._flag.acquire(False): + self._flag.release() + return True + return False diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/cloudpickle_wrapper.py b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/cloudpickle_wrapper.py new file mode 100644 index 0000000000000000000000000000000000000000..099debcb711c6695f0570861293b198047bd6093 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/cloudpickle_wrapper.py @@ -0,0 +1,102 @@ +import inspect +from functools import partial +from joblib.externals.cloudpickle import dumps, loads + + +WRAP_CACHE = {} + + +class CloudpickledObjectWrapper: + def __init__(self, obj, keep_wrapper=False): + self._obj = obj + self._keep_wrapper = keep_wrapper + + def __reduce__(self): + _pickled_object = dumps(self._obj) + if not self._keep_wrapper: + return loads, (_pickled_object,) + + return _reconstruct_wrapper, (_pickled_object, self._keep_wrapper) + + def __getattr__(self, attr): + # Ensure that the wrapped object can be used seemlessly as the + # previous object. + if attr not in ["_obj", "_keep_wrapper"]: + return getattr(self._obj, attr) + return getattr(self, attr) + + +# Make sure the wrapped object conserves the callable property +class CallableObjectWrapper(CloudpickledObjectWrapper): + def __call__(self, *args, **kwargs): + return self._obj(*args, **kwargs) + + +def _wrap_non_picklable_objects(obj, keep_wrapper): + if callable(obj): + return CallableObjectWrapper(obj, keep_wrapper=keep_wrapper) + return CloudpickledObjectWrapper(obj, keep_wrapper=keep_wrapper) + + +def _reconstruct_wrapper(_pickled_object, keep_wrapper): + obj = loads(_pickled_object) + return _wrap_non_picklable_objects(obj, keep_wrapper) + + +def _wrap_objects_when_needed(obj): + # Function to introspect an object and decide if it should be wrapped or + # not. + need_wrap = "__main__" in getattr(obj, "__module__", "") + if isinstance(obj, partial): + return partial( + _wrap_objects_when_needed(obj.func), + *[_wrap_objects_when_needed(a) for a in obj.args], + **{ + k: _wrap_objects_when_needed(v) + for k, v in obj.keywords.items() + } + ) + if callable(obj): + # Need wrap if the object is a function defined in a local scope of + # another function. 
+ func_code = getattr(obj, "__code__", "") + need_wrap |= getattr(func_code, "co_flags", 0) & inspect.CO_NESTED + + # Need wrap if the obj is a lambda expression + func_name = getattr(obj, "__name__", "") + need_wrap |= "" in func_name + + if not need_wrap: + return obj + + wrapped_obj = WRAP_CACHE.get(obj) + if wrapped_obj is None: + wrapped_obj = _wrap_non_picklable_objects(obj, keep_wrapper=False) + WRAP_CACHE[obj] = wrapped_obj + return wrapped_obj + + +def wrap_non_picklable_objects(obj, keep_wrapper=True): + """Wrapper for non-picklable object to use cloudpickle to serialize them. + + Note that this wrapper tends to slow down the serialization process as it + is done with cloudpickle which is typically slower compared to pickle. The + proper way to solve serialization issues is to avoid defining functions and + objects in the main scripts and to implement __reduce__ functions for + complex classes. + """ + # If obj is a class, create a CloudpickledClassWrapper which instantiates + # the object internally and wrap it directly in a CloudpickledObjectWrapper + if inspect.isclass(obj): + + class CloudpickledClassWrapper(CloudpickledObjectWrapper): + def __init__(self, *args, **kwargs): + self._obj = obj(*args, **kwargs) + self._keep_wrapper = keep_wrapper + + CloudpickledClassWrapper.__name__ = obj.__name__ + return CloudpickledClassWrapper + + # If obj is an instance of a class, just wrap it in a regular + # CloudpickledObjectWrapper + return _wrap_non_picklable_objects(obj, keep_wrapper=keep_wrapper) diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/initializers.py b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/initializers.py new file mode 100644 index 0000000000000000000000000000000000000000..aea0e56c25d0d74e04788493058549a1399f8342 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/initializers.py @@ -0,0 +1,80 @@ +import warnings + + +def _viztracer_init(init_kwargs): + """Initialize viztracer's profiler in worker processes""" + from viztracer import VizTracer + + tracer = VizTracer(**init_kwargs) + tracer.register_exit() + tracer.start() + + +def _make_viztracer_initializer_and_initargs(): + try: + import viztracer + + tracer = viztracer.get_tracer() + if tracer is not None and getattr(tracer, "enable", False): + # Profiler is active: introspect its configuration to + # initialize the workers with the same configuration. + return _viztracer_init, (tracer.init_kwargs,) + except ImportError: + # viztracer is not installed: nothing to do + pass + except Exception as e: + # In case viztracer's API evolve, we do not want to crash loky but + # we want to know about it to be able to update loky. + warnings.warn(f"Unable to introspect viztracer state: {e}") + return None, () + + +class _ChainedInitializer: + """Compound worker initializer + + This is meant to be used in conjunction with _chain_initializers to + produce the necessary chained_args list to be passed to __call__. + """ + + def __init__(self, initializers): + self._initializers = initializers + + def __call__(self, *chained_args): + for initializer, args in zip(self._initializers, chained_args): + initializer(*args) + + +def _chain_initializers(initializer_and_args): + """Convenience helper to combine a sequence of initializers. + + If some initializers are None, they are filtered out. 
+ """ + filtered_initializers = [] + filtered_initargs = [] + for initializer, initargs in initializer_and_args: + if initializer is not None: + filtered_initializers.append(initializer) + filtered_initargs.append(initargs) + + if not filtered_initializers: + return None, () + elif len(filtered_initializers) == 1: + return filtered_initializers[0], filtered_initargs[0] + else: + return _ChainedInitializer(filtered_initializers), filtered_initargs + + +def _prepare_initializer(initializer, initargs): + if initializer is not None and not callable(initializer): + raise TypeError( + f"initializer must be a callable, got: {initializer!r}" + ) + + # Introspect runtime to determine if we need to propagate the viztracer + # profiler information to the workers: + return _chain_initializers( + [ + (initializer, initargs), + _make_viztracer_initializer_and_initargs(), + ] + ) diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/process_executor.py b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/process_executor.py new file mode 100644 index 0000000000000000000000000000000000000000..3040719579f74ecc7d5645e4894dbad138f0a5c1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/process_executor.py @@ -0,0 +1,1314 @@ +############################################################################### +# Re-implementation of the ProcessPoolExecutor more robust to faults +# +# author: Thomas Moreau and Olivier Grisel +# +# adapted from concurrent/futures/process_pool_executor.py (17/02/2017) +# * Add an extra management thread to detect executor_manager_thread failures, +# * Improve the shutdown process to avoid deadlocks, +# * Add timeout for workers, +# * More robust pickling process. +# +# Copyright 2009 Brian Quinlan. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Implements ProcessPoolExecutor. + +The follow diagram and text describe the data-flow through the system: + +|======================= In-process =====================|== Out-of-process ==| + ++----------+ +----------+ +--------+ +-----------+ +---------+ +| | => | Work Ids | | | | Call Q | | Process | +| | +----------+ | | +-----------+ | Pool | +| | | ... | | | | ... | +---------+ +| | | 6 | => | | => | 5, call() | => | | +| | | 7 | | | | ... | | | +| Process | | ... | | Local | +-----------+ | Process | +| Pool | +----------+ | Worker | | #1..n | +| Executor | | Thread | | | +| | +----------- + | | +-----------+ | | +| | <=> | Work Items | <=> | | <= | Result Q | <= | | +| | +------------+ | | +-----------+ | | +| | | 6: call() | | | | ... | | | +| | | future | +--------+ | 4, result | | | +| | | ... | | 3, except | | | ++----------+ +------------+ +-----------+ +---------+ + +Executor.submit() called: +- creates a uniquely numbered _WorkItem and adds it to the "Work Items" dict +- adds the id of the _WorkItem to the "Work Ids" queue + +Local worker thread: +- reads work ids from the "Work Ids" queue and looks up the corresponding + WorkItem from the "Work Items" dict: if the work item has been cancelled then + it is simply removed from the dict, otherwise it is repackaged as a + _CallItem and put in the "Call Q". New _CallItems are put in the "Call Q" + until "Call Q" is full. NOTE: the size of the "Call Q" is kept small because + calls placed in the "Call Q" can no longer be cancelled with Future.cancel(). 
+- reads _ResultItems from "Result Q", updates the future stored in the + "Work Items" dict and deletes the dict entry + +Process #1..n: +- reads _CallItems from "Call Q", executes the calls, and puts the resulting + _ResultItems in "Result Q" +""" + + +__author__ = "Thomas Moreau (thomas.moreau.2010@gmail.com)" + + +import os +import gc +import sys +import queue +import struct +import weakref +import warnings +import itertools +import traceback +import threading +from time import time, sleep +import multiprocessing as mp +from functools import partial +from pickle import PicklingError +from concurrent.futures import Executor +from concurrent.futures._base import LOGGER +from concurrent.futures.process import BrokenProcessPool as _BPPException +from multiprocessing.connection import wait + +from ._base import Future +from .backend import get_context +from .backend.context import cpu_count, _MAX_WINDOWS_WORKERS +from .backend.queues import Queue, SimpleQueue +from .backend.reduction import set_loky_pickler, get_loky_pickler_name +from .backend.utils import kill_process_tree, get_exitcodes_terminated_worker +from .initializers import _prepare_initializer + + +# Mechanism to prevent infinite process spawning. When a worker of a +# ProcessPoolExecutor nested in MAX_DEPTH Executor tries to create a new +# Executor, a LokyRecursionError is raised +MAX_DEPTH = int(os.environ.get("LOKY_MAX_DEPTH", 10)) +_CURRENT_DEPTH = 0 + +# Minimum time interval between two consecutive memory leak protection checks. +_MEMORY_LEAK_CHECK_DELAY = 1.0 + +# Number of bytes of memory usage allowed over the reference process size. +_MAX_MEMORY_LEAK_SIZE = int(3e8) + + +try: + from psutil import Process + + _USE_PSUTIL = True + + def _get_memory_usage(pid, force_gc=False): + if force_gc: + gc.collect() + + mem_size = Process(pid).memory_info().rss + mp.util.debug(f"psutil return memory size: {mem_size}") + return mem_size + +except ImportError: + _USE_PSUTIL = False + + +class _ThreadWakeup: + def __init__(self): + self._closed = False + self._reader, self._writer = mp.Pipe(duplex=False) + + def close(self): + if not self._closed: + self._closed = True + self._writer.close() + self._reader.close() + + def wakeup(self): + if not self._closed: + self._writer.send_bytes(b"") + + def clear(self): + if not self._closed: + while self._reader.poll(): + self._reader.recv_bytes() + + +class _ExecutorFlags: + """necessary references to maintain executor states without preventing gc + + It permits to keep the information needed by executor_manager_thread + and crash_detection_thread to maintain the pool without preventing the + garbage collection of unreferenced executors. + """ + + def __init__(self, shutdown_lock): + + self.shutdown = False + self.broken = None + self.kill_workers = False + self.shutdown_lock = shutdown_lock + + def flag_as_shutting_down(self, kill_workers=None): + with self.shutdown_lock: + self.shutdown = True + if kill_workers is not None: + self.kill_workers = kill_workers + + def flag_as_broken(self, broken): + with self.shutdown_lock: + self.shutdown = True + self.broken = broken + + +# Prior to 3.9, executor_manager_thread is created as daemon thread. This means +# that it is not joined automatically when the interpreter is shutting down. +# To work around this problem, an exit handler is installed to tell the +# thread to exit when the interpreter is shutting down and then waits until +# it finishes. 
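The _ThreadWakeup pipe defined above is what lets the manager thread be interrupted without going through the result queue: its read end can be polled together with other readers and process sentinels. A short sketch, assuming joblib is importable and using the private class for illustration only:

    # Sketch: the wake-up pipe participates in multiprocessing.connection.wait()
    # exactly like the result-queue reader and the worker sentinels do.
    from multiprocessing.connection import wait
    from joblib.externals.loky.process_executor import _ThreadWakeup

    wakeup = _ThreadWakeup()
    wakeup.wakeup()                           # send an empty message
    ready = wait([wakeup._reader], timeout=1)
    print(wakeup._reader in ready)            # -> True
    wakeup.clear()                            # drain pending messages
    wakeup.close()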
The thread needs to be daemonized because the atexit hooks are +# called after all non daemonized threads are joined. +# +# Starting 3.9, there exists a specific atexit hook to be called before joining +# the threads so the executor_manager_thread does not need to be daemonized +# anymore. +# +# The atexit hooks are registered when starting the first ProcessPoolExecutor +# to avoid import having an effect on the interpreter. + +_global_shutdown = False +_global_shutdown_lock = threading.Lock() +_threads_wakeups = weakref.WeakKeyDictionary() + + +def _python_exit(): + global _global_shutdown + _global_shutdown = True + + # Materialize the list of items to avoid error due to iterating over + # changing size dictionary. + items = list(_threads_wakeups.items()) + if len(items) > 0: + mp.util.debug( + "Interpreter shutting down. Waking up {len(items)}" + f"executor_manager_thread:\n{items}" + ) + + # Wake up the executor_manager_thread's so they can detect the interpreter + # is shutting down and exit. + for _, (shutdown_lock, thread_wakeup) in items: + with shutdown_lock: + thread_wakeup.wakeup() + + # Collect the executor_manager_thread's to make sure we exit cleanly. + for thread, _ in items: + # This locks is to prevent situations where an executor is gc'ed in one + # thread while the atexit finalizer is running in another thread. This + # can happen when joblib is used in pypy for instance. + with _global_shutdown_lock: + thread.join() + + +# With the fork context, _thread_wakeups is propagated to children. +# Clear it after fork to avoid some situation that can cause some +# freeze when joining the workers. +mp.util.register_after_fork(_threads_wakeups, lambda obj: obj.clear()) + + +# Module variable to register the at_exit call +process_pool_executor_at_exit = None + +# Controls how many more calls than processes will be queued in the call queue. +# A smaller number will mean that processes spend more time idle waiting for +# work while a larger number will make Future.cancel() succeed less frequently +# (Futures in the call queue cannot be cancelled). 
+EXTRA_QUEUED_CALLS = 1 + + +class _RemoteTraceback(Exception): + """Embed stringification of remote traceback in local traceback""" + + def __init__(self, tb=None): + self.tb = f'\n"""\n{tb}"""' + + def __str__(self): + return self.tb + + +# Do not inherit from BaseException to mirror +# concurrent.futures.process._ExceptionWithTraceback +class _ExceptionWithTraceback: + def __init__(self, exc): + tb = getattr(exc, "__traceback__", None) + if tb is None: + _, _, tb = sys.exc_info() + tb = traceback.format_exception(type(exc), exc, tb) + tb = "".join(tb) + self.exc = exc + self.tb = tb + + def __reduce__(self): + return _rebuild_exc, (self.exc, self.tb) + + +def _rebuild_exc(exc, tb): + exc.__cause__ = _RemoteTraceback(tb) + return exc + + +class _WorkItem: + + __slots__ = ["future", "fn", "args", "kwargs"] + + def __init__(self, future, fn, args, kwargs): + self.future = future + self.fn = fn + self.args = args + self.kwargs = kwargs + + +class _ResultItem: + def __init__(self, work_id, exception=None, result=None): + self.work_id = work_id + self.exception = exception + self.result = result + + +class _CallItem: + def __init__(self, work_id, fn, args, kwargs): + self.work_id = work_id + self.fn = fn + self.args = args + self.kwargs = kwargs + + # Store the current loky_pickler so it is correctly set in the worker + self.loky_pickler = get_loky_pickler_name() + + def __call__(self): + set_loky_pickler(self.loky_pickler) + return self.fn(*self.args, **self.kwargs) + + def __repr__(self): + return ( + f"CallItem({self.work_id}, {self.fn}, {self.args}, {self.kwargs})" + ) + + +class _SafeQueue(Queue): + """Safe Queue set exception to the future object linked to a job""" + + def __init__( + self, + max_size=0, + ctx=None, + pending_work_items=None, + running_work_items=None, + thread_wakeup=None, + reducers=None, + ): + self.thread_wakeup = thread_wakeup + self.pending_work_items = pending_work_items + self.running_work_items = running_work_items + super().__init__(max_size, reducers=reducers, ctx=ctx) + + def _on_queue_feeder_error(self, e, obj): + if isinstance(obj, _CallItem): + # format traceback only works on python3 + if isinstance(e, struct.error): + raised_error = RuntimeError( + "The task could not be sent to the workers as it is too " + "large for `send_bytes`." + ) + else: + raised_error = PicklingError( + "Could not pickle the task to send it to the workers." + ) + tb = traceback.format_exception( + type(e), e, getattr(e, "__traceback__", None) + ) + raised_error.__cause__ = _RemoteTraceback("".join(tb)) + work_item = self.pending_work_items.pop(obj.work_id, None) + self.running_work_items.remove(obj.work_id) + # work_item can be None if another process terminated. In this + # case, the executor_manager_thread fails all work_items with + # BrokenProcessPool + if work_item is not None: + work_item.future.set_exception(raised_error) + del work_item + self.thread_wakeup.wakeup() + else: + super()._on_queue_feeder_error(e, obj) + + +def _get_chunks(chunksize, *iterables): + """Iterates over zip()ed iterables in chunks.""" + it = zip(*iterables) + while True: + chunk = tuple(itertools.islice(it, chunksize)) + if not chunk: + return + yield chunk + + +def _process_chunk(fn, chunk): + """Processes a chunk of an iterable passed to map. + + Runs the function passed to map() on a chunk of the + iterable passed to map. + + This function is run in a separate process. 
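Before calls reach a worker, map() inputs are zipped and grouped by _get_chunks, and each resulting tuple becomes a single _process_chunk call. A sketch of the grouping, assuming joblib is importable:

    # Sketch: chunksize=2 over two parallel iterables.
    from joblib.externals.loky.process_executor import _get_chunks

    print(list(_get_chunks(2, [1, 2, 3, 4, 5], "abcde")))
    # -> [((1, 'a'), (2, 'b')), ((3, 'c'), (4, 'd')), ((5, 'e'),)]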
+ + """ + return [fn(*args) for args in chunk] + + +def _sendback_result(result_queue, work_id, result=None, exception=None): + """Safely send back the given result or exception""" + try: + result_queue.put( + _ResultItem(work_id, result=result, exception=exception) + ) + except BaseException as e: + exc = _ExceptionWithTraceback(e) + result_queue.put(_ResultItem(work_id, exception=exc)) + + +def _process_worker( + call_queue, + result_queue, + initializer, + initargs, + processes_management_lock, + timeout, + worker_exit_lock, + current_depth, +): + """Evaluates calls from call_queue and places the results in result_queue. + + This worker is run in a separate process. + + Args: + call_queue: A ctx.Queue of _CallItems that will be read and + evaluated by the worker. + result_queue: A ctx.Queue of _ResultItems that will written + to by the worker. + initializer: A callable initializer, or None + initargs: A tuple of args for the initializer + processes_management_lock: A ctx.Lock avoiding worker timeout while + some workers are being spawned. + timeout: maximum time to wait for a new item in the call_queue. If that + time is expired, the worker will shutdown. + worker_exit_lock: Lock to avoid flagging the executor as broken on + workers timeout. + current_depth: Nested parallelism level, to avoid infinite spawning. + """ + if initializer is not None: + try: + initializer(*initargs) + except BaseException: + LOGGER.critical("Exception in initializer:", exc_info=True) + # The parent will notice that the process stopped and + # mark the pool broken + return + + # set the global _CURRENT_DEPTH mechanism to limit recursive call + global _CURRENT_DEPTH + _CURRENT_DEPTH = current_depth + _process_reference_size = None + _last_memory_leak_check = None + pid = os.getpid() + + mp.util.debug(f"Worker started with timeout={timeout}") + while True: + try: + call_item = call_queue.get(block=True, timeout=timeout) + if call_item is None: + mp.util.info("Shutting down worker on sentinel") + except queue.Empty: + mp.util.info(f"Shutting down worker after timeout {timeout:0.3f}s") + if processes_management_lock.acquire(block=False): + processes_management_lock.release() + call_item = None + else: + mp.util.info("Could not acquire processes_management_lock") + continue + except BaseException: + previous_tb = traceback.format_exc() + try: + result_queue.put(_RemoteTraceback(previous_tb)) + except BaseException: + # If we cannot format correctly the exception, at least print + # the traceback. + print(previous_tb) + mp.util.debug("Exiting with code 1") + sys.exit(1) + if call_item is None: + # Notify queue management thread about worker shutdown + result_queue.put(pid) + is_clean = worker_exit_lock.acquire(True, timeout=30) + + # Early notify any loky executor running in this worker process + # (nested parallelism) that this process is about to shutdown to + # avoid a deadlock waiting undifinitely for the worker to finish. 
+ _python_exit() + + if is_clean: + mp.util.debug("Exited cleanly") + else: + mp.util.info("Main process did not release worker_exit") + return + try: + r = call_item() + except BaseException as e: + exc = _ExceptionWithTraceback(e) + result_queue.put(_ResultItem(call_item.work_id, exception=exc)) + else: + _sendback_result(result_queue, call_item.work_id, result=r) + del r + + # Free the resource as soon as possible, to avoid holding onto + # open files or shared memory that is not needed anymore + del call_item + + if _USE_PSUTIL: + if _process_reference_size is None: + # Make reference measurement after the first call + _process_reference_size = _get_memory_usage(pid, force_gc=True) + _last_memory_leak_check = time() + continue + if time() - _last_memory_leak_check > _MEMORY_LEAK_CHECK_DELAY: + mem_usage = _get_memory_usage(pid) + _last_memory_leak_check = time() + if mem_usage - _process_reference_size < _MAX_MEMORY_LEAK_SIZE: + # Memory usage stays within bounds: everything is fine. + continue + + # Check again memory usage; this time take the measurement + # after a forced garbage collection to break any reference + # cycles. + mem_usage = _get_memory_usage(pid, force_gc=True) + _last_memory_leak_check = time() + if mem_usage - _process_reference_size < _MAX_MEMORY_LEAK_SIZE: + # The GC managed to free the memory: everything is fine. + continue + + # The process is leaking memory: let the main process + # know that we need to start a new worker. + mp.util.info("Memory leak detected: shutting down worker") + result_queue.put(pid) + with worker_exit_lock: + mp.util.debug("Exit due to memory leak") + return + else: + # if psutil is not installed, trigger gc.collect events + # regularly to limit potential memory leaks due to reference cycles + if _last_memory_leak_check is None or ( + time() - _last_memory_leak_check > _MEMORY_LEAK_CHECK_DELAY + ): + gc.collect() + _last_memory_leak_check = time() + + +class _ExecutorManagerThread(threading.Thread): + """Manages the communication between this process and the worker processes. + + The manager is run in a local thread. + + Args: + executor: A reference to the ProcessPoolExecutor that owns + this thread. A weakref will be own by the manager as well as + references to internal objects used to introspect the state of + the executor. + """ + + def __init__(self, executor): + # Store references to necessary internals of the executor. + + # A _ThreadWakeup to allow waking up the executor_manager_thread from + # the main Thread and avoid deadlocks caused by permanently + # locked queues. + self.thread_wakeup = executor._executor_manager_thread_wakeup + self.shutdown_lock = executor._shutdown_lock + + # A weakref.ref to the ProcessPoolExecutor that owns this thread. Used + # to determine if the ProcessPoolExecutor has been garbage collected + # and that the manager can exit. + # When the executor gets garbage collected, the weakref callback + # will wake up the queue management thread so that it can terminate + # if there is no pending work item. + def weakref_cb( + _, + thread_wakeup=self.thread_wakeup, + shutdown_lock=self.shutdown_lock, + ): + if mp is not None: + # At this point, the multiprocessing module can already be + # garbage collected. We only log debug info when still + # possible. 
+ mp.util.debug( + "Executor collected: triggering callback for" + " QueueManager wakeup" + ) + with shutdown_lock: + thread_wakeup.wakeup() + + self.executor_reference = weakref.ref(executor, weakref_cb) + + # The flags of the executor + self.executor_flags = executor._flags + + # A list of the ctx.Process instances used as workers. + self.processes = executor._processes + + # A ctx.Queue that will be filled with _CallItems derived from + # _WorkItems for processing by the process workers. + self.call_queue = executor._call_queue + + # A ctx.SimpleQueue of _ResultItems generated by the process workers. + self.result_queue = executor._result_queue + + # A queue.Queue of work ids e.g. Queue([5, 6, ...]). + self.work_ids_queue = executor._work_ids + + # A dict mapping work ids to _WorkItems e.g. + # {5: <_WorkItem...>, 6: <_WorkItem...>, ...} + self.pending_work_items = executor._pending_work_items + + # A list of the work_ids that are currently running + self.running_work_items = executor._running_work_items + + # A lock to avoid concurrent shutdown of workers on timeout and spawn + # of new processes or shut down + self.processes_management_lock = executor._processes_management_lock + + super().__init__(name="ExecutorManagerThread") + if sys.version_info < (3, 9): + self.daemon = True + + def run(self): + # Main loop for the executor manager thread. + + while True: + self.add_call_item_to_queue() + + result_item, is_broken, bpe = self.wait_result_broken_or_wakeup() + + if is_broken: + self.terminate_broken(bpe) + return + if result_item is not None: + self.process_result_item(result_item) + # Delete reference to result_item to avoid keeping references + # while waiting on new results. + del result_item + + if self.is_shutting_down(): + self.flag_executor_shutting_down() + + # Since no new work items can be added, it is safe to shutdown + # this thread if there are no pending work items. + if not self.pending_work_items: + self.join_executor_internals() + return + + def add_call_item_to_queue(self): + # Fills call_queue with _WorkItems from pending_work_items. + # This function never blocks. + while True: + if self.call_queue.full(): + return + try: + work_id = self.work_ids_queue.get(block=False) + except queue.Empty: + return + else: + work_item = self.pending_work_items[work_id] + + if work_item.future.set_running_or_notify_cancel(): + self.running_work_items += [work_id] + self.call_queue.put( + _CallItem( + work_id, + work_item.fn, + work_item.args, + work_item.kwargs, + ), + block=True, + ) + else: + del self.pending_work_items[work_id] + continue + + def wait_result_broken_or_wakeup(self): + # Wait for a result to be ready in the result_queue while checking + # that all worker processes are still running, or for a wake up + # signal send. The wake up signals come either from new tasks being + # submitted, from the executor being shutdown/gc-ed, or from the + # shutdown of the python interpreter. + result_reader = self.result_queue._reader + wakeup_reader = self.thread_wakeup._reader + readers = [result_reader, wakeup_reader] + worker_sentinels = [p.sentinel for p in list(self.processes.values())] + ready = wait(readers + worker_sentinels) + + bpe = None + is_broken = True + result_item = None + if result_reader in ready: + try: + result_item = result_reader.recv() + if isinstance(result_item, _RemoteTraceback): + bpe = BrokenProcessPool( + "A task has failed to un-serialize. Please ensure that" + " the arguments of the function are all picklable." 
+ ) + bpe.__cause__ = result_item + else: + is_broken = False + except BaseException as e: + bpe = BrokenProcessPool( + "A result has failed to un-serialize. Please ensure that " + "the objects returned by the function are always " + "picklable." + ) + tb = traceback.format_exception( + type(e), e, getattr(e, "__traceback__", None) + ) + bpe.__cause__ = _RemoteTraceback("".join(tb)) + + elif wakeup_reader in ready: + # This is simply a wake-up event that might either trigger putting + # more tasks in the queue or trigger the clean up of resources. + is_broken = False + else: + # A worker has terminated and we don't know why, set the state of + # the executor as broken + exit_codes = "" + if sys.platform != "win32": + # In Windows, introspecting terminated workers exitcodes seems + # unstable, therefore they are not appended in the exception + # message. + exit_codes = ( + "\nThe exit codes of the workers are " + f"{get_exitcodes_terminated_worker(self.processes)}" + ) + mp.util.debug( + "A worker unexpectedly terminated. Workers that " + "might have caused the breakage: " + + str( + { + p.name: p.exitcode + for p in list(self.processes.values()) + if p is not None and p.sentinel in ready + } + ) + ) + bpe = TerminatedWorkerError( + "A worker process managed by the executor was unexpectedly " + "terminated. This could be caused by a segmentation fault " + "while calling the function or by an excessive memory usage " + "causing the Operating System to kill the worker.\n" + f"{exit_codes}" + ) + + self.thread_wakeup.clear() + + return result_item, is_broken, bpe + + def process_result_item(self, result_item): + # Process the received a result_item. This can be either the PID of a + # worker that exited gracefully or a _ResultItem + + if isinstance(result_item, int): + # Clean shutdown of a worker using its PID, either on request + # by the executor.shutdown method or by the timeout of the worker + # itself: we should not mark the executor as broken. + with self.processes_management_lock: + p = self.processes.pop(result_item, None) + + # p can be None if the executor is concurrently shutting down. + if p is not None: + p._worker_exit_lock.release() + mp.util.debug( + f"joining {p.name} when processing {p.pid} as result_item" + ) + p.join() + del p + + # Make sure the executor have the right number of worker, even if a + # worker timeout while some jobs were submitted. If some work is + # pending or there is less processes than running items, we need to + # start a new Process and raise a warning. + n_pending = len(self.pending_work_items) + n_running = len(self.running_work_items) + if n_pending - n_running > 0 or n_running > len(self.processes): + executor = self.executor_reference() + if ( + executor is not None + and len(self.processes) < executor._max_workers + ): + warnings.warn( + "A worker stopped while some jobs were given to the " + "executor. This can be caused by a too short worker " + "timeout or by a memory leak.", + UserWarning, + ) + with executor._processes_management_lock: + executor._adjust_process_count() + executor = None + else: + # Received a _ResultItem so mark the future as completed. 
+ work_item = self.pending_work_items.pop(result_item.work_id, None) + # work_item can be None if another process terminated (see above) + if work_item is not None: + if result_item.exception: + work_item.future.set_exception(result_item.exception) + else: + work_item.future.set_result(result_item.result) + self.running_work_items.remove(result_item.work_id) + + def is_shutting_down(self): + # Check whether we should start shutting down the executor. + executor = self.executor_reference() + # No more work items can be added if: + # - The interpreter is shutting down OR + # - The executor that owns this thread is not broken AND + # * The executor that owns this worker has been collected OR + # * The executor that owns this worker has been shutdown. + # If the executor is broken, it should be detected in the next loop. + return _global_shutdown or ( + (executor is None or self.executor_flags.shutdown) + and not self.executor_flags.broken + ) + + def terminate_broken(self, bpe): + # Terminate the executor because it is in a broken state. The bpe + # argument can be used to display more information on the error that + # lead the executor into becoming broken. + + # Mark the process pool broken so that submits fail right now. + self.executor_flags.flag_as_broken(bpe) + + # Mark pending tasks as failed. + for work_item in self.pending_work_items.values(): + work_item.future.set_exception(bpe) + # Delete references to object. See issue16284 + del work_item + self.pending_work_items.clear() + + # Terminate remaining workers forcibly: the queues or their + # locks may be in a dirty state and block forever. + self.kill_workers(reason="broken executor") + + # clean up resources + self.join_executor_internals() + + def flag_executor_shutting_down(self): + # Flag the executor as shutting down and cancel remaining tasks if + # requested as early as possible if it is not gc-ed yet. + self.executor_flags.flag_as_shutting_down() + + # Cancel pending work items if requested. + if self.executor_flags.kill_workers: + while self.pending_work_items: + _, work_item = self.pending_work_items.popitem() + work_item.future.set_exception( + ShutdownExecutorError( + "The Executor was shutdown with `kill_workers=True` " + "before this job could complete." + ) + ) + del work_item + + # Kill the remaining worker forcibly to no waste time joining them + self.kill_workers(reason="executor shutting down") + + def kill_workers(self, reason=""): + # Terminate the remaining workers using SIGKILL. This function also + # terminates descendant workers of the children in case there is some + # nested parallelism. + while self.processes: + _, p = self.processes.popitem() + mp.util.debug(f"terminate process {p.name}, reason: {reason}") + try: + kill_process_tree(p) + except ProcessLookupError: # pragma: no cover + pass + + def shutdown_workers(self): + # shutdown all workers in self.processes + + # Create a list to avoid RuntimeError due to concurrent modification of + # processes. nb_children_alive is thus an upper bound. Also release the + # processes' _worker_exit_lock to accelerate the shutdown procedure, as + # there is no need for hand-shake here. + with self.processes_management_lock: + n_children_to_stop = 0 + for p in list(self.processes.values()): + mp.util.debug(f"releasing worker exit lock on {p.name}") + p._worker_exit_lock.release() + n_children_to_stop += 1 + + mp.util.debug(f"found {n_children_to_stop} processes to stop") + + # Send the right number of sentinels, to make sure all children are + # properly terminated. 
Do it with a mechanism that avoid hanging on + # Full queue when all workers have already been shutdown. + n_sentinels_sent = 0 + cooldown_time = 0.001 + while ( + n_sentinels_sent < n_children_to_stop + and self.get_n_children_alive() > 0 + ): + for _ in range(n_children_to_stop - n_sentinels_sent): + try: + self.call_queue.put_nowait(None) + n_sentinels_sent += 1 + except queue.Full as e: + if cooldown_time > 5.0: + mp.util.info( + "failed to send all sentinels and exit with error." + f"\ncall_queue size={self.call_queue._maxsize}; " + f" full is {self.call_queue.full()}; " + ) + raise e + mp.util.info( + "full call_queue prevented to send all sentinels at " + "once, waiting..." + ) + sleep(cooldown_time) + cooldown_time *= 1.2 + break + + mp.util.debug(f"sent {n_sentinels_sent} sentinels to the call queue") + + def join_executor_internals(self): + self.shutdown_workers() + + # Release the queue's resources as soon as possible. Flag the feeder + # thread for clean exit to avoid having the crash detection thread flag + # the Executor as broken during the shutdown. This is safe as either: + # * We don't need to communicate with the workers anymore + # * There is nothing left in the Queue buffer except None sentinels + mp.util.debug("closing call_queue") + self.call_queue.close() + self.call_queue.join_thread() + + # Closing result_queue + mp.util.debug("closing result_queue") + self.result_queue.close() + + mp.util.debug("closing thread_wakeup") + with self.shutdown_lock: + self.thread_wakeup.close() + + # If .join() is not called on the created processes then + # some ctx.Queue methods may deadlock on macOS. + with self.processes_management_lock: + mp.util.debug(f"joining {len(self.processes)} processes") + n_joined_processes = 0 + while True: + try: + pid, p = self.processes.popitem() + mp.util.debug(f"joining process {p.name} with pid {pid}") + p.join() + n_joined_processes += 1 + except KeyError: + break + + mp.util.debug( + "executor management thread clean shutdown of " + f"{n_joined_processes} workers" + ) + + def get_n_children_alive(self): + # This is an upper bound on the number of children alive. + with self.processes_management_lock: + return sum(p.is_alive() for p in list(self.processes.values())) + + +_system_limits_checked = False +_system_limited = None + + +def _check_system_limits(): + global _system_limits_checked, _system_limited + if _system_limits_checked and _system_limited: + raise NotImplementedError(_system_limited) + _system_limits_checked = True + try: + nsems_max = os.sysconf("SC_SEM_NSEMS_MAX") + except (AttributeError, ValueError): + # sysconf not available or setting not available + return + if nsems_max == -1: + # undetermined limit, assume that limit is determined + # by available memory only + return + if nsems_max >= 256: + # minimum number of semaphores available + # according to POSIX + return + _system_limited = ( + f"system provides too few semaphores ({nsems_max} available, " + "256 necessary)" + ) + raise NotImplementedError(_system_limited) + + +def _chain_from_iterable_of_lists(iterable): + """ + Specialized implementation of itertools.chain.from_iterable. + Each item in *iterable* should be a list. This function is + careful not to keep references to yielded objects. 
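On the way back, the chunked result lists are flattened lazily by _chain_from_iterable_of_lists, which empties each chunk as it is consumed so references are released early. A sketch, assuming joblib is importable:

    # Sketch: each inner list is drained while being yielded.
    from joblib.externals.loky.process_executor import _chain_from_iterable_of_lists

    chunks = iter([[0, 1], [4, 9], [16]])
    print(list(_chain_from_iterable_of_lists(chunks)))   # -> [0, 1, 4, 9, 16]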
+ """ + for element in iterable: + element.reverse() + while element: + yield element.pop() + + +def _check_max_depth(context): + # Limit the maxmal recursion level + global _CURRENT_DEPTH + if context.get_start_method() == "fork" and _CURRENT_DEPTH > 0: + raise LokyRecursionError( + "Could not spawn extra nested processes at depth superior to " + "MAX_DEPTH=1. It is not possible to increase this limit when " + "using the 'fork' start method." + ) + + if 0 < MAX_DEPTH and _CURRENT_DEPTH + 1 > MAX_DEPTH: + raise LokyRecursionError( + "Could not spawn extra nested processes at depth superior to " + f"MAX_DEPTH={MAX_DEPTH}. If this is intendend, you can change " + "this limit with the LOKY_MAX_DEPTH environment variable." + ) + + +class LokyRecursionError(RuntimeError): + """A process tries to spawn too many levels of nested processes.""" + + +class BrokenProcessPool(_BPPException): + """ + Raised when the executor is broken while a future was in the running state. + The cause can an error raised when unpickling the task in the worker + process or when unpickling the result value in the parent process. It can + also be caused by a worker process being terminated unexpectedly. + """ + + +class TerminatedWorkerError(BrokenProcessPool): + """ + Raised when a process in a ProcessPoolExecutor terminated abruptly + while a future was in the running state. + """ + + +# Alias for backward compat (for code written for loky 1.1.4 and earlier). Do +# not use in new code. +BrokenExecutor = BrokenProcessPool + + +class ShutdownExecutorError(RuntimeError): + + """ + Raised when a ProcessPoolExecutor is shutdown while a future was in the + running or pending state. + """ + + +class ProcessPoolExecutor(Executor): + + _at_exit = None + + def __init__( + self, + max_workers=None, + job_reducers=None, + result_reducers=None, + timeout=None, + context=None, + initializer=None, + initargs=(), + env=None, + ): + """Initializes a new ProcessPoolExecutor instance. + + Args: + max_workers: int, optional (default: cpu_count()) + The maximum number of processes that can be used to execute the + given calls. If None or not given then as many worker processes + will be created as the number of CPUs the current process + can use. + job_reducers, result_reducers: dict(type: reducer_func) + Custom reducer for pickling the jobs and the results from the + Executor. If only `job_reducers` is provided, `result_reducer` + will use the same reducers + timeout: int, optional (default: None) + Idle workers exit after timeout seconds. If a new job is + submitted after the timeout, the executor will start enough + new Python processes to make sure the pool of workers is full. + context: A multiprocessing context to launch the workers. This + object should provide SimpleQueue, Queue and Process. + initializer: An callable used to initialize worker processes. + initargs: A tuple of arguments to pass to the initializer. + env: A dict of environment variable to overwrite in the child + process. The environment variables are set before any module is + loaded. Note that this only works with the loky context. + """ + _check_system_limits() + + if max_workers is None: + self._max_workers = cpu_count() + else: + if max_workers <= 0: + raise ValueError("max_workers must be greater than 0") + self._max_workers = max_workers + + if ( + sys.platform == "win32" + and self._max_workers > _MAX_WINDOWS_WORKERS + ): + warnings.warn( + f"On Windows, max_workers cannot exceed {_MAX_WINDOWS_WORKERS} " + "due to limitations of the operating system." 
+ ) + self._max_workers = _MAX_WINDOWS_WORKERS + + if context is None: + context = get_context() + self._context = context + self._env = env + + self._initializer, self._initargs = _prepare_initializer( + initializer, initargs + ) + _check_max_depth(self._context) + + if result_reducers is None: + result_reducers = job_reducers + + # Timeout + self._timeout = timeout + + # Management thread + self._executor_manager_thread = None + + # Map of pids to processes + self._processes = {} + + # Internal variables of the ProcessPoolExecutor + self._processes = {} + self._queue_count = 0 + self._pending_work_items = {} + self._running_work_items = [] + self._work_ids = queue.Queue() + self._processes_management_lock = self._context.Lock() + self._executor_manager_thread = None + self._shutdown_lock = threading.Lock() + + # _ThreadWakeup is a communication channel used to interrupt the wait + # of the main loop of executor_manager_thread from another thread (e.g. + # when calling executor.submit or executor.shutdown). We do not use the + # _result_queue to send wakeup signals to the executor_manager_thread + # as it could result in a deadlock if a worker process dies with the + # _result_queue write lock still acquired. + # + # _shutdown_lock must be locked to access _ThreadWakeup.wakeup. + self._executor_manager_thread_wakeup = _ThreadWakeup() + + # Flag to hold the state of the Executor. This permits to introspect + # the Executor state even once it has been garbage collected. + self._flags = _ExecutorFlags(self._shutdown_lock) + + # Finally setup the queues for interprocess communication + self._setup_queues(job_reducers, result_reducers) + + mp.util.debug("ProcessPoolExecutor is setup") + + def _setup_queues(self, job_reducers, result_reducers, queue_size=None): + # Make the call queue slightly larger than the number of processes to + # prevent the worker processes from idling. But don't make it too big + # because futures in the call queue cannot be cancelled. + if queue_size is None: + queue_size = 2 * self._max_workers + EXTRA_QUEUED_CALLS + self._call_queue = _SafeQueue( + max_size=queue_size, + pending_work_items=self._pending_work_items, + running_work_items=self._running_work_items, + thread_wakeup=self._executor_manager_thread_wakeup, + reducers=job_reducers, + ctx=self._context, + ) + # Killed worker processes can produce spurious "broken pipe" + # tracebacks in the queue's own worker thread. But we detect killed + # processes anyway, so silence the tracebacks. + self._call_queue._ignore_epipe = True + + self._result_queue = SimpleQueue( + reducers=result_reducers, ctx=self._context + ) + + def _start_executor_manager_thread(self): + if self._executor_manager_thread is None: + mp.util.debug("_start_executor_manager_thread called") + + # Start the processes so that their sentinels are known. + self._executor_manager_thread = _ExecutorManagerThread(self) + self._executor_manager_thread.start() + + # register this executor in a mechanism that ensures it will wakeup + # when the interpreter is exiting. + _threads_wakeups[self._executor_manager_thread] = ( + self._shutdown_lock, + self._executor_manager_thread_wakeup, + ) + + global process_pool_executor_at_exit + if process_pool_executor_at_exit is None: + # Ensure that the _python_exit function will be called before + # the multiprocessing.Queue._close finalizers which have an + # exitpriority of 10. 
+ + if sys.version_info < (3, 9): + process_pool_executor_at_exit = mp.util.Finalize( + None, _python_exit, exitpriority=20 + ) + else: + process_pool_executor_at_exit = threading._register_atexit( + _python_exit + ) + + def _adjust_process_count(self): + while len(self._processes) < self._max_workers: + worker_exit_lock = self._context.BoundedSemaphore(1) + args = ( + self._call_queue, + self._result_queue, + self._initializer, + self._initargs, + self._processes_management_lock, + self._timeout, + worker_exit_lock, + _CURRENT_DEPTH + 1, + ) + worker_exit_lock.acquire() + try: + # Try to spawn the process with some environment variable to + # overwrite but it only works with the loky context for now. + p = self._context.Process( + target=_process_worker, args=args, env=self._env + ) + except TypeError: + p = self._context.Process(target=_process_worker, args=args) + p._worker_exit_lock = worker_exit_lock + p.start() + self._processes[p.pid] = p + mp.util.debug( + f"Adjusted process count to {self._max_workers}: " + f"{[(p.name, pid) for pid, p in self._processes.items()]}" + ) + + def _ensure_executor_running(self): + """ensures all workers and management thread are running""" + with self._processes_management_lock: + if len(self._processes) != self._max_workers: + self._adjust_process_count() + self._start_executor_manager_thread() + + def submit(self, fn, *args, **kwargs): + with self._flags.shutdown_lock: + if self._flags.broken is not None: + raise self._flags.broken + if self._flags.shutdown: + raise ShutdownExecutorError( + "cannot schedule new futures after shutdown" + ) + + # Cannot submit a new calls once the interpreter is shutting down. + # This check avoids spawning new processes at exit. + if _global_shutdown: + raise RuntimeError( + "cannot schedule new futures after " "interpreter shutdown" + ) + + f = Future() + w = _WorkItem(f, fn, args, kwargs) + + self._pending_work_items[self._queue_count] = w + self._work_ids.put(self._queue_count) + self._queue_count += 1 + # Wake up queue management thread + self._executor_manager_thread_wakeup.wakeup() + + self._ensure_executor_running() + return f + + submit.__doc__ = Executor.submit.__doc__ + + def map(self, fn, *iterables, **kwargs): + """Returns an iterator equivalent to map(fn, iter). + + Args: + fn: A callable that will take as many arguments as there are + passed iterables. + timeout: The maximum number of seconds to wait. If None, then there + is no limit on the wait time. + chunksize: If greater than one, the iterables will be chopped into + chunks of size chunksize and submitted to the process pool. + If set to one, the items in the list will be sent one at a + time. + + Returns: + An iterator equivalent to: map(func, *iterables) but the calls may + be evaluated out-of-order. + + Raises: + TimeoutError: If the entire result iterator could not be generated + before the given timeout. + Exception: If fn(*args) raises for any values. 
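As a usage sketch (assuming joblib is installed), submit() hands back a Future, and an exception raised inside a worker is re-raised, with its remote traceback attached as __cause__, when result() is called:

    # Sketch: basic submit()/result() round-trip with the loky executor.
    from joblib.externals.loky.process_executor import ProcessPoolExecutor

    def inverse(x):
        return 1 / x

    if __name__ == "__main__":
        with ProcessPoolExecutor(max_workers=2) as executor:
            print(executor.submit(inverse, 4).result())   # -> 0.25
            try:
                executor.submit(inverse, 0).result()
            except ZeroDivisionError as exc:
                print("worker raised:", exc)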
+ """ + timeout = kwargs.get("timeout", None) + chunksize = kwargs.get("chunksize", 1) + if chunksize < 1: + raise ValueError("chunksize must be >= 1.") + + results = super().map( + partial(_process_chunk, fn), + _get_chunks(chunksize, *iterables), + timeout=timeout, + ) + return _chain_from_iterable_of_lists(results) + + def shutdown(self, wait=True, kill_workers=False): + mp.util.debug(f"shutting down executor {self}") + + self._flags.flag_as_shutting_down(kill_workers) + executor_manager_thread = self._executor_manager_thread + executor_manager_thread_wakeup = self._executor_manager_thread_wakeup + + if executor_manager_thread_wakeup is not None: + # Wake up queue management thread + with self._shutdown_lock: + self._executor_manager_thread_wakeup.wakeup() + + if executor_manager_thread is not None and wait: + # This locks avoids concurrent join if the interpreter + # is shutting down. + with _global_shutdown_lock: + executor_manager_thread.join() + _threads_wakeups.pop(executor_manager_thread, None) + + # To reduce the risk of opening too many files, remove references to + # objects that use file descriptors. + self._executor_manager_thread = None + self._executor_manager_thread_wakeup = None + self._call_queue = None + self._result_queue = None + self._processes_management_lock = None + + shutdown.__doc__ = Executor.shutdown.__doc__ diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/reusable_executor.py b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/reusable_executor.py new file mode 100644 index 0000000000000000000000000000000000000000..ad016fd389762a1c458200ffe7b310239da3a3f3 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/reusable_executor.py @@ -0,0 +1,285 @@ +############################################################################### +# Reusable ProcessPoolExecutor +# +# author: Thomas Moreau and Olivier Grisel +# +import time +import warnings +import threading +import multiprocessing as mp + +from .process_executor import ProcessPoolExecutor, EXTRA_QUEUED_CALLS +from .backend.context import cpu_count +from .backend import get_context + +__all__ = ["get_reusable_executor"] + +# Singleton executor and id management +_executor_lock = threading.RLock() +_next_executor_id = 0 +_executor = None +_executor_kwargs = None + + +def _get_next_executor_id(): + """Ensure that each successive executor instance has a unique, monotonic id. + + The purpose of this monotonic id is to help debug and test automated + instance creation. + """ + global _next_executor_id + with _executor_lock: + executor_id = _next_executor_id + _next_executor_id += 1 + return executor_id + + +def get_reusable_executor( + max_workers=None, + context=None, + timeout=10, + kill_workers=False, + reuse="auto", + job_reducers=None, + result_reducers=None, + initializer=None, + initargs=(), + env=None, +): + """Return the current ReusableExectutor instance. + + Start a new instance if it has not been started already or if the previous + instance was left in a broken state. + + If the previous instance does not have the requested number of workers, the + executor is dynamically resized to adjust the number of workers prior to + returning. + + Reusing a singleton instance spares the overhead of starting new worker + processes and importing common python packages each time. + + ``max_workers`` controls the maximum number of tasks that can be running in + parallel in worker processes. By default this is set to the number of + CPUs on the host. 
+ + Setting ``timeout`` (in seconds) makes idle workers automatically shutdown + so as to release system resources. New workers are respawn upon submission + of new tasks so that ``max_workers`` are available to accept the newly + submitted tasks. Setting ``timeout`` to around 100 times the time required + to spawn new processes and import packages in them (on the order of 100ms) + ensures that the overhead of spawning workers is negligible. + + Setting ``kill_workers=True`` makes it possible to forcibly interrupt + previously spawned jobs to get a new instance of the reusable executor + with new constructor argument values. + + The ``job_reducers`` and ``result_reducers`` are used to customize the + pickling of tasks and results send to the executor. + + When provided, the ``initializer`` is run first in newly spawned + processes with argument ``initargs``. + + The environment variable in the child process are a copy of the values in + the main process. One can provide a dict ``{ENV: VAL}`` where ``ENV`` and + ``VAL`` are string literals to overwrite the environment variable ``ENV`` + in the child processes to value ``VAL``. The environment variables are set + in the children before any module is loaded. This only works with the + ``loky`` context. + """ + _executor, _ = _ReusablePoolExecutor.get_reusable_executor( + max_workers=max_workers, + context=context, + timeout=timeout, + kill_workers=kill_workers, + reuse=reuse, + job_reducers=job_reducers, + result_reducers=result_reducers, + initializer=initializer, + initargs=initargs, + env=env, + ) + return _executor + + +class _ReusablePoolExecutor(ProcessPoolExecutor): + def __init__( + self, + submit_resize_lock, + max_workers=None, + context=None, + timeout=None, + executor_id=0, + job_reducers=None, + result_reducers=None, + initializer=None, + initargs=(), + env=None, + ): + super().__init__( + max_workers=max_workers, + context=context, + timeout=timeout, + job_reducers=job_reducers, + result_reducers=result_reducers, + initializer=initializer, + initargs=initargs, + env=env, + ) + self.executor_id = executor_id + self._submit_resize_lock = submit_resize_lock + + @classmethod + def get_reusable_executor( + cls, + max_workers=None, + context=None, + timeout=10, + kill_workers=False, + reuse="auto", + job_reducers=None, + result_reducers=None, + initializer=None, + initargs=(), + env=None, + ): + with _executor_lock: + global _executor, _executor_kwargs + executor = _executor + + if max_workers is None: + if reuse is True and executor is not None: + max_workers = executor._max_workers + else: + max_workers = cpu_count() + elif max_workers <= 0: + raise ValueError( + f"max_workers must be greater than 0, got {max_workers}." + ) + + if isinstance(context, str): + context = get_context(context) + if context is not None and context.get_start_method() == "fork": + raise ValueError( + "Cannot use reusable executor with the 'fork' context" + ) + + kwargs = dict( + context=context, + timeout=timeout, + job_reducers=job_reducers, + result_reducers=result_reducers, + initializer=initializer, + initargs=initargs, + env=env, + ) + if executor is None: + is_reused = False + mp.util.debug( + f"Create a executor with max_workers={max_workers}." 
+ ) + executor_id = _get_next_executor_id() + _executor_kwargs = kwargs + _executor = executor = cls( + _executor_lock, + max_workers=max_workers, + executor_id=executor_id, + **kwargs, + ) + else: + if reuse == "auto": + reuse = kwargs == _executor_kwargs + if ( + executor._flags.broken + or executor._flags.shutdown + or not reuse + ): + if executor._flags.broken: + reason = "broken" + elif executor._flags.shutdown: + reason = "shutdown" + else: + reason = "arguments have changed" + mp.util.debug( + "Creating a new executor with max_workers=" + f"{max_workers} as the previous instance cannot be " + f"reused ({reason})." + ) + executor.shutdown(wait=True, kill_workers=kill_workers) + _executor = executor = _executor_kwargs = None + # Recursive call to build a new instance + return cls.get_reusable_executor( + max_workers=max_workers, **kwargs + ) + else: + mp.util.debug( + "Reusing existing executor with " + f"max_workers={executor._max_workers}." + ) + is_reused = True + executor._resize(max_workers) + + return executor, is_reused + + def submit(self, fn, *args, **kwargs): + with self._submit_resize_lock: + return super().submit(fn, *args, **kwargs) + + def _resize(self, max_workers): + with self._submit_resize_lock: + if max_workers is None: + raise ValueError("Trying to resize with max_workers=None") + elif max_workers == self._max_workers: + return + + if self._executor_manager_thread is None: + # If the executor_manager_thread has not been started + # then no processes have been spawned and we can just + # update _max_workers and return + self._max_workers = max_workers + return + + self._wait_job_completion() + + # Some process might have returned due to timeout so check how many + # children are still alive. Use the _process_management_lock to + # ensure that no process are spawned or timeout during the resize. + with self._processes_management_lock: + processes = list(self._processes.values()) + nb_children_alive = sum(p.is_alive() for p in processes) + self._max_workers = max_workers + for _ in range(max_workers, nb_children_alive): + self._call_queue.put(None) + while ( + len(self._processes) > max_workers and not self._flags.broken + ): + time.sleep(1e-3) + + self._adjust_process_count() + processes = list(self._processes.values()) + while not all(p.is_alive() for p in processes): + time.sleep(1e-3) + + def _wait_job_completion(self): + """Wait for the cache to be empty before resizing the pool.""" + # Issue a warning to the user about the bad effect of this usage. 
+ if self._pending_work_items: + warnings.warn( + "Trying to resize an executor with running jobs: " + "waiting for jobs completion before resizing.", + UserWarning, + ) + mp.util.debug( + f"Executor {self.executor_id} waiting for jobs completion " + "before resizing" + ) + # Wait for the completion of the jobs + while self._pending_work_items: + time.sleep(1e-3) + + def _setup_queues(self, job_reducers, result_reducers): + # As this executor can be resized, use a large queue size to avoid + # underestimating capacity and introducing overhead + queue_size = 2 * cpu_count() + EXTRA_QUEUED_CALLS + super()._setup_queues( + job_reducers, result_reducers, queue_size=queue_size + ) diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/func_inspect.py b/llmeval-env/lib/python3.10/site-packages/joblib/func_inspect.py new file mode 100644 index 0000000000000000000000000000000000000000..3f8094614b90abed5804723908ddf5eb109901b1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/func_inspect.py @@ -0,0 +1,369 @@ +""" +My own variation on function-specific inspect-like features. +""" + +# Author: Gael Varoquaux +# Copyright (c) 2009 Gael Varoquaux +# License: BSD Style, 3 clauses. + +import inspect +import warnings +import re +import os +import collections + +from itertools import islice +from tokenize import open as open_py_source + +from .logger import pformat + +full_argspec_fields = ('args varargs varkw defaults kwonlyargs ' + 'kwonlydefaults annotations') +full_argspec_type = collections.namedtuple('FullArgSpec', full_argspec_fields) + + +def get_func_code(func): + """ Attempts to retrieve a reliable function code hash. + + The reason we don't use inspect.getsource is that it caches the + source, whereas we want this to be modified on the fly when the + function is modified. + + Returns + ------- + func_code: string + The function code + source_file: string + The path to the file in which the function is defined. + first_line: int + The first line of the code in the source file. + + Notes + ------ + This function does a bit more magic than inspect, and is thus + more robust. + """ + source_file = None + try: + code = func.__code__ + source_file = code.co_filename + if not os.path.exists(source_file): + # Use inspect for lambda functions and functions defined in an + # interactive shell, or in doctests + source_code = ''.join(inspect.getsourcelines(func)[0]) + line_no = 1 + if source_file.startswith('', source_file).groups() + line_no = int(line_no) + source_file = '' % source_file + return source_code, source_file, line_no + # Try to retrieve the source code. + with open_py_source(source_file) as source_file_obj: + first_line = code.co_firstlineno + # All the lines after the function definition: + source_lines = list(islice(source_file_obj, first_line - 1, None)) + return ''.join(inspect.getblock(source_lines)), source_file, first_line + except: # noqa: E722 + # If the source code fails, we use the hash. This is fragile and + # might change from one session to another. + if hasattr(func, '__code__'): + # Python 3.X + return str(func.__code__.__hash__()), source_file, -1 + else: + # Weird objects like numpy ufunc don't have __code__ + # This is fragile, as quite often the id of the object is + # in the repr, so it might not persist across sessions, + # however it will work for ufuncs. 
+ return repr(func), source_file, -1 + + +def _clean_win_chars(string): + """Windows cannot encode some characters in filename.""" + import urllib + if hasattr(urllib, 'quote'): + quote = urllib.quote + else: + # In Python 3, quote is elsewhere + import urllib.parse + quote = urllib.parse.quote + for char in ('<', '>', '!', ':', '\\'): + string = string.replace(char, quote(char)) + return string + + +def get_func_name(func, resolv_alias=True, win_characters=True): + """ Return the function import path (as a list of module names), and + a name for the function. + + Parameters + ---------- + func: callable + The func to inspect + resolv_alias: boolean, optional + If true, possible local aliases are indicated. + win_characters: boolean, optional + If true, substitute special characters using urllib.quote + This is useful in Windows, as it cannot encode some filenames + """ + if hasattr(func, '__module__'): + module = func.__module__ + else: + try: + module = inspect.getmodule(func) + except TypeError: + if hasattr(func, '__class__'): + module = func.__class__.__module__ + else: + module = 'unknown' + if module is None: + # Happens in doctests, eg + module = '' + if module == '__main__': + try: + filename = os.path.abspath(inspect.getsourcefile(func)) + except: # noqa: E722 + filename = None + if filename is not None: + # mangling of full path to filename + parts = filename.split(os.sep) + if parts[-1].startswith(', where: + # - N is the cell number where the function was defined + # - XYZ is a hash representing the function's code (and name). + # It will be consistent across sessions and kernel restarts, + # and will change if the function's code/name changes + # We remove N so that cache is properly hit if the cell where + # the func is defined is re-exectuted. + # The XYZ hash should avoid collisions between functions with + # the same name, both within the same notebook but also across + # notebooks + splitted = parts[-1].split('-') + parts[-1] = '-'.join(splitted[:2] + splitted[3:]) + elif len(parts) > 2 and parts[-2].startswith('ipykernel_'): + # In a notebook session (ipykernel). Filename seems to be 'xyz' + # of above. parts[-2] has the structure ipykernel_XXXXXX where + # XXXXXX is a six-digit number identifying the current run (?). + # If we split it off, the function again has the same + # identifier across runs. + parts[-2] = 'ipykernel' + filename = '-'.join(parts) + if filename.endswith('.py'): + filename = filename[:-3] + module = module + '-' + filename + module = module.split('.') + if hasattr(func, 'func_name'): + name = func.func_name + elif hasattr(func, '__name__'): + name = func.__name__ + else: + name = 'unknown' + # Hack to detect functions not defined at the module-level + if resolv_alias: + # TODO: Maybe add a warning here? 
+ if hasattr(func, 'func_globals') and name in func.func_globals: + if not func.func_globals[name] is func: + name = '%s-alias' % name + if hasattr(func, '__qualname__') and func.__qualname__ != name: + # Extend the module name in case of nested functions to avoid + # (module, name) collisions + module.extend(func.__qualname__.split(".")[:-1]) + if inspect.ismethod(func): + # We need to add the name of the class + if hasattr(func, 'im_class'): + klass = func.im_class + module.append(klass.__name__) + if os.name == 'nt' and win_characters: + # Windows can't encode certain characters in filenames + name = _clean_win_chars(name) + module = [_clean_win_chars(s) for s in module] + return module, name + + +def _signature_str(function_name, arg_sig): + """Helper function to output a function signature""" + return '{}{}'.format(function_name, arg_sig) + + +def _function_called_str(function_name, args, kwargs): + """Helper function to output a function call""" + template_str = '{0}({1}, {2})' + + args_str = repr(args)[1:-1] + kwargs_str = ', '.join('%s=%s' % (k, v) + for k, v in kwargs.items()) + return template_str.format(function_name, args_str, + kwargs_str) + + +def filter_args(func, ignore_lst, args=(), kwargs=dict()): + """ Filters the given args and kwargs using a list of arguments to + ignore, and a function specification. + + Parameters + ---------- + func: callable + Function giving the argument specification + ignore_lst: list of strings + List of arguments to ignore (either a name of an argument + in the function spec, or '*', or '**') + *args: list + Positional arguments passed to the function. + **kwargs: dict + Keyword arguments passed to the function + + Returns + ------- + filtered_args: list + List of filtered positional and keyword arguments. + """ + args = list(args) + if isinstance(ignore_lst, str): + # Catch a common mistake + raise ValueError( + 'ignore_lst must be a list of parameters to ignore ' + '%s (type %s) was given' % (ignore_lst, type(ignore_lst))) + # Special case for functools.partial objects + if (not inspect.ismethod(func) and not inspect.isfunction(func)): + if ignore_lst: + warnings.warn('Cannot inspect object %s, ignore list will ' + 'not work.' % func, stacklevel=2) + return {'*': args, '**': kwargs} + arg_sig = inspect.signature(func) + arg_names = [] + arg_defaults = [] + arg_kwonlyargs = [] + arg_varargs = None + arg_varkw = None + for param in arg_sig.parameters.values(): + if param.kind is param.POSITIONAL_OR_KEYWORD: + arg_names.append(param.name) + elif param.kind is param.KEYWORD_ONLY: + arg_names.append(param.name) + arg_kwonlyargs.append(param.name) + elif param.kind is param.VAR_POSITIONAL: + arg_varargs = param.name + elif param.kind is param.VAR_KEYWORD: + arg_varkw = param.name + if param.default is not param.empty: + arg_defaults.append(param.default) + if inspect.ismethod(func): + # First argument is 'self', it has been removed by Python + # we need to add it back: + args = [func.__self__, ] + args + # func is an instance method, inspect.signature(func) does not + # include self, we need to fetch it from the class method, i.e + # func.__func__ + class_method_sig = inspect.signature(func.__func__) + self_name = next(iter(class_method_sig.parameters)) + arg_names = [self_name] + arg_names + # XXX: Maybe I need an inspect.isbuiltin to detect C-level methods, such + # as on ndarrays. 
+ + _, name = get_func_name(func, resolv_alias=False) + arg_dict = dict() + arg_position = -1 + for arg_position, arg_name in enumerate(arg_names): + if arg_position < len(args): + # Positional argument or keyword argument given as positional + if arg_name not in arg_kwonlyargs: + arg_dict[arg_name] = args[arg_position] + else: + raise ValueError( + "Keyword-only parameter '%s' was passed as " + 'positional parameter for %s:\n' + ' %s was called.' + % (arg_name, + _signature_str(name, arg_sig), + _function_called_str(name, args, kwargs)) + ) + + else: + position = arg_position - len(arg_names) + if arg_name in kwargs: + arg_dict[arg_name] = kwargs[arg_name] + else: + try: + arg_dict[arg_name] = arg_defaults[position] + except (IndexError, KeyError) as e: + # Missing argument + raise ValueError( + 'Wrong number of arguments for %s:\n' + ' %s was called.' + % (_signature_str(name, arg_sig), + _function_called_str(name, args, kwargs)) + ) from e + + varkwargs = dict() + for arg_name, arg_value in sorted(kwargs.items()): + if arg_name in arg_dict: + arg_dict[arg_name] = arg_value + elif arg_varkw is not None: + varkwargs[arg_name] = arg_value + else: + raise TypeError("Ignore list for %s() contains an unexpected " + "keyword argument '%s'" % (name, arg_name)) + + if arg_varkw is not None: + arg_dict['**'] = varkwargs + if arg_varargs is not None: + varargs = args[arg_position + 1:] + arg_dict['*'] = varargs + + # Now remove the arguments to be ignored + for item in ignore_lst: + if item in arg_dict: + arg_dict.pop(item) + else: + raise ValueError("Ignore list: argument '%s' is not defined for " + "function %s" + % (item, + _signature_str(name, arg_sig)) + ) + # XXX: Return a sorted list of pairs? + return arg_dict + + +def _format_arg(arg): + formatted_arg = pformat(arg, indent=2) + if len(formatted_arg) > 1500: + formatted_arg = '%s...' % formatted_arg[:700] + return formatted_arg + + +def format_signature(func, *args, **kwargs): + # XXX: Should this use inspect.formatargvalues/formatargspec? + module, name = get_func_name(func) + module = [m for m in module if m] + if module: + module.append(name) + module_path = '.'.join(module) + else: + module_path = name + arg_str = list() + previous_length = 0 + for arg in args: + formatted_arg = _format_arg(arg) + if previous_length > 80: + formatted_arg = '\n%s' % formatted_arg + previous_length = len(formatted_arg) + arg_str.append(formatted_arg) + arg_str.extend(['%s=%s' % (v, _format_arg(i)) for v, i in kwargs.items()]) + arg_str = ', '.join(arg_str) + + signature = '%s(%s)' % (name, arg_str) + return module_path, signature + + +def format_call(func, args, kwargs, object_name="Memory"): + """ Returns a nicely formatted statement displaying the function + call with the given arguments. + """ + path, signature = format_signature(func, *args, **kwargs) + msg = '%s\n[%s] Calling %s...\n%s' % (80 * '_', object_name, + path, signature) + return msg + # XXX: Not using logging framework + # self.debug(msg) diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/logger.py b/llmeval-env/lib/python3.10/site-packages/joblib/logger.py new file mode 100644 index 0000000000000000000000000000000000000000..cf9d258011f3c581334a93ef3ccdac7dfb19b25e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/logger.py @@ -0,0 +1,162 @@ +""" +Helpers for logging. + +This module needs much love to become useful. +""" + +# Author: Gael Varoquaux +# Copyright (c) 2008 Gael Varoquaux +# License: BSD Style, 3 clauses. 
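A minimal sketch (assuming joblib is importable; the add function and the ignore list below are hypothetical, chosen only for illustration) of how filter_args and format_signature from func_inspect.py behave: ignored parameters drop out of the argument dictionary that joblib later hashes into a cache key.

from joblib.func_inspect import filter_args, format_signature

def add(a, b=2, *, verbose=False):
    return a + b

# 'verbose' is in the ignore list, so it is removed from the filtered
# argument dict and does not influence the cache key; the default for
# 'b' is filled in from the function signature.
filtered = filter_args(add, ['verbose'], (1,), {'verbose': True})
assert filtered == {'a': 1, 'b': 2}

# format_signature returns a dotted module path and a printable call string.
module_path, signature = format_signature(add, 1, verbose=True)
print(signature)  # add(1, verbose=True)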
+ +from __future__ import print_function + +import time +import sys +import os +import shutil +import logging +import pprint + +from .disk import mkdirp + + +def _squeeze_time(t): + """Remove .1s to the time under Windows: this is the time it take to + stat files. This is needed to make results similar to timings under + Unix, for tests + """ + if sys.platform.startswith('win'): + return max(0, t - .1) + else: + return t + + +def format_time(t): + t = _squeeze_time(t) + return "%.1fs, %.1fmin" % (t, t / 60.) + + +def short_format_time(t): + t = _squeeze_time(t) + if t > 60: + return "%4.1fmin" % (t / 60.) + else: + return " %5.1fs" % (t) + + +def pformat(obj, indent=0, depth=3): + if 'numpy' in sys.modules: + import numpy as np + print_options = np.get_printoptions() + np.set_printoptions(precision=6, threshold=64, edgeitems=1) + else: + print_options = None + out = pprint.pformat(obj, depth=depth, indent=indent) + if print_options: + np.set_printoptions(**print_options) + return out + + +############################################################################### +# class `Logger` +############################################################################### +class Logger(object): + """ Base class for logging messages. + """ + + def __init__(self, depth=3, name=None): + """ + Parameters + ---------- + depth: int, optional + The depth of objects printed. + name: str, optional + The namespace to log to. If None, defaults to joblib. + """ + self.depth = depth + self._name = name if name else 'joblib' + + def warn(self, msg): + logging.getLogger(self._name).warning("[%s]: %s" % (self, msg)) + + def info(self, msg): + logging.info("[%s]: %s" % (self, msg)) + + def debug(self, msg): + # XXX: This conflicts with the debug flag used in children class + logging.getLogger(self._name).debug("[%s]: %s" % (self, msg)) + + def format(self, obj, indent=0): + """Return the formatted representation of the object.""" + return pformat(obj, indent=indent, depth=self.depth) + + +############################################################################### +# class `PrintTime` +############################################################################### +class PrintTime(object): + """ Print and log messages while keeping track of time. + """ + + def __init__(self, logfile=None, logdir=None): + if logfile is not None and logdir is not None: + raise ValueError('Cannot specify both logfile and logdir') + # XXX: Need argument docstring + self.last_time = time.time() + self.start_time = self.last_time + if logdir is not None: + logfile = os.path.join(logdir, 'joblib.log') + self.logfile = logfile + if logfile is not None: + mkdirp(os.path.dirname(logfile)) + if os.path.exists(logfile): + # Rotate the logs + for i in range(1, 9): + try: + shutil.move(logfile + '.%i' % i, + logfile + '.%i' % (i + 1)) + except: # noqa: E722 + "No reason failing here" + # Use a copy rather than a move, so that a process + # monitoring this file does not get lost. + try: + shutil.copy(logfile, logfile + '.1') + except: # noqa: E722 + "No reason failing here" + try: + with open(logfile, 'w') as logfile: + logfile.write('\nLogging joblib python script\n') + logfile.write('\n---%s---\n' % time.ctime(self.last_time)) + except: # noqa: E722 + """ Multiprocessing writing to files can create race + conditions. Rather fail silently than crash the + computation. + """ + # XXX: We actually need a debug flag to disable this + # silent failure. 
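A minimal usage sketch for PrintTime, assuming a writable placeholder directory (/tmp/joblib_demo_logs is hypothetical): each call prints the elapsed time since the previous call to stderr and appends the same message to joblib.log in that directory, rotating any existing logs.

from joblib.logger import PrintTime

print_time = PrintTime(logdir='/tmp/joblib_demo_logs')  # hypothetical path
# ... some work happens here ...
print_time('loaded data')          # "loaded data: <time since last call>"
print_time('total', total=True)    # elapsed time since the object was created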
+ + def __call__(self, msg='', total=False): + """ Print the time elapsed between the last call and the current + call, with an optional message. + """ + if not total: + time_lapse = time.time() - self.last_time + full_msg = "%s: %s" % (msg, format_time(time_lapse)) + else: + # FIXME: Too much logic duplicated + time_lapse = time.time() - self.start_time + full_msg = "%s: %.2fs, %.1f min" % (msg, time_lapse, + time_lapse / 60) + print(full_msg, file=sys.stderr) + if self.logfile is not None: + try: + with open(self.logfile, 'a') as f: + print(full_msg, file=f) + except: # noqa: E722 + """ Multiprocessing writing to files can create race + conditions. Rather fail silently than crash the + calculation. + """ + # XXX: We actually need a debug flag to disable this + # silent failure. + self.last_time = time.time() diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/memory.py b/llmeval-env/lib/python3.10/site-packages/joblib/memory.py new file mode 100644 index 0000000000000000000000000000000000000000..b83a855da26edc03e32cc5369d6b6441e7a0203e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/memory.py @@ -0,0 +1,1172 @@ +""" +A context object for caching a function's return value each time it +is called with the same input arguments. + +""" + +# Author: Gael Varoquaux +# Copyright (c) 2009 Gael Varoquaux +# License: BSD Style, 3 clauses. + + +import asyncio +import datetime +import functools +import inspect +import logging +import os +import pathlib +import pydoc +import re +import textwrap +import time +import tokenize +import traceback +import warnings +import weakref + +from . import hashing +from ._store_backends import CacheWarning # noqa +from ._store_backends import FileSystemStoreBackend, StoreBackendBase +from .func_inspect import (filter_args, format_call, format_signature, + get_func_code, get_func_name) +from .logger import Logger, format_time, pformat + +FIRST_LINE_TEXT = "# first line:" + +# TODO: The following object should have a data store object as a sub +# object, and the interface to persist and query should be separated in +# the data store. +# +# This would enable creating 'Memory' objects with a different logic for +# pickling that would simply span a MemorizedFunc with the same +# store (or do we want to copy it to avoid cross-talks?), for instance to +# implement HDF5 pickling. + +# TODO: Same remark for the logger, and probably use the Python logging +# mechanism. + + +def extract_first_line(func_code): + """ Extract the first line information from the function code + text if available. + """ + if func_code.startswith(FIRST_LINE_TEXT): + func_code = func_code.split('\n') + first_line = int(func_code[0][len(FIRST_LINE_TEXT):]) + func_code = '\n'.join(func_code[1:]) + else: + first_line = -1 + return func_code, first_line + + +class JobLibCollisionWarning(UserWarning): + """ Warn that there might be a collision between names of functions. + """ + + +_STORE_BACKENDS = {'local': FileSystemStoreBackend} + + +def register_store_backend(backend_name, backend): + """Extend available store backends. + + The Memory, MemorizeResult and MemorizeFunc objects are designed to be + agnostic to the type of store used behind. By default, the local file + system is used but this function gives the possibility to extend joblib's + memory pattern with other types of storage such as cloud storage (S3, GCS, + OpenStack, HadoopFS, etc) or blob DBs. + + Parameters + ---------- + backend_name: str + The name identifying the store backend being registered. 
For example, + 'local' is used with FileSystemStoreBackend. + backend: StoreBackendBase subclass + The name of a class that implements the StoreBackendBase interface. + + """ + if not isinstance(backend_name, str): + raise ValueError("Store backend name should be a string, " + "'{0}' given.".format(backend_name)) + if backend is None or not issubclass(backend, StoreBackendBase): + raise ValueError("Store backend should inherit " + "StoreBackendBase, " + "'{0}' given.".format(backend)) + + _STORE_BACKENDS[backend_name] = backend + + +def _store_backend_factory(backend, location, verbose=0, backend_options=None): + """Return the correct store object for the given location.""" + if backend_options is None: + backend_options = {} + + if isinstance(location, pathlib.Path): + location = str(location) + + if isinstance(location, StoreBackendBase): + return location + elif isinstance(location, str): + obj = None + location = os.path.expanduser(location) + # The location is not a local file system, we look in the + # registered backends if there's one matching the given backend + # name. + for backend_key, backend_obj in _STORE_BACKENDS.items(): + if backend == backend_key: + obj = backend_obj() + + # By default, we assume the FileSystemStoreBackend can be used if no + # matching backend could be found. + if obj is None: + raise TypeError('Unknown location {0} or backend {1}'.format( + location, backend)) + + # The store backend is configured with the extra named parameters, + # some of them are specific to the underlying store backend. + obj.configure(location, verbose=verbose, + backend_options=backend_options) + return obj + elif location is not None: + warnings.warn( + "Instantiating a backend using a {} as a location is not " + "supported by joblib. Returning None instead.".format( + location.__class__.__name__), UserWarning) + + return None + + +def _build_func_identifier(func): + """Build a roughly unique identifier for the cached function.""" + modules, funcname = get_func_name(func) + # We reuse historical fs-like way of building a function identifier + return os.path.join(*modules, funcname) + + +# An in-memory store to avoid looking at the disk-based function +# source code to check if a function definition has changed +_FUNCTION_HASHES = weakref.WeakKeyDictionary() + + +############################################################################### +# class `MemorizedResult` +############################################################################### +class MemorizedResult(Logger): + """Object representing a cached value. + + Attributes + ---------- + location: str + The location of joblib cache. Depends on the store backend used. + + func: function or str + function whose output is cached. The string case is intended only for + instantiation based on the output of repr() on another instance. + (namely eval(repr(memorized_instance)) works). + + argument_hash: str + hash of the function arguments. + + backend: str + Type of store backend for reading/writing cache files. + Default is 'local'. + + mmap_mode: {None, 'r+', 'r', 'w+', 'c'} + The memmapping mode used when loading from cache numpy arrays. See + numpy.load for the meaning of the different values. + + verbose: int + verbosity level (0 means no message). + + timestamp, metadata: string + for internal use only. 
+ """ + def __init__(self, location, call_id, backend='local', mmap_mode=None, + verbose=0, timestamp=None, metadata=None): + Logger.__init__(self) + self._call_id = call_id + self.store_backend = _store_backend_factory(backend, location, + verbose=verbose) + self.mmap_mode = mmap_mode + + if metadata is not None: + self.metadata = metadata + else: + self.metadata = self.store_backend.get_metadata(self._call_id) + + self.duration = self.metadata.get('duration', None) + self.verbose = verbose + self.timestamp = timestamp + + @property + def func(self): + return self.func_id + + @property + def func_id(self): + return self._call_id[0] + + @property + def args_id(self): + return self._call_id[1] + + @property + def argument_hash(self): + warnings.warn( + "The 'argument_hash' attribute has been deprecated in version " + "0.12 and will be removed in version 0.14.\n" + "Use `args_id` attribute instead.", + DeprecationWarning, stacklevel=2) + return self.args_id + + def get(self): + """Read value from cache and return it.""" + try: + return self.store_backend.load_item( + self._call_id, + timestamp=self.timestamp, + metadata=self.metadata, + verbose=self.verbose + ) + except ValueError as exc: + new_exc = KeyError( + "Error while trying to load a MemorizedResult's value. " + "It seems that this folder is corrupted : {}".format( + os.path.join(self.store_backend.location, *self._call_id))) + raise new_exc from exc + + def clear(self): + """Clear value from cache""" + self.store_backend.clear_item(self._call_id) + + def __repr__(self): + return '{}(location="{}", func="{}", args_id="{}")'.format( + self.__class__.__name__, self.store_backend.location, + *self._call_id + ) + + def __getstate__(self): + state = self.__dict__.copy() + state['timestamp'] = None + return state + + +class NotMemorizedResult(object): + """Class representing an arbitrary value. + + This class is a replacement for MemorizedResult when there is no cache. + """ + __slots__ = ('value', 'valid') + + def __init__(self, value): + self.value = value + self.valid = True + + def get(self): + if self.valid: + return self.value + else: + raise KeyError("No value stored.") + + def clear(self): + self.valid = False + self.value = None + + def __repr__(self): + if self.valid: + return ('{class_name}({value})' + .format(class_name=self.__class__.__name__, + value=pformat(self.value))) + else: + return self.__class__.__name__ + ' with no value' + + # __getstate__ and __setstate__ are required because of __slots__ + def __getstate__(self): + return {"valid": self.valid, "value": self.value} + + def __setstate__(self, state): + self.valid = state["valid"] + self.value = state["value"] + + +############################################################################### +# class `NotMemorizedFunc` +############################################################################### +class NotMemorizedFunc(object): + """No-op object decorating a function. + + This class replaces MemorizedFunc when there is no cache. It provides an + identical API but does not write anything on disk. + + Attributes + ---------- + func: callable + Original undecorated function. 
+ """ + # Should be a light as possible (for speed) + def __init__(self, func): + self.func = func + + def __call__(self, *args, **kwargs): + return self.func(*args, **kwargs) + + def call_and_shelve(self, *args, **kwargs): + return NotMemorizedResult(self.func(*args, **kwargs)) + + def __repr__(self): + return '{0}(func={1})'.format(self.__class__.__name__, self.func) + + def clear(self, warn=True): + # Argument "warn" is for compatibility with MemorizedFunc.clear + pass + + def call(self, *args, **kwargs): + return self.func(*args, **kwargs), {} + + def check_call_in_cache(self, *args, **kwargs): + return False + + +############################################################################### +# class `AsyncNotMemorizedFunc` +############################################################################### +class AsyncNotMemorizedFunc(NotMemorizedFunc): + async def call_and_shelve(self, *args, **kwargs): + return NotMemorizedResult(await self.func(*args, **kwargs)) + + +############################################################################### +# class `MemorizedFunc` +############################################################################### +class MemorizedFunc(Logger): + """Callable object decorating a function for caching its return value + each time it is called. + + Methods are provided to inspect the cache or clean it. + + Attributes + ---------- + func: callable + The original, undecorated, function. + + location: string + The location of joblib cache. Depends on the store backend used. + + backend: str + Type of store backend for reading/writing cache files. + Default is 'local', in which case the location is the path to a + disk storage. + + ignore: list or None + List of variable names to ignore when choosing whether to + recompute. + + mmap_mode: {None, 'r+', 'r', 'w+', 'c'} + The memmapping mode used when loading from cache + numpy arrays. See numpy.load for the meaning of the different + values. + + compress: boolean, or integer + Whether to zip the stored data on disk. If an integer is + given, it should be between 1 and 9, and sets the amount + of compression. Note that compressed arrays cannot be + read by memmapping. + + verbose: int, optional + The verbosity flag, controls messages that are issued as + the function is evaluated. + + cache_validation_callback: callable, optional + Callable to check if a result in cache is valid or is to be recomputed. + When the function is called with arguments for which a cache exists, + the callback is called with the cache entry's metadata as its sole + argument. If it returns True, the cached result is returned, else the + cache for these arguments is cleared and the result is recomputed. + """ + # ------------------------------------------------------------------------ + # Public interface + # ------------------------------------------------------------------------ + + def __init__(self, func, location, backend='local', ignore=None, + mmap_mode=None, compress=False, verbose=1, timestamp=None, + cache_validation_callback=None): + Logger.__init__(self) + self.mmap_mode = mmap_mode + self.compress = compress + self.func = func + self.cache_validation_callback = cache_validation_callback + self.func_id = _build_func_identifier(func) + self.ignore = ignore if ignore is not None else [] + self._verbose = verbose + + # retrieve store object from backend type and location. 
+ self.store_backend = _store_backend_factory(backend, location, + verbose=verbose, + backend_options=dict( + compress=compress, + mmap_mode=mmap_mode), + ) + if self.store_backend is not None: + # Create func directory on demand. + self.store_backend.store_cached_func_code([self.func_id]) + + self.timestamp = timestamp if timestamp is not None else time.time() + try: + functools.update_wrapper(self, func) + except Exception: + pass # Objects like ufunc don't like that + if inspect.isfunction(func): + doc = pydoc.TextDoc().document(func) + # Remove blank line + doc = doc.replace('\n', '\n\n', 1) + # Strip backspace-overprints for compatibility with autodoc + doc = re.sub('\x08.', '', doc) + else: + # Pydoc does a poor job on other objects + doc = func.__doc__ + self.__doc__ = 'Memoized version of %s' % doc + + self._func_code_info = None + self._func_code_id = None + + def _is_in_cache_and_valid(self, call_id): + """Check if the function call is cached and valid for given arguments. + + - Compare the function code with the one from the cached function, + asserting if it has changed. + - Check if the function call is present in the cache. + - Call `cache_validation_callback` for user define cache validation. + + Returns True if the function call is in cache and can be used, and + returns False otherwise. + """ + # Check if the code of the function has changed + if not self._check_previous_func_code(stacklevel=4): + return False + + # Check if this specific call is in the cache + if not self.store_backend.contains_item(call_id): + return False + + # Call the user defined cache validation callback + metadata = self.store_backend.get_metadata(call_id) + if (self.cache_validation_callback is not None and + not self.cache_validation_callback(metadata)): + self.store_backend.clear_item(call_id) + return False + + return True + + def _cached_call(self, args, kwargs, shelving): + """Call wrapped function and cache result, or read cache if available. + + This function returns the wrapped function output or a reference to + the cached result. + + Arguments: + ---------- + + args, kwargs: list and dict + input arguments for wrapped function + + shelving: bool + True when called via the call_and_shelve function. + + + Returns + ------- + output: Output of the wrapped function if shelving is false, or a + MemorizedResult reference to the value if shelving is true. + metadata: dict containing the metadata associated with the call. + """ + args_id = self._get_args_id(*args, **kwargs) + call_id = (self.func_id, args_id) + _, func_name = get_func_name(self.func) + func_info = self.store_backend.get_cached_func_info([self.func_id]) + location = func_info['location'] + + if self._verbose >= 20: + logging.basicConfig(level=logging.INFO) + _, signature = format_signature(self.func, *args, **kwargs) + self.info( + textwrap.dedent( + f""" + Querying {func_name} with signature + {signature}. + + (argument hash {args_id}) + + The store location is {location}. + """ + ) + ) + + # Compare the function code with the previous to see if the + # function code has changed and check if the results are present in + # the cache. 
+ if self._is_in_cache_and_valid(call_id): + if shelving: + return self._get_memorized_result(call_id), {} + + try: + start_time = time.time() + output = self._load_item(call_id) + if self._verbose > 4: + self._print_duration(time.time() - start_time, + context='cache loaded ') + return output, {} + except Exception: + # XXX: Should use an exception logger + _, signature = format_signature(self.func, *args, **kwargs) + self.warn('Exception while loading results for ' + '{}\n {}'.format(signature, traceback.format_exc())) + + if self._verbose > 10: + self.warn( + f"Computing func {func_name}, argument hash {args_id} " + f"in location {location}" + ) + + # Returns the output but not the metadata + return self._call(call_id, args, kwargs, shelving) + + @property + def func_code_info(self): + # 3-tuple property containing: the function source code, source file, + # and first line of the code inside the source file + if hasattr(self.func, '__code__'): + if self._func_code_id is None: + self._func_code_id = id(self.func.__code__) + elif id(self.func.__code__) != self._func_code_id: + # Be robust to dynamic reassignments of self.func.__code__ + self._func_code_info = None + + if self._func_code_info is None: + # Cache the source code of self.func . Provided that get_func_code + # (which should be called once on self) gets called in the process + # in which self.func was defined, this caching mechanism prevents + # undesired cache clearing when the cached function is called in + # an environment where the introspection utilities get_func_code + # relies on do not work (typically, in joblib child processes). + # See #1035 for more info + # TODO (pierreglaser): do the same with get_func_name? + self._func_code_info = get_func_code(self.func) + return self._func_code_info + + def call_and_shelve(self, *args, **kwargs): + """Call wrapped function, cache result and return a reference. + + This method returns a reference to the cached result instead of the + result itself. The reference object is small and pickeable, allowing + to send or store it easily. Call .get() on reference object to get + result. + + Returns + ------- + cached_result: MemorizedResult or NotMemorizedResult + reference to the value returned by the wrapped function. The + class "NotMemorizedResult" is used when there is no cache + activated (e.g. location=None in Memory). + """ + # Return the wrapped output, without the metadata + return self._cached_call(args, kwargs, shelving=True)[0] + + def __call__(self, *args, **kwargs): + # Return the output, without the metadata + return self._cached_call(args, kwargs, shelving=False)[0] + + def __getstate__(self): + # Make sure self.func's source is introspected prior to being pickled - + # code introspection utilities typically do not work inside child + # processes + _ = self.func_code_info + + # We don't store the timestamp when pickling, to avoid the hash + # depending from it. + state = self.__dict__.copy() + state['timestamp'] = None + + # Invalidate the code id as id(obj) will be different in the child + state['_func_code_id'] = None + + return state + + def check_call_in_cache(self, *args, **kwargs): + """Check if function call is in the memory cache. + + Does not call the function or do any work besides func inspection + and arg hashing. + + Returns + ------- + is_call_in_cache: bool + Whether or not the result of the function has been cached + for the input arguments that have been passed. 
+ """ + call_id = (self.func_id, self._get_args_id(*args, **kwargs)) + return self.store_backend.contains_item(call_id) + + # ------------------------------------------------------------------------ + # Private interface + # ------------------------------------------------------------------------ + + def _get_args_id(self, *args, **kwargs): + """Return the input parameter hash of a result.""" + return hashing.hash(filter_args(self.func, self.ignore, args, kwargs), + coerce_mmap=self.mmap_mode is not None) + + def _hash_func(self): + """Hash a function to key the online cache""" + func_code_h = hash(getattr(self.func, '__code__', None)) + return id(self.func), hash(self.func), func_code_h + + def _write_func_code(self, func_code, first_line): + """ Write the function code and the filename to a file. + """ + # We store the first line because the filename and the function + # name is not always enough to identify a function: people + # sometimes have several functions named the same way in a + # file. This is bad practice, but joblib should be robust to bad + # practice. + func_code = u'%s %i\n%s' % (FIRST_LINE_TEXT, first_line, func_code) + self.store_backend.store_cached_func_code([self.func_id], func_code) + + # Also store in the in-memory store of function hashes + is_named_callable = (hasattr(self.func, '__name__') and + self.func.__name__ != '') + if is_named_callable: + # Don't do this for lambda functions or strange callable + # objects, as it ends up being too fragile + func_hash = self._hash_func() + try: + _FUNCTION_HASHES[self.func] = func_hash + except TypeError: + # Some callable are not hashable + pass + + def _check_previous_func_code(self, stacklevel=2): + """ + stacklevel is the depth a which this function is called, to + issue useful warnings to the user. + """ + # First check if our function is in the in-memory store. + # Using the in-memory store not only makes things faster, but it + # also renders us robust to variations of the files when the + # in-memory version of the code does not vary + try: + if self.func in _FUNCTION_HASHES: + # We use as an identifier the id of the function and its + # hash. This is more likely to falsely change than have hash + # collisions, thus we are on the safe side. + func_hash = self._hash_func() + if func_hash == _FUNCTION_HASHES[self.func]: + return True + except TypeError: + # Some callables are not hashable + pass + + # Here, we go through some effort to be robust to dynamically + # changing code and collision. We cannot inspect.getsource + # because it is not reliable when using IPython's magic "%run". + func_code, source_file, first_line = self.func_code_info + try: + old_func_code, old_first_line = extract_first_line( + self.store_backend.get_cached_func_code([self.func_id])) + except (IOError, OSError): # some backend can also raise OSError + self._write_func_code(func_code, first_line) + return False + if old_func_code == func_code: + return True + + # We have differing code, is this because we are referring to + # different functions, or because the function we are referring to has + # changed? 
+ + _, func_name = get_func_name(self.func, resolv_alias=False, + win_characters=False) + if old_first_line == first_line == -1 or func_name == '': + if not first_line == -1: + func_description = ("{0} ({1}:{2})" + .format(func_name, source_file, + first_line)) + else: + func_description = func_name + warnings.warn(JobLibCollisionWarning( + "Cannot detect name collisions for function '{0}'" + .format(func_description)), stacklevel=stacklevel) + + # Fetch the code at the old location and compare it. If it is the + # same than the code store, we have a collision: the code in the + # file has not changed, but the name we have is pointing to a new + # code block. + if not old_first_line == first_line and source_file is not None: + if os.path.exists(source_file): + _, func_name = get_func_name(self.func, resolv_alias=False) + num_lines = len(func_code.split('\n')) + with tokenize.open(source_file) as f: + on_disk_func_code = f.readlines()[ + old_first_line - 1:old_first_line - 1 + num_lines - 1] + on_disk_func_code = ''.join(on_disk_func_code) + possible_collision = (on_disk_func_code.rstrip() == + old_func_code.rstrip()) + else: + possible_collision = source_file.startswith(' 10: + _, func_name = get_func_name(self.func, resolv_alias=False) + self.warn("Function {0} (identified by {1}) has changed" + ".".format(func_name, self.func_id)) + self.clear(warn=True) + return False + + def clear(self, warn=True): + """Empty the function's cache.""" + func_id = self.func_id + if self._verbose > 0 and warn: + self.warn("Clearing function cache identified by %s" % func_id) + self.store_backend.clear_path([func_id, ]) + + func_code, _, first_line = self.func_code_info + self._write_func_code(func_code, first_line) + + def call(self, *args, **kwargs): + """Force the execution of the function with the given arguments. + + The output values will be persisted, i.e., the cache will be updated + with any new values. + + Parameters + ---------- + *args: arguments + The arguments. + **kwargs: keyword arguments + Keyword arguments. + + Returns + ------- + output : object + The output of the function call. + metadata : dict + The metadata associated with the call. + """ + call_id = (self.func_id, self._get_args_id(*args, **kwargs)) + + # Return the output and the metadata + return self._call(call_id, args, kwargs) + + def _call(self, call_id, args, kwargs, shelving=False): + # Return the output and the metadata + self._before_call(args, kwargs) + start_time = time.time() + output = self.func(*args, **kwargs) + return self._after_call(call_id, args, kwargs, shelving, + output, start_time) + + def _before_call(self, args, kwargs): + if self._verbose > 0: + print(format_call(self.func, args, kwargs)) + + def _after_call(self, call_id, args, kwargs, shelving, output, start_time): + self.store_backend.dump_item(call_id, output, verbose=self._verbose) + duration = time.time() - start_time + if self._verbose > 0: + self._print_duration(duration) + metadata = self._persist_input(duration, call_id, args, kwargs) + if shelving: + return self._get_memorized_result(call_id, metadata), metadata + + if self.mmap_mode is not None: + # Memmap the output at the first call to be consistent with + # later calls + output = self._load_item(call_id, metadata) + return output, metadata + + def _persist_input(self, duration, call_id, args, kwargs, + this_duration_limit=0.5): + """ Save a small summary of the call using json format in the + output directory. + + output_dir: string + directory where to write metadata. 
+ + duration: float + time taken by hashing input arguments, calling the wrapped + function and persisting its output. + + args, kwargs: list and dict + input arguments for wrapped function + + this_duration_limit: float + Max execution time for this function before issuing a warning. + """ + start_time = time.time() + argument_dict = filter_args(self.func, self.ignore, + args, kwargs) + + input_repr = dict((k, repr(v)) for k, v in argument_dict.items()) + # This can fail due to race-conditions with multiple + # concurrent joblibs removing the file or the directory + metadata = { + "duration": duration, "input_args": input_repr, "time": start_time, + } + + self.store_backend.store_metadata(call_id, metadata) + + this_duration = time.time() - start_time + if this_duration > this_duration_limit: + # This persistence should be fast. It will not be if repr() takes + # time and its output is large, because json.dump will have to + # write a large file. This should not be an issue with numpy arrays + # for which repr() always output a short representation, but can + # be with complex dictionaries. Fixing the problem should be a + # matter of replacing repr() above by something smarter. + warnings.warn("Persisting input arguments took %.2fs to run." + "If this happens often in your code, it can cause " + "performance problems " + "(results will be correct in all cases). " + "The reason for this is probably some large input " + "arguments for a wrapped function." + % this_duration, stacklevel=5) + return metadata + + def _get_memorized_result(self, call_id, metadata=None): + return MemorizedResult(self.store_backend, call_id, + metadata=metadata, timestamp=self.timestamp, + verbose=self._verbose - 1) + + def _load_item(self, call_id, metadata=None): + return self.store_backend.load_item(call_id, metadata=metadata, + timestamp=self.timestamp, + verbose=self._verbose) + + def _print_duration(self, duration, context=''): + _, name = get_func_name(self.func) + msg = f"{name} {context}- {format_time(duration)}" + print(max(0, (80 - len(msg))) * '_' + msg) + + # ------------------------------------------------------------------------ + # Private `object` interface + # ------------------------------------------------------------------------ + + def __repr__(self): + return '{class_name}(func={func}, location={location})'.format( + class_name=self.__class__.__name__, + func=self.func, + location=self.store_backend.location,) + + +############################################################################### +# class `AsyncMemorizedFunc` +############################################################################### +class AsyncMemorizedFunc(MemorizedFunc): + async def __call__(self, *args, **kwargs): + out = self._cached_call(args, kwargs, shelving=False) + out = await out if asyncio.iscoroutine(out) else out + return out[0] # Don't return metadata + + async def call_and_shelve(self, *args, **kwargs): + out = self._cached_call(args, kwargs, shelving=True) + out = await out if asyncio.iscoroutine(out) else out + return out[0] # Don't return metadata + + async def call(self, *args, **kwargs): + out = super().call(*args, **kwargs) + return await out if asyncio.iscoroutine(out) else out + + async def _call(self, call_id, args, kwargs, shelving=False): + self._before_call(args, kwargs) + start_time = time.time() + output = await self.func(*args, **kwargs) + return self._after_call( + call_id, args, kwargs, shelving, output, start_time + ) + + 
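A minimal end-to-end sketch of the caching decorator built on MemorizedFunc (the cache directory below is a placeholder): the first call computes and persists the result, later calls with the same arguments read it back, and call_and_shelve returns a small picklable MemorizedResult reference instead of the value itself.

from joblib import Memory

memory = Memory('/tmp/joblib_demo_cache', verbose=0)  # hypothetical location

@memory.cache
def square(x):
    return x ** 2

square(3)                             # computed once and written to the store
square(3)                             # served from the cache
assert square.check_call_in_cache(3)

ref = square.call_and_shelve(4)       # MemorizedResult: small, picklable reference
assert ref.get() == 16
ref.clear()                           # drop only this cached value

square.clear(warn=False)              # empty the whole cache for this function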
+############################################################################### +# class `Memory` +############################################################################### +class Memory(Logger): + """ A context object for caching a function's return value each time it + is called with the same input arguments. + + All values are cached on the filesystem, in a deep directory + structure. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + location: str, pathlib.Path or None + The path of the base directory to use as a data store + or None. If None is given, no caching is done and + the Memory object is completely transparent. This option + replaces cachedir since version 0.12. + + backend: str, optional + Type of store backend for reading/writing cache files. + Default: 'local'. + The 'local' backend is using regular filesystem operations to + manipulate data (open, mv, etc) in the backend. + + mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, optional + The memmapping mode used when loading from cache + numpy arrays. See numpy.load for the meaning of the + arguments. + + compress: boolean, or integer, optional + Whether to zip the stored data on disk. If an integer is + given, it should be between 1 and 9, and sets the amount + of compression. Note that compressed arrays cannot be + read by memmapping. + + verbose: int, optional + Verbosity flag, controls the debug messages that are issued + as functions are evaluated. + + bytes_limit: int | str, optional + Limit in bytes of the size of the cache. By default, the size of + the cache is unlimited. When reducing the size of the cache, + ``joblib`` keeps the most recently accessed items first. If a + str is passed, it is converted to a number of bytes using units + { K | M | G} for kilo, mega, giga. + + **Note:** You need to call :meth:`joblib.Memory.reduce_size` to + actually reduce the cache size to be less than ``bytes_limit``. + + **Note:** This argument has been deprecated. One should give the + value of ``bytes_limit`` directly in + :meth:`joblib.Memory.reduce_size`. + + backend_options: dict, optional + Contains a dictionary of named parameters used to configure + the store backend. + """ + # ------------------------------------------------------------------------ + # Public interface + # ------------------------------------------------------------------------ + + def __init__(self, location=None, backend='local', + mmap_mode=None, compress=False, verbose=1, bytes_limit=None, + backend_options=None): + Logger.__init__(self) + self._verbose = verbose + self.mmap_mode = mmap_mode + self.timestamp = time.time() + if bytes_limit is not None: + warnings.warn( + "bytes_limit argument has been deprecated. It will be removed " + "in version 1.5. 
Please pass its value directly to " + "Memory.reduce_size.", + category=DeprecationWarning + ) + self.bytes_limit = bytes_limit + self.backend = backend + self.compress = compress + if backend_options is None: + backend_options = {} + self.backend_options = backend_options + + if compress and mmap_mode is not None: + warnings.warn('Compressed results cannot be memmapped', + stacklevel=2) + + self.location = location + if isinstance(location, str): + location = os.path.join(location, 'joblib') + + self.store_backend = _store_backend_factory( + backend, location, verbose=self._verbose, + backend_options=dict(compress=compress, mmap_mode=mmap_mode, + **backend_options)) + + def cache(self, func=None, ignore=None, verbose=None, mmap_mode=False, + cache_validation_callback=None): + """ Decorates the given function func to only compute its return + value for input arguments not cached on disk. + + Parameters + ---------- + func: callable, optional + The function to be decorated + ignore: list of strings + A list of arguments name to ignore in the hashing + verbose: integer, optional + The verbosity mode of the function. By default that + of the memory object is used. + mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, optional + The memmapping mode used when loading from cache + numpy arrays. See numpy.load for the meaning of the + arguments. By default that of the memory object is used. + cache_validation_callback: callable, optional + Callable to validate whether or not the cache is valid. When + the cached function is called with arguments for which a cache + exists, this callable is called with the metadata of the cached + result as its sole argument. If it returns True, then the + cached result is returned, else the cache for these arguments + is cleared and recomputed. + + Returns + ------- + decorated_func: MemorizedFunc object + The returned object is a MemorizedFunc object, that is + callable (behaves like a function), but offers extra + methods for cache lookup and management. See the + documentation for :class:`joblib.memory.MemorizedFunc`. + """ + if (cache_validation_callback is not None and + not callable(cache_validation_callback)): + raise ValueError( + "cache_validation_callback needs to be callable. " + f"Got {cache_validation_callback}." + ) + if func is None: + # Partial application, to be able to specify extra keyword + # arguments in decorators + return functools.partial( + self.cache, ignore=ignore, + mmap_mode=mmap_mode, + verbose=verbose, + cache_validation_callback=cache_validation_callback + ) + if self.store_backend is None: + cls = (AsyncNotMemorizedFunc + if asyncio.iscoroutinefunction(func) + else NotMemorizedFunc) + return cls(func) + if verbose is None: + verbose = self._verbose + if mmap_mode is False: + mmap_mode = self.mmap_mode + if isinstance(func, MemorizedFunc): + func = func.func + cls = (AsyncMemorizedFunc + if asyncio.iscoroutinefunction(func) + else MemorizedFunc) + return cls( + func, location=self.store_backend, backend=self.backend, + ignore=ignore, mmap_mode=mmap_mode, compress=self.compress, + verbose=verbose, timestamp=self.timestamp, + cache_validation_callback=cache_validation_callback + ) + + def clear(self, warn=True): + """ Erase the complete cache directory. + """ + if warn: + self.warn('Flushing completely the cache') + if self.store_backend is not None: + self.store_backend.clear() + + # As the cache is completely clear, make sure the _FUNCTION_HASHES + # cache is also reset. 
Else, for a function that is present in this + # table, results cached after this clear will be have cache miss + # as the function code is not re-written. + _FUNCTION_HASHES.clear() + + def reduce_size(self, bytes_limit=None, items_limit=None, age_limit=None): + """Remove cache elements to make the cache fit its limits. + + The limitation can impose that the cache size fits in ``bytes_limit``, + that the number of cache items is no more than ``items_limit``, and + that all files in cache are not older than ``age_limit``. + + Parameters + ---------- + bytes_limit: int | str, optional + Limit in bytes of the size of the cache. By default, the size of + the cache is unlimited. When reducing the size of the cache, + ``joblib`` keeps the most recently accessed items first. If a + str is passed, it is converted to a number of bytes using units + { K | M | G} for kilo, mega, giga. + + items_limit: int, optional + Number of items to limit the cache to. By default, the number of + items in the cache is unlimited. When reducing the size of the + cache, ``joblib`` keeps the most recently accessed items first. + + age_limit: datetime.timedelta, optional + Maximum age of items to limit the cache to. When reducing the size + of the cache, any items last accessed more than the given length of + time ago are deleted. + """ + if bytes_limit is None: + bytes_limit = self.bytes_limit + + if self.store_backend is None: + # No cached results, this function does nothing. + return + + if bytes_limit is None and items_limit is None and age_limit is None: + # No limitation to impose, returning + return + + # Defers the actual limits enforcing to the store backend. + self.store_backend.enforce_store_limits( + bytes_limit, items_limit, age_limit + ) + + def eval(self, func, *args, **kwargs): + """ Eval function func with arguments `*args` and `**kwargs`, + in the context of the memory. + + This method works similarly to the builtin `apply`, except + that the function is called only if the cache is not + up to date. + + """ + if self.store_backend is None: + return func(*args, **kwargs) + return self.cache(func)(*args, **kwargs) + + # ------------------------------------------------------------------------ + # Private `object` interface + # ------------------------------------------------------------------------ + + def __repr__(self): + return '{class_name}(location={location})'.format( + class_name=self.__class__.__name__, + location=(None if self.store_backend is None + else self.store_backend.location)) + + def __getstate__(self): + """ We don't store the timestamp when pickling, to avoid the hash + depending from it. + """ + state = self.__dict__.copy() + state['timestamp'] = None + return state + + +############################################################################### +# cache_validation_callback helpers +############################################################################### + +def expires_after(days=0, seconds=0, microseconds=0, milliseconds=0, minutes=0, + hours=0, weeks=0): + """Helper cache_validation_callback to force recompute after a duration. + + Parameters + ---------- + days, seconds, microseconds, milliseconds, minutes, hours, weeks: numbers + argument passed to a timedelta. 
+ """ + delta = datetime.timedelta( + days=days, seconds=seconds, microseconds=microseconds, + milliseconds=milliseconds, minutes=minutes, hours=hours, weeks=weeks + ) + + def cache_validation_callback(metadata): + computation_age = time.time() - metadata['time'] + return computation_age < delta.total_seconds() + + return cache_validation_callback diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/numpy_pickle.py b/llmeval-env/lib/python3.10/site-packages/joblib/numpy_pickle.py new file mode 100644 index 0000000000000000000000000000000000000000..bf83bb0914571dfa978bbe41a6d0e3a44a9cb947 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/numpy_pickle.py @@ -0,0 +1,659 @@ +"""Utilities for fast persistence of big data, with optional compression.""" + +# Author: Gael Varoquaux +# Copyright (c) 2009 Gael Varoquaux +# License: BSD Style, 3 clauses. + +import pickle +import os +import warnings +import io +from pathlib import Path + +from .compressor import lz4, LZ4_NOT_INSTALLED_ERROR +from .compressor import _COMPRESSORS, register_compressor, BinaryZlibFile +from .compressor import (ZlibCompressorWrapper, GzipCompressorWrapper, + BZ2CompressorWrapper, LZMACompressorWrapper, + XZCompressorWrapper, LZ4CompressorWrapper) +from .numpy_pickle_utils import Unpickler, Pickler +from .numpy_pickle_utils import _read_fileobject, _write_fileobject +from .numpy_pickle_utils import _read_bytes, BUFFER_SIZE +from .numpy_pickle_utils import _ensure_native_byte_order +from .numpy_pickle_compat import load_compatibility +from .numpy_pickle_compat import NDArrayWrapper +# For compatibility with old versions of joblib, we need ZNDArrayWrapper +# to be visible in the current namespace. +# Explicitly skipping next line from flake8 as it triggers an F401 warning +# which we don't care. +from .numpy_pickle_compat import ZNDArrayWrapper # noqa +from .backports import make_memmap + +# Register supported compressors +register_compressor('zlib', ZlibCompressorWrapper()) +register_compressor('gzip', GzipCompressorWrapper()) +register_compressor('bz2', BZ2CompressorWrapper()) +register_compressor('lzma', LZMACompressorWrapper()) +register_compressor('xz', XZCompressorWrapper()) +register_compressor('lz4', LZ4CompressorWrapper()) + + +############################################################################### +# Utility objects for persistence. + +# For convenience, 16 bytes are used to be sure to cover all the possible +# dtypes' alignments. For reference, see: +# https://numpy.org/devdocs/dev/alignment.html +NUMPY_ARRAY_ALIGNMENT_BYTES = 16 + + +class NumpyArrayWrapper(object): + """An object to be persisted instead of numpy arrays. + + This object is used to hack into the pickle machinery and read numpy + array data from our custom persistence format. + More precisely, this object is used for: + * carrying the information of the persisted array: subclass, shape, order, + dtype. Those ndarray metadata are used to correctly reconstruct the array + with low level numpy functions. + * determining if memmap is allowed on the array. + * reading the array bytes from a file. + * reading the array using memorymap from a file. + * writing the array bytes to a file. + + Attributes + ---------- + subclass: numpy.ndarray subclass + Determine the subclass of the wrapped array. + shape: numpy.ndarray shape + Determine the shape of the wrapped array. + order: {'C', 'F'} + Determine the order of wrapped array data. 'C' is for C order, 'F' is + for fortran order. 
+ dtype: numpy.ndarray dtype + Determine the data type of the wrapped array. + allow_mmap: bool + Determine if memory mapping is allowed on the wrapped array. + Default: False. + """ + + def __init__(self, subclass, shape, order, dtype, allow_mmap=False, + numpy_array_alignment_bytes=NUMPY_ARRAY_ALIGNMENT_BYTES): + """Constructor. Store the useful information for later.""" + self.subclass = subclass + self.shape = shape + self.order = order + self.dtype = dtype + self.allow_mmap = allow_mmap + # We make numpy_array_alignment_bytes an instance attribute to allow us + # to change our mind about the default alignment and still load the old + # pickles (with the previous alignment) correctly + self.numpy_array_alignment_bytes = numpy_array_alignment_bytes + + def safe_get_numpy_array_alignment_bytes(self): + # NumpyArrayWrapper instances loaded from joblib <= 1.1 pickles don't + # have an numpy_array_alignment_bytes attribute + return getattr(self, 'numpy_array_alignment_bytes', None) + + def write_array(self, array, pickler): + """Write array bytes to pickler file handle. + + This function is an adaptation of the numpy write_array function + available in version 1.10.1 in numpy/lib/format.py. + """ + # Set buffer size to 16 MiB to hide the Python loop overhead. + buffersize = max(16 * 1024 ** 2 // array.itemsize, 1) + if array.dtype.hasobject: + # We contain Python objects so we cannot write out the data + # directly. Instead, we will pickle it out with version 2 of the + # pickle protocol. + pickle.dump(array, pickler.file_handle, protocol=2) + else: + numpy_array_alignment_bytes = \ + self.safe_get_numpy_array_alignment_bytes() + if numpy_array_alignment_bytes is not None: + current_pos = pickler.file_handle.tell() + pos_after_padding_byte = current_pos + 1 + padding_length = numpy_array_alignment_bytes - ( + pos_after_padding_byte % numpy_array_alignment_bytes) + # A single byte is written that contains the padding length in + # bytes + padding_length_byte = int.to_bytes( + padding_length, length=1, byteorder='little') + pickler.file_handle.write(padding_length_byte) + + if padding_length != 0: + padding = b'\xff' * padding_length + pickler.file_handle.write(padding) + + for chunk in pickler.np.nditer(array, + flags=['external_loop', + 'buffered', + 'zerosize_ok'], + buffersize=buffersize, + order=self.order): + pickler.file_handle.write(chunk.tobytes('C')) + + def read_array(self, unpickler): + """Read array from unpickler file handle. + + This function is an adaptation of the numpy read_array function + available in version 1.10.1 in numpy/lib/format.py. + """ + if len(self.shape) == 0: + count = 1 + else: + # joblib issue #859: we cast the elements of self.shape to int64 to + # prevent a potential overflow when computing their product. + shape_int64 = [unpickler.np.int64(x) for x in self.shape] + count = unpickler.np.multiply.reduce(shape_int64) + # Now read the actual data. + if self.dtype.hasobject: + # The array contained Python objects. We need to unpickle the data. + array = pickle.load(unpickler.file_handle) + else: + numpy_array_alignment_bytes = \ + self.safe_get_numpy_array_alignment_bytes() + if numpy_array_alignment_bytes is not None: + padding_byte = unpickler.file_handle.read(1) + padding_length = int.from_bytes( + padding_byte, byteorder='little') + if padding_length != 0: + unpickler.file_handle.read(padding_length) + + # This is not a real file. We have to read it the + # memory-intensive way. 
+ # crc32 module fails on reads greater than 2 ** 32 bytes, + # breaking large reads from gzip streams. Chunk reads to + # BUFFER_SIZE bytes to avoid issue and reduce memory overhead + # of the read. In non-chunked case count < max_read_count, so + # only one read is performed. + max_read_count = BUFFER_SIZE // min(BUFFER_SIZE, + self.dtype.itemsize) + + array = unpickler.np.empty(count, dtype=self.dtype) + for i in range(0, count, max_read_count): + read_count = min(max_read_count, count - i) + read_size = int(read_count * self.dtype.itemsize) + data = _read_bytes(unpickler.file_handle, + read_size, "array data") + array[i:i + read_count] = \ + unpickler.np.frombuffer(data, dtype=self.dtype, + count=read_count) + del data + + if self.order == 'F': + array.shape = self.shape[::-1] + array = array.transpose() + else: + array.shape = self.shape + + # Detect byte order mismatch and swap as needed. + return _ensure_native_byte_order(array) + + def read_mmap(self, unpickler): + """Read an array using numpy memmap.""" + current_pos = unpickler.file_handle.tell() + offset = current_pos + numpy_array_alignment_bytes = \ + self.safe_get_numpy_array_alignment_bytes() + + if numpy_array_alignment_bytes is not None: + padding_byte = unpickler.file_handle.read(1) + padding_length = int.from_bytes(padding_byte, byteorder='little') + # + 1 is for the padding byte + offset += padding_length + 1 + + if unpickler.mmap_mode == 'w+': + unpickler.mmap_mode = 'r+' + + marray = make_memmap(unpickler.filename, + dtype=self.dtype, + shape=self.shape, + order=self.order, + mode=unpickler.mmap_mode, + offset=offset) + # update the offset so that it corresponds to the end of the read array + unpickler.file_handle.seek(offset + marray.nbytes) + + if (numpy_array_alignment_bytes is None and + current_pos % NUMPY_ARRAY_ALIGNMENT_BYTES != 0): + message = ( + f'The memmapped array {marray} loaded from the file ' + f'{unpickler.file_handle.name} is not byte aligned. ' + 'This may cause segmentation faults if this memmapped array ' + 'is used in some libraries like BLAS or PyTorch. ' + 'To get rid of this warning, regenerate your pickle file ' + 'with joblib >= 1.2.0. ' + 'See https://github.com/joblib/joblib/issues/563 ' + 'for more details' + ) + warnings.warn(message) + + return _ensure_native_byte_order(marray) + + def read(self, unpickler): + """Read the array corresponding to this wrapper. + + Use the unpickler to get all information to correctly read the array. + + Parameters + ---------- + unpickler: NumpyUnpickler + + Returns + ------- + array: numpy.ndarray + + """ + # When requested, only use memmap mode if allowed. + if unpickler.mmap_mode is not None and self.allow_mmap: + array = self.read_mmap(unpickler) + else: + array = self.read_array(unpickler) + + # Manage array subclass case + if (hasattr(array, '__array_prepare__') and + self.subclass not in (unpickler.np.ndarray, + unpickler.np.memmap)): + # We need to reconstruct another subclass + new_array = unpickler.np.core.multiarray._reconstruct( + self.subclass, (0,), 'b') + return new_array.__array_prepare__(array) + else: + return array + +############################################################################### +# Pickler classes + + +class NumpyPickler(Pickler): + """A pickler to persist big data efficiently. + + The main features of this object are: + * persistence of numpy arrays in a single file. + * optional compression with a special care on avoiding memory copies. 
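[Editor's note: the write_array / read_mmap code above reserves one byte for a padding length so that the raw array bytes start on a NUMPY_ARRAY_ALIGNMENT_BYTES boundary. The sketch below is illustrative only — the offset value is made up — and is not part of the vendored file.]

    # Minimal sketch of the padding arithmetic used by
    # NumpyArrayWrapper.write_array / read_mmap above (illustrative only).
    align = 16                       # NUMPY_ARRAY_ALIGNMENT_BYTES
    current_pos = 123                # hypothetical file offset before the wrapper writes
    pos_after_padding_byte = current_pos + 1
    padding_length = align - (pos_after_padding_byte % align)   # here: 16 - (124 % 16) = 4
    # One byte stores padding_length, then padding_length filler bytes follow,
    # so the first array byte lands on a 16-byte boundary.
    data_start = pos_after_padding_byte + padding_length        # 128
    assert data_start % align == 0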
+ + Attributes + ---------- + fp: file + File object handle used for serializing the input object. + protocol: int, optional + Pickle protocol used. Default is pickle.DEFAULT_PROTOCOL. + """ + + dispatch = Pickler.dispatch.copy() + + def __init__(self, fp, protocol=None): + self.file_handle = fp + self.buffered = isinstance(self.file_handle, BinaryZlibFile) + + # By default we want a pickle protocol that only changes with + # the major python version and not the minor one + if protocol is None: + protocol = pickle.DEFAULT_PROTOCOL + + Pickler.__init__(self, self.file_handle, protocol=protocol) + # delayed import of numpy, to avoid tight coupling + try: + import numpy as np + except ImportError: + np = None + self.np = np + + def _create_array_wrapper(self, array): + """Create and returns a numpy array wrapper from a numpy array.""" + order = 'F' if (array.flags.f_contiguous and + not array.flags.c_contiguous) else 'C' + allow_mmap = not self.buffered and not array.dtype.hasobject + + kwargs = {} + try: + self.file_handle.tell() + except io.UnsupportedOperation: + kwargs = {'numpy_array_alignment_bytes': None} + + wrapper = NumpyArrayWrapper(type(array), + array.shape, order, array.dtype, + allow_mmap=allow_mmap, + **kwargs) + + return wrapper + + def save(self, obj): + """Subclass the Pickler `save` method. + + This is a total abuse of the Pickler class in order to use the numpy + persistence function `save` instead of the default pickle + implementation. The numpy array is replaced by a custom wrapper in the + pickle persistence stack and the serialized array is written right + after in the file. Warning: the file produced does not follow the + pickle format. As such it can not be read with `pickle.load`. + """ + if self.np is not None and type(obj) in (self.np.ndarray, + self.np.matrix, + self.np.memmap): + if type(obj) is self.np.memmap: + # Pickling doesn't work with memmapped arrays + obj = self.np.asanyarray(obj) + + # The array wrapper is pickled instead of the real array. + wrapper = self._create_array_wrapper(obj) + Pickler.save(self, wrapper) + + # A framer was introduced with pickle protocol 4 and we want to + # ensure the wrapper object is written before the numpy array + # buffer in the pickle file. + # See https://www.python.org/dev/peps/pep-3154/#framing to get + # more information on the framer behavior. + if self.proto >= 4: + self.framer.commit_frame(force=True) + + # And then array bytes are written right after the wrapper. + wrapper.write_array(obj, self) + return + + return Pickler.save(self, obj) + + +class NumpyUnpickler(Unpickler): + """A subclass of the Unpickler to unpickle our numpy pickles. + + Attributes + ---------- + mmap_mode: str + The memorymap mode to use for reading numpy arrays. + file_handle: file_like + File object to unpickle from. + filename: str + Name of the file to unpickle from. It should correspond to file_handle. + This parameter is required when using mmap_mode. + np: module + Reference to numpy module if numpy is installed else None. + + """ + + dispatch = Unpickler.dispatch.copy() + + def __init__(self, filename, file_handle, mmap_mode=None): + # The next line is for backward compatibility with pickle generated + # with joblib versions less than 0.10. + self._dirname = os.path.dirname(filename) + + self.mmap_mode = mmap_mode + self.file_handle = file_handle + # filename is required for numpy mmap mode. 
+ self.filename = filename + self.compat_mode = False + Unpickler.__init__(self, self.file_handle) + try: + import numpy as np + except ImportError: + np = None + self.np = np + + def load_build(self): + """Called to set the state of a newly created object. + + We capture it to replace our place-holder objects, NDArrayWrapper or + NumpyArrayWrapper, by the array we are interested in. We + replace them directly in the stack of pickler. + NDArrayWrapper is used for backward compatibility with joblib <= 0.9. + """ + Unpickler.load_build(self) + + # For backward compatibility, we support NDArrayWrapper objects. + if isinstance(self.stack[-1], (NDArrayWrapper, NumpyArrayWrapper)): + if self.np is None: + raise ImportError("Trying to unpickle an ndarray, " + "but numpy didn't import correctly") + array_wrapper = self.stack.pop() + # If any NDArrayWrapper is found, we switch to compatibility mode, + # this will be used to raise a DeprecationWarning to the user at + # the end of the unpickling. + if isinstance(array_wrapper, NDArrayWrapper): + self.compat_mode = True + self.stack.append(array_wrapper.read(self)) + + # Be careful to register our new method. + dispatch[pickle.BUILD[0]] = load_build + + +############################################################################### +# Utility functions + +def dump(value, filename, compress=0, protocol=None, cache_size=None): + """Persist an arbitrary Python object into one file. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + value: any Python object + The object to store to disk. + filename: str, pathlib.Path, or file object. + The file object or path of the file in which it is to be stored. + The compression method corresponding to one of the supported filename + extensions ('.z', '.gz', '.bz2', '.xz' or '.lzma') will be used + automatically. + compress: int from 0 to 9 or bool or 2-tuple, optional + Optional compression level for the data. 0 or False is no compression. + Higher value means more compression, but also slower read and + write times. Using a value of 3 is often a good compromise. + See the notes for more details. + If compress is True, the compression level used is 3. + If compress is a 2-tuple, the first element must correspond to a string + between supported compressors (e.g 'zlib', 'gzip', 'bz2', 'lzma' + 'xz'), the second element must be an integer from 0 to 9, corresponding + to the compression level. + protocol: int, optional + Pickle protocol, see pickle.dump documentation for more details. + cache_size: positive int, optional + This option is deprecated in 0.10 and has no effect. + + Returns + ------- + filenames: list of strings + The list of file names in which the data is stored. If + compress is false, each array is stored in a different file. + + See Also + -------- + joblib.load : corresponding loader + + Notes + ----- + Memmapping on load cannot be used for compressed files. Thus + using compression can significantly slow down loading. In + addition, compressed files take up extra memory during + dump and load. + + """ + + if Path is not None and isinstance(filename, Path): + filename = str(filename) + + is_filename = isinstance(filename, str) + is_fileobj = hasattr(filename, "write") + + compress_method = 'zlib' # zlib is the default compression method. + if compress is True: + # By default, if compress is enabled, we want the default compress + # level of the compressor. 
+ compress_level = None + elif isinstance(compress, tuple): + # a 2-tuple was set in compress + if len(compress) != 2: + raise ValueError( + 'Compress argument tuple should contain exactly 2 elements: ' + '(compress method, compress level), you passed {}' + .format(compress)) + compress_method, compress_level = compress + elif isinstance(compress, str): + compress_method = compress + compress_level = None # Use default compress level + compress = (compress_method, compress_level) + else: + compress_level = compress + + if compress_method == 'lz4' and lz4 is None: + raise ValueError(LZ4_NOT_INSTALLED_ERROR) + + if (compress_level is not None and + compress_level is not False and + compress_level not in range(10)): + # Raising an error if a non valid compress level is given. + raise ValueError( + 'Non valid compress level given: "{}". Possible values are ' + '{}.'.format(compress_level, list(range(10)))) + + if compress_method not in _COMPRESSORS: + # Raising an error if an unsupported compression method is given. + raise ValueError( + 'Non valid compression method given: "{}". Possible values are ' + '{}.'.format(compress_method, _COMPRESSORS)) + + if not is_filename and not is_fileobj: + # People keep inverting arguments, and the resulting error is + # incomprehensible + raise ValueError( + 'Second argument should be a filename or a file-like object, ' + '%s (type %s) was given.' + % (filename, type(filename)) + ) + + if is_filename and not isinstance(compress, tuple): + # In case no explicit compression was requested using both compression + # method and level in a tuple and the filename has an explicit + # extension, we select the corresponding compressor. + + # unset the variable to be sure no compression level is set afterwards. + compress_method = None + for name, compressor in _COMPRESSORS.items(): + if filename.endswith(compressor.extension): + compress_method = name + + if compress_method in _COMPRESSORS and compress_level == 0: + # we choose the default compress_level in case it was not given + # as an argument (using compress). + compress_level = None + + if cache_size is not None: + # Cache size is deprecated starting from version 0.10 + warnings.warn("Please do not set 'cache_size' in joblib.dump, " + "this parameter has no effect and will be removed. " + "You used 'cache_size={}'".format(cache_size), + DeprecationWarning, stacklevel=2) + + if compress_level != 0: + with _write_fileobject(filename, compress=(compress_method, + compress_level)) as f: + NumpyPickler(f, protocol=protocol).dump(value) + elif is_filename: + with open(filename, 'wb') as f: + NumpyPickler(f, protocol=protocol).dump(value) + else: + NumpyPickler(filename, protocol=protocol).dump(value) + + # If the target container is a file object, nothing is returned. + if is_fileobj: + return + + # For compatibility, the list of created filenames (e.g with one element + # after 0.10.0) is returned by default. + return [filename] + + +def _unpickle(fobj, filename="", mmap_mode=None): + """Internal unpickling function.""" + # We are careful to open the file handle early and keep it open to + # avoid race-conditions on renames. + # That said, if data is stored in companion files, which can be + # the case with the old persistence format, moving the directory + # will create a race when joblib tries to access the companion + # files. 
+ unpickler = NumpyUnpickler(filename, fobj, mmap_mode=mmap_mode) + obj = None + try: + obj = unpickler.load() + if unpickler.compat_mode: + warnings.warn("The file '%s' has been generated with a " + "joblib version less than 0.10. " + "Please regenerate this pickle file." + % filename, + DeprecationWarning, stacklevel=3) + except UnicodeDecodeError as exc: + # More user-friendly error message + new_exc = ValueError( + 'You may be trying to read with ' + 'python 3 a joblib pickle generated with python 2. ' + 'This feature is not supported by joblib.') + new_exc.__cause__ = exc + raise new_exc + return obj + + +def load_temporary_memmap(filename, mmap_mode, unlink_on_gc_collect): + from ._memmapping_reducer import JOBLIB_MMAPS, add_maybe_unlink_finalizer + obj = load(filename, mmap_mode) + JOBLIB_MMAPS.add(obj.filename) + if unlink_on_gc_collect: + add_maybe_unlink_finalizer(obj) + return obj + + +def load(filename, mmap_mode=None): + """Reconstruct a Python object from a file persisted with joblib.dump. + + Read more in the :ref:`User Guide `. + + WARNING: joblib.load relies on the pickle module and can therefore + execute arbitrary Python code. It should therefore never be used + to load files from untrusted sources. + + Parameters + ---------- + filename: str, pathlib.Path, or file object. + The file object or path of the file from which to load the object + mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, optional + If not None, the arrays are memory-mapped from the disk. This + mode has no effect for compressed files. Note that in this + case the reconstructed object might no longer match exactly + the originally pickled object. + + Returns + ------- + result: any Python object + The object stored in the file. + + See Also + -------- + joblib.dump : function to save an object + + Notes + ----- + + This function can load numpy array files saved separately during the + dump. If the mmap_mode argument is given, it is passed to np.load and + arrays are loaded as memmaps. As a consequence, the reconstructed + object might not match the original pickled object. Note that if the + file was saved with compression, the arrays cannot be memmapped. + """ + if Path is not None and isinstance(filename, Path): + filename = str(filename) + + if hasattr(filename, "read"): + fobj = filename + filename = getattr(fobj, 'name', '') + with _read_fileobject(fobj, filename, mmap_mode) as fobj: + obj = _unpickle(fobj) + else: + with open(filename, 'rb') as f: + with _read_fileobject(f, filename, mmap_mode) as fobj: + if isinstance(fobj, str): + # if the returned file object is a string, this means we + # try to load a pickle file generated with an version of + # Joblib so we load it with joblib compatibility function. + return load_compatibility(fobj) + + obj = _unpickle(fobj, filename, mmap_mode) + return obj diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/numpy_pickle_utils.py b/llmeval-env/lib/python3.10/site-packages/joblib/numpy_pickle_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..23cfb34ecb19161a2eca6bc85f29c3996162572c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/numpy_pickle_utils.py @@ -0,0 +1,253 @@ +"""Utilities for fast persistence of big data, with optional compression.""" + +# Author: Gael Varoquaux +# Copyright (c) 2009 Gael Varoquaux +# License: BSD Style, 3 clauses. 
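[Editor's note: a hedged usage sketch of the dump/load pair defined in the numpy_pickle.py hunk above; the file paths are hypothetical and the snippet is not part of the vendored sources.]

    import numpy as np
    import joblib

    data = {"weights": np.arange(10.0)}

    # Uncompressed dump; the list of written file names is returned.
    joblib.dump(data, "/tmp/weights.joblib")

    # Compression can be requested explicitly, or inferred from a known
    # extension such as '.gz' or '.z'.
    joblib.dump(data, "/tmp/weights.joblib.gz", compress=("gzip", 3))

    # Memory mapping only applies to the uncompressed file.
    restored = joblib.load("/tmp/weights.joblib", mmap_mode="r")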
+ +import pickle +import io +import sys +import warnings +import contextlib + +from .compressor import _ZFILE_PREFIX +from .compressor import _COMPRESSORS + +try: + import numpy as np +except ImportError: + np = None + +Unpickler = pickle._Unpickler +Pickler = pickle._Pickler +xrange = range + + +try: + # The python standard library can be built without bz2 so we make bz2 + # usage optional. + # see https://github.com/scikit-learn/scikit-learn/issues/7526 for more + # details. + import bz2 +except ImportError: + bz2 = None + +# Buffer size used in io.BufferedReader and io.BufferedWriter +_IO_BUFFER_SIZE = 1024 ** 2 + + +def _is_raw_file(fileobj): + """Check if fileobj is a raw file object, e.g created with open.""" + fileobj = getattr(fileobj, 'raw', fileobj) + return isinstance(fileobj, io.FileIO) + + +def _get_prefixes_max_len(): + # Compute the max prefix len of registered compressors. + prefixes = [len(compressor.prefix) for compressor in _COMPRESSORS.values()] + prefixes += [len(_ZFILE_PREFIX)] + return max(prefixes) + + +def _is_numpy_array_byte_order_mismatch(array): + """Check if numpy array is having byte order mismatch""" + return ((sys.byteorder == 'big' and + (array.dtype.byteorder == '<' or + (array.dtype.byteorder == '|' and array.dtype.fields and + all(e[0].byteorder == '<' + for e in array.dtype.fields.values())))) or + (sys.byteorder == 'little' and + (array.dtype.byteorder == '>' or + (array.dtype.byteorder == '|' and array.dtype.fields and + all(e[0].byteorder == '>' + for e in array.dtype.fields.values()))))) + + +def _ensure_native_byte_order(array): + """Use the byte order of the host while preserving values + + Does nothing if array already uses the system byte order. + """ + if _is_numpy_array_byte_order_mismatch(array): + array = array.byteswap().view(array.dtype.newbyteorder('=')) + return array + + +############################################################################### +# Cache file utilities +def _detect_compressor(fileobj): + """Return the compressor matching fileobj. + + Parameters + ---------- + fileobj: file object + + Returns + ------- + str in {'zlib', 'gzip', 'bz2', 'lzma', 'xz', 'compat', 'not-compressed'} + """ + # Read the magic number in the first bytes of the file. + max_prefix_len = _get_prefixes_max_len() + if hasattr(fileobj, 'peek'): + # Peek allows to read those bytes without moving the cursor in the + # file whic. + first_bytes = fileobj.peek(max_prefix_len) + else: + # Fallback to seek if the fileobject is not peekable. + first_bytes = fileobj.read(max_prefix_len) + fileobj.seek(0) + + if first_bytes.startswith(_ZFILE_PREFIX): + return "compat" + else: + for name, compressor in _COMPRESSORS.items(): + if first_bytes.startswith(compressor.prefix): + return name + + return "not-compressed" + + +def _buffered_read_file(fobj): + """Return a buffered version of a read file object.""" + return io.BufferedReader(fobj, buffer_size=_IO_BUFFER_SIZE) + + +def _buffered_write_file(fobj): + """Return a buffered version of a write file object.""" + return io.BufferedWriter(fobj, buffer_size=_IO_BUFFER_SIZE) + + +@contextlib.contextmanager +def _read_fileobject(fileobj, filename, mmap_mode=None): + """Utility function opening the right fileobject from a filename. 
+ + The magic number is used to choose between the type of file object to open: + * regular file object (default) + * zlib file object + * gzip file object + * bz2 file object + * lzma file object (for xz and lzma compressor) + + Parameters + ---------- + fileobj: file object + compressor: str in {'zlib', 'gzip', 'bz2', 'lzma', 'xz', 'compat', + 'not-compressed'} + filename: str + filename path corresponding to the fileobj parameter. + mmap_mode: str + memory map mode that should be used to open the pickle file. This + parameter is useful to verify that the user is not trying to one with + compression. Default: None. + + Returns + ------- + a file like object + + """ + # Detect if the fileobj contains compressed data. + compressor = _detect_compressor(fileobj) + + if compressor == 'compat': + # Compatibility with old pickle mode: simply return the input + # filename "as-is" and let the compatibility function be called by the + # caller. + warnings.warn("The file '%s' has been generated with a joblib " + "version less than 0.10. " + "Please regenerate this pickle file." % filename, + DeprecationWarning, stacklevel=2) + yield filename + else: + if compressor in _COMPRESSORS: + # based on the compressor detected in the file, we open the + # correct decompressor file object, wrapped in a buffer. + compressor_wrapper = _COMPRESSORS[compressor] + inst = compressor_wrapper.decompressor_file(fileobj) + fileobj = _buffered_read_file(inst) + + # Checking if incompatible load parameters with the type of file: + # mmap_mode cannot be used with compressed file or in memory buffers + # such as io.BytesIO. + if mmap_mode is not None: + if isinstance(fileobj, io.BytesIO): + warnings.warn('In memory persistence is not compatible with ' + 'mmap_mode "%(mmap_mode)s" flag passed. ' + 'mmap_mode option will be ignored.' + % locals(), stacklevel=2) + elif compressor != 'not-compressed': + warnings.warn('mmap_mode "%(mmap_mode)s" is not compatible ' + 'with compressed file %(filename)s. ' + '"%(mmap_mode)s" flag will be ignored.' + % locals(), stacklevel=2) + elif not _is_raw_file(fileobj): + warnings.warn('"%(fileobj)r" is not a raw file, mmap_mode ' + '"%(mmap_mode)s" flag will be ignored.' + % locals(), stacklevel=2) + + yield fileobj + + +def _write_fileobject(filename, compress=("zlib", 3)): + """Return the right compressor file object in write mode.""" + compressmethod = compress[0] + compresslevel = compress[1] + + if compressmethod in _COMPRESSORS.keys(): + file_instance = _COMPRESSORS[compressmethod].compressor_file( + filename, compresslevel=compresslevel) + return _buffered_write_file(file_instance) + else: + file_instance = _COMPRESSORS['zlib'].compressor_file( + filename, compresslevel=compresslevel) + return _buffered_write_file(file_instance) + + +# Utility functions/variables from numpy required for writing arrays. +# We need at least the functions introduced in version 1.9 of numpy. Here, +# we use the ones from numpy 1.10.2. +BUFFER_SIZE = 2 ** 18 # size of buffer for reading npz files in bytes + + +def _read_bytes(fp, size, error_template="ran out of data"): + """Read from file-like object until size bytes are read. + + TODO python2_drop: is it still needed? The docstring mentions python 2.6 + and it looks like this can be at least simplified ... + + Raises ValueError if not EOF is encountered before size bytes are read. + Non-blocking objects only supported if they derive from io objects. + + Required as e.g. ZipExtFile in python 2.6 can return less data than + requested. 
+ + This function was taken from numpy/lib/format.py in version 1.10.2. + + Parameters + ---------- + fp: file-like object + size: int + error_template: str + + Returns + ------- + a bytes object + The data read in bytes. + + """ + data = bytes() + while True: + # io files (default in python3) return None or raise on + # would-block, python2 file will truncate, probably nothing can be + # done about that. note that regular files can't be non-blocking + try: + r = fp.read(size - len(data)) + data += r + if len(r) == 0 or len(data) == size: + break + except io.BlockingIOError: + pass + if len(data) != size: + msg = "EOF: reading %s, expected %d bytes got %d" + raise ValueError(msg % (error_template, size, len(data))) + else: + return data diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/parallel.py b/llmeval-env/lib/python3.10/site-packages/joblib/parallel.py new file mode 100644 index 0000000000000000000000000000000000000000..fa4fd3c2b9647947f648ee1869cd96ffeb01026d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/parallel.py @@ -0,0 +1,2010 @@ +""" +Helpers for embarrassingly parallel code. +""" +# Author: Gael Varoquaux < gael dot varoquaux at normalesup dot org > +# Copyright: 2010, Gael Varoquaux +# License: BSD 3 clause + +from __future__ import division + +import os +import sys +from math import sqrt +import functools +import collections +import time +import threading +import itertools +from uuid import uuid4 +from numbers import Integral +import warnings +import queue +import weakref +from contextlib import nullcontext + +from multiprocessing import TimeoutError + +from ._multiprocessing_helpers import mp + +from .logger import Logger, short_format_time +from .disk import memstr_to_bytes +from ._parallel_backends import (FallbackToBackend, MultiprocessingBackend, + ThreadingBackend, SequentialBackend, + LokyBackend) +from ._utils import eval_expr, _Sentinel + +# Make sure that those two classes are part of the public joblib.parallel API +# so that 3rd party backend implementers can import them from here. +from ._parallel_backends import AutoBatchingMixin # noqa +from ._parallel_backends import ParallelBackendBase # noqa + + +IS_PYPY = hasattr(sys, "pypy_version_info") + + +BACKENDS = { + 'threading': ThreadingBackend, + 'sequential': SequentialBackend, +} +# name of the backend used by default by Parallel outside of any context +# managed by ``parallel_config`` or ``parallel_backend``. 
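[Editor's note: an illustrative use of the public API this backend registry supports; the toy function is made up and the snippet is not part of the vendored parallel.py. With multiprocessing importable, the default backend set just below resolves to 'loky', otherwise it stays 'threading'.]

    from joblib import Parallel, delayed

    def slow_square(x):              # hypothetical toy task
        return x * x

    # Runs on the default backend selected by this module.
    results = Parallel(n_jobs=2)(delayed(slow_square)(i) for i in range(5))
    assert results == [0, 1, 4, 9, 16]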
+ +# threading is the only backend that is always everywhere +DEFAULT_BACKEND = 'threading' + +MAYBE_AVAILABLE_BACKENDS = {'multiprocessing', 'loky'} + +# if multiprocessing is available, so is loky, we set it as the default +# backend +if mp is not None: + BACKENDS['multiprocessing'] = MultiprocessingBackend + from .externals import loky + BACKENDS['loky'] = LokyBackend + DEFAULT_BACKEND = 'loky' + + +DEFAULT_THREAD_BACKEND = 'threading' + + +# Thread local value that can be overridden by the ``parallel_config`` context +# manager +_backend = threading.local() + + +def _register_dask(): + """Register Dask Backend if called with parallel_config(backend="dask")""" + try: + from ._dask import DaskDistributedBackend + register_parallel_backend('dask', DaskDistributedBackend) + except ImportError as e: + msg = ("To use the dask.distributed backend you must install both " + "the `dask` and distributed modules.\n\n" + "See https://dask.pydata.org/en/latest/install.html for more " + "information.") + raise ImportError(msg) from e + + +EXTERNAL_BACKENDS = { + 'dask': _register_dask, +} + + +# Sentinels for the default values of the Parallel constructor and +# the parallel_config and parallel_backend context managers +default_parallel_config = { + "backend": _Sentinel(default_value=None), + "n_jobs": _Sentinel(default_value=None), + "verbose": _Sentinel(default_value=0), + "temp_folder": _Sentinel(default_value=None), + "max_nbytes": _Sentinel(default_value="1M"), + "mmap_mode": _Sentinel(default_value="r"), + "prefer": _Sentinel(default_value=None), + "require": _Sentinel(default_value=None), +} + + +VALID_BACKEND_HINTS = ('processes', 'threads', None) +VALID_BACKEND_CONSTRAINTS = ('sharedmem', None) + + +def _get_config_param(param, context_config, key): + """Return the value of a parallel config parameter + + Explicitly setting it in Parallel has priority over setting in a + parallel_(config/backend) context manager. 
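[Editor's note: an illustrative check of the precedence described above — an argument passed explicitly to Parallel takes priority over the surrounding context manager. Not part of the vendored sources.]

    from joblib import Parallel, delayed, parallel_config

    with parallel_config(backend="threading", n_jobs=4):
        # n_jobs=2 is explicit, so it wins over the context's n_jobs=4.
        out = Parallel(n_jobs=2)(delayed(len)(list(range(i))) for i in range(3))
    assert out == [0, 1, 2]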
+ """ + if param is not default_parallel_config[key]: + # param is explicitly set, return it + return param + + if context_config[key] is not default_parallel_config[key]: + # there's a context manager and the key is set, return it + return context_config[key] + + # Otherwise, we are in the default_parallel_config, + # return the default value + return param.default_value + + +def get_active_backend( + prefer=default_parallel_config["prefer"], + require=default_parallel_config["require"], + verbose=default_parallel_config["verbose"], +): + """Return the active default backend""" + backend, config = _get_active_backend(prefer, require, verbose) + n_jobs = _get_config_param( + default_parallel_config['n_jobs'], config, "n_jobs" + ) + return backend, n_jobs + + +def _get_active_backend( + prefer=default_parallel_config["prefer"], + require=default_parallel_config["require"], + verbose=default_parallel_config["verbose"], +): + """Return the active default backend""" + + backend_config = getattr(_backend, "config", default_parallel_config) + + backend = _get_config_param( + default_parallel_config['backend'], backend_config, "backend" + ) + prefer = _get_config_param(prefer, backend_config, "prefer") + require = _get_config_param(require, backend_config, "require") + verbose = _get_config_param(verbose, backend_config, "verbose") + + if prefer not in VALID_BACKEND_HINTS: + raise ValueError( + f"prefer={prefer} is not a valid backend hint, " + f"expected one of {VALID_BACKEND_HINTS}" + ) + if require not in VALID_BACKEND_CONSTRAINTS: + raise ValueError( + f"require={require} is not a valid backend constraint, " + f"expected one of {VALID_BACKEND_CONSTRAINTS}" + ) + if prefer == 'processes' and require == 'sharedmem': + raise ValueError( + "prefer == 'processes' and require == 'sharedmem'" + " are inconsistent settings" + ) + + explicit_backend = True + if backend is None: + + # We are either outside of the scope of any parallel_(config/backend) + # context manager or the context manager did not set a backend. + # create the default backend instance now. + backend = BACKENDS[DEFAULT_BACKEND](nesting_level=0) + explicit_backend = False + + # Try to use the backend set by the user with the context manager. + + nesting_level = backend.nesting_level + uses_threads = getattr(backend, 'uses_threads', False) + supports_sharedmem = getattr(backend, 'supports_sharedmem', False) + # Force to use thread-based backend if the provided backend does not + # match the shared memory constraint or if the backend is not explicitly + # given and threads are preferred. + force_threads = (require == 'sharedmem' and not supports_sharedmem) + force_threads |= ( + not explicit_backend and prefer == 'threads' and not uses_threads + ) + if force_threads: + # This backend does not match the shared memory constraint: + # fallback to the default thead-based backend. + sharedmem_backend = BACKENDS[DEFAULT_THREAD_BACKEND]( + nesting_level=nesting_level + ) + # Warn the user if we forced the backend to thread-based, while the + # user explicitly specified a non-thread-based backend. + if verbose >= 10 and explicit_backend: + print( + f"Using {sharedmem_backend.__class__.__name__} as " + f"joblib backend instead of {backend.__class__.__name__} " + "as the latter does not provide shared memory semantics." 
+ ) + # Force to n_jobs=1 by default + thread_config = backend_config.copy() + thread_config['n_jobs'] = 1 + return sharedmem_backend, thread_config + + return backend, backend_config + + +class parallel_config: + """Set the default backend or configuration for :class:`~joblib.Parallel`. + + This is an alternative to directly passing keyword arguments to the + :class:`~joblib.Parallel` class constructor. It is particularly useful when + calling into library code that uses joblib internally but does not expose + the various parallel configuration arguments in its own API. + + Parameters + ---------- + backend: str or ParallelBackendBase instance, default=None + If ``backend`` is a string it must match a previously registered + implementation using the :func:`~register_parallel_backend` function. + + By default the following backends are available: + + - 'loky': single-host, process-based parallelism (used by default), + - 'threading': single-host, thread-based parallelism, + - 'multiprocessing': legacy single-host, process-based parallelism. + + 'loky' is recommended to run functions that manipulate Python objects. + 'threading' is a low-overhead alternative that is most efficient for + functions that release the Global Interpreter Lock: e.g. I/O-bound + code or CPU-bound code in a few calls to native code that explicitly + releases the GIL. Note that on some rare systems (such as pyodide), + multiprocessing and loky may not be available, in which case joblib + defaults to threading. + + In addition, if the ``dask`` and ``distributed`` Python packages are + installed, it is possible to use the 'dask' backend for better + scheduling of nested parallel calls without over-subscription and + potentially distribute parallel calls over a networked cluster of + several hosts. + + It is also possible to use the distributed 'ray' backend for + distributing the workload to a cluster of nodes. See more details + in the Examples section below. + + Alternatively the backend can be passed directly as an instance. + + n_jobs: int, default=None + The maximum number of concurrently running jobs, such as the number + of Python worker processes when ``backend="loky"`` or the size of the + thread-pool when ``backend="threading"``. + This argument is converted to an integer, rounded below for float. + If -1 is given, `joblib` tries to use all CPUs. The number of CPUs + ``n_cpus`` is obtained with :func:`~cpu_count`. + For n_jobs below -1, (n_cpus + 1 + n_jobs) are used. For instance, + using ``n_jobs=-2`` will result in all CPUs but one being used. + This argument can also go above ``n_cpus``, which will cause + oversubscription. In some cases, slight oversubscription can be + beneficial, e.g., for tasks with large I/O operations. + If 1 is given, no parallel computing code is used at all, and the + behavior amounts to a simple python `for` loop. This mode is not + compatible with `timeout`. + None is a marker for 'unset' that will be interpreted as n_jobs=1 + unless the call is performed under a :func:`~parallel_config` + context manager that sets another value for ``n_jobs``. + If n_jobs = 0 then a ValueError is raised. + + verbose: int, default=0 + The verbosity level: if non zero, progress messages are + printed. Above 50, the output is sent to stdout. + The frequency of the messages increases with the verbosity level. + If it more than 10, all iterations are reported. 
+ + temp_folder: str or None, default=None + Folder to be used by the pool for memmapping large arrays + for sharing memory with worker processes. If None, this will try in + order: + + - a folder pointed by the ``JOBLIB_TEMP_FOLDER`` environment + variable, + - ``/dev/shm`` if the folder exists and is writable: this is a + RAM disk filesystem available by default on modern Linux + distributions, + - the default system temporary folder that can be + overridden with ``TMP``, ``TMPDIR`` or ``TEMP`` environment + variables, typically ``/tmp`` under Unix operating systems. + + max_nbytes int, str, or None, optional, default='1M' + Threshold on the size of arrays passed to the workers that + triggers automated memory mapping in temp_folder. Can be an int + in Bytes, or a human-readable string, e.g., '1M' for 1 megabyte. + Use None to disable memmapping of large arrays. + + mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, default='r' + Memmapping mode for numpy arrays passed to workers. None will + disable memmapping, other modes defined in the numpy.memmap doc: + https://numpy.org/doc/stable/reference/generated/numpy.memmap.html + Also, see 'max_nbytes' parameter documentation for more details. + + prefer: str in {'processes', 'threads'} or None, default=None + Soft hint to choose the default backend. + The default process-based backend is 'loky' and the default + thread-based backend is 'threading'. Ignored if the ``backend`` + parameter is specified. + + require: 'sharedmem' or None, default=None + Hard constraint to select the backend. If set to 'sharedmem', + the selected backend will be single-host and thread-based. + + inner_max_num_threads: int, default=None + If not None, overwrites the limit set on the number of threads + usable in some third-party library threadpools like OpenBLAS, + MKL or OpenMP. This is only used with the ``loky`` backend. + + backend_params: dict + Additional parameters to pass to the backend constructor when + backend is a string. + + Notes + ----- + Joblib tries to limit the oversubscription by limiting the number of + threads usable in some third-party library threadpools like OpenBLAS, MKL + or OpenMP. The default limit in each worker is set to + ``max(cpu_count() // effective_n_jobs, 1)`` but this limit can be + overwritten with the ``inner_max_num_threads`` argument which will be used + to set this limit in the child processes. + + .. versionadded:: 1.3 + + Examples + -------- + >>> from operator import neg + >>> with parallel_config(backend='threading'): + ... print(Parallel()(delayed(neg)(i + 1) for i in range(5))) + ... + [-1, -2, -3, -4, -5] + + To use the 'ray' joblib backend add the following lines: + + >>> from ray.util.joblib import register_ray # doctest: +SKIP + >>> register_ray() # doctest: +SKIP + >>> with parallel_config(backend="ray"): # doctest: +SKIP + ... 
print(Parallel()(delayed(neg)(i + 1) for i in range(5))) + [-1, -2, -3, -4, -5] + + """ + def __init__( + self, + backend=default_parallel_config["backend"], + *, + n_jobs=default_parallel_config["n_jobs"], + verbose=default_parallel_config["verbose"], + temp_folder=default_parallel_config["temp_folder"], + max_nbytes=default_parallel_config["max_nbytes"], + mmap_mode=default_parallel_config["mmap_mode"], + prefer=default_parallel_config["prefer"], + require=default_parallel_config["require"], + inner_max_num_threads=None, + **backend_params + ): + # Save the parallel info and set the active parallel config + self.old_parallel_config = getattr( + _backend, "config", default_parallel_config + ) + + backend = self._check_backend( + backend, inner_max_num_threads, **backend_params + ) + + new_config = { + "n_jobs": n_jobs, + "verbose": verbose, + "temp_folder": temp_folder, + "max_nbytes": max_nbytes, + "mmap_mode": mmap_mode, + "prefer": prefer, + "require": require, + "backend": backend + } + self.parallel_config = self.old_parallel_config.copy() + self.parallel_config.update({ + k: v for k, v in new_config.items() + if not isinstance(v, _Sentinel) + }) + + setattr(_backend, "config", self.parallel_config) + + def _check_backend(self, backend, inner_max_num_threads, **backend_params): + if backend is default_parallel_config['backend']: + if inner_max_num_threads is not None or len(backend_params) > 0: + raise ValueError( + "inner_max_num_threads and other constructor " + "parameters backend_params are only supported " + "when backend is not None." + ) + return backend + + if isinstance(backend, str): + # Handle non-registered or missing backends + if backend not in BACKENDS: + if backend in EXTERNAL_BACKENDS: + register = EXTERNAL_BACKENDS[backend] + register() + elif backend in MAYBE_AVAILABLE_BACKENDS: + warnings.warn( + f"joblib backend '{backend}' is not available on " + f"your system, falling back to {DEFAULT_BACKEND}.", + UserWarning, + stacklevel=2 + ) + BACKENDS[backend] = BACKENDS[DEFAULT_BACKEND] + else: + raise ValueError( + f"Invalid backend: {backend}, expected one of " + f"{sorted(BACKENDS.keys())}" + ) + + backend = BACKENDS[backend](**backend_params) + + if inner_max_num_threads is not None: + msg = ( + f"{backend.__class__.__name__} does not accept setting the " + "inner_max_num_threads argument." + ) + assert backend.supports_inner_max_num_threads, msg + backend.inner_max_num_threads = inner_max_num_threads + + # If the nesting_level of the backend is not set previously, use the + # nesting level from the previous active_backend to set it + if backend.nesting_level is None: + parent_backend = self.old_parallel_config['backend'] + if parent_backend is default_parallel_config['backend']: + nesting_level = 0 + else: + nesting_level = parent_backend.nesting_level + backend.nesting_level = nesting_level + + return backend + + def __enter__(self): + return self.parallel_config + + def __exit__(self, type, value, traceback): + self.unregister() + + def unregister(self): + setattr(_backend, "config", self.old_parallel_config) + + +class parallel_backend(parallel_config): + """Change the default backend used by Parallel inside a with block. + + .. warning:: + It is advised to use the :class:`~joblib.parallel_config` context + manager instead, which allows more fine-grained control over the + backend configuration. + + If ``backend`` is a string it must match a previously registered + implementation using the :func:`~register_parallel_backend` function. 
+ + By default the following backends are available: + + - 'loky': single-host, process-based parallelism (used by default), + - 'threading': single-host, thread-based parallelism, + - 'multiprocessing': legacy single-host, process-based parallelism. + + 'loky' is recommended to run functions that manipulate Python objects. + 'threading' is a low-overhead alternative that is most efficient for + functions that release the Global Interpreter Lock: e.g. I/O-bound code or + CPU-bound code in a few calls to native code that explicitly releases the + GIL. Note that on some rare systems (such as Pyodide), + multiprocessing and loky may not be available, in which case joblib + defaults to threading. + + You can also use the `Dask `_ joblib + backend to distribute work across machines. This works well with + scikit-learn estimators with the ``n_jobs`` parameter, for example:: + + >>> import joblib # doctest: +SKIP + >>> from sklearn.model_selection import GridSearchCV # doctest: +SKIP + >>> from dask.distributed import Client, LocalCluster # doctest: +SKIP + + >>> # create a local Dask cluster + >>> cluster = LocalCluster() # doctest: +SKIP + >>> client = Client(cluster) # doctest: +SKIP + >>> grid_search = GridSearchCV(estimator, param_grid, n_jobs=-1) + ... # doctest: +SKIP + >>> with joblib.parallel_backend("dask", scatter=[X, y]): # doctest: +SKIP + ... grid_search.fit(X, y) + + It is also possible to use the distributed 'ray' backend for distributing + the workload to a cluster of nodes. To use the 'ray' joblib backend add + the following lines:: + + >>> from ray.util.joblib import register_ray # doctest: +SKIP + >>> register_ray() # doctest: +SKIP + >>> with parallel_backend("ray"): # doctest: +SKIP + ... print(Parallel()(delayed(neg)(i + 1) for i in range(5))) + [-1, -2, -3, -4, -5] + + Alternatively the backend can be passed directly as an instance. + + By default all available workers will be used (``n_jobs=-1``) unless the + caller passes an explicit value for the ``n_jobs`` parameter. + + This is an alternative to passing a ``backend='backend_name'`` argument to + the :class:`~Parallel` class constructor. It is particularly useful when + calling into library code that uses joblib internally but does not expose + the backend argument in its own API. + + >>> from operator import neg + >>> with parallel_backend('threading'): + ... print(Parallel()(delayed(neg)(i + 1) for i in range(5))) + ... + [-1, -2, -3, -4, -5] + + Joblib also tries to limit the oversubscription by limiting the number of + threads usable in some third-party library threadpools like OpenBLAS, MKL + or OpenMP. The default limit in each worker is set to + ``max(cpu_count() // effective_n_jobs, 1)`` but this limit can be + overwritten with the ``inner_max_num_threads`` argument which will be used + to set this limit in the child processes. + + .. versionadded:: 0.10 + + See Also + -------- + joblib.parallel_config: context manager to change the backend + configuration. 
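[Editor's note: to complement the doctests in this docstring, a hedged sketch of capping worker-side thread pools through parallel_config; the values are arbitrary and the snippet is not part of the vendored sources.]

    from joblib import Parallel, delayed, parallel_config

    with parallel_config(backend="loky", n_jobs=2, inner_max_num_threads=1):
        # Each loky worker caps third-party pools (OpenBLAS, MKL, OpenMP) at
        # one thread, avoiding oversubscription for BLAS-heavy tasks.
        Parallel()(delayed(sum)(range(1_000)) for _ in range(4))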
+ """ + def __init__(self, backend, n_jobs=-1, inner_max_num_threads=None, + **backend_params): + + super().__init__( + backend=backend, + n_jobs=n_jobs, + inner_max_num_threads=inner_max_num_threads, + **backend_params + ) + + if self.old_parallel_config is None: + self.old_backend_and_jobs = None + else: + self.old_backend_and_jobs = ( + self.old_parallel_config["backend"], + self.old_parallel_config["n_jobs"], + ) + self.new_backend_and_jobs = ( + self.parallel_config["backend"], + self.parallel_config["n_jobs"], + ) + + def __enter__(self): + return self.new_backend_and_jobs + + +# Under Linux or OS X the default start method of multiprocessing +# can cause third party libraries to crash. Under Python 3.4+ it is possible +# to set an environment variable to switch the default start method from +# 'fork' to 'forkserver' or 'spawn' to avoid this issue albeit at the cost +# of causing semantic changes and some additional pool instantiation overhead. +DEFAULT_MP_CONTEXT = None +if hasattr(mp, 'get_context'): + method = os.environ.get('JOBLIB_START_METHOD', '').strip() or None + if method is not None: + DEFAULT_MP_CONTEXT = mp.get_context(method=method) + + +class BatchedCalls(object): + """Wrap a sequence of (func, args, kwargs) tuples as a single callable""" + + def __init__(self, iterator_slice, backend_and_jobs, reducer_callback=None, + pickle_cache=None): + self.items = list(iterator_slice) + self._size = len(self.items) + self._reducer_callback = reducer_callback + if isinstance(backend_and_jobs, tuple): + self._backend, self._n_jobs = backend_and_jobs + else: + # this is for backward compatibility purposes. Before 0.12.6, + # nested backends were returned without n_jobs indications. + self._backend, self._n_jobs = backend_and_jobs, None + self._pickle_cache = pickle_cache if pickle_cache is not None else {} + + def __call__(self): + # Set the default nested backend to self._backend but do not set the + # change the default number of processes to -1 + with parallel_config(backend=self._backend, n_jobs=self._n_jobs): + return [func(*args, **kwargs) + for func, args, kwargs in self.items] + + def __reduce__(self): + if self._reducer_callback is not None: + self._reducer_callback() + # no need to pickle the callback. + return ( + BatchedCalls, + (self.items, (self._backend, self._n_jobs), None, + self._pickle_cache) + ) + + def __len__(self): + return self._size + + +# Possible exit status for a task +TASK_DONE = "Done" +TASK_ERROR = "Error" +TASK_PENDING = "Pending" + + +############################################################################### +# CPU count that works also when multiprocessing has been disabled via +# the JOBLIB_MULTIPROCESSING environment variable +def cpu_count(only_physical_cores=False): + """Return the number of CPUs. + + This delegates to loky.cpu_count that takes into account additional + constraints such as Linux CFS scheduler quotas (typically set by container + runtimes such as docker) and CPU affinity (for instance using the taskset + command on Linux). + + If only_physical_cores is True, do not take hyperthreading / SMT logical + cores into account. + """ + if mp is None: + return 1 + + return loky.cpu_count(only_physical_cores=only_physical_cores) + + +############################################################################### +# For verbosity + +def _verbosity_filter(index, verbose): + """ Returns False for indices increasingly apart, the distance + depending on the value of verbose. 
+ + We use a lag increasing as the square of index + """ + if not verbose: + return True + elif verbose > 10: + return False + if index == 0: + return False + verbose = .5 * (11 - verbose) ** 2 + scale = sqrt(index / verbose) + next_scale = sqrt((index + 1) / verbose) + return (int(next_scale) == int(scale)) + + +############################################################################### +def delayed(function): + """Decorator used to capture the arguments of a function.""" + + def delayed_function(*args, **kwargs): + return function, args, kwargs + try: + delayed_function = functools.wraps(function)(delayed_function) + except AttributeError: + " functools.wraps fails on some callable objects " + return delayed_function + + +############################################################################### +class BatchCompletionCallBack(object): + """Callback to keep track of completed results and schedule the next tasks. + + This callable is executed by the parent process whenever a worker process + has completed a batch of tasks. + + It is used for progress reporting, to update estimate of the batch + processing duration and to schedule the next batch of tasks to be + processed. + + It is assumed that this callback will always be triggered by the backend + right after the end of a task, in case of success as well as in case of + failure. + """ + + ########################################################################## + # METHODS CALLED BY THE MAIN THREAD # + ########################################################################## + def __init__(self, dispatch_timestamp, batch_size, parallel): + self.dispatch_timestamp = dispatch_timestamp + self.batch_size = batch_size + self.parallel = parallel + self.parallel_call_id = parallel._call_id + + # Internals to keep track of the status and outcome of the task. + + # Used to hold a reference to the future-like object returned by the + # backend after launching this task + # This will be set later when calling `register_job`, as it is only + # created once the task has been submitted. + self.job = None + + if not parallel._backend.supports_retrieve_callback: + # The status is only used for asynchronous result retrieval in the + # callback. + self.status = None + else: + # The initial status for the job is TASK_PENDING. + # Once it is done, it will be either TASK_DONE, or TASK_ERROR. + self.status = TASK_PENDING + + def register_job(self, job): + """Register the object returned by `apply_async`.""" + self.job = job + + def get_result(self, timeout): + """Returns the raw result of the task that was submitted. + + If the task raised an exception rather than returning, this same + exception will be raised instead. + + If the backend supports the retrieval callback, it is assumed that this + method is only called after the result has been registered. It is + ensured by checking that `self.status(timeout)` does not return + TASK_PENDING. In this case, `get_result` directly returns the + registered result (or raise the registered exception). + + For other backends, there are no such assumptions, but `get_result` + still needs to synchronously retrieve the result before it can + return it or raise. It will block at most `self.timeout` seconds + waiting for retrieval to complete, after that it raises a TimeoutError. + """ + + backend = self.parallel._backend + + if backend.supports_retrieve_callback: + # We assume that the result has already been retrieved by the + # callback thread, and is stored internally. It's just waiting to + # be returned. 
+ return self._return_or_raise() + + # For other backends, the main thread needs to run the retrieval step. + try: + if backend.supports_timeout: + result = self.job.get(timeout=timeout) + else: + result = self.job.get() + outcome = dict(result=result, status=TASK_DONE) + except BaseException as e: + outcome = dict(result=e, status=TASK_ERROR) + self._register_outcome(outcome) + + return self._return_or_raise() + + def _return_or_raise(self): + try: + if self.status == TASK_ERROR: + raise self._result + return self._result + finally: + del self._result + + def get_status(self, timeout): + """Get the status of the task. + + This function also checks if the timeout has been reached and register + the TimeoutError outcome when it is the case. + """ + if timeout is None or self.status != TASK_PENDING: + return self.status + + # The computation are running and the status is pending. + # Check that we did not wait for this jobs more than `timeout`. + now = time.time() + if not hasattr(self, "_completion_timeout_counter"): + self._completion_timeout_counter = now + + if (now - self._completion_timeout_counter) > timeout: + outcome = dict(result=TimeoutError(), status=TASK_ERROR) + self._register_outcome(outcome) + + return self.status + + ########################################################################## + # METHODS CALLED BY CALLBACK THREADS # + ########################################################################## + def __call__(self, out): + """Function called by the callback thread after a job is completed.""" + + # If the backend doesn't support callback retrievals, the next batch of + # tasks is dispatched regardless. The result will be retrieved by the + # main thread when calling `get_result`. + if not self.parallel._backend.supports_retrieve_callback: + self._dispatch_new() + return + + # If the backend supports retrieving the result in the callback, it + # registers the task outcome (TASK_ERROR or TASK_DONE), and schedules + # the next batch if needed. + with self.parallel._lock: + # Edge case where while the task was processing, the `parallel` + # instance has been reset and a new call has been issued, but the + # worker managed to complete the task and trigger this callback + # call just before being aborted by the reset. + if self.parallel._call_id != self.parallel_call_id: + return + + # When aborting, stop as fast as possible and do not retrieve the + # result as it won't be returned by the Parallel call. + if self.parallel._aborting: + return + + # Retrieves the result of the task in the main process and dispatch + # a new batch if needed. + job_succeeded = self._retrieve_result(out) + + if not self.parallel.return_ordered: + # Append the job to the queue in the order of completion + # instead of submission. + self.parallel._jobs.append(self) + + if job_succeeded: + self._dispatch_new() + + def _dispatch_new(self): + """Schedule the next batch of tasks to be processed.""" + + # This steps ensure that auto-batching works as expected. + this_batch_duration = time.time() - self.dispatch_timestamp + self.parallel._backend.batch_completed(self.batch_size, + this_batch_duration) + + # Schedule the next batch of tasks. + with self.parallel._lock: + self.parallel.n_completed_tasks += self.batch_size + self.parallel.print_progress() + if self.parallel._original_iterator is not None: + self.parallel.dispatch_next() + + def _retrieve_result(self, out): + """Fetch and register the outcome of a task. + + Return True if the task succeeded, False otherwise. 
+ This function is only called by backends that support retrieving + the task result in the callback thread. + """ + try: + result = self.parallel._backend.retrieve_result_callback(out) + outcome = dict(status=TASK_DONE, result=result) + except BaseException as e: + # Avoid keeping references to parallel in the error. + e.__traceback__ = None + outcome = dict(result=e, status=TASK_ERROR) + + self._register_outcome(outcome) + return outcome['status'] != TASK_ERROR + + ########################################################################## + # This method can be called either in the main thread # + # or in the callback thread. # + ########################################################################## + def _register_outcome(self, outcome): + """Register the outcome of a task. + + This method can be called only once, future calls will be ignored. + """ + # Covers the edge case where the main thread tries to register a + # `TimeoutError` while the callback thread tries to register a result + # at the same time. + with self.parallel._lock: + if self.status not in (TASK_PENDING, None): + return + self.status = outcome["status"] + + self._result = outcome["result"] + + # Once the result and the status are extracted, the last reference to + # the job can be deleted. + self.job = None + + # As soon as an error as been spotted, early stopping flags are sent to + # the `parallel` instance. + if self.status == TASK_ERROR: + self.parallel._exception = True + self.parallel._aborting = True + + +############################################################################### +def register_parallel_backend(name, factory, make_default=False): + """Register a new Parallel backend factory. + + The new backend can then be selected by passing its name as the backend + argument to the :class:`~Parallel` class. Moreover, the default backend can + be overwritten globally by setting make_default=True. + + The factory can be any callable that takes no argument and return an + instance of ``ParallelBackendBase``. + + Warning: this function is experimental and subject to change in a future + version of joblib. + + .. versionadded:: 0.10 + """ + BACKENDS[name] = factory + if make_default: + global DEFAULT_BACKEND + DEFAULT_BACKEND = name + + +def effective_n_jobs(n_jobs=-1): + """Determine the number of jobs that can actually run in parallel + + n_jobs is the number of workers requested by the callers. Passing n_jobs=-1 + means requesting all available workers for instance matching the number of + CPU cores on the worker host(s). + + This method should return a guesstimate of the number of workers that can + actually perform work concurrently with the currently enabled default + backend. The primary use case is to make it possible for the caller to know + in how many chunks to slice the work. + + In general working on larger data chunks is more efficient (less scheduling + overhead and better use of CPU cache prefetching heuristics) as long as all + the workers have enough work to do. + + Warning: this function is experimental and subject to change in a future + version of joblib. + + .. versionadded:: 0.10 + """ + if n_jobs == 1: + return 1 + + backend, backend_n_jobs = get_active_backend() + if n_jobs is None: + n_jobs = backend_n_jobs + return backend.effective_n_jobs(n_jobs=n_jobs) + + +############################################################################### +class Parallel(Logger): + ''' Helper class for readable parallel mapping. + + Read more in the :ref:`User Guide `. 
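[Editor's note: a small sketch of the n_jobs arithmetic implemented by cpu_count and effective_n_jobs above; the exact numbers depend on the host, so the snippet only asserts the documented relation. Not part of the vendored sources.]

    from joblib import cpu_count, effective_n_jobs

    n_cpus = cpu_count()
    # For the process/thread backends, negative values map to
    # n_cpus + 1 + n_jobs, so -1 means "all CPUs" and -2 "all but one".
    assert effective_n_jobs(-1) == n_cpus
    assert effective_n_jobs(-2) == max(n_cpus + 1 - 2, 1)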
+ + Parameters + ---------- + n_jobs: int, default=None + The maximum number of concurrently running jobs, such as the number + of Python worker processes when ``backend="loky"`` or the size of + the thread-pool when ``backend="threading"``. + This argument is converted to an integer, rounded below for float. + If -1 is given, `joblib` tries to use all CPUs. The number of CPUs + ``n_cpus`` is obtained with :func:`~cpu_count`. + For n_jobs below -1, (n_cpus + 1 + n_jobs) are used. For instance, + using ``n_jobs=-2`` will result in all CPUs but one being used. + This argument can also go above ``n_cpus``, which will cause + oversubscription. In some cases, slight oversubscription can be + beneficial, e.g., for tasks with large I/O operations. + If 1 is given, no parallel computing code is used at all, and the + behavior amounts to a simple python `for` loop. This mode is not + compatible with ``timeout``. + None is a marker for 'unset' that will be interpreted as n_jobs=1 + unless the call is performed under a :func:`~parallel_config` + context manager that sets another value for ``n_jobs``. + If n_jobs = 0 then a ValueError is raised. + backend: str, ParallelBackendBase instance or None, default='loky' + Specify the parallelization backend implementation. + Supported backends are: + + - "loky" used by default, can induce some + communication and memory overhead when exchanging input and + output data with the worker Python processes. On some rare + systems (such as Pyiodide), the loky backend may not be + available. + - "multiprocessing" previous process-based backend based on + `multiprocessing.Pool`. Less robust than `loky`. + - "threading" is a very low-overhead backend but it suffers + from the Python Global Interpreter Lock if the called function + relies a lot on Python objects. "threading" is mostly useful + when the execution bottleneck is a compiled extension that + explicitly releases the GIL (for instance a Cython loop wrapped + in a "with nogil" block or an expensive call to a library such + as NumPy). + - finally, you can register backends by calling + :func:`~register_parallel_backend`. This will allow you to + implement a backend of your liking. + + It is not recommended to hard-code the backend name in a call to + :class:`~Parallel` in a library. Instead it is recommended to set + soft hints (prefer) or hard constraints (require) so as to make it + possible for library users to change the backend from the outside + using the :func:`~parallel_config` context manager. + return_as: str in {'list', 'generator', 'generator_unordered'}, default='list' + If 'list', calls to this instance will return a list, only when + all results have been processed and retrieved. + If 'generator', it will return a generator that yields the results + as soon as they are available, in the order the tasks have been + submitted with. + If 'generator_unordered', the generator will immediately yield + available results independently of the submission order. The output + order is not deterministic in this case because it depends on the + concurrency of the workers. + prefer: str in {'processes', 'threads'} or None, default=None + Soft hint to choose the default backend if no specific backend + was selected with the :func:`~parallel_config` context manager. + The default process-based backend is 'loky' and the default + thread-based backend is 'threading'. Ignored if the ``backend`` + parameter is specified. + require: 'sharedmem' or None, default=None + Hard constraint to select the backend. 
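A quick illustration of `n_jobs` and `return_as` as described above (a sketch assuming a joblib version recent enough to support `return_as`):

    from joblib import Parallel, delayed

    # 'generator' yields results in submission order, as soon as they are ready.
    gen = Parallel(n_jobs=2, return_as="generator")(
        delayed(abs)(-i) for i in range(5)
    )
    print(list(gen))  # [0, 1, 2, 3, 4]

    # n_jobs=-2 uses all CPUs but one; n_jobs=1 falls back to a plain for loop.
    Parallel(n_jobs=-2)(delayed(abs)(-i) for i in range(5))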
If set to 'sharedmem', + the selected backend will be single-host and thread-based even + if the user asked for a non-thread based backend with + :func:`~joblib.parallel_config`. + verbose: int, default=0 + The verbosity level: if non zero, progress messages are + printed. Above 50, the output is sent to stdout. + The frequency of the messages increases with the verbosity level. + If it more than 10, all iterations are reported. + timeout: float or None, default=None + Timeout limit for each task to complete. If any task takes longer + a TimeOutError will be raised. Only applied when n_jobs != 1 + pre_dispatch: {'all', integer, or expression, as in '3*n_jobs'}, default='2*n_jobs' + The number of batches (of tasks) to be pre-dispatched. + Default is '2*n_jobs'. When batch_size="auto" this is reasonable + default and the workers should never starve. Note that only basic + arithmetics are allowed here and no modules can be used in this + expression. + batch_size: int or 'auto', default='auto' + The number of atomic tasks to dispatch at once to each + worker. When individual evaluations are very fast, dispatching + calls to workers can be slower than sequential computation because + of the overhead. Batching fast computations together can mitigate + this. + The ``'auto'`` strategy keeps track of the time it takes for a + batch to complete, and dynamically adjusts the batch size to keep + the time on the order of half a second, using a heuristic. The + initial batch size is 1. + ``batch_size="auto"`` with ``backend="threading"`` will dispatch + batches of a single task at a time as the threading backend has + very little overhead and using larger batch size has not proved to + bring any gain in that case. + temp_folder: str or None, default=None + Folder to be used by the pool for memmapping large arrays + for sharing memory with worker processes. If None, this will try in + order: + + - a folder pointed by the JOBLIB_TEMP_FOLDER environment + variable, + - /dev/shm if the folder exists and is writable: this is a + RAM disk filesystem available by default on modern Linux + distributions, + - the default system temporary folder that can be + overridden with TMP, TMPDIR or TEMP environment + variables, typically /tmp under Unix operating systems. + + Only active when ``backend="loky"`` or ``"multiprocessing"``. + max_nbytes int, str, or None, optional, default='1M' + Threshold on the size of arrays passed to the workers that + triggers automated memory mapping in temp_folder. Can be an int + in Bytes, or a human-readable string, e.g., '1M' for 1 megabyte. + Use None to disable memmapping of large arrays. + Only active when ``backend="loky"`` or ``"multiprocessing"``. + mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, default='r' + Memmapping mode for numpy arrays passed to workers. None will + disable memmapping, other modes defined in the numpy.memmap doc: + https://numpy.org/doc/stable/reference/generated/numpy.memmap.html + Also, see 'max_nbytes' parameter documentation for more details. + + Notes + ----- + + This object uses workers to compute in parallel the application of a + function to many different arguments. The main functionality it brings + in addition to using the raw multiprocessing or concurrent.futures API + are (see examples for details): + + * More readable code, in particular since it avoids + constructing list of arguments. 
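A hedged sketch of the memmapping-related arguments described above; the threshold, folder and array size are arbitrary examples:

    import numpy as np
    from joblib import Parallel, delayed

    data = np.ones(int(1e6))

    # With the loky/multiprocessing backends, arrays larger than max_nbytes are
    # dumped to temp_folder and memmapped read-only in the workers instead of
    # being copied through pickling.
    Parallel(n_jobs=2, max_nbytes="1M", mmap_mode="r", temp_folder="/tmp")(
        delayed(np.sum)(data) for _ in range(4)
    )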
+ + * Easier debugging: + - informative tracebacks even when the error happens on + the client side + - using 'n_jobs=1' enables to turn off parallel computing + for debugging without changing the codepath + - early capture of pickling errors + + * An optional progress meter. + + * Interruption of multiprocesses jobs with 'Ctrl-C' + + * Flexible pickling control for the communication to and from + the worker processes. + + * Ability to use shared memory efficiently with worker + processes for large numpy-based datastructures. + + Note that the intended usage is to run one call at a time. Multiple + calls to the same Parallel object will result in a ``RuntimeError`` + + Examples + -------- + + A simple example: + + >>> from math import sqrt + >>> from joblib import Parallel, delayed + >>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10)) + [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0] + + Reshaping the output when the function has several return + values: + + >>> from math import modf + >>> from joblib import Parallel, delayed + >>> r = Parallel(n_jobs=1)(delayed(modf)(i/2.) for i in range(10)) + >>> res, i = zip(*r) + >>> res + (0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5) + >>> i + (0.0, 0.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0) + + The progress meter: the higher the value of `verbose`, the more + messages: + + >>> from time import sleep + >>> from joblib import Parallel, delayed + >>> r = Parallel(n_jobs=2, verbose=10)( + ... delayed(sleep)(.2) for _ in range(10)) #doctest: +SKIP + [Parallel(n_jobs=2)]: Done 1 tasks | elapsed: 0.6s + [Parallel(n_jobs=2)]: Done 4 tasks | elapsed: 0.8s + [Parallel(n_jobs=2)]: Done 10 out of 10 | elapsed: 1.4s finished + + Traceback example, note how the line of the error is indicated + as well as the values of the parameter passed to the function that + triggered the exception, even though the traceback happens in the + child process: + + >>> from heapq import nlargest + >>> from joblib import Parallel, delayed + >>> Parallel(n_jobs=2)( + ... delayed(nlargest)(2, n) for n in (range(4), 'abcde', 3)) + ... # doctest: +SKIP + ----------------------------------------------------------------------- + Sub-process traceback: + ----------------------------------------------------------------------- + TypeError Mon Nov 12 11:37:46 2012 + PID: 12934 Python 2.7.3: /usr/bin/python + ........................................................................ + /usr/lib/python2.7/heapq.pyc in nlargest(n=2, iterable=3, key=None) + 419 if n >= size: + 420 return sorted(iterable, key=key, reverse=True)[:n] + 421 + 422 # When key is none, use simpler decoration + 423 if key is None: + --> 424 it = izip(iterable, count(0,-1)) # decorate + 425 result = _nlargest(n, it) + 426 return map(itemgetter(0), result) # undecorate + 427 + 428 # General case, slowest method + TypeError: izip argument #1 must support iteration + _______________________________________________________________________ + + + Using pre_dispatch in a producer/consumer situation, where the + data is generated on the fly. Note how the producer is first + called 3 times before the parallel loop is initiated, and then + called to generate new data on the fly: + + >>> from math import sqrt + >>> from joblib import Parallel, delayed + >>> def producer(): + ... for i in range(6): + ... print('Produced %s' % i) + ... yield i + >>> out = Parallel(n_jobs=2, verbose=100, pre_dispatch='1.5*n_jobs')( + ... 
delayed(sqrt)(i) for i in producer()) #doctest: +SKIP + Produced 0 + Produced 1 + Produced 2 + [Parallel(n_jobs=2)]: Done 1 jobs | elapsed: 0.0s + Produced 3 + [Parallel(n_jobs=2)]: Done 2 jobs | elapsed: 0.0s + Produced 4 + [Parallel(n_jobs=2)]: Done 3 jobs | elapsed: 0.0s + Produced 5 + [Parallel(n_jobs=2)]: Done 4 jobs | elapsed: 0.0s + [Parallel(n_jobs=2)]: Done 6 out of 6 | elapsed: 0.0s remaining: 0.0s + [Parallel(n_jobs=2)]: Done 6 out of 6 | elapsed: 0.0s finished + + ''' # noqa: E501 + def __init__( + self, + n_jobs=default_parallel_config["n_jobs"], + backend=default_parallel_config['backend'], + return_as="list", + verbose=default_parallel_config["verbose"], + timeout=None, + pre_dispatch='2 * n_jobs', + batch_size='auto', + temp_folder=default_parallel_config["temp_folder"], + max_nbytes=default_parallel_config["max_nbytes"], + mmap_mode=default_parallel_config["mmap_mode"], + prefer=default_parallel_config["prefer"], + require=default_parallel_config["require"], + ): + # Initiate parent Logger class state + super().__init__() + + # Interpret n_jobs=None as 'unset' + if n_jobs is None: + n_jobs = default_parallel_config["n_jobs"] + + active_backend, context_config = _get_active_backend( + prefer=prefer, require=require, verbose=verbose + ) + + nesting_level = active_backend.nesting_level + + self.verbose = _get_config_param(verbose, context_config, "verbose") + self.timeout = timeout + self.pre_dispatch = pre_dispatch + + if return_as not in {"list", "generator", "generator_unordered"}: + raise ValueError( + 'Expected `return_as` parameter to be a string equal to "list"' + f',"generator" or "generator_unordered", but got {return_as} ' + "instead." + ) + self.return_as = return_as + self.return_generator = return_as != "list" + self.return_ordered = return_as != "generator_unordered" + + # Check if we are under a parallel_config or parallel_backend + # context manager and use the config from the context manager + # for arguments that are not explicitly set. + self._backend_args = { + k: _get_config_param(param, context_config, k) for param, k in [ + (max_nbytes, "max_nbytes"), + (temp_folder, "temp_folder"), + (mmap_mode, "mmap_mode"), + (prefer, "prefer"), + (require, "require"), + (verbose, "verbose"), + ] + } + + if isinstance(self._backend_args["max_nbytes"], str): + self._backend_args["max_nbytes"] = memstr_to_bytes( + self._backend_args["max_nbytes"] + ) + self._backend_args["verbose"] = max( + 0, self._backend_args["verbose"] - 50 + ) + + if DEFAULT_MP_CONTEXT is not None: + self._backend_args['context'] = DEFAULT_MP_CONTEXT + elif hasattr(mp, "get_context"): + self._backend_args['context'] = mp.get_context() + + if backend is default_parallel_config['backend'] or backend is None: + backend = active_backend + + elif isinstance(backend, ParallelBackendBase): + # Use provided backend as is, with the current nesting_level if it + # is not set yet. + if backend.nesting_level is None: + backend.nesting_level = nesting_level + + elif hasattr(backend, 'Pool') and hasattr(backend, 'Lock'): + # Make it possible to pass a custom multiprocessing context as + # backend to change the start method to forkserver or spawn or + # preload modules on the forkserver helper process. 
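The `_get_config_param` lookups above mean that arguments left unset fall back to the values of an enclosing `parallel_config` context, as in this minimal sketch:

    from joblib import Parallel, delayed, parallel_config

    # Inside the context, a Parallel call that does not set backend or n_jobs
    # inherits them from parallel_config.
    with parallel_config(backend="threading", n_jobs=2):
        Parallel(verbose=0)(delayed(str.upper)(s) for s in ["a", "b", "c"])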
+ self._backend_args['context'] = backend + backend = MultiprocessingBackend(nesting_level=nesting_level) + + elif backend not in BACKENDS and backend in MAYBE_AVAILABLE_BACKENDS: + warnings.warn( + f"joblib backend '{backend}' is not available on " + f"your system, falling back to {DEFAULT_BACKEND}.", + UserWarning, + stacklevel=2) + BACKENDS[backend] = BACKENDS[DEFAULT_BACKEND] + backend = BACKENDS[DEFAULT_BACKEND](nesting_level=nesting_level) + + else: + try: + backend_factory = BACKENDS[backend] + except KeyError as e: + raise ValueError("Invalid backend: %s, expected one of %r" + % (backend, sorted(BACKENDS.keys()))) from e + backend = backend_factory(nesting_level=nesting_level) + + n_jobs = _get_config_param(n_jobs, context_config, "n_jobs") + if n_jobs is None: + # No specific context override and no specific value request: + # default to the default of the backend. + n_jobs = backend.default_n_jobs + try: + n_jobs = int(n_jobs) + except ValueError: + raise ValueError("n_jobs could not be converted to int") + self.n_jobs = n_jobs + + if (require == 'sharedmem' and + not getattr(backend, 'supports_sharedmem', False)): + raise ValueError("Backend %s does not support shared memory" + % backend) + + if (batch_size == 'auto' or isinstance(batch_size, Integral) and + batch_size > 0): + self.batch_size = batch_size + else: + raise ValueError( + "batch_size must be 'auto' or a positive integer, got: %r" + % batch_size) + + if not isinstance(backend, SequentialBackend): + if self.return_generator and not backend.supports_return_generator: + raise ValueError( + "Backend {} does not support " + "return_as={}".format(backend, return_as) + ) + # This lock is used to coordinate the main thread of this process + # with the async callback thread of our the pool. + self._lock = threading.RLock() + self._jobs = collections.deque() + self._pending_outputs = list() + self._ready_batches = queue.Queue() + self._reducer_callback = None + + # Internal variables + self._backend = backend + self._running = False + self._managed_backend = False + self._id = uuid4().hex + self._call_ref = None + + def __enter__(self): + self._managed_backend = True + self._calling = False + self._initialize_backend() + return self + + def __exit__(self, exc_type, exc_value, traceback): + self._managed_backend = False + if self.return_generator and self._calling: + self._abort() + self._terminate_and_reset() + + def _initialize_backend(self): + """Build a process or thread pool and return the number of workers""" + try: + n_jobs = self._backend.configure(n_jobs=self.n_jobs, parallel=self, + **self._backend_args) + if self.timeout is not None and not self._backend.supports_timeout: + warnings.warn( + 'The backend class {!r} does not support timeout. ' + "You have set 'timeout={}' in Parallel but " + "the 'timeout' parameter will not be used.".format( + self._backend.__class__.__name__, + self.timeout)) + + except FallbackToBackend as e: + # Recursively initialize the backend in case of requested fallback. 
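The branch above that accepts an object exposing `Pool` and `Lock` lets callers pass a multiprocessing context directly as `backend`, which selects the worker start method. A minimal sketch:

    import multiprocessing as mp
    from joblib import Parallel, delayed

    # Passing a context selects the 'spawn' start method for the
    # multiprocessing backend.
    ctx = mp.get_context("spawn")
    Parallel(n_jobs=2, backend=ctx)(delayed(pow)(i, 2) for i in range(4))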
+ self._backend = e.backend + n_jobs = self._initialize_backend() + + return n_jobs + + def _effective_n_jobs(self): + if self._backend: + return self._backend.effective_n_jobs(self.n_jobs) + return 1 + + def _terminate_and_reset(self): + if hasattr(self._backend, 'stop_call') and self._calling: + self._backend.stop_call() + self._calling = False + if not self._managed_backend: + self._backend.terminate() + + def _dispatch(self, batch): + """Queue the batch for computing, with or without multiprocessing + + WARNING: this method is not thread-safe: it should be only called + indirectly via dispatch_one_batch. + + """ + # If job.get() catches an exception, it closes the queue: + if self._aborting: + return + + batch_size = len(batch) + + self.n_dispatched_tasks += batch_size + self.n_dispatched_batches += 1 + + dispatch_timestamp = time.time() + + batch_tracker = BatchCompletionCallBack( + dispatch_timestamp, batch_size, self + ) + + if self.return_ordered: + self._jobs.append(batch_tracker) + + # If return_ordered is False, the batch_tracker is not stored in the + # jobs queue at the time of submission. Instead, it will be appended to + # the queue by itself as soon as the callback is triggered to be able + # to return the results in the order of completion. + + job = self._backend.apply_async(batch, callback=batch_tracker) + batch_tracker.register_job(job) + + def dispatch_next(self): + """Dispatch more data for parallel processing + + This method is meant to be called concurrently by the multiprocessing + callback. We rely on the thread-safety of dispatch_one_batch to protect + against concurrent consumption of the unprotected iterator. + + """ + if not self.dispatch_one_batch(self._original_iterator): + self._iterating = False + self._original_iterator = None + + def dispatch_one_batch(self, iterator): + """Prefetch the tasks for the next batch and dispatch them. + + The effective size of the batch is computed here. + If there are no more jobs to dispatch, return False, else return True. + + The iterator consumption and dispatching is protected by the same + lock so calling this function should be thread safe. + + """ + + if self._aborting: + return False + + batch_size = self._get_batch_size() + + with self._lock: + # to ensure an even distribution of the workload between workers, + # we look ahead in the original iterators more than batch_size + # tasks - However, we keep consuming only one batch at each + # dispatch_one_batch call. The extra tasks are stored in a local + # queue, _ready_batches, that is looked-up prior to re-consuming + # tasks from the origal iterator. + try: + tasks = self._ready_batches.get(block=False) + except queue.Empty: + # slice the iterator n_jobs * batchsize items at a time. If the + # slice returns less than that, then the current batchsize puts + # too much weight on a subset of workers, while other may end + # up starving. So in this case, re-scale the batch size + # accordingly to distribute evenly the last items between all + # workers. + n_jobs = self._cached_effective_n_jobs + big_batch_size = batch_size * n_jobs + + try: + islice = list(itertools.islice(iterator, big_batch_size)) + except Exception as e: + # Handle the fact that the generator of task raised an + # exception. As this part of the code can be executed in + # a thread internal to the backend, register a task with + # an error that will be raised in the user's thread. 
+ if isinstance(e.__context__, queue.Empty): + # Suppress the cause of the exception if it is + # queue.Empty to avoid cluttered traceback. Only do it + # if the __context__ is really empty to avoid messing + # with causes of the original error. + e.__cause__ = None + batch_tracker = BatchCompletionCallBack( + 0, batch_size, self + ) + self._jobs.append(batch_tracker) + batch_tracker._register_outcome(dict( + result=e, status=TASK_ERROR + )) + return True + + if len(islice) == 0: + return False + elif (iterator is self._original_iterator and + len(islice) < big_batch_size): + # We reached the end of the original iterator (unless + # iterator is the ``pre_dispatch``-long initial slice of + # the original iterator) -- decrease the batch size to + # account for potential variance in the batches running + # time. + final_batch_size = max(1, len(islice) // (10 * n_jobs)) + else: + final_batch_size = max(1, len(islice) // n_jobs) + + # enqueue n_jobs batches in a local queue + for i in range(0, len(islice), final_batch_size): + tasks = BatchedCalls(islice[i:i + final_batch_size], + self._backend.get_nested_backend(), + self._reducer_callback, + self._pickle_cache) + self._ready_batches.put(tasks) + + # finally, get one task. + tasks = self._ready_batches.get(block=False) + if len(tasks) == 0: + # No more tasks available in the iterator: tell caller to stop. + return False + else: + self._dispatch(tasks) + return True + + def _get_batch_size(self): + """Returns the effective batch size for dispatch""" + if self.batch_size == 'auto': + return self._backend.compute_batch_size() + else: + # Fixed batch size strategy + return self.batch_size + + def _print(self, msg): + """Display the message on stout or stderr depending on verbosity""" + # XXX: Not using the logger framework: need to + # learn to use logger better. + if not self.verbose: + return + if self.verbose < 50: + writer = sys.stderr.write + else: + writer = sys.stdout.write + writer(f"[{self}]: {msg}\n") + + def _is_completed(self): + """Check if all tasks have been completed""" + return self.n_completed_tasks == self.n_dispatched_tasks and not ( + self._iterating or self._aborting + ) + + def print_progress(self): + """Display the process of the parallel execution only a fraction + of time, controlled by self.verbose. + """ + + if not self.verbose: + return + + elapsed_time = time.time() - self._start_time + + if self._is_completed(): + # Make sure that we get a last message telling us we are done + self._print( + f"Done {self.n_completed_tasks:3d} out of " + f"{self.n_completed_tasks:3d} | elapsed: " + f"{short_format_time(elapsed_time)} finished" + ) + return + + # Original job iterator becomes None once it has been fully + # consumed: at this point we know the total number of jobs and we are + # able to display an estimation of the remaining time based on already + # completed jobs. Otherwise, we simply display the number of completed + # tasks. 
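The look-ahead slicing above can be summarised by a simplified, stand-alone sketch (hypothetical helper name; it ignores the thread lock and the end-of-iterator rescaling that uses 10 * n_jobs):

    import itertools

    def slice_into_batches(iterator, n_jobs, batch_size):
        # Consume n_jobs * batch_size tasks ahead of time, then split them
        # evenly so that no worker is starved by an unlucky final slice.
        big_batch = list(itertools.islice(iterator, n_jobs * batch_size))
        if not big_batch:
            return []
        final_batch_size = max(1, len(big_batch) // n_jobs)
        return [big_batch[i:i + final_batch_size]
                for i in range(0, len(big_batch), final_batch_size)]

    # slice_into_batches(iter(range(10)), n_jobs=3, batch_size=2)
    # -> [[0, 1], [2, 3], [4, 5]]; the remaining items stay in the iterator.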
+ elif self._original_iterator is not None: + if _verbosity_filter(self.n_dispatched_batches, self.verbose): + return + self._print( + f"Done {self.n_completed_tasks:3d} tasks | elapsed: " + f"{short_format_time(elapsed_time)}" + ) + else: + index = self.n_completed_tasks + # We are finished dispatching + total_tasks = self.n_dispatched_tasks + # We always display the first loop + if not index == 0: + # Display depending on the number of remaining items + # A message as soon as we finish dispatching, cursor is 0 + cursor = (total_tasks - index + 1 - + self._pre_dispatch_amount) + frequency = (total_tasks // self.verbose) + 1 + is_last_item = (index + 1 == total_tasks) + if (is_last_item or cursor % frequency): + return + remaining_time = (elapsed_time / index) * \ + (self.n_dispatched_tasks - index * 1.0) + # only display status if remaining time is greater or equal to 0 + self._print( + f"Done {index:3d} out of {total_tasks:3d} | elapsed: " + f"{short_format_time(elapsed_time)} remaining: " + f"{short_format_time(remaining_time)}" + ) + + def _abort(self): + # Stop dispatching new jobs in the async callback thread + self._aborting = True + + # If the backend allows it, cancel or kill remaining running + # tasks without waiting for the results as we will raise + # the exception we got back to the caller instead of returning + # any result. + backend = self._backend + if (not self._aborted and hasattr(backend, 'abort_everything')): + # If the backend is managed externally we need to make sure + # to leave it in a working state to allow for future jobs + # scheduling. + ensure_ready = self._managed_backend + backend.abort_everything(ensure_ready=ensure_ready) + self._aborted = True + + def _start(self, iterator, pre_dispatch): + # Only set self._iterating to True if at least a batch + # was dispatched. In particular this covers the edge + # case of Parallel used with an exhausted iterator. If + # self._original_iterator is None, then this means either + # that pre_dispatch == "all", n_jobs == 1 or that the first batch + # was very quick and its callback already dispatched all the + # remaining jobs. + self._iterating = False + if self.dispatch_one_batch(iterator): + self._iterating = self._original_iterator is not None + + while self.dispatch_one_batch(iterator): + pass + + if pre_dispatch == "all": + # The iterable was consumed all at once by the above for loop. + # No need to wait for async callbacks to trigger to + # consumption. + self._iterating = False + + def _get_outputs(self, iterator, pre_dispatch): + """Iterator returning the tasks' output as soon as they are ready.""" + dispatch_thread_id = threading.get_ident() + detach_generator_exit = False + try: + self._start(iterator, pre_dispatch) + # first yield returns None, for internal use only. This ensures + # that we enter the try/except block and start dispatching the + # tasks. + yield + + with self._backend.retrieval_context(): + yield from self._retrieve() + + except GeneratorExit: + # The generator has been garbage collected before being fully + # consumed. This aborts the remaining tasks if possible and warn + # the user if necessary. + self._exception = True + + # In some interpreters such as PyPy, GeneratorExit can be raised in + # a different thread than the one used to start the dispatch of the + # parallel tasks. This can lead to hang when a thread attempts to + # join itself. As workaround, we detach the execution of the + # aborting code to a dedicated thread. 
We then need to make sure + # the rest of the function does not call `_terminate_and_reset` + # in finally. + if dispatch_thread_id != threading.get_ident(): + if not IS_PYPY: + warnings.warn( + "A generator produced by joblib.Parallel has been " + "gc'ed in an unexpected thread. This behavior should " + "not cause major -issues but to make sure, please " + "report this warning and your use case at " + "https://github.com/joblib/joblib/issues so it can " + "be investigated." + ) + + detach_generator_exit = True + _parallel = self + + class _GeneratorExitThread(threading.Thread): + def run(self): + _parallel._abort() + if _parallel.return_generator: + _parallel._warn_exit_early() + _parallel._terminate_and_reset() + + _GeneratorExitThread( + name="GeneratorExitThread" + ).start() + return + + # Otherwise, we are in the thread that started the dispatch: we can + # safely abort the execution and warn the user. + self._abort() + if self.return_generator: + self._warn_exit_early() + + raise + + # Note: we catch any BaseException instead of just Exception instances + # to also include KeyboardInterrupt + except BaseException: + self._exception = True + self._abort() + raise + finally: + # Store the unconsumed tasks and terminate the workers if necessary + _remaining_outputs = ([] if self._exception else self._jobs) + self._jobs = collections.deque() + self._running = False + if not detach_generator_exit: + self._terminate_and_reset() + + while len(_remaining_outputs) > 0: + batched_results = _remaining_outputs.popleft() + batched_results = batched_results.get_result(self.timeout) + for result in batched_results: + yield result + + def _wait_retrieval(self): + """Return True if we need to continue retrieving some tasks.""" + + # If the input load is still being iterated over, it means that tasks + # are still on the dispatch waitlist and their results will need to + # be retrieved later on. + if self._iterating: + return True + + # If some of the dispatched tasks are still being processed by the + # workers, wait for the compute to finish before starting retrieval + if self.n_completed_tasks < self.n_dispatched_tasks: + return True + + # For backends that does not support retrieving asynchronously the + # result to the main process, all results must be carefully retrieved + # in the _retrieve loop in the main thread while the backend is alive. + # For other backends, the actual retrieval is done asynchronously in + # the callback thread, and we can terminate the backend before the + # `self._jobs` result list has been emptied. The remaining results + # will be collected in the `finally` step of the generator. + if not self._backend.supports_retrieve_callback: + if len(self._jobs) > 0: + return True + + return False + + def _retrieve(self): + while self._wait_retrieval(): + + # If the callback thread of a worker has signaled that its task + # triggered an exception, or if the retrieval loop has raised an + # exception (e.g. `GeneratorExit`), exit the loop and surface the + # worker traceback. + if self._aborting: + self._raise_error_fast() + break + + # If the next job is not ready for retrieval yet, we just wait for + # async callbacks to progress. 
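From the caller's side, the `GeneratorExit` path above is reached when the output generator is discarded before being fully consumed: pending tasks are aborted and a warning may be emitted. A hedged sketch:

    import time
    from joblib import Parallel, delayed

    gen = Parallel(n_jobs=2, return_as="generator")(
        delayed(time.sleep)(0.1) for _ in range(20)
    )
    next(gen)   # start dispatching and consume the first result
    del gen     # dropping the generator aborts the remaining tasks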
+ if ((len(self._jobs) == 0) or + (self._jobs[0].get_status( + timeout=self.timeout) == TASK_PENDING)): + time.sleep(0.01) + continue + + # We need to be careful: the job list can be filling up as + # we empty it and Python list are not thread-safe by + # default hence the use of the lock + with self._lock: + batched_results = self._jobs.popleft() + + # Flatten the batched results to output one output at a time + batched_results = batched_results.get_result(self.timeout) + for result in batched_results: + self._nb_consumed += 1 + yield result + + def _raise_error_fast(self): + """If we are aborting, raise if a job caused an error.""" + + # Find the first job whose status is TASK_ERROR if it exists. + with self._lock: + error_job = next((job for job in self._jobs + if job.status == TASK_ERROR), None) + + # If this error job exists, immediately raise the error by + # calling get_result. This job might not exists if abort has been + # called directly or if the generator is gc'ed. + if error_job is not None: + error_job.get_result(self.timeout) + + def _warn_exit_early(self): + """Warn the user if the generator is gc'ed before being consumned.""" + ready_outputs = self.n_completed_tasks - self._nb_consumed + is_completed = self._is_completed() + msg = "" + if ready_outputs: + msg += ( + f"{ready_outputs} tasks have been successfully executed " + " but not used." + ) + if not is_completed: + msg += " Additionally, " + + if not is_completed: + msg += ( + f"{self.n_dispatched_tasks - self.n_completed_tasks} tasks " + "which were still being processed by the workers have been " + "cancelled." + ) + + if msg: + msg += ( + " You could benefit from adjusting the input task " + "iterator to limit unnecessary computation time." + ) + + warnings.warn(msg) + + def _get_sequential_output(self, iterable): + """Separate loop for sequential output. + + This simplifies the traceback in case of errors and reduces the + overhead of calling sequential tasks with `joblib`. + """ + try: + self._iterating = True + self._original_iterator = iterable + batch_size = self._get_batch_size() + + if batch_size != 1: + it = iter(iterable) + iterable_batched = iter( + lambda: tuple(itertools.islice(it, batch_size)), () + ) + iterable = ( + task for batch in iterable_batched for task in batch + ) + + # first yield returns None, for internal use only. This ensures + # that we enter the try/except block and setup the generator. + yield None + + # Sequentially call the tasks and yield the results. + for func, args, kwargs in iterable: + self.n_dispatched_batches += 1 + self.n_dispatched_tasks += 1 + res = func(*args, **kwargs) + self.n_completed_tasks += 1 + self.print_progress() + yield res + self._nb_consumed += 1 + except BaseException: + self._exception = True + self._aborting = True + self._aborted = True + raise + finally: + self.print_progress() + self._running = False + self._iterating = False + self._original_iterator = None + + def _reset_run_tracking(self): + """Reset the counters and flags used to track the execution.""" + + # Makes sur the parallel instance was not previously running in a + # thread-safe way. + with getattr(self, '_lock', nullcontext()): + if self._running: + msg = 'This Parallel instance is already running !' + if self.return_generator is True: + msg += ( + " Before submitting new tasks, you must wait for the " + "completion of all the previous tasks, or clean all " + "references to the output generator." 
+ ) + raise RuntimeError(msg) + self._running = True + + # Counter to keep track of the task dispatched and completed. + self.n_dispatched_batches = 0 + self.n_dispatched_tasks = 0 + self.n_completed_tasks = 0 + + # Following count is incremented by one each time the user iterates + # on the output generator, it is used to prepare an informative + # warning message in case the generator is deleted before all the + # dispatched tasks have been consumed. + self._nb_consumed = 0 + + # Following flags are used to synchronize the threads in case one of + # the tasks error-out to ensure that all workers abort fast and that + # the backend terminates properly. + + # Set to True as soon as a worker signals that a task errors-out + self._exception = False + # Set to True in case of early termination following an incident + self._aborting = False + # Set to True after abortion is complete + self._aborted = False + + def __call__(self, iterable): + """Main function to dispatch parallel tasks.""" + + self._reset_run_tracking() + self._start_time = time.time() + + if not self._managed_backend: + n_jobs = self._initialize_backend() + else: + n_jobs = self._effective_n_jobs() + + if n_jobs == 1: + # If n_jobs==1, run the computation sequentially and return + # immediately to avoid overheads. + output = self._get_sequential_output(iterable) + next(output) + return output if self.return_generator else list(output) + + # Let's create an ID that uniquely identifies the current call. If the + # call is interrupted early and that the same instance is immediately + # re-used, this id will be used to prevent workers that were + # concurrently finalizing a task from the previous call to run the + # callback. + with self._lock: + self._call_id = uuid4().hex + + # self._effective_n_jobs should be called in the Parallel.__call__ + # thread only -- store its value in an attribute for further queries. + self._cached_effective_n_jobs = n_jobs + + if isinstance(self._backend, LokyBackend): + # For the loky backend, we add a callback executed when reducing + # BatchCalls, that makes the loky executor use a temporary folder + # specific to this Parallel object when pickling temporary memmaps. + # This callback is necessary to ensure that several Parallel + # objects using the same reusable executor don't use the same + # temporary resources. + + def _batched_calls_reducer_callback(): + # Relevant implementation detail: the following lines, called + # when reducing BatchedCalls, are called in a thread-safe + # situation, meaning that the context of the temporary folder + # manager will not be changed in between the callback execution + # and the end of the BatchedCalls pickling. The reason is that + # pickling (the only place where set_current_context is used) + # is done from a single thread (the queue_feeder_thread). + self._backend._workers._temp_folder_manager.set_current_context( # noqa + self._id + ) + self._reducer_callback = _batched_calls_reducer_callback + + # self._effective_n_jobs should be called in the Parallel.__call__ + # thread only -- store its value in an attribute for further queries. + self._cached_effective_n_jobs = n_jobs + + backend_name = self._backend.__class__.__name__ + if n_jobs == 0: + raise RuntimeError("%s has no active worker." % backend_name) + + self._print( + f"Using backend {backend_name} with {n_jobs} concurrent workers." + ) + if hasattr(self._backend, 'start_call'): + self._backend.start_call() + + # Following flag prevents double calls to `backend.stop_call`. 
+ self._calling = True + + iterator = iter(iterable) + pre_dispatch = self.pre_dispatch + + if pre_dispatch == 'all': + # prevent further dispatch via multiprocessing callback thread + self._original_iterator = None + self._pre_dispatch_amount = 0 + else: + self._original_iterator = iterator + if hasattr(pre_dispatch, 'endswith'): + pre_dispatch = eval_expr( + pre_dispatch.replace("n_jobs", str(n_jobs)) + ) + self._pre_dispatch_amount = pre_dispatch = int(pre_dispatch) + + # The main thread will consume the first pre_dispatch items and + # the remaining items will later be lazily dispatched by async + # callbacks upon task completions. + + # TODO: this iterator should be batch_size * n_jobs + iterator = itertools.islice(iterator, self._pre_dispatch_amount) + + # Use a caching dict for callables that are pickled with cloudpickle to + # improve performances. This cache is used only in the case of + # functions that are defined in the __main__ module, functions that + # are defined locally (inside another function) and lambda expressions. + self._pickle_cache = dict() + + output = self._get_outputs(iterator, pre_dispatch) + self._call_ref = weakref.ref(output) + + # The first item from the output is blank, but it makes the interpreter + # progress until it enters the Try/Except block of the generator and + # reaches the first `yield` statement. This starts the asynchronous + # dispatch of the tasks to the workers. + next(output) + + return output if self.return_generator else list(output) + + def __repr__(self): + return '%s(n_jobs=%s)' % (self.__class__.__name__, self.n_jobs) diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/pool.py b/llmeval-env/lib/python3.10/site-packages/joblib/pool.py new file mode 100644 index 0000000000000000000000000000000000000000..c0c3549c17300632c30b3992dc9bfd6c72b18c5d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/pool.py @@ -0,0 +1,354 @@ +"""Custom implementation of multiprocessing.Pool with custom pickler. + +This module provides efficient ways of working with data stored in +shared memory with numpy.memmap arrays without inducing any memory +copy between the parent and child processes. + +This module should not be imported if multiprocessing is not +available as it implements subclasses of multiprocessing Pool +that uses a custom alternative to SimpleQueue. + +""" +# Author: Olivier Grisel +# Copyright: 2012, Olivier Grisel +# License: BSD 3 clause + +import copyreg +import sys +import warnings +from time import sleep + +try: + WindowsError +except NameError: + WindowsError = type(None) + +from pickle import Pickler + +from pickle import HIGHEST_PROTOCOL +from io import BytesIO + +from ._memmapping_reducer import get_memmapping_reducers +from ._memmapping_reducer import TemporaryResourcesManager +from ._multiprocessing_helpers import mp, assert_spawning + +# We need the class definition to derive from it, not the multiprocessing.Pool +# factory function +from multiprocessing.pool import Pool + +try: + import numpy as np +except ImportError: + np = None + + +############################################################################### +# Enable custom pickling in Pool queues + +class CustomizablePickler(Pickler): + """Pickler that accepts custom reducers. + + TODO python2_drop : can this be simplified ? + + HIGHEST_PROTOCOL is selected by default as this pickler is used + to pickle ephemeral datastructures for interprocess communication + hence no backward compatibility is required. 
+ + `reducers` is expected to be a dictionary with key/values + being `(type, callable)` pairs where `callable` is a function that + give an instance of `type` will return a tuple `(constructor, + tuple_of_objects)` to rebuild an instance out of the pickled + `tuple_of_objects` as would return a `__reduce__` method. See the + standard library documentation on pickling for more details. + + """ + + # We override the pure Python pickler as its the only way to be able to + # customize the dispatch table without side effects in Python 2.7 + # to 3.2. For Python 3.3+ leverage the new dispatch_table + # feature from https://bugs.python.org/issue14166 that makes it possible + # to use the C implementation of the Pickler which is faster. + + def __init__(self, writer, reducers=None, protocol=HIGHEST_PROTOCOL): + Pickler.__init__(self, writer, protocol=protocol) + if reducers is None: + reducers = {} + if hasattr(Pickler, 'dispatch'): + # Make the dispatch registry an instance level attribute instead of + # a reference to the class dictionary under Python 2 + self.dispatch = Pickler.dispatch.copy() + else: + # Under Python 3 initialize the dispatch table with a copy of the + # default registry + self.dispatch_table = copyreg.dispatch_table.copy() + for type, reduce_func in reducers.items(): + self.register(type, reduce_func) + + def register(self, type, reduce_func): + """Attach a reducer function to a given type in the dispatch table.""" + if hasattr(Pickler, 'dispatch'): + # Python 2 pickler dispatching is not explicitly customizable. + # Let us use a closure to workaround this limitation. + def dispatcher(self, obj): + reduced = reduce_func(obj) + self.save_reduce(obj=obj, *reduced) + self.dispatch[type] = dispatcher + else: + self.dispatch_table[type] = reduce_func + + +class CustomizablePicklingQueue(object): + """Locked Pipe implementation that uses a customizable pickler. + + This class is an alternative to the multiprocessing implementation + of SimpleQueue in order to make it possible to pass custom + pickling reducers, for instance to avoid memory copy when passing + memory mapped datastructures. + + `reducers` is expected to be a dict with key / values being + `(type, callable)` pairs where `callable` is a function that, given an + instance of `type`, will return a tuple `(constructor, tuple_of_objects)` + to rebuild an instance out of the pickled `tuple_of_objects` as would + return a `__reduce__` method. + + See the standard library documentation on pickling for more details. 
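A minimal sketch of using the pickler above on its own, with a made-up `Point` type and reducer (names are illustrative only):

    import pickle
    from io import BytesIO
    from joblib.pool import CustomizablePickler  # the class defined in this module

    class Point:
        def __init__(self, x, y):
            self.x, self.y = x, y

    def reduce_point(p):
        # (constructor, args) pair, as a __reduce__ method would return.
        return Point, (p.x, p.y)

    buffer = BytesIO()
    CustomizablePickler(buffer, reducers={Point: reduce_point}).dump(Point(1, 2))
    restored = pickle.loads(buffer.getvalue())
    print(restored.x, restored.y)  # 1 2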
+ """ + + def __init__(self, context, reducers=None): + self._reducers = reducers + self._reader, self._writer = context.Pipe(duplex=False) + self._rlock = context.Lock() + if sys.platform == 'win32': + self._wlock = None + else: + self._wlock = context.Lock() + self._make_methods() + + def __getstate__(self): + assert_spawning(self) + return (self._reader, self._writer, self._rlock, self._wlock, + self._reducers) + + def __setstate__(self, state): + (self._reader, self._writer, self._rlock, self._wlock, + self._reducers) = state + self._make_methods() + + def empty(self): + return not self._reader.poll() + + def _make_methods(self): + self._recv = recv = self._reader.recv + racquire, rrelease = self._rlock.acquire, self._rlock.release + + def get(): + racquire() + try: + return recv() + finally: + rrelease() + + self.get = get + + if self._reducers: + def send(obj): + buffer = BytesIO() + CustomizablePickler(buffer, self._reducers).dump(obj) + self._writer.send_bytes(buffer.getvalue()) + self._send = send + else: + self._send = send = self._writer.send + if self._wlock is None: + # writes to a message oriented win32 pipe are atomic + self.put = send + else: + wlock_acquire, wlock_release = ( + self._wlock.acquire, self._wlock.release) + + def put(obj): + wlock_acquire() + try: + return send(obj) + finally: + wlock_release() + + self.put = put + + +class PicklingPool(Pool): + """Pool implementation with customizable pickling reducers. + + This is useful to control how data is shipped between processes + and makes it possible to use shared memory without useless + copies induces by the default pickling methods of the original + objects passed as arguments to dispatch. + + `forward_reducers` and `backward_reducers` are expected to be + dictionaries with key/values being `(type, callable)` pairs where + `callable` is a function that, given an instance of `type`, will return a + tuple `(constructor, tuple_of_objects)` to rebuild an instance out of the + pickled `tuple_of_objects` as would return a `__reduce__` method. + See the standard library documentation about pickling for more details. + + """ + + def __init__(self, processes=None, forward_reducers=None, + backward_reducers=None, **kwargs): + if forward_reducers is None: + forward_reducers = dict() + if backward_reducers is None: + backward_reducers = dict() + self._forward_reducers = forward_reducers + self._backward_reducers = backward_reducers + poolargs = dict(processes=processes) + poolargs.update(kwargs) + super(PicklingPool, self).__init__(**poolargs) + + def _setup_queues(self): + context = getattr(self, '_ctx', mp) + self._inqueue = CustomizablePicklingQueue(context, + self._forward_reducers) + self._outqueue = CustomizablePicklingQueue(context, + self._backward_reducers) + self._quick_put = self._inqueue._send + self._quick_get = self._outqueue._recv + + +class MemmappingPool(PicklingPool): + """Process pool that shares large arrays to avoid memory copy. + + This drop-in replacement for `multiprocessing.pool.Pool` makes + it possible to work efficiently with shared memory in a numpy + context. + + Existing instances of numpy.memmap are preserved: the child + suprocesses will have access to the same shared memory in the + original mode except for the 'w+' mode that is automatically + transformed as 'r+' to avoid zeroing the original data upon + instantiation. 
+ + Furthermore large arrays from the parent process are automatically + dumped to a temporary folder on the filesystem such as child + processes to access their content via memmapping (file system + backed shared memory). + + Note: it is important to call the terminate method to collect + the temporary folder used by the pool. + + Parameters + ---------- + processes: int, optional + Number of worker processes running concurrently in the pool. + initializer: callable, optional + Callable executed on worker process creation. + initargs: tuple, optional + Arguments passed to the initializer callable. + temp_folder: (str, callable) optional + If str: + Folder to be used by the pool for memmapping large arrays + for sharing memory with worker processes. If None, this will try in + order: + - a folder pointed by the JOBLIB_TEMP_FOLDER environment variable, + - /dev/shm if the folder exists and is writable: this is a RAMdisk + filesystem available by default on modern Linux distributions, + - the default system temporary folder that can be overridden + with TMP, TMPDIR or TEMP environment variables, typically /tmp + under Unix operating systems. + if callable: + An callable in charge of dynamically resolving a temporary folder + for memmapping large arrays. + max_nbytes int or None, optional, 1e6 by default + Threshold on the size of arrays passed to the workers that + triggers automated memory mapping in temp_folder. + Use None to disable memmapping of large arrays. + mmap_mode: {'r+', 'r', 'w+', 'c'} + Memmapping mode for numpy arrays passed to workers. + See 'max_nbytes' parameter documentation for more details. + forward_reducers: dictionary, optional + Reducers used to pickle objects passed from main process to worker + processes: see below. + backward_reducers: dictionary, optional + Reducers used to pickle return values from workers back to the + main process. + verbose: int, optional + Make it possible to monitor how the communication of numpy arrays + with the subprocess is handled (pickling or memmapping) + prewarm: bool or str, optional, "auto" by default. + If True, force a read on newly memmapped array to make sure that OS + pre-cache it in memory. This can be useful to avoid concurrent disk + access when the same data array is passed to different worker + processes. If "auto" (by default), prewarm is set to True, unless the + Linux shared memory partition /dev/shm is available and used as temp + folder. + + `forward_reducers` and `backward_reducers` are expected to be + dictionaries with key/values being `(type, callable)` pairs where + `callable` is a function that give an instance of `type` will return + a tuple `(constructor, tuple_of_objects)` to rebuild an instance out + of the pickled `tuple_of_objects` as would return a `__reduce__` + method. See the standard library documentation on pickling for more + details. + + """ + + def __init__(self, processes=None, temp_folder=None, max_nbytes=1e6, + mmap_mode='r', forward_reducers=None, backward_reducers=None, + verbose=0, context_id=None, prewarm=False, **kwargs): + + if context_id is not None: + warnings.warn('context_id is deprecated and ignored in joblib' + ' 0.9.4 and will be removed in 0.11', + DeprecationWarning) + + manager = TemporaryResourcesManager(temp_folder) + self._temp_folder_manager = manager + + # The usage of a temp_folder_resolver over a simple temp_folder is + # superfluous for multiprocessing pools, as they don't get reused, see + # get_memmapping_executor for more details. 
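A hedged usage sketch of the pool documented above; the array size and max_nbytes are arbitrary, and calling `terminate()` matters because it also cleans up the temporary memmap folder:

    import numpy as np
    from joblib.pool import MemmappingPool

    def total(a):
        return a.sum()

    if __name__ == "__main__":
        data = np.ones(int(1e6))          # large enough to be memmapped (> max_nbytes)
        pool = MemmappingPool(processes=2, max_nbytes=1e5, mmap_mode="r")
        try:
            print(pool.map(total, [data] * 4))
        finally:
            pool.terminate()              # stops workers and collects the temp folder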
We still use it for code + # simplicity. + forward_reducers, backward_reducers = \ + get_memmapping_reducers( + temp_folder_resolver=manager.resolve_temp_folder_name, + max_nbytes=max_nbytes, mmap_mode=mmap_mode, + forward_reducers=forward_reducers, + backward_reducers=backward_reducers, verbose=verbose, + unlink_on_gc_collect=False, prewarm=prewarm) + + poolargs = dict( + processes=processes, + forward_reducers=forward_reducers, + backward_reducers=backward_reducers) + poolargs.update(kwargs) + super(MemmappingPool, self).__init__(**poolargs) + + def terminate(self): + n_retries = 10 + for i in range(n_retries): + try: + super(MemmappingPool, self).terminate() + break + except OSError as e: + if isinstance(e, WindowsError): + # Workaround occasional "[Error 5] Access is denied" issue + # when trying to terminate a process under windows. + sleep(0.1) + if i + 1 == n_retries: + warnings.warn("Failed to terminate worker processes in" + " multiprocessing pool: %r" % e) + + # Clean up the temporary resources as the workers should now be off. + self._temp_folder_manager._clean_temporary_resources() + + @property + def _temp_folder(self): + # Legacy property in tests. could be removed if we refactored the + # memmapping tests. SHOULD ONLY BE USED IN TESTS! + # We cache this property because it is called late in the tests - at + # this point, all context have been unregistered, and + # resolve_temp_folder_name raises an error. + if getattr(self, '_cached_temp_folder', None) is not None: + return self._cached_temp_folder + else: + self._cached_temp_folder = self._temp_folder_manager.resolve_temp_folder_name() # noqa + return self._cached_temp_folder diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b133c4315ab6a5206e83f1c3546b275b3b3e67d8 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/common.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..258eb7a9f0c1a4546e293260b704228621210f5f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/common.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_backports.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_backports.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..334b0435552c777f6c795e3e7f6715c494e9a61a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_backports.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_dask.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_dask.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..503c5df820ae69d09bd1139805a8fe8178d7dc13 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_dask.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_disk.cpython-310.pyc 
b/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_disk.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..008406a5874a1a97e1f7eaf90499a35781152da9 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_disk.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_init.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_init.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ca72554f77f72ce43615148560eace2f7747ec47 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_init.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_memory.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_memory.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a4a0ffae9a88a07867cb1cbed85d65c2f3557474 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_memory.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_module.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_module.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ffd9bedd81549850d9f9a0c10afa344d072ddb17 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_module.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_parallel.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_parallel.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1ccda04f47ae41aa6fd846c0720106ae7848c492 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_parallel.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/testutils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/testutils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4e81d8c91d3ea45378670b2558292bb46266767f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/testutils.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/common.py b/llmeval-env/lib/python3.10/site-packages/joblib/test/common.py new file mode 100644 index 0000000000000000000000000000000000000000..b0ca0c6abd913bc37091ebae4bd6a0b64084d20f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/test/common.py @@ -0,0 +1,84 @@ +""" +Small utilities for testing. 
+""" +import os +import gc +import sys + +from joblib._multiprocessing_helpers import mp +from joblib.testing import SkipTest, skipif + +try: + import lz4 +except ImportError: + lz4 = None + +IS_PYPY = hasattr(sys, "pypy_version_info") + +# A decorator to run tests only when numpy is available +try: + import numpy as np + + def with_numpy(func): + """A decorator to skip tests requiring numpy.""" + return func + +except ImportError: + def with_numpy(func): + """A decorator to skip tests requiring numpy.""" + def my_func(): + raise SkipTest('Test requires numpy') + return my_func + np = None + +# TODO: Turn this back on after refactoring yield based tests in test_hashing +# with_numpy = skipif(not np, reason='Test requires numpy.') + +# we use memory_profiler library for memory consumption checks +try: + from memory_profiler import memory_usage + + def with_memory_profiler(func): + """A decorator to skip tests requiring memory_profiler.""" + return func + + def memory_used(func, *args, **kwargs): + """Compute memory usage when executing func.""" + gc.collect() + mem_use = memory_usage((func, args, kwargs), interval=.001) + return max(mem_use) - min(mem_use) + +except ImportError: + def with_memory_profiler(func): + """A decorator to skip tests requiring memory_profiler.""" + def dummy_func(): + raise SkipTest('Test requires memory_profiler.') + return dummy_func + + memory_usage = memory_used = None + + +def force_gc_pypy(): + # The gc in pypy can be delayed. Force it to test the behavior when it + # will eventually be collected. + if IS_PYPY: + # Run gc.collect() twice to make sure the weakref is collected, as + # mentionned in the pypy doc: + # https://doc.pypy.org/en/latest/config/objspace.usemodules._weakref.html + import gc + gc.collect() + gc.collect() + + +with_multiprocessing = skipif( + mp is None, reason='Needs multiprocessing to run.') + + +with_dev_shm = skipif( + not os.path.exists('/dev/shm'), + reason='This test requires a large /dev/shm shared memory fs.') + +with_lz4 = skipif(lz4 is None, reason='Needs lz4 compression to run') + +without_lz4 = skipif( + lz4 is not None, reason='Needs lz4 not being installed to run') diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/data/__init__.py b/llmeval-env/lib/python3.10/site-packages/joblib/test/data/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/data/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/test/data/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b9228ae2cc126f4dc3e81d4a0f3e1a8a51d59dd1 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/test/data/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/data/__pycache__/create_numpy_pickle.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/test/data/__pycache__/create_numpy_pickle.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..064810c9c77e9ff75916eb280e6024621a9b5410 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/test/data/__pycache__/create_numpy_pickle.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py35_np19.pkl.gzip 
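A short sketch of how the skip helpers defined above are typically applied in a test (hypothetical test name):

    from joblib.test.common import np, with_multiprocessing, with_numpy

    @with_numpy
    @with_multiprocessing
    def test_example_requires_numpy_and_mp():
        # Skipped automatically when numpy or multiprocessing is unavailable.
        assert np.arange(3).sum() == 3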
b/llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py35_np19.pkl.gzip new file mode 100644 index 0000000000000000000000000000000000000000..2d56bb066e49bbf08dc2565c53f11348b62f8ef6 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py35_np19.pkl.gzip differ diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py35_np19.pkl.lzma b/llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py35_np19.pkl.lzma new file mode 100644 index 0000000000000000000000000000000000000000..6664c1772e85fa930dc330550f2cb237ac042328 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py35_np19.pkl.lzma differ diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.11.0_pickle_py36_np111.pkl.gzip b/llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.11.0_pickle_py36_np111.pkl.gzip new file mode 100644 index 0000000000000000000000000000000000000000..bffa94bc1e200b7a2cb7aa1b200364bd80455477 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.11.0_pickle_py36_np111.pkl.gzip differ diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.4.dev0_compressed_cache_size_pickle_py35_np19.gz_02.npy.z b/llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.4.dev0_compressed_cache_size_pickle_py35_np19.gz_02.npy.z new file mode 100644 index 0000000000000000000000000000000000000000..ccb4f412310d7a238fe50f4885581d221a34f5e1 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.4.dev0_compressed_cache_size_pickle_py35_np19.gz_02.npy.z differ diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.4.dev0_compressed_cache_size_pickle_py35_np19.gz_03.npy.z b/llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.4.dev0_compressed_cache_size_pickle_py35_np19.gz_03.npy.z new file mode 100644 index 0000000000000000000000000000000000000000..669602e0a880d308cbea22eed2fda74cea225a9f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.4.dev0_compressed_cache_size_pickle_py35_np19.gz_03.npy.z differ diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/test_func_inspect_special_encoding.py b/llmeval-env/lib/python3.10/site-packages/joblib/test/test_func_inspect_special_encoding.py new file mode 100644 index 0000000000000000000000000000000000000000..6c41a59a6900ced36050bf357359c1164a11fdbe --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/test/test_func_inspect_special_encoding.py @@ -0,0 +1,9 @@ +# -*- coding: big5 -*- + + +# Some Traditional Chinese characters: ¤@¨Ç¤¤¤å¦r²Å +def big5_f(): + """¥Î©ó´ú¸Õªº¨ç¼Æ + """ + # µùÄÀ + return 0 diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/test_init.py b/llmeval-env/lib/python3.10/site-packages/joblib/test/test_init.py new file mode 100644 index 0000000000000000000000000000000000000000..923a89b749862ef85b9420634e29e1f7c863113a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/test/test_init.py @@ -0,0 +1,14 @@ +# Basic test case to test functioning of module's top-level + +try: + from joblib import * # noqa + _top_import_error = None +except Exception as ex: # pragma: no cover + _top_import_error = ex + + +def test_import_joblib(): + # Test either above import has failed for some 
reason + # "import *" only allowed at module level, hence we + # rely on setting up the variable above + assert _top_import_error is None diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/test_memory.py b/llmeval-env/lib/python3.10/site-packages/joblib/test/test_memory.py new file mode 100644 index 0000000000000000000000000000000000000000..f360e2b2619d1883c4475cc88f35251cbcb9d1a7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/test/test_memory.py @@ -0,0 +1,1526 @@ +""" +Test the memory module. +""" + +# Author: Gael Varoquaux +# Copyright (c) 2009 Gael Varoquaux +# License: BSD Style, 3 clauses. + +import functools +import gc +import logging +import shutil +import os +import os.path +import pathlib +import pickle +import sys +import time +import datetime +import textwrap + +import pytest + +from joblib.memory import Memory +from joblib.memory import expires_after +from joblib.memory import MemorizedFunc, NotMemorizedFunc +from joblib.memory import MemorizedResult, NotMemorizedResult +from joblib.memory import _FUNCTION_HASHES +from joblib.memory import register_store_backend, _STORE_BACKENDS +from joblib.memory import _build_func_identifier, _store_backend_factory +from joblib.memory import JobLibCollisionWarning +from joblib.parallel import Parallel, delayed +from joblib._store_backends import StoreBackendBase, FileSystemStoreBackend +from joblib.test.common import with_numpy, np +from joblib.test.common import with_multiprocessing +from joblib.testing import parametrize, raises, warns +from joblib.hashing import hash + + +############################################################################### +# Module-level variables for the tests +def f(x, y=1): + """ A module-level function for testing purposes. + """ + return x ** 2 + y + + +############################################################################### +# Helper function for the tests +def check_identity_lazy(func, accumulator, location): + """ Given a function and an accumulator (a list that grows every + time the function is called), check that the function can be + decorated by memory to be a lazy identity. + """ + # Call each function with several arguments, and check that it is + # evaluated only once per argument. + memory = Memory(location=location, verbose=0) + func = memory.cache(func) + for i in range(3): + for _ in range(2): + assert func(i) == i + assert len(accumulator) == i + 1 + + +def corrupt_single_cache_item(memory): + single_cache_item, = memory.store_backend.get_items() + output_filename = os.path.join(single_cache_item.path, 'output.pkl') + with open(output_filename, 'w') as f: + f.write('garbage') + + +def monkeypatch_cached_func_warn(func, monkeypatch_fixture): + # Need monkeypatch because pytest does not + # capture stdlib logging output (see + # https://github.com/pytest-dev/pytest/issues/2079) + + recorded = [] + + def append_to_record(item): + recorded.append(item) + monkeypatch_fixture.setattr(func, 'warn', append_to_record) + return recorded + + +############################################################################### +# Tests +def test_memory_integration(tmpdir): + """ Simple test of memory lazy evaluation. + """ + accumulator = list() + + # Rmk: this function has the same name than a module-level function, + # thus it serves as a test to see that both are identified + # as different. 
+ def f(arg): + accumulator.append(1) + return arg + + check_identity_lazy(f, accumulator, tmpdir.strpath) + + # Now test clearing + for compress in (False, True): + for mmap_mode in ('r', None): + memory = Memory(location=tmpdir.strpath, verbose=10, + mmap_mode=mmap_mode, compress=compress) + # First clear the cache directory, to check that our code can + # handle that + # NOTE: this line would raise an exception, as the database file is + # still open; we ignore the error since we want to test what + # happens if the directory disappears + shutil.rmtree(tmpdir.strpath, ignore_errors=True) + g = memory.cache(f) + g(1) + g.clear(warn=False) + current_accumulator = len(accumulator) + out = g(1) + + assert len(accumulator) == current_accumulator + 1 + # Also, check that Memory.eval works similarly + assert memory.eval(f, 1) == out + assert len(accumulator) == current_accumulator + 1 + + # Now do a smoke test with a function defined in __main__, as the name + # mangling rules are more complex + f.__module__ = '__main__' + memory = Memory(location=tmpdir.strpath, verbose=0) + memory.cache(f)(1) + + +@parametrize("call_before_reducing", [True, False]) +def test_parallel_call_cached_function_defined_in_jupyter( + tmpdir, call_before_reducing +): + # Calling an interactively defined memory.cache()'d function inside a + # Parallel call used to clear the existing cache related to the said + # function (https://github.com/joblib/joblib/issues/1035) + + # This tests checks that this is no longer the case. + + # TODO: test that the cache related to the function cache persists across + # ipython sessions (provided that no code change were made to the + # function's source)? + + # The first part of the test makes the necessary low-level calls to emulate + # the definition of a function in an jupyter notebook cell. Joblib has + # some custom code to treat functions defined specifically in jupyter + # notebooks/ipython session -- we want to test this code, which requires + # the emulation to be rigorous. + for session_no in [0, 1]: + ipython_cell_source = ''' + def f(x): + return x + ''' + + ipython_cell_id = ''.format(session_no) + + exec( + compile( + textwrap.dedent(ipython_cell_source), + filename=ipython_cell_id, + mode='exec' + ) + ) + # f is now accessible in the locals mapping - but for some unknown + # reason, f = locals()['f'] throws a KeyError at runtime, we need to + # bind locals()['f'] to a different name in the local namespace + aliased_f = locals()['f'] + aliased_f.__module__ = "__main__" + + # Preliminary sanity checks, and tests checking that joblib properly + # identified f as an interactive function defined in a jupyter notebook + assert aliased_f(1) == 1 + assert aliased_f.__code__.co_filename == ipython_cell_id + + memory = Memory(location=tmpdir.strpath, verbose=0) + cached_f = memory.cache(aliased_f) + + assert len(os.listdir(tmpdir / 'joblib')) == 1 + f_cache_relative_directory = os.listdir(tmpdir / 'joblib')[0] + assert 'ipython-input' in f_cache_relative_directory + + f_cache_directory = tmpdir / 'joblib' / f_cache_relative_directory + + if session_no == 0: + # The cache should be empty as cached_f has not been called yet. 
+ assert os.listdir(f_cache_directory) == ['f'] + assert os.listdir(f_cache_directory / 'f') == [] + + if call_before_reducing: + cached_f(3) + # Two files were just created, func_code.py, and a folder + # containing the information (inputs hash/ouptput) of + # cached_f(3) + assert len(os.listdir(f_cache_directory / 'f')) == 2 + + # Now, testing #1035: when calling a cached function, joblib + # used to dynamically inspect the underlying function to + # extract its source code (to verify it matches the source code + # of the function as last inspected by joblib) -- however, + # source code introspection fails for dynamic functions sent to + # child processes - which would eventually make joblib clear + # the cache associated to f + res = Parallel(n_jobs=2)(delayed(cached_f)(i) for i in [1, 2]) + else: + # Submit the function to the joblib child processes, although + # the function has never been called in the parent yet. This + # triggers a specific code branch inside + # MemorizedFunc.__reduce__. + res = Parallel(n_jobs=2)(delayed(cached_f)(i) for i in [1, 2]) + assert len(os.listdir(f_cache_directory / 'f')) == 3 + + cached_f(3) + + # Making sure f's cache does not get cleared after the parallel + # calls, and contains ALL cached functions calls (f(1), f(2), f(3)) + # and 'func_code.py' + assert len(os.listdir(f_cache_directory / 'f')) == 4 + else: + # For the second session, there should be an already existing cache + assert len(os.listdir(f_cache_directory / 'f')) == 4 + + cached_f(3) + + # The previous cache should not be invalidated after calling the + # function in a new session + assert len(os.listdir(f_cache_directory / 'f')) == 4 + + +def test_no_memory(): + """ Test memory with location=None: no memoize """ + accumulator = list() + + def ff(arg): + accumulator.append(1) + return arg + + memory = Memory(location=None, verbose=0) + gg = memory.cache(ff) + for _ in range(4): + current_accumulator = len(accumulator) + gg(1) + assert len(accumulator) == current_accumulator + 1 + + +def test_memory_kwarg(tmpdir): + " Test memory with a function with keyword arguments." + accumulator = list() + + def g(arg1=None, arg2=1): + accumulator.append(1) + return arg1 + + check_identity_lazy(g, accumulator, tmpdir.strpath) + + memory = Memory(location=tmpdir.strpath, verbose=0) + g = memory.cache(g) + # Smoke test with an explicit keyword argument: + assert g(arg1=30, arg2=2) == 30 + + +def test_memory_lambda(tmpdir): + " Test memory with a function with a lambda." + accumulator = list() + + def helper(x): + """ A helper function to define l as a lambda. 
+ """ + accumulator.append(1) + return x + + check_identity_lazy(lambda x: helper(x), accumulator, tmpdir.strpath) + + +def test_memory_name_collision(tmpdir): + " Check that name collisions with functions will raise warnings" + memory = Memory(location=tmpdir.strpath, verbose=0) + + @memory.cache + def name_collision(x): + """ A first function called name_collision + """ + return x + + a = name_collision + + @memory.cache + def name_collision(x): + """ A second function called name_collision + """ + return x + + b = name_collision + + with warns(JobLibCollisionWarning) as warninfo: + a(1) + b(1) + + assert len(warninfo) == 1 + assert "collision" in str(warninfo[0].message) + + +def test_memory_warning_lambda_collisions(tmpdir): + # Check that multiple use of lambda will raise collisions + memory = Memory(location=tmpdir.strpath, verbose=0) + a = memory.cache(lambda x: x) + b = memory.cache(lambda x: x + 1) + + with warns(JobLibCollisionWarning) as warninfo: + assert a(0) == 0 + assert b(1) == 2 + assert a(1) == 1 + + # In recent Python versions, we can retrieve the code of lambdas, + # thus nothing is raised + assert len(warninfo) == 4 + + +def test_memory_warning_collision_detection(tmpdir): + # Check that collisions impossible to detect will raise appropriate + # warnings. + memory = Memory(location=tmpdir.strpath, verbose=0) + a1 = eval('lambda x: x') + a1 = memory.cache(a1) + b1 = eval('lambda x: x+1') + b1 = memory.cache(b1) + + with warns(JobLibCollisionWarning) as warninfo: + a1(1) + b1(1) + a1(0) + + assert len(warninfo) == 2 + assert "cannot detect" in str(warninfo[0].message).lower() + + +def test_memory_partial(tmpdir): + " Test memory with functools.partial." + accumulator = list() + + def func(x, y): + """ A helper function to define l as a lambda. + """ + accumulator.append(1) + return y + + import functools + function = functools.partial(func, 1) + + check_identity_lazy(function, accumulator, tmpdir.strpath) + + +def test_memory_eval(tmpdir): + " Smoke test memory with a function with a function defined in an eval." + memory = Memory(location=tmpdir.strpath, verbose=0) + + m = eval('lambda x: x') + mm = memory.cache(m) + + assert mm(1) == 1 + + +def count_and_append(x=[]): + """ A function with a side effect in its arguments. + + Return the length of its argument and append one element. + """ + len_x = len(x) + x.append(None) + return len_x + + +def test_argument_change(tmpdir): + """ Check that if a function has a side effect in its arguments, it + should use the hash of changing arguments. + """ + memory = Memory(location=tmpdir.strpath, verbose=0) + func = memory.cache(count_and_append) + # call the function for the first time, is should cache it with + # argument x=[] + assert func() == 0 + # the second time the argument is x=[None], which is not cached + # yet, so the functions should be called a second time + assert func() == 1 + + +@with_numpy +@parametrize('mmap_mode', [None, 'r']) +def test_memory_numpy(tmpdir, mmap_mode): + " Test memory with a function with numpy arrays." 
+ accumulator = list() + + def n(arg=None): + accumulator.append(1) + return arg + + memory = Memory(location=tmpdir.strpath, mmap_mode=mmap_mode, + verbose=0) + cached_n = memory.cache(n) + + rnd = np.random.RandomState(0) + for i in range(3): + a = rnd.random_sample((10, 10)) + for _ in range(3): + assert np.all(cached_n(a) == a) + assert len(accumulator) == i + 1 + + +@with_numpy +def test_memory_numpy_check_mmap_mode(tmpdir, monkeypatch): + """Check that mmap_mode is respected even at the first call""" + + memory = Memory(location=tmpdir.strpath, mmap_mode='r', verbose=0) + + @memory.cache() + def twice(a): + return a * 2 + + a = np.ones(3) + + b = twice(a) + c = twice(a) + + assert isinstance(c, np.memmap) + assert c.mode == 'r' + + assert isinstance(b, np.memmap) + assert b.mode == 'r' + + # Corrupts the file, Deleting b and c mmaps + # is necessary to be able edit the file + del b + del c + gc.collect() + corrupt_single_cache_item(memory) + + # Make sure that corrupting the file causes recomputation and that + # a warning is issued. + recorded_warnings = monkeypatch_cached_func_warn(twice, monkeypatch) + d = twice(a) + assert len(recorded_warnings) == 1 + exception_msg = 'Exception while loading results' + assert exception_msg in recorded_warnings[0] + # Asserts that the recomputation returns a mmap + assert isinstance(d, np.memmap) + assert d.mode == 'r' + + +def test_memory_exception(tmpdir): + """ Smoketest the exception handling of Memory. + """ + memory = Memory(location=tmpdir.strpath, verbose=0) + + class MyException(Exception): + pass + + @memory.cache + def h(exc=0): + if exc: + raise MyException + + # Call once, to initialise the cache + h() + + for _ in range(3): + # Call 3 times, to be sure that the Exception is always raised + with raises(MyException): + h(1) + + +def test_memory_ignore(tmpdir): + " Test the ignore feature of memory " + memory = Memory(location=tmpdir.strpath, verbose=0) + accumulator = list() + + @memory.cache(ignore=['y']) + def z(x, y=1): + accumulator.append(1) + + assert z.ignore == ['y'] + + z(0, y=1) + assert len(accumulator) == 1 + z(0, y=1) + assert len(accumulator) == 1 + z(0, y=2) + assert len(accumulator) == 1 + + +def test_memory_ignore_decorated(tmpdir): + " Test the ignore feature of memory on a decorated function " + memory = Memory(location=tmpdir.strpath, verbose=0) + accumulator = list() + + def decorate(f): + @functools.wraps(f) + def wrapped(*args, **kwargs): + return f(*args, **kwargs) + return wrapped + + @memory.cache(ignore=['y']) + @decorate + def z(x, y=1): + accumulator.append(1) + + assert z.ignore == ['y'] + + z(0, y=1) + assert len(accumulator) == 1 + z(0, y=1) + assert len(accumulator) == 1 + z(0, y=2) + assert len(accumulator) == 1 + + +def test_memory_args_as_kwargs(tmpdir): + """Non-regression test against 0.12.0 changes. + + https://github.com/joblib/joblib/pull/751 + """ + memory = Memory(location=tmpdir.strpath, verbose=0) + + @memory.cache + def plus_one(a): + return a + 1 + + # It's possible to call a positional arg as a kwarg. + assert plus_one(1) == 2 + assert plus_one(a=1) == 2 + + # However, a positional argument that joblib hadn't seen + # before would cause a failure if it was passed as a kwarg. 
+ assert plus_one(a=2) == 3 + + +@parametrize('ignore, verbose, mmap_mode', [(['x'], 100, 'r'), + ([], 10, None)]) +def test_partial_decoration(tmpdir, ignore, verbose, mmap_mode): + "Check cache may be called with kwargs before decorating" + memory = Memory(location=tmpdir.strpath, verbose=0) + + @memory.cache(ignore=ignore, verbose=verbose, mmap_mode=mmap_mode) + def z(x): + pass + + assert z.ignore == ignore + assert z._verbose == verbose + assert z.mmap_mode == mmap_mode + + +def test_func_dir(tmpdir): + # Test the creation of the memory cache directory for the function. + memory = Memory(location=tmpdir.strpath, verbose=0) + path = __name__.split('.') + path.append('f') + path = tmpdir.join('joblib', *path).strpath + + g = memory.cache(f) + # Test that the function directory is created on demand + func_id = _build_func_identifier(f) + location = os.path.join(g.store_backend.location, func_id) + assert location == path + assert os.path.exists(path) + assert memory.location == os.path.dirname(g.store_backend.location) + + # Test that the code is stored. + # For the following test to be robust to previous execution, we clear + # the in-memory store + _FUNCTION_HASHES.clear() + assert not g._check_previous_func_code() + assert os.path.exists(os.path.join(path, 'func_code.py')) + assert g._check_previous_func_code() + + # Test the robustness to failure of loading previous results. + args_id = g._get_args_id(1) + output_dir = os.path.join(g.store_backend.location, g.func_id, args_id) + a = g(1) + assert os.path.exists(output_dir) + os.remove(os.path.join(output_dir, 'output.pkl')) + assert a == g(1) + + +def test_persistence(tmpdir): + # Test the memorized functions can be pickled and restored. + memory = Memory(location=tmpdir.strpath, verbose=0) + g = memory.cache(f) + output = g(1) + + h = pickle.loads(pickle.dumps(g)) + + args_id = h._get_args_id(1) + output_dir = os.path.join(h.store_backend.location, h.func_id, args_id) + assert os.path.exists(output_dir) + assert output == h.store_backend.load_item([h.func_id, args_id]) + memory2 = pickle.loads(pickle.dumps(memory)) + assert memory.store_backend.location == memory2.store_backend.location + + # Smoke test that pickling a memory with location=None works + memory = Memory(location=None, verbose=0) + pickle.loads(pickle.dumps(memory)) + g = memory.cache(f) + gp = pickle.loads(pickle.dumps(g)) + gp(1) + + +def test_check_call_in_cache(tmpdir): + for func in (MemorizedFunc(f, tmpdir.strpath), + Memory(location=tmpdir.strpath, verbose=0).cache(f)): + result = func.check_call_in_cache(2) + assert not result + assert isinstance(result, bool) + assert func(2) == 5 + result = func.check_call_in_cache(2) + assert result + assert isinstance(result, bool) + func.clear() + + +def test_call_and_shelve(tmpdir): + # Test MemorizedFunc outputting a reference to cache. + + for func, Result in zip((MemorizedFunc(f, tmpdir.strpath), + NotMemorizedFunc(f), + Memory(location=tmpdir.strpath, + verbose=0).cache(f), + Memory(location=None).cache(f), + ), + (MemorizedResult, NotMemorizedResult, + MemorizedResult, NotMemorizedResult)): + assert func(2) == 5 + result = func.call_and_shelve(2) + assert isinstance(result, Result) + assert result.get() == 5 + + result.clear() + with raises(KeyError): + result.get() + result.clear() # Do nothing if there is no cache. 
+ + +def test_call_and_shelve_argument_hash(tmpdir): + # Verify that a warning is raised when accessing arguments_hash + # attribute from MemorizedResult + func = Memory(location=tmpdir.strpath, verbose=0).cache(f) + result = func.call_and_shelve(2) + assert isinstance(result, MemorizedResult) + with warns(DeprecationWarning) as w: + assert result.argument_hash == result.args_id + assert len(w) == 1 + assert "The 'argument_hash' attribute has been deprecated" \ + in str(w[-1].message) + + +def test_call_and_shelve_lazily_load_stored_result(tmpdir): + """Check call_and_shelve only load stored data if needed.""" + test_access_time_file = tmpdir.join('test_access') + test_access_time_file.write('test_access') + test_access_time = os.stat(test_access_time_file.strpath).st_atime + # check file system access time stats resolution is lower than test wait + # timings. + time.sleep(0.5) + assert test_access_time_file.read() == 'test_access' + + if test_access_time == os.stat(test_access_time_file.strpath).st_atime: + # Skip this test when access time cannot be retrieved with enough + # precision from the file system (e.g. NTFS on windows). + pytest.skip("filesystem does not support fine-grained access time " + "attribute") + + memory = Memory(location=tmpdir.strpath, verbose=0) + func = memory.cache(f) + args_id = func._get_args_id(2) + result_path = os.path.join(memory.store_backend.location, + func.func_id, args_id, 'output.pkl') + assert func(2) == 5 + first_access_time = os.stat(result_path).st_atime + time.sleep(1) + + # Should not access the stored data + result = func.call_and_shelve(2) + assert isinstance(result, MemorizedResult) + assert os.stat(result_path).st_atime == first_access_time + time.sleep(1) + + # Read the stored data => last access time is greater than first_access + assert result.get() == 5 + assert os.stat(result_path).st_atime > first_access_time + + +def test_memorized_pickling(tmpdir): + for func in (MemorizedFunc(f, tmpdir.strpath), NotMemorizedFunc(f)): + filename = tmpdir.join('pickling_test.dat').strpath + result = func.call_and_shelve(2) + with open(filename, 'wb') as fp: + pickle.dump(result, fp) + with open(filename, 'rb') as fp: + result2 = pickle.load(fp) + assert result2.get() == result.get() + os.remove(filename) + + +def test_memorized_repr(tmpdir): + func = MemorizedFunc(f, tmpdir.strpath) + result = func.call_and_shelve(2) + + func2 = MemorizedFunc(f, tmpdir.strpath) + result2 = func2.call_and_shelve(2) + assert result.get() == result2.get() + assert repr(func) == repr(func2) + + # Smoke test with NotMemorizedFunc + func = NotMemorizedFunc(f) + repr(func) + repr(func.call_and_shelve(2)) + + # Smoke test for message output (increase code coverage) + func = MemorizedFunc(f, tmpdir.strpath, verbose=11, timestamp=time.time()) + result = func.call_and_shelve(11) + result.get() + + func = MemorizedFunc(f, tmpdir.strpath, verbose=11) + result = func.call_and_shelve(11) + result.get() + + func = MemorizedFunc(f, tmpdir.strpath, verbose=5, timestamp=time.time()) + result = func.call_and_shelve(11) + result.get() + + func = MemorizedFunc(f, tmpdir.strpath, verbose=5) + result = func.call_and_shelve(11) + result.get() + + +def test_memory_file_modification(capsys, tmpdir, monkeypatch): + # Test that modifying a Python file after loading it does not lead to + # Recomputation + dir_name = tmpdir.mkdir('tmp_import').strpath + filename = os.path.join(dir_name, 'tmp_joblib_.py') + content = 'def f(x):\n print(x)\n return x\n' + with open(filename, 'w') as module_file: + 
module_file.write(content) + + # Load the module: + monkeypatch.syspath_prepend(dir_name) + import tmp_joblib_ as tmp + + memory = Memory(location=tmpdir.strpath, verbose=0) + f = memory.cache(tmp.f) + # First call f a few times + f(1) + f(2) + f(1) + + # Now modify the module where f is stored without modifying f + with open(filename, 'w') as module_file: + module_file.write('\n\n' + content) + + # And call f a couple more times + f(1) + f(1) + + # Flush the .pyc files + shutil.rmtree(dir_name) + os.mkdir(dir_name) + # Now modify the module where f is stored, modifying f + content = 'def f(x):\n print("x=%s" % x)\n return x\n' + with open(filename, 'w') as module_file: + module_file.write(content) + + # And call f more times prior to reloading: the cache should not be + # invalidated at this point as the active function definition has not + # changed in memory yet. + f(1) + f(1) + + # Now reload + sys.stdout.write('Reloading\n') + sys.modules.pop('tmp_joblib_') + import tmp_joblib_ as tmp + f = memory.cache(tmp.f) + + # And call f more times + f(1) + f(1) + + out, err = capsys.readouterr() + assert out == '1\n2\nReloading\nx=1\n' + + +def _function_to_cache(a, b): + # Just a place holder function to be mutated by tests + pass + + +def _sum(a, b): + return a + b + + +def _product(a, b): + return a * b + + +def test_memory_in_memory_function_code_change(tmpdir): + _function_to_cache.__code__ = _sum.__code__ + + memory = Memory(location=tmpdir.strpath, verbose=0) + f = memory.cache(_function_to_cache) + + assert f(1, 2) == 3 + assert f(1, 2) == 3 + + with warns(JobLibCollisionWarning): + # Check that inline function modification triggers a cache invalidation + _function_to_cache.__code__ = _product.__code__ + assert f(1, 2) == 2 + assert f(1, 2) == 2 + + +def test_clear_memory_with_none_location(): + memory = Memory(location=None) + memory.clear() + + +def func_with_kwonly_args(a, b, *, kw1='kw1', kw2='kw2'): + return a, b, kw1, kw2 + + +def func_with_signature(a: int, b: float) -> float: + return a + b + + +def test_memory_func_with_kwonly_args(tmpdir): + memory = Memory(location=tmpdir.strpath, verbose=0) + func_cached = memory.cache(func_with_kwonly_args) + + assert func_cached(1, 2, kw1=3) == (1, 2, 3, 'kw2') + + # Making sure that providing a keyword-only argument by + # position raises an exception + with raises(ValueError) as excinfo: + func_cached(1, 2, 3, kw2=4) + excinfo.match("Keyword-only parameter 'kw1' was passed as positional " + "parameter") + + # Keyword-only parameter passed by position with cached call + # should still raise ValueError + func_cached(1, 2, kw1=3, kw2=4) + + with raises(ValueError) as excinfo: + func_cached(1, 2, 3, kw2=4) + excinfo.match("Keyword-only parameter 'kw1' was passed as positional " + "parameter") + + # Test 'ignore' parameter + func_cached = memory.cache(func_with_kwonly_args, ignore=['kw2']) + assert func_cached(1, 2, kw1=3, kw2=4) == (1, 2, 3, 4) + assert func_cached(1, 2, kw1=3, kw2='ignored') == (1, 2, 3, 4) + + +def test_memory_func_with_signature(tmpdir): + memory = Memory(location=tmpdir.strpath, verbose=0) + func_cached = memory.cache(func_with_signature) + + assert func_cached(1, 2.) == 3. 
+ + +def _setup_toy_cache(tmpdir, num_inputs=10): + memory = Memory(location=tmpdir.strpath, verbose=0) + + @memory.cache() + def get_1000_bytes(arg): + return 'a' * 1000 + + inputs = list(range(num_inputs)) + for arg in inputs: + get_1000_bytes(arg) + + func_id = _build_func_identifier(get_1000_bytes) + hash_dirnames = [get_1000_bytes._get_args_id(arg) + for arg in inputs] + + full_hashdirs = [os.path.join(get_1000_bytes.store_backend.location, + func_id, dirname) + for dirname in hash_dirnames] + return memory, full_hashdirs, get_1000_bytes + + +def test__get_items(tmpdir): + memory, expected_hash_dirs, _ = _setup_toy_cache(tmpdir) + items = memory.store_backend.get_items() + hash_dirs = [ci.path for ci in items] + assert set(hash_dirs) == set(expected_hash_dirs) + + def get_files_size(directory): + full_paths = [os.path.join(directory, fn) + for fn in os.listdir(directory)] + return sum(os.path.getsize(fp) for fp in full_paths) + + expected_hash_cache_sizes = [get_files_size(hash_dir) + for hash_dir in hash_dirs] + hash_cache_sizes = [ci.size for ci in items] + assert hash_cache_sizes == expected_hash_cache_sizes + + output_filenames = [os.path.join(hash_dir, 'output.pkl') + for hash_dir in hash_dirs] + + expected_last_accesses = [ + datetime.datetime.fromtimestamp(os.path.getatime(fn)) + for fn in output_filenames] + last_accesses = [ci.last_access for ci in items] + assert last_accesses == expected_last_accesses + + +def test__get_items_to_delete(tmpdir): + # test empty cache + memory, _, _ = _setup_toy_cache(tmpdir, num_inputs=0) + items_to_delete = memory.store_backend._get_items_to_delete('1K') + assert items_to_delete == [] + + memory, expected_hash_cachedirs, _ = _setup_toy_cache(tmpdir) + items = memory.store_backend.get_items() + # bytes_limit set to keep only one cache item (each hash cache + # folder is about 1000 bytes + metadata) + items_to_delete = memory.store_backend._get_items_to_delete('2K') + nb_hashes = len(expected_hash_cachedirs) + assert set.issubset(set(items_to_delete), set(items)) + assert len(items_to_delete) == nb_hashes - 1 + + # Sanity check bytes_limit=2048 is the same as bytes_limit='2K' + items_to_delete_2048b = memory.store_backend._get_items_to_delete(2048) + assert sorted(items_to_delete) == sorted(items_to_delete_2048b) + + # bytes_limit greater than the size of the cache + items_to_delete_empty = memory.store_backend._get_items_to_delete('1M') + assert items_to_delete_empty == [] + + # All the cache items need to be deleted + bytes_limit_too_small = 500 + items_to_delete_500b = memory.store_backend._get_items_to_delete( + bytes_limit_too_small + ) + assert set(items_to_delete_500b), set(items) + + # Test LRU property: surviving cache items should all have a more + # recent last_access that the ones that have been deleted + items_to_delete_6000b = memory.store_backend._get_items_to_delete(6000) + surviving_items = set(items).difference(items_to_delete_6000b) + + assert (max(ci.last_access for ci in items_to_delete_6000b) <= + min(ci.last_access for ci in surviving_items)) + + +def test_memory_reduce_size_bytes_limit(tmpdir): + memory, _, _ = _setup_toy_cache(tmpdir) + ref_cache_items = memory.store_backend.get_items() + + # By default memory.bytes_limit is None and reduce_size is a noop + memory.reduce_size() + cache_items = memory.store_backend.get_items() + assert sorted(ref_cache_items) == sorted(cache_items) + + # No cache items deleted if bytes_limit greater than the size of + # the cache + memory.reduce_size(bytes_limit='1M') + cache_items = 
memory.store_backend.get_items() + assert sorted(ref_cache_items) == sorted(cache_items) + + # bytes_limit is set so that only two cache items are kept + memory.reduce_size(bytes_limit='3K') + cache_items = memory.store_backend.get_items() + assert set.issubset(set(cache_items), set(ref_cache_items)) + assert len(cache_items) == 2 + + # bytes_limit set so that no cache item is kept + bytes_limit_too_small = 500 + memory.reduce_size(bytes_limit=bytes_limit_too_small) + cache_items = memory.store_backend.get_items() + assert cache_items == [] + + +def test_memory_reduce_size_items_limit(tmpdir): + memory, _, _ = _setup_toy_cache(tmpdir) + ref_cache_items = memory.store_backend.get_items() + + # By default reduce_size is a noop + memory.reduce_size() + cache_items = memory.store_backend.get_items() + assert sorted(ref_cache_items) == sorted(cache_items) + + # No cache items deleted if items_limit greater than the size of + # the cache + memory.reduce_size(items_limit=10) + cache_items = memory.store_backend.get_items() + assert sorted(ref_cache_items) == sorted(cache_items) + + # items_limit is set so that only two cache items are kept + memory.reduce_size(items_limit=2) + cache_items = memory.store_backend.get_items() + assert set.issubset(set(cache_items), set(ref_cache_items)) + assert len(cache_items) == 2 + + # item_limit set so that no cache item is kept + memory.reduce_size(items_limit=0) + cache_items = memory.store_backend.get_items() + assert cache_items == [] + + +def test_memory_reduce_size_age_limit(tmpdir): + import time + import datetime + memory, _, put_cache = _setup_toy_cache(tmpdir) + ref_cache_items = memory.store_backend.get_items() + + # By default reduce_size is a noop + memory.reduce_size() + cache_items = memory.store_backend.get_items() + assert sorted(ref_cache_items) == sorted(cache_items) + + # No cache items deleted if age_limit big. + memory.reduce_size(age_limit=datetime.timedelta(days=1)) + cache_items = memory.store_backend.get_items() + assert sorted(ref_cache_items) == sorted(cache_items) + + # age_limit is set so that only two cache items are kept + time.sleep(1) + put_cache(-1) + put_cache(-2) + memory.reduce_size(age_limit=datetime.timedelta(seconds=1)) + cache_items = memory.store_backend.get_items() + assert not set.issubset(set(cache_items), set(ref_cache_items)) + assert len(cache_items) == 2 + + # age_limit set so that no cache item is kept + memory.reduce_size(age_limit=datetime.timedelta(seconds=0)) + cache_items = memory.store_backend.get_items() + assert cache_items == [] + + +def test_memory_clear(tmpdir): + memory, _, g = _setup_toy_cache(tmpdir) + memory.clear() + + assert os.listdir(memory.store_backend.location) == [] + + # Check that the cache for functions hash is also reset. + assert not g._check_previous_func_code(stacklevel=4) + + +def fast_func_with_complex_output(): + complex_obj = ['a' * 1000] * 1000 + return complex_obj + + +def fast_func_with_conditional_complex_output(complex_output=True): + complex_obj = {str(i): i for i in range(int(1e5))} + return complex_obj if complex_output else 'simple output' + + +@with_multiprocessing +def test_cached_function_race_condition_when_persisting_output(tmpdir, capfd): + # Test race condition where multiple processes are writing into + # the same output.pkl. See + # https://github.com/joblib/joblib/issues/490 for more details. 
+ memory = Memory(location=tmpdir.strpath) + func_cached = memory.cache(fast_func_with_complex_output) + + Parallel(n_jobs=2)(delayed(func_cached)() for i in range(3)) + + stdout, stderr = capfd.readouterr() + + # Checking both stdout and stderr (ongoing PR #434 may change + # logging destination) to make sure there is no exception while + # loading the results + exception_msg = 'Exception while loading results' + assert exception_msg not in stdout + assert exception_msg not in stderr + + +@with_multiprocessing +def test_cached_function_race_condition_when_persisting_output_2(tmpdir, + capfd): + # Test race condition in first attempt at solving + # https://github.com/joblib/joblib/issues/490. The race condition + # was due to the delay between seeing the cache directory created + # (interpreted as the result being cached) and the output.pkl being + # pickled. + memory = Memory(location=tmpdir.strpath) + func_cached = memory.cache(fast_func_with_conditional_complex_output) + + Parallel(n_jobs=2)(delayed(func_cached)(True if i % 2 == 0 else False) + for i in range(3)) + + stdout, stderr = capfd.readouterr() + + # Checking both stdout and stderr (ongoing PR #434 may change + # logging destination) to make sure there is no exception while + # loading the results + exception_msg = 'Exception while loading results' + assert exception_msg not in stdout + assert exception_msg not in stderr + + +def test_memory_recomputes_after_an_error_while_loading_results( + tmpdir, monkeypatch): + memory = Memory(location=tmpdir.strpath) + + def func(arg): + # This makes sure that the timestamp returned by two calls of + # func are different. This is needed on Windows where + # time.time resolution may not be accurate enough + time.sleep(0.01) + return arg, time.time() + + cached_func = memory.cache(func) + input_arg = 'arg' + arg, timestamp = cached_func(input_arg) + + # Make sure the function is correctly cached + assert arg == input_arg + + # Corrupting output.pkl to make sure that an error happens when + # loading the cached result + corrupt_single_cache_item(memory) + + # Make sure that corrupting the file causes recomputation and that + # a warning is issued. 
+ recorded_warnings = monkeypatch_cached_func_warn(cached_func, monkeypatch) + recomputed_arg, recomputed_timestamp = cached_func(arg) + assert len(recorded_warnings) == 1 + exception_msg = 'Exception while loading results' + assert exception_msg in recorded_warnings[0] + assert recomputed_arg == arg + assert recomputed_timestamp > timestamp + + # Corrupting output.pkl to make sure that an error happens when + # loading the cached result + corrupt_single_cache_item(memory) + reference = cached_func.call_and_shelve(arg) + try: + reference.get() + raise AssertionError( + "It normally not possible to load a corrupted" + " MemorizedResult" + ) + except KeyError as e: + message = "is corrupted" + assert message in str(e.args) + + +class IncompleteStoreBackend(StoreBackendBase): + """This backend cannot be instantiated and should raise a TypeError.""" + pass + + +class DummyStoreBackend(StoreBackendBase): + """A dummy store backend that does nothing.""" + + def _open_item(self, *args, **kwargs): + """Open an item on store.""" + "Does nothing" + + def _item_exists(self, location): + """Check if an item location exists.""" + "Does nothing" + + def _move_item(self, src, dst): + """Move an item from src to dst in store.""" + "Does nothing" + + def create_location(self, location): + """Create location on store.""" + "Does nothing" + + def exists(self, obj): + """Check if an object exists in the store""" + return False + + def clear_location(self, obj): + """Clear object on store""" + "Does nothing" + + def get_items(self): + """Returns the whole list of items available in cache.""" + return [] + + def configure(self, location, *args, **kwargs): + """Configure the store""" + "Does nothing" + + +@parametrize("invalid_prefix", [None, dict(), list()]) +def test_register_invalid_store_backends_key(invalid_prefix): + # verify the right exceptions are raised when passing a wrong backend key. + with raises(ValueError) as excinfo: + register_store_backend(invalid_prefix, None) + excinfo.match(r'Store backend name should be a string*') + + +def test_register_invalid_store_backends_object(): + # verify the right exceptions are raised when passing a wrong backend + # object. + with raises(ValueError) as excinfo: + register_store_backend("fs", None) + excinfo.match(r'Store backend should inherit StoreBackendBase*') + + +def test_memory_default_store_backend(): + # test an unknown backend falls back into a FileSystemStoreBackend + with raises(TypeError) as excinfo: + Memory(location='/tmp/joblib', backend='unknown') + excinfo.match(r"Unknown location*") + + +def test_warning_on_unknown_location_type(): + class NonSupportedLocationClass: + pass + unsupported_location = NonSupportedLocationClass() + + with warns(UserWarning) as warninfo: + _store_backend_factory("local", location=unsupported_location) + + expected_mesage = ("Instantiating a backend using a " + "NonSupportedLocationClass as a location is not " + "supported by joblib") + assert expected_mesage in str(warninfo[0].message) + + +def test_instanciate_incomplete_store_backend(): + # Verify that registering an external incomplete store backend raises an + # exception when one tries to instantiate it. 
+ backend_name = "isb" + register_store_backend(backend_name, IncompleteStoreBackend) + assert (backend_name, IncompleteStoreBackend) in _STORE_BACKENDS.items() + with raises(TypeError) as excinfo: + _store_backend_factory(backend_name, "fake_location") + excinfo.match(r"Can't instantiate abstract class IncompleteStoreBackend " + "(without an implementation for|with) abstract methods*") + + +def test_dummy_store_backend(): + # Verify that registering an external store backend works. + + backend_name = "dsb" + register_store_backend(backend_name, DummyStoreBackend) + assert (backend_name, DummyStoreBackend) in _STORE_BACKENDS.items() + + backend_obj = _store_backend_factory(backend_name, "dummy_location") + assert isinstance(backend_obj, DummyStoreBackend) + + +def test_instanciate_store_backend_with_pathlib_path(): + # Instantiate a FileSystemStoreBackend using a pathlib.Path object + path = pathlib.Path("some_folder") + backend_obj = _store_backend_factory("local", path) + assert backend_obj.location == "some_folder" + + +def test_filesystem_store_backend_repr(tmpdir): + # Verify string representation of a filesystem store backend. + + repr_pattern = 'FileSystemStoreBackend(location="{location}")' + backend = FileSystemStoreBackend() + assert backend.location is None + + repr(backend) # Should not raise an exception + + assert str(backend) == repr_pattern.format(location=None) + + # backend location is passed explicitly via the configure method (called + # by the internal _store_backend_factory function) + backend.configure(tmpdir.strpath) + + assert str(backend) == repr_pattern.format(location=tmpdir.strpath) + + repr(backend) # Should not raise an exception + + +def test_memory_objects_repr(tmpdir): + # Verify printable reprs of MemorizedResult, MemorizedFunc and Memory. + + def my_func(a, b): + return a + b + + memory = Memory(location=tmpdir.strpath, verbose=0) + memorized_func = memory.cache(my_func) + + memorized_func_repr = 'MemorizedFunc(func={func}, location={location})' + + assert str(memorized_func) == memorized_func_repr.format( + func=my_func, + location=memory.store_backend.location) + + memorized_result = memorized_func.call_and_shelve(42, 42) + + memorized_result_repr = ('MemorizedResult(location="{location}", ' + 'func="{func}", args_id="{args_id}")') + + assert str(memorized_result) == memorized_result_repr.format( + location=memory.store_backend.location, + func=memorized_result.func_id, + args_id=memorized_result.args_id) + + assert str(memory) == 'Memory(location={location})'.format( + location=memory.store_backend.location) + + +def test_memorized_result_pickle(tmpdir): + # Verify a MemoryResult object can be pickled/depickled. 
Non regression + # test introduced following issue + # https://github.com/joblib/joblib/issues/747 + + memory = Memory(location=tmpdir.strpath) + + @memory.cache + def g(x): + return x**2 + + memorized_result = g.call_and_shelve(4) + memorized_result_pickle = pickle.dumps(memorized_result) + memorized_result_loads = pickle.loads(memorized_result_pickle) + + assert memorized_result.store_backend.location == \ + memorized_result_loads.store_backend.location + assert memorized_result.func == memorized_result_loads.func + assert memorized_result.args_id == memorized_result_loads.args_id + assert str(memorized_result) == str(memorized_result_loads) + + +def compare(left, right, ignored_attrs=None): + if ignored_attrs is None: + ignored_attrs = [] + + left_vars = vars(left) + right_vars = vars(right) + assert set(left_vars.keys()) == set(right_vars.keys()) + for attr in left_vars.keys(): + if attr in ignored_attrs: + continue + assert left_vars[attr] == right_vars[attr] + + +@pytest.mark.parametrize('memory_kwargs', + [{'compress': 3, 'verbose': 2}, + {'mmap_mode': 'r', 'verbose': 5, + 'backend_options': {'parameter': 'unused'}}]) +def test_memory_pickle_dump_load(tmpdir, memory_kwargs): + memory = Memory(location=tmpdir.strpath, **memory_kwargs) + + memory_reloaded = pickle.loads(pickle.dumps(memory)) + + # Compare Memory instance before and after pickle roundtrip + compare(memory.store_backend, memory_reloaded.store_backend) + compare(memory, memory_reloaded, + ignored_attrs=set(['store_backend', 'timestamp', '_func_code_id'])) + assert hash(memory) == hash(memory_reloaded) + + func_cached = memory.cache(f) + + func_cached_reloaded = pickle.loads(pickle.dumps(func_cached)) + + # Compare MemorizedFunc instance before/after pickle roundtrip + compare(func_cached.store_backend, func_cached_reloaded.store_backend) + compare(func_cached, func_cached_reloaded, + ignored_attrs=set(['store_backend', 'timestamp', '_func_code_id'])) + assert hash(func_cached) == hash(func_cached_reloaded) + + # Compare MemorizedResult instance before/after pickle roundtrip + memorized_result = func_cached.call_and_shelve(1) + memorized_result_reloaded = pickle.loads(pickle.dumps(memorized_result)) + + compare(memorized_result.store_backend, + memorized_result_reloaded.store_backend) + compare(memorized_result, memorized_result_reloaded, + ignored_attrs=set(['store_backend', 'timestamp', '_func_code_id'])) + assert hash(memorized_result) == hash(memorized_result_reloaded) + + +def test_info_log(tmpdir, caplog): + caplog.set_level(logging.INFO) + x = 3 + + memory = Memory(location=tmpdir.strpath, verbose=20) + + @memory.cache + def f(x): + return x ** 2 + + _ = f(x) + assert "Querying" in caplog.text + caplog.clear() + + memory = Memory(location=tmpdir.strpath, verbose=0) + + @memory.cache + def f(x): + return x ** 2 + + _ = f(x) + assert "Querying" not in caplog.text + caplog.clear() + + +def test_deprecated_bytes_limit(tmpdir): + from joblib import __version__ + if __version__ >= "1.5": + raise DeprecationWarning( + "Bytes limit is deprecated and should be removed by 1.4" + ) + with pytest.warns(DeprecationWarning, match="bytes_limit"): + _ = Memory(location=tmpdir.strpath, bytes_limit='1K') + + +class TestCacheValidationCallback: + "Tests on parameter `cache_validation_callback`" + + def foo(self, x, d, delay=None): + d["run"] = True + if delay is not None: + time.sleep(delay) + return x * 2 + + def test_invalid_cache_validation_callback(self, memory): + "Test invalid values for `cache_validation_callback" + match = 
"cache_validation_callback needs to be callable. Got True." + with pytest.raises(ValueError, match=match): + memory.cache(cache_validation_callback=True) + + @pytest.mark.parametrize("consider_cache_valid", [True, False]) + def test_constant_cache_validation_callback( + self, memory, consider_cache_valid + ): + "Test expiry of old results" + f = memory.cache( + self.foo, cache_validation_callback=lambda _: consider_cache_valid, + ignore=["d"] + ) + + d1, d2 = {"run": False}, {"run": False} + assert f(2, d1) == 4 + assert f(2, d2) == 4 + + assert d1["run"] + assert d2["run"] != consider_cache_valid + + def test_memory_only_cache_long_run(self, memory): + "Test cache validity based on run duration." + + def cache_validation_callback(metadata): + duration = metadata['duration'] + if duration > 0.1: + return True + + f = memory.cache( + self.foo, cache_validation_callback=cache_validation_callback, + ignore=["d"] + ) + + # Short run are not cached + d1, d2 = {"run": False}, {"run": False} + assert f(2, d1, delay=0) == 4 + assert f(2, d2, delay=0) == 4 + assert d1["run"] + assert d2["run"] + + # Longer run are cached + d1, d2 = {"run": False}, {"run": False} + assert f(2, d1, delay=0.2) == 4 + assert f(2, d2, delay=0.2) == 4 + assert d1["run"] + assert not d2["run"] + + def test_memory_expires_after(self, memory): + "Test expiry of old cached results" + + f = memory.cache( + self.foo, cache_validation_callback=expires_after(seconds=.3), + ignore=["d"] + ) + + d1, d2, d3 = {"run": False}, {"run": False}, {"run": False} + assert f(2, d1) == 4 + assert f(2, d2) == 4 + time.sleep(.5) + assert f(2, d3) == 4 + + assert d1["run"] + assert not d2["run"] + assert d3["run"] + + +class TestMemorizedFunc: + "Tests for the MemorizedFunc and NotMemorizedFunc classes" + + @staticmethod + def f(x, counter): + counter[x] = counter.get(x, 0) + 1 + return counter[x] + + def test_call_method_memorized(self, memory): + "Test calling the function" + + f = memory.cache(self.f, ignore=['counter']) + + counter = {} + assert f(2, counter) == 1 + assert f(2, counter) == 1 + + x, meta = f.call(2, counter) + assert x == 2, "f has not been called properly" + assert isinstance(meta, dict), ( + "Metadata are not returned by MemorizedFunc.call." + ) + + def test_call_method_not_memorized(self, memory): + "Test calling the function" + + f = NotMemorizedFunc(self.f) + + counter = {} + assert f(2, counter) == 1 + assert f(2, counter) == 2 + + x, meta = f.call(2, counter) + assert x == 3, "f has not been called properly" + assert isinstance(meta, dict), ( + "Metadata are not returned by MemorizedFunc.call." 
+ ) diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/test_numpy_pickle.py b/llmeval-env/lib/python3.10/site-packages/joblib/test/test_numpy_pickle.py new file mode 100644 index 0000000000000000000000000000000000000000..9fee585c79ad219d3a9f8cdc6a55655b50099c09 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/test/test_numpy_pickle.py @@ -0,0 +1,1159 @@ +"""Test the numpy pickler as a replacement of the standard pickler.""" + +import copy +import os +import random +import re +import io +import sys +import warnings +import gzip +import zlib +import bz2 +import pickle +import socket +from contextlib import closing +import mmap +from pathlib import Path + +try: + import lzma +except ImportError: + lzma = None + +import pytest + +from joblib.test.common import np, with_numpy, with_lz4, without_lz4 +from joblib.test.common import with_memory_profiler, memory_used +from joblib.testing import parametrize, raises, warns + +# numpy_pickle is not a drop-in replacement of pickle, as it takes +# filenames instead of open files as arguments. +from joblib import numpy_pickle, register_compressor +from joblib.test import data + +from joblib.numpy_pickle_utils import _IO_BUFFER_SIZE +from joblib.numpy_pickle_utils import _detect_compressor +from joblib.numpy_pickle_utils import _is_numpy_array_byte_order_mismatch +from joblib.numpy_pickle_utils import _ensure_native_byte_order +from joblib.compressor import (_COMPRESSORS, _LZ4_PREFIX, CompressorWrapper, + LZ4_NOT_INSTALLED_ERROR, BinaryZlibFile) + + +############################################################################### +# Define a list of standard types. +# Borrowed from dill, initial author: Micheal McKerns: +# http://dev.danse.us/trac/pathos/browser/dill/dill_test2.py + +typelist = [] + +# testing types +_none = None +typelist.append(_none) +_type = type +typelist.append(_type) +_bool = bool(1) +typelist.append(_bool) +_int = int(1) +typelist.append(_int) +_float = float(1) +typelist.append(_float) +_complex = complex(1) +typelist.append(_complex) +_string = str(1) +typelist.append(_string) +_tuple = () +typelist.append(_tuple) +_list = [] +typelist.append(_list) +_dict = {} +typelist.append(_dict) +_builtin = len +typelist.append(_builtin) + + +def _function(x): + yield x + + +class _class: + def _method(self): + pass + + +class _newclass(object): + def _method(self): + pass + + +typelist.append(_function) +typelist.append(_class) +typelist.append(_newclass) # +_instance = _class() +typelist.append(_instance) +_object = _newclass() +typelist.append(_object) # + + +############################################################################### +# Tests + +@parametrize('compress', [0, 1]) +@parametrize('member', typelist) +def test_standard_types(tmpdir, compress, member): + # Test pickling and saving with standard types. + filename = tmpdir.join('test.pkl').strpath + numpy_pickle.dump(member, filename, compress=compress) + _member = numpy_pickle.load(filename) + # We compare the pickled instance to the reloaded one only if it + # can be compared to a copied one + if member == copy.deepcopy(member): + assert member == _member + + +def test_value_error(): + # Test inverting the input arguments to dump + with raises(ValueError): + numpy_pickle.dump('foo', dict()) + + +@parametrize('wrong_compress', [-1, 10, dict()]) +def test_compress_level_error(wrong_compress): + # Verify that passing an invalid compress argument raises an error. 
+ exception_msg = ('Non valid compress level given: ' + '"{0}"'.format(wrong_compress)) + with raises(ValueError) as excinfo: + numpy_pickle.dump('dummy', 'foo', compress=wrong_compress) + excinfo.match(exception_msg) + + +@with_numpy +@parametrize('compress', [False, True, 0, 3, 'zlib']) +def test_numpy_persistence(tmpdir, compress): + filename = tmpdir.join('test.pkl').strpath + rnd = np.random.RandomState(0) + a = rnd.random_sample((10, 2)) + # We use 'a.T' to have a non C-contiguous array. + for index, obj in enumerate(((a,), (a.T,), (a, a), [a, a, a])): + filenames = numpy_pickle.dump(obj, filename, compress=compress) + + # All is cached in one file + assert len(filenames) == 1 + # Check that only one file was created + assert filenames[0] == filename + # Check that this file does exist + assert os.path.exists(filenames[0]) + + # Unpickle the object + obj_ = numpy_pickle.load(filename) + # Check that the items are indeed arrays + for item in obj_: + assert isinstance(item, np.ndarray) + # And finally, check that all the values are equal. + np.testing.assert_array_equal(np.array(obj), np.array(obj_)) + + # Now test with an array subclass + obj = np.memmap(filename + 'mmap', mode='w+', shape=4, dtype=np.float64) + filenames = numpy_pickle.dump(obj, filename, compress=compress) + # All is cached in one file + assert len(filenames) == 1 + + obj_ = numpy_pickle.load(filename) + if (type(obj) is not np.memmap and + hasattr(obj, '__array_prepare__')): + # We don't reconstruct memmaps + assert isinstance(obj_, type(obj)) + + np.testing.assert_array_equal(obj_, obj) + + # Test with an object containing multiple numpy arrays + obj = ComplexTestObject() + filenames = numpy_pickle.dump(obj, filename, compress=compress) + # All is cached in one file + assert len(filenames) == 1 + + obj_loaded = numpy_pickle.load(filename) + assert isinstance(obj_loaded, type(obj)) + np.testing.assert_array_equal(obj_loaded.array_float, obj.array_float) + np.testing.assert_array_equal(obj_loaded.array_int, obj.array_int) + np.testing.assert_array_equal(obj_loaded.array_obj, obj.array_obj) + + +@with_numpy +def test_numpy_persistence_bufferred_array_compression(tmpdir): + big_array = np.ones((_IO_BUFFER_SIZE + 100), dtype=np.uint8) + filename = tmpdir.join('test.pkl').strpath + numpy_pickle.dump(big_array, filename, compress=True) + arr_reloaded = numpy_pickle.load(filename) + + np.testing.assert_array_equal(big_array, arr_reloaded) + + +@with_numpy +def test_memmap_persistence(tmpdir): + rnd = np.random.RandomState(0) + a = rnd.random_sample(10) + filename = tmpdir.join('test1.pkl').strpath + numpy_pickle.dump(a, filename) + b = numpy_pickle.load(filename, mmap_mode='r') + + assert isinstance(b, np.memmap) + + # Test with an object containing multiple numpy arrays + filename = tmpdir.join('test2.pkl').strpath + obj = ComplexTestObject() + numpy_pickle.dump(obj, filename) + obj_loaded = numpy_pickle.load(filename, mmap_mode='r') + assert isinstance(obj_loaded, type(obj)) + assert isinstance(obj_loaded.array_float, np.memmap) + assert not obj_loaded.array_float.flags.writeable + assert isinstance(obj_loaded.array_int, np.memmap) + assert not obj_loaded.array_int.flags.writeable + # Memory map not allowed for numpy object arrays + assert not isinstance(obj_loaded.array_obj, np.memmap) + np.testing.assert_array_equal(obj_loaded.array_float, + obj.array_float) + np.testing.assert_array_equal(obj_loaded.array_int, + obj.array_int) + np.testing.assert_array_equal(obj_loaded.array_obj, + obj.array_obj) + + # Test we can 
write in memmapped arrays + obj_loaded = numpy_pickle.load(filename, mmap_mode='r+') + assert obj_loaded.array_float.flags.writeable + obj_loaded.array_float[0:10] = 10.0 + assert obj_loaded.array_int.flags.writeable + obj_loaded.array_int[0:10] = 10 + + obj_reloaded = numpy_pickle.load(filename, mmap_mode='r') + np.testing.assert_array_equal(obj_reloaded.array_float, + obj_loaded.array_float) + np.testing.assert_array_equal(obj_reloaded.array_int, + obj_loaded.array_int) + + # Test w+ mode is caught and the mode has switched to r+ + numpy_pickle.load(filename, mmap_mode='w+') + assert obj_loaded.array_int.flags.writeable + assert obj_loaded.array_int.mode == 'r+' + assert obj_loaded.array_float.flags.writeable + assert obj_loaded.array_float.mode == 'r+' + + +@with_numpy +def test_memmap_persistence_mixed_dtypes(tmpdir): + # loading datastructures that have sub-arrays with dtype=object + # should not prevent memmapping on fixed size dtype sub-arrays. + rnd = np.random.RandomState(0) + a = rnd.random_sample(10) + b = np.array([1, 'b'], dtype=object) + construct = (a, b) + filename = tmpdir.join('test.pkl').strpath + numpy_pickle.dump(construct, filename) + a_clone, b_clone = numpy_pickle.load(filename, mmap_mode='r') + + # the floating point array has been memory mapped + assert isinstance(a_clone, np.memmap) + + # the object-dtype array has been loaded in memory + assert not isinstance(b_clone, np.memmap) + + +@with_numpy +def test_masked_array_persistence(tmpdir): + # The special-case picker fails, because saving masked_array + # not implemented, but it just delegates to the standard pickler. + rnd = np.random.RandomState(0) + a = rnd.random_sample(10) + a = np.ma.masked_greater(a, 0.5) + filename = tmpdir.join('test.pkl').strpath + numpy_pickle.dump(a, filename) + b = numpy_pickle.load(filename, mmap_mode='r') + assert isinstance(b, np.ma.masked_array) + + +@with_numpy +def test_compress_mmap_mode_warning(tmpdir): + # Test the warning in case of compress + mmap_mode + rnd = np.random.RandomState(0) + a = rnd.random_sample(10) + this_filename = tmpdir.join('test.pkl').strpath + numpy_pickle.dump(a, this_filename, compress=1) + with warns(UserWarning) as warninfo: + numpy_pickle.load(this_filename, mmap_mode='r+') + debug_msg = "\n".join([str(w) for w in warninfo]) + warninfo = [w.message for w in warninfo] + assert len(warninfo) == 1, debug_msg + assert ( + str(warninfo[0]) == + 'mmap_mode "r+" is not compatible with compressed ' + f'file {this_filename}. "r+" flag will be ignored.' + ) + + +@with_numpy +@parametrize('cache_size', [None, 0, 10]) +def test_cache_size_warning(tmpdir, cache_size): + # Check deprecation warning raised when cache size is not None + filename = tmpdir.join('test.pkl').strpath + rnd = np.random.RandomState(0) + a = rnd.random_sample((10, 2)) + + warnings.simplefilter("always") + with warnings.catch_warnings(record=True) as warninfo: + numpy_pickle.dump(a, filename, cache_size=cache_size) + expected_nb_warnings = 1 if cache_size is not None else 0 + assert len(warninfo) == expected_nb_warnings + for w in warninfo: + assert w.category == DeprecationWarning + assert (str(w.message) == + "Please do not set 'cache_size' in joblib.dump, this " + "parameter has no effect and will be removed. You " + "used 'cache_size={0}'".format(cache_size)) + + +@with_numpy +@with_memory_profiler +@parametrize('compress', [True, False]) +def test_memory_usage(tmpdir, compress): + # Verify memory stays within expected bounds. 
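A condensed illustration of the mmap corner cases covered above, with made-up paths: loading a compressed file with an mmap_mode simply warns and ignores the flag, and mmap_mode='r' hands back a read-only view (use 'r+' to write back into the file).

import warnings

import numpy as np
from joblib import numpy_pickle

a = np.arange(10.0)
numpy_pickle.dump(a, '/tmp/compressed.pkl', compress=1)
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')
    numpy_pickle.load('/tmp/compressed.pkl', mmap_mode='r+')   # flag is ignored
assert any('not compatible with compressed' in str(w.message) for w in caught)

numpy_pickle.dump(a, '/tmp/raw.pkl')
m = numpy_pickle.load('/tmp/raw.pkl', mmap_mode='r')
assert not m.flags.writeable    # 'r' yields a read-only memmap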
+ filename = tmpdir.join('test.pkl').strpath + small_array = np.ones((10, 10)) + big_array = np.ones(shape=100 * int(1e6), dtype=np.uint8) + + for obj in (small_array, big_array): + size = obj.nbytes / 1e6 + obj_filename = filename + str(np.random.randint(0, 1000)) + mem_used = memory_used(numpy_pickle.dump, + obj, obj_filename, compress=compress) + + # The memory used to dump the object shouldn't exceed the buffer + # size used to write array chunks (16MB). + write_buf_size = _IO_BUFFER_SIZE + 16 * 1024 ** 2 / 1e6 + assert mem_used <= write_buf_size + + mem_used = memory_used(numpy_pickle.load, obj_filename) + # memory used should be less than array size + buffer size used to + # read the array chunk by chunk. + read_buf_size = 32 + _IO_BUFFER_SIZE # MiB + assert mem_used < size + read_buf_size + + +@with_numpy +def test_compressed_pickle_dump_and_load(tmpdir): + expected_list = [np.arange(5, dtype=np.dtype('i8')), + np.arange(5, dtype=np.dtype('f8')), + np.array([1, 'abc', {'a': 1, 'b': 2}], dtype='O'), + np.arange(256, dtype=np.uint8).tobytes(), + u"C'est l'\xe9t\xe9 !"] + + fname = tmpdir.join('temp.pkl.gz').strpath + + dumped_filenames = numpy_pickle.dump(expected_list, fname, compress=1) + assert len(dumped_filenames) == 1 + result_list = numpy_pickle.load(fname) + for result, expected in zip(result_list, expected_list): + if isinstance(expected, np.ndarray): + expected = _ensure_native_byte_order(expected) + assert result.dtype == expected.dtype + np.testing.assert_equal(result, expected) + else: + assert result == expected + + +def _check_pickle(filename, expected_list, mmap_mode=None): + """Helper function to test joblib pickle content. + + Note: currently only pickles containing an iterable are supported + by this function. + """ + version_match = re.match(r'.+py(\d)(\d).+', filename) + py_version_used_for_writing = int(version_match.group(1)) + + py_version_to_default_pickle_protocol = {2: 2, 3: 3} + pickle_reading_protocol = py_version_to_default_pickle_protocol.get(3, 4) + pickle_writing_protocol = py_version_to_default_pickle_protocol.get( + py_version_used_for_writing, 4) + if pickle_reading_protocol >= pickle_writing_protocol: + try: + with warnings.catch_warnings(record=True) as warninfo: + warnings.simplefilter('always') + warnings.filterwarnings( + 'ignore', module='numpy', + message='The compiler package is deprecated') + result_list = numpy_pickle.load(filename, mmap_mode=mmap_mode) + filename_base = os.path.basename(filename) + expected_nb_deprecation_warnings = 1 if ( + "_0.9" in filename_base or "_0.8.4" in filename_base) else 0 + + expected_nb_user_warnings = 3 if ( + re.search("_0.1.+.pkl$", filename_base) and + mmap_mode is not None) else 0 + expected_nb_warnings = \ + expected_nb_deprecation_warnings + expected_nb_user_warnings + assert len(warninfo) == expected_nb_warnings + + deprecation_warnings = [ + w for w in warninfo if issubclass( + w.category, DeprecationWarning)] + user_warnings = [ + w for w in warninfo if issubclass( + w.category, UserWarning)] + for w in deprecation_warnings: + assert (str(w.message) == + "The file '{0}' has been generated with a joblib " + "version less than 0.10. 
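As a hedged sketch of the compressed round-trip being tested (paths are illustrative), heterogeneous payloads survive intact, and the compressor can also be selected implicitly from the file extension, which the lz4 tests further down rely on as well.

import numpy as np
from joblib import numpy_pickle

payload = [np.arange(5, dtype='i8'), b'\x00\x01\x02', u"C'est l'\xe9t\xe9 !"]
numpy_pickle.dump(payload, '/tmp/payload.pkl.gz')    # '.gz' suffix selects gzip
restored = numpy_pickle.load('/tmp/payload.pkl.gz')
np.testing.assert_array_equal(restored[0], payload[0])
assert restored[1:] == payload[1:]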
Please regenerate this " + "pickle file.".format(filename)) + + for w in user_warnings: + escaped_filename = re.escape(filename) + assert re.search( + f"memmapped.+{escaped_filename}.+segmentation fault", + str(w.message)) + + for result, expected in zip(result_list, expected_list): + if isinstance(expected, np.ndarray): + expected = _ensure_native_byte_order(expected) + assert result.dtype == expected.dtype + np.testing.assert_equal(result, expected) + else: + assert result == expected + except Exception as exc: + # When trying to read with python 3 a pickle generated + # with python 2 we expect a user-friendly error + if py_version_used_for_writing == 2: + assert isinstance(exc, ValueError) + message = ('You may be trying to read with ' + 'python 3 a joblib pickle generated with python 2.') + assert message in str(exc) + elif filename.endswith('.lz4') and with_lz4.args[0]: + assert isinstance(exc, ValueError) + assert LZ4_NOT_INSTALLED_ERROR in str(exc) + else: + raise + else: + # Pickle protocol used for writing is too high. We expect a + # "unsupported pickle protocol" error message + try: + numpy_pickle.load(filename) + raise AssertionError('Numpy pickle loading should ' + 'have raised a ValueError exception') + except ValueError as e: + message = 'unsupported pickle protocol: {0}'.format( + pickle_writing_protocol) + assert message in str(e.args) + + +@with_numpy +def test_joblib_pickle_across_python_versions(): + # We need to be specific about dtypes in particular endianness + # because the pickles can be generated on one architecture and + # the tests run on another one. See + # https://github.com/joblib/joblib/issues/279. + expected_list = [np.arange(5, dtype=np.dtype('i8'), ('', '>f8')]), + np.arange(3, dtype=np.dtype('>i8')), + np.arange(3, dtype=np.dtype('>f8'))] + + # Verify the byteorder mismatch is correctly detected. + for array in be_arrays: + if sys.byteorder == 'big': + assert not _is_numpy_array_byte_order_mismatch(array) + else: + assert _is_numpy_array_byte_order_mismatch(array) + converted = _ensure_native_byte_order(array) + if converted.dtype.fields: + for f in converted.dtype.fields.values(): + f[0].byteorder == '=' + else: + assert converted.dtype.byteorder == "=" + + # List of numpy arrays with little endian byteorder. + le_arrays = [np.array([(1, 2.0), (3, 4.0)], + dtype=[('', ' size + np.testing.assert_array_equal(obj, memmaps) + + +def test_register_compressor(tmpdir): + # Check that registering compressor file works. + compressor_name = 'test-name' + compressor_prefix = 'test-prefix' + + class BinaryCompressorTestFile(io.BufferedIOBase): + pass + + class BinaryCompressorTestWrapper(CompressorWrapper): + + def __init__(self): + CompressorWrapper.__init__(self, obj=BinaryCompressorTestFile, + prefix=compressor_prefix) + + register_compressor(compressor_name, BinaryCompressorTestWrapper()) + + assert (_COMPRESSORS[compressor_name].fileobj_factory == + BinaryCompressorTestFile) + assert _COMPRESSORS[compressor_name].prefix == compressor_prefix + + # Remove this dummy compressor file from extra compressors because other + # tests might fail because of this + _COMPRESSORS.pop(compressor_name) + + +@parametrize('invalid_name', [1, (), {}]) +def test_register_compressor_invalid_name(invalid_name): + # Test that registering an invalid compressor name is not allowed. 
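The registration test above suggests the following usage pattern for a custom compressor; the _Demo* names and prefix bytes are placeholders, and the wrapper is popped again at the end exactly as the test cleans up after itself.

import io

from joblib import register_compressor
from joblib.compressor import CompressorWrapper, _COMPRESSORS

class _DemoCompressorFile(io.BufferedIOBase):    # minimal stand-in file object
    pass

class _DemoCompressorWrapper(CompressorWrapper):
    def __init__(self):
        CompressorWrapper.__init__(self, obj=_DemoCompressorFile, prefix=b'demo-magic')

register_compressor('demo', _DemoCompressorWrapper())
assert _COMPRESSORS['demo'].fileobj_factory is _DemoCompressorFile
_COMPRESSORS.pop('demo')    # unregister so the dummy entry does not leak into other code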
+ with raises(ValueError) as excinfo: + register_compressor(invalid_name, None) + excinfo.match("Compressor name should be a string") + + +def test_register_compressor_invalid_fileobj(): + # Test that registering an invalid file object is not allowed. + + class InvalidFileObject(): + pass + + class InvalidFileObjectWrapper(CompressorWrapper): + def __init__(self): + CompressorWrapper.__init__(self, obj=InvalidFileObject, + prefix=b'prefix') + + with raises(ValueError) as excinfo: + register_compressor('invalid', InvalidFileObjectWrapper()) + + excinfo.match("Compressor 'fileobj_factory' attribute should implement " + "the file object interface") + + +class AnotherZlibCompressorWrapper(CompressorWrapper): + + def __init__(self): + CompressorWrapper.__init__(self, obj=BinaryZlibFile, prefix=b'prefix') + + +class StandardLibGzipCompressorWrapper(CompressorWrapper): + + def __init__(self): + CompressorWrapper.__init__(self, obj=gzip.GzipFile, prefix=b'prefix') + + +def test_register_compressor_already_registered(): + # Test registration of existing compressor files. + compressor_name = 'test-name' + + # register a test compressor + register_compressor(compressor_name, AnotherZlibCompressorWrapper()) + + with raises(ValueError) as excinfo: + register_compressor(compressor_name, + StandardLibGzipCompressorWrapper()) + excinfo.match("Compressor '{}' already registered." + .format(compressor_name)) + + register_compressor(compressor_name, StandardLibGzipCompressorWrapper(), + force=True) + + assert compressor_name in _COMPRESSORS + assert _COMPRESSORS[compressor_name].fileobj_factory == gzip.GzipFile + + # Remove this dummy compressor file from extra compressors because other + # tests might fail because of this + _COMPRESSORS.pop(compressor_name) + + +@with_lz4 +def test_lz4_compression(tmpdir): + # Check that lz4 can be used when dependency is available. + import lz4.frame + compressor = 'lz4' + assert compressor in _COMPRESSORS + assert _COMPRESSORS[compressor].fileobj_factory == lz4.frame.LZ4FrameFile + + fname = tmpdir.join('test.pkl').strpath + data = 'test data' + numpy_pickle.dump(data, fname, compress=compressor) + + with open(fname, 'rb') as f: + assert f.read(len(_LZ4_PREFIX)) == _LZ4_PREFIX + assert numpy_pickle.load(fname) == data + + # Test that LZ4 is applied based on file extension + numpy_pickle.dump(data, fname + '.lz4') + with open(fname, 'rb') as f: + assert f.read(len(_LZ4_PREFIX)) == _LZ4_PREFIX + assert numpy_pickle.load(fname) == data + + +@without_lz4 +def test_lz4_compression_without_lz4(tmpdir): + # Check that lz4 cannot be used when dependency is not available. 
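Requesting a compressor by name follows the same path; a rough sketch, assuming lz4 may or may not be installed and using an arbitrary /tmp path, is:

from joblib import numpy_pickle

try:
    numpy_pickle.dump('test data', '/tmp/data.pkl', compress='lz4')
    print(numpy_pickle.load('/tmp/data.pkl'))
except ValueError as exc:
    # Without the optional lz4 package, joblib raises LZ4_NOT_INSTALLED_ERROR.
    print('lz4 unavailable:', exc)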
+ fname = tmpdir.join('test.nolz4').strpath + data = 'test data' + msg = LZ4_NOT_INSTALLED_ERROR + with raises(ValueError) as excinfo: + numpy_pickle.dump(data, fname, compress='lz4') + excinfo.match(msg) + + with raises(ValueError) as excinfo: + numpy_pickle.dump(data, fname + '.lz4') + excinfo.match(msg) + + +protocols = [pickle.DEFAULT_PROTOCOL] +if pickle.HIGHEST_PROTOCOL != pickle.DEFAULT_PROTOCOL: + protocols.append(pickle.HIGHEST_PROTOCOL) + + +@with_numpy +@parametrize('protocol', protocols) +def test_memmap_alignment_padding(tmpdir, protocol): + # Test that memmaped arrays returned by numpy.load are correctly aligned + fname = tmpdir.join('test.mmap').strpath + + a = np.random.randn(2) + numpy_pickle.dump(a, fname, protocol=protocol) + memmap = numpy_pickle.load(fname, mmap_mode='r') + assert isinstance(memmap, np.memmap) + np.testing.assert_array_equal(a, memmap) + assert ( + memmap.ctypes.data % numpy_pickle.NUMPY_ARRAY_ALIGNMENT_BYTES == 0) + assert memmap.flags.aligned + + array_list = [ + np.random.randn(2), np.random.randn(2), + np.random.randn(2), np.random.randn(2) + ] + + # On Windows OSError 22 if reusing the same path for memmap ... + fname = tmpdir.join('test1.mmap').strpath + numpy_pickle.dump(array_list, fname, protocol=protocol) + l_reloaded = numpy_pickle.load(fname, mmap_mode='r') + + for idx, memmap in enumerate(l_reloaded): + assert isinstance(memmap, np.memmap) + np.testing.assert_array_equal(array_list[idx], memmap) + assert ( + memmap.ctypes.data % numpy_pickle.NUMPY_ARRAY_ALIGNMENT_BYTES == 0) + assert memmap.flags.aligned + + array_dict = { + 'a0': np.arange(2, dtype=np.uint8), + 'a1': np.arange(3, dtype=np.uint8), + 'a2': np.arange(5, dtype=np.uint8), + 'a3': np.arange(7, dtype=np.uint8), + 'a4': np.arange(11, dtype=np.uint8), + 'a5': np.arange(13, dtype=np.uint8), + 'a6': np.arange(17, dtype=np.uint8), + 'a7': np.arange(19, dtype=np.uint8), + 'a8': np.arange(23, dtype=np.uint8), + } + + # On Windows OSError 22 if reusing the same path for memmap ... + fname = tmpdir.join('test2.mmap').strpath + numpy_pickle.dump(array_dict, fname, protocol=protocol) + d_reloaded = numpy_pickle.load(fname, mmap_mode='r') + + for key, memmap in d_reloaded.items(): + assert isinstance(memmap, np.memmap) + np.testing.assert_array_equal(array_dict[key], memmap) + assert ( + memmap.ctypes.data % numpy_pickle.NUMPY_ARRAY_ALIGNMENT_BYTES == 0) + assert memmap.flags.aligned diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/test_numpy_pickle_compat.py b/llmeval-env/lib/python3.10/site-packages/joblib/test/test_numpy_pickle_compat.py new file mode 100644 index 0000000000000000000000000000000000000000..9e95393b19e979593c7037d0d4fb740e47131dde --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/test/test_numpy_pickle_compat.py @@ -0,0 +1,16 @@ +"""Test the old numpy pickler, compatibility version.""" + +# numpy_pickle is not a drop-in replacement of pickle, as it takes +# filenames instead of open files as arguments. +from joblib import numpy_pickle_compat + + +def test_z_file(tmpdir): + # Test saving and loading data with Zfiles. 
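The alignment guarantee asserted above can be checked directly against the constant the pickler exports; a small sketch with an arbitrary path:

import numpy as np
from joblib import numpy_pickle

a = np.random.randn(16)
numpy_pickle.dump(a, '/tmp/aligned.mmap')
m = numpy_pickle.load('/tmp/aligned.mmap', mmap_mode='r')
# The pickler pads each array frame so the memmapped buffer starts on a
# NUMPY_ARRAY_ALIGNMENT_BYTES boundary, keeping the reloaded view aligned.
assert m.ctypes.data % numpy_pickle.NUMPY_ARRAY_ALIGNMENT_BYTES == 0
assert m.flags.aligned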
+ filename = tmpdir.join('test.pkl').strpath + data = numpy_pickle_compat.asbytes('Foo, \n Bar, baz, \n\nfoobar') + with open(filename, 'wb') as f: + numpy_pickle_compat.write_zfile(f, data) + with open(filename, 'rb') as f: + data_read = numpy_pickle_compat.read_zfile(f) + assert data == data_read diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/testing.py b/llmeval-env/lib/python3.10/site-packages/joblib/testing.py new file mode 100644 index 0000000000000000000000000000000000000000..caab7d2903c710534596f2248de4b6c0642f9526 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/testing.py @@ -0,0 +1,99 @@ +""" +Helper for testing. +""" + +import sys +import warnings +import os.path +import re +import subprocess +import threading + +import pytest +import _pytest + + +raises = pytest.raises +warns = pytest.warns +SkipTest = _pytest.runner.Skipped +skipif = pytest.mark.skipif +fixture = pytest.fixture +parametrize = pytest.mark.parametrize +timeout = pytest.mark.timeout +xfail = pytest.mark.xfail +param = pytest.param + + +def warnings_to_stdout(): + """ Redirect all warnings to stdout. + """ + showwarning_orig = warnings.showwarning + + def showwarning(msg, cat, fname, lno, file=None, line=0): + showwarning_orig(msg, cat, os.path.basename(fname), line, sys.stdout) + + warnings.showwarning = showwarning + # warnings.simplefilter('always') + + +def check_subprocess_call(cmd, timeout=5, stdout_regex=None, + stderr_regex=None): + """Runs a command in a subprocess with timeout in seconds. + + A SIGTERM is sent after `timeout` and if it does not terminate, a + SIGKILL is sent after `2 * timeout`. + + Also checks returncode is zero, stdout if stdout_regex is set, and + stderr if stderr_regex is set. + """ + proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + + def terminate_process(): # pragma: no cover + """ + Attempt to terminate a leftover process spawned during test execution: + ideally this should not be needed but can help avoid clogging the CI + workers in case of deadlocks. + """ + warnings.warn(f"Timeout running {cmd}") + proc.terminate() + + def kill_process(): # pragma: no cover + """ + Kill a leftover process spawned during test execution: ideally this + should not be needed but can help avoid clogging the CI workers in + case of deadlocks. + """ + warnings.warn(f"Timeout running {cmd}") + proc.kill() + + try: + if timeout is not None: + terminate_timer = threading.Timer(timeout, terminate_process) + terminate_timer.start() + kill_timer = threading.Timer(2 * timeout, kill_process) + kill_timer.start() + stdout, stderr = proc.communicate() + stdout, stderr = stdout.decode(), stderr.decode() + if proc.returncode != 0: + message = ( + 'Non-zero return code: {}.\nStdout:\n{}\n' + 'Stderr:\n{}').format( + proc.returncode, stdout, stderr) + raise ValueError(message) + + if (stdout_regex is not None and + not re.search(stdout_regex, stdout)): + raise ValueError( + "Unexpected stdout: {!r} does not match:\n{!r}".format( + stdout_regex, stdout)) + if (stderr_regex is not None and + not re.search(stderr_regex, stderr)): + raise ValueError( + "Unexpected stderr: {!r} does not match:\n{!r}".format( + stderr_regex, stderr)) + + finally: + if timeout is not None: + terminate_timer.cancel() + kill_timer.cancel()
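Finally, check_subprocess_call is the helper the rest of the suite leans on for subprocess-based tests; a plausible call, with an illustrative command, looks like:

import sys

from joblib.testing import check_subprocess_call

check_subprocess_call(
    [sys.executable, '-c', 'print("joblib ok")'],
    timeout=10,                 # SIGTERM after 10s, SIGKILL after 20s
    stdout_regex=r'joblib ok',  # raises ValueError if stdout does not match
)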