applied-ai-018 committed
Commit be99f12 · verified · 1 Parent(s): 24e0940

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.

Files changed (50)
  1. llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/__init__.cpython-310.pyc +0 -0
  2. llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/_cloudpickle_wrapper.cpython-310.pyc +0 -0
  3. llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/_dask.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/_memmapping_reducer.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/_multiprocessing_helpers.cpython-310.pyc +0 -0
  6. llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/_parallel_backends.cpython-310.pyc +0 -0
  7. llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/_store_backends.cpython-310.pyc +0 -0
  8. llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/_utils.cpython-310.pyc +0 -0
  9. llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/backports.cpython-310.pyc +0 -0
  10. llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/compressor.cpython-310.pyc +0 -0
  11. llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/disk.cpython-310.pyc +0 -0
  12. llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/executor.cpython-310.pyc +0 -0
  13. llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/func_inspect.cpython-310.pyc +0 -0
  14. llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/hashing.cpython-310.pyc +0 -0
  15. llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/logger.cpython-310.pyc +0 -0
  16. llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/memory.cpython-310.pyc +0 -0
  17. llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/numpy_pickle.cpython-310.pyc +0 -0
  18. llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/numpy_pickle_compat.cpython-310.pyc +0 -0
  19. llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/numpy_pickle_utils.cpython-310.pyc +0 -0
  20. llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/parallel.cpython-310.pyc +0 -0
  21. llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/pool.cpython-310.pyc +0 -0
  22. llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/testing.cpython-310.pyc +0 -0
  23. llmeval-env/lib/python3.10/site-packages/joblib/_cloudpickle_wrapper.py +19 -0
  24. llmeval-env/lib/python3.10/site-packages/joblib/_memmapping_reducer.py +657 -0
  25. llmeval-env/lib/python3.10/site-packages/joblib/_multiprocessing_helpers.py +53 -0
  26. llmeval-env/lib/python3.10/site-packages/joblib/_parallel_backends.py +649 -0
  27. llmeval-env/lib/python3.10/site-packages/joblib/_utils.py +83 -0
  28. llmeval-env/lib/python3.10/site-packages/joblib/backports.py +177 -0
  29. llmeval-env/lib/python3.10/site-packages/joblib/externals/__init__.py +0 -0
  30. llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/__init__.py +44 -0
  31. llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/_base.py +28 -0
  32. llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/_posix_reduction.py +67 -0
  33. llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/_win_reduction.py +18 -0
  34. llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/fork_exec.py +43 -0
  35. llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/popen_loky_posix.py +193 -0
  36. llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/process.py +85 -0
  37. llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/resource_tracker.py +378 -0
  38. llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/spawn.py +250 -0
  39. llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/synchronize.py +409 -0
  40. llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/cloudpickle_wrapper.py +102 -0
  41. llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/initializers.py +80 -0
  42. llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/process_executor.py +1314 -0
  43. llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/reusable_executor.py +285 -0
  44. llmeval-env/lib/python3.10/site-packages/joblib/func_inspect.py +369 -0
  45. llmeval-env/lib/python3.10/site-packages/joblib/logger.py +162 -0
  46. llmeval-env/lib/python3.10/site-packages/joblib/memory.py +1172 -0
  47. llmeval-env/lib/python3.10/site-packages/joblib/numpy_pickle.py +659 -0
  48. llmeval-env/lib/python3.10/site-packages/joblib/numpy_pickle_utils.py +253 -0
  49. llmeval-env/lib/python3.10/site-packages/joblib/parallel.py +2010 -0
  50. llmeval-env/lib/python3.10/site-packages/joblib/pool.py +354 -0
llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (4.64 kB)

llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/_cloudpickle_wrapper.cpython-310.pyc ADDED
Binary file (609 Bytes)

llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/_dask.cpython-310.pyc ADDED
Binary file (11.1 kB)

llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/_memmapping_reducer.cpython-310.pyc ADDED
Binary file (15.6 kB)

llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/_multiprocessing_helpers.cpython-310.pyc ADDED
Binary file (1.27 kB)

llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/_parallel_backends.cpython-310.pyc ADDED
Binary file (20.2 kB)

llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/_store_backends.cpython-310.pyc ADDED
Binary file (15.7 kB)

llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/_utils.cpython-310.pyc ADDED
Binary file (2.67 kB)

llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/backports.cpython-310.pyc ADDED
Binary file (5.38 kB)

llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/compressor.cpython-310.pyc ADDED
Binary file (16.7 kB)

llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/disk.cpython-310.pyc ADDED
Binary file (3.12 kB)

llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/executor.cpython-310.pyc ADDED
Binary file (3.21 kB)

llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/func_inspect.cpython-310.pyc ADDED
Binary file (8.92 kB)

llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/hashing.cpython-310.pyc ADDED
Binary file (6.37 kB)

llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/logger.cpython-310.pyc ADDED
Binary file (4.36 kB)

llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/memory.cpython-310.pyc ADDED
Binary file (34 kB)

llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/numpy_pickle.cpython-310.pyc ADDED
Binary file (17.2 kB)

llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/numpy_pickle_compat.cpython-310.pyc ADDED
Binary file (7.12 kB)

llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/numpy_pickle_utils.cpython-310.pyc ADDED
Binary file (6.57 kB)

llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/parallel.cpython-310.pyc ADDED
Binary file (56.1 kB)

llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/pool.cpython-310.pyc ADDED
Binary file (12.6 kB)

llmeval-env/lib/python3.10/site-packages/joblib/__pycache__/testing.cpython-310.pyc ADDED
Binary file (2.91 kB)
llmeval-env/lib/python3.10/site-packages/joblib/_cloudpickle_wrapper.py ADDED
@@ -0,0 +1,19 @@
+"""
+Small shim of loky's cloudpickle_wrapper to avoid failure when
+multiprocessing is not available.
+"""
+
+
+from ._multiprocessing_helpers import mp
+
+
+def _my_wrap_non_picklable_objects(obj, keep_wrapper=True):
+    return obj
+
+
+if mp is not None:
+    from .externals.loky import wrap_non_picklable_objects
+else:
+    wrap_non_picklable_objects = _my_wrap_non_picklable_objects
+
+__all__ = ["wrap_non_picklable_objects"]
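
For context on how this shim is exercised: wrap_non_picklable_objects wraps a callable so that backends relying on the standard pickler can ship it to worker processes via cloudpickle, while the serial fallback above makes the wrapper a no-op. A minimal usage sketch follows; the lambda, the variable names and the n_jobs value are illustrative assumptions, not part of this commit:

from joblib import Parallel, delayed, wrap_non_picklable_objects

# Plain lambdas cannot be pickled by the standard pickler; wrapping them
# lets the multiprocessing backend send them to workers via cloudpickle.
# When multiprocessing is unavailable, the shim above makes this a no-op
# and the code still runs serially.
square = wrap_non_picklable_objects(lambda x: x * x)

if __name__ == "__main__":
    results = Parallel(n_jobs=2, backend="multiprocessing")(
        delayed(square)(i) for i in range(4)
    )
    print(results)  # [0, 1, 4, 9]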
llmeval-env/lib/python3.10/site-packages/joblib/_memmapping_reducer.py ADDED
@@ -0,0 +1,657 @@
+"""
+Reducer using memory mapping for numpy arrays
+"""
+# Author: Thomas Moreau <[email protected]>
+# Copyright: 2017, Thomas Moreau
+# License: BSD 3 clause
+
+from mmap import mmap
+import errno
+import os
+import stat
+import threading
+import atexit
+import tempfile
+import time
+import warnings
+import weakref
+from uuid import uuid4
+from multiprocessing import util
+
+from pickle import whichmodule, loads, dumps, HIGHEST_PROTOCOL, PicklingError
+
+try:
+    WindowsError
+except NameError:
+    WindowsError = type(None)
+
+try:
+    import numpy as np
+    from numpy.lib.stride_tricks import as_strided
+except ImportError:
+    np = None
+
+from .numpy_pickle import dump, load, load_temporary_memmap
+from .backports import make_memmap
+from .disk import delete_folder
+from .externals.loky.backend import resource_tracker
+
+# Some systems have a ramdisk mounted by default; we can use it instead of
+# /tmp as the default folder to dump big arrays to share with subprocesses.
+SYSTEM_SHARED_MEM_FS = '/dev/shm'
+
+# Minimal number of bytes available on SYSTEM_SHARED_MEM_FS to consider using
+# it as the default folder to dump big arrays to share with subprocesses.
+SYSTEM_SHARED_MEM_FS_MIN_SIZE = int(2e9)
+
+# Folder and file permissions to chmod temporary files generated by the
+# memmapping pool. Only the owner of the Python process can access the
+# temporary files and folder.
+FOLDER_PERMISSIONS = stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
+FILE_PERMISSIONS = stat.S_IRUSR | stat.S_IWUSR
+
+# Set used in joblib workers, referencing the filenames of temporary memmaps
+# created by joblib to speed up data communication. In child processes, we add
+# a finalizer to these memmaps that sends a maybe_unlink call to the
+# resource_tracker, in order to free main memory as fast as possible.
+JOBLIB_MMAPS = set()
+
+
+def _log_and_unlink(filename):
+    from .externals.loky.backend.resource_tracker import _resource_tracker
+    util.debug(
+        "[FINALIZER CALL] object mapping to {} about to be deleted,"
+        " decrementing the refcount of the file (pid: {})".format(
+            os.path.basename(filename), os.getpid()))
+    _resource_tracker.maybe_unlink(filename, "file")
+
+
+def add_maybe_unlink_finalizer(memmap):
+    util.debug(
+        "[FINALIZER ADD] adding finalizer to {} (id {}, filename {}, pid {})"
+        "".format(type(memmap), id(memmap), os.path.basename(memmap.filename),
+                  os.getpid()))
+    weakref.finalize(memmap, _log_and_unlink, memmap.filename)
+
+
+def unlink_file(filename):
+    """Wrapper around os.unlink with a retry mechanism.
+
+    The retry mechanism has been implemented primarily to overcome a race
+    condition happening during the finalizer of a np.memmap: when a process
+    holding the last reference to a mmap-backed np.memmap/np.array is about to
+    delete this array (and close the reference), it sends a maybe_unlink
+    request to the resource_tracker. This request can be processed faster than
+    it takes for the last reference of the memmap to be closed, yielding (on
+    Windows) a PermissionError in the resource_tracker loop.
+    """
+    NUM_RETRIES = 10
+    for retry_no in range(1, NUM_RETRIES + 1):
+        try:
+            os.unlink(filename)
+            break
+        except PermissionError:
+            util.debug(
+                '[ResourceTracker] tried to unlink {}, got '
+                'PermissionError'.format(filename)
+            )
+            if retry_no == NUM_RETRIES:
+                raise
+            else:
+                time.sleep(.2)
+        except FileNotFoundError:
+            # In case of a race condition when deleting the temporary folder,
+            # avoid noisy FileNotFoundError exceptions in the resource
+            # tracker.
+            pass
+
+
+resource_tracker._CLEANUP_FUNCS['file'] = unlink_file
+
+
+class _WeakArrayKeyMap:
+    """A variant of weakref.WeakKeyDictionary for unhashable numpy arrays.
+
+    This data structure will be used with numpy arrays as obj keys, therefore
+    we do not use the __get__ / __set__ methods to avoid any conflict with
+    the numpy fancy indexing syntax.
+    """
+
+    def __init__(self):
+        self._data = {}
+
+    def get(self, obj):
+        ref, val = self._data[id(obj)]
+        if ref() is not obj:
+            # In case of race condition with on_destroy: could never be
+            # triggered by the joblib tests with CPython.
+            raise KeyError(obj)
+        return val
+
+    def set(self, obj, value):
+        key = id(obj)
+        try:
+            ref, _ = self._data[key]
+            if ref() is not obj:
+                # In case of race condition with on_destroy: could never be
+                # triggered by the joblib tests with CPython.
+                raise KeyError(obj)
+        except KeyError:
+            # Insert the new entry in the mapping along with a weakref
+            # callback to automatically delete the entry from the mapping
+            # as soon as the object used as key is garbage collected.
+            def on_destroy(_):
+                del self._data[key]
+            ref = weakref.ref(obj, on_destroy)
+        self._data[key] = ref, value
+
+    def __getstate__(self):
+        raise PicklingError("_WeakArrayKeyMap is not pickleable")
+
+
+###############################################################################
+# Support for efficient transient pickling of numpy data structures
+
+
+def _get_backing_memmap(a):
+    """Recursively look up the original np.memmap instance base if any."""
+    b = getattr(a, 'base', None)
+    if b is None:
+        # TODO: check scipy sparse datastructure if scipy is installed
+        # neither a nor its descendants have a memmap base
+        return None
+
+    elif isinstance(b, mmap):
+        # a is already a real memmap instance.
+        return a
+
+    else:
+        # Recursive exploration of the base ancestry
+        return _get_backing_memmap(b)
+
+
+def _get_temp_dir(pool_folder_name, temp_folder=None):
+    """Get the full path to a subfolder inside the temporary folder.
+
+    Parameters
+    ----------
+    pool_folder_name : str
+        Sub-folder name used for the serialization of a pool instance.
+
+    temp_folder: str, optional
+        Folder to be used by the pool for memmapping large arrays
+        for sharing memory with worker processes. If None, this will try in
+        order:
+
+        - a folder pointed to by the JOBLIB_TEMP_FOLDER environment
+          variable,
+        - /dev/shm if the folder exists and is writable: this is a
+          RAM disk filesystem available by default on modern Linux
+          distributions,
+        - the default system temporary folder that can be
+          overridden with the TMP, TMPDIR or TEMP environment
+          variables, typically /tmp under Unix operating systems.
+
+    Returns
+    -------
+    pool_folder : str
+        full path to the temporary folder
+    use_shared_mem : bool
+        whether the temporary folder is written to the system shared memory
+        folder or some other temporary folder.
+    """
+    use_shared_mem = False
+    if temp_folder is None:
+        temp_folder = os.environ.get('JOBLIB_TEMP_FOLDER', None)
+    if temp_folder is None:
+        if os.path.exists(SYSTEM_SHARED_MEM_FS) and hasattr(os, 'statvfs'):
+            try:
+                shm_stats = os.statvfs(SYSTEM_SHARED_MEM_FS)
+                available_nbytes = shm_stats.f_bsize * shm_stats.f_bavail
+                if available_nbytes > SYSTEM_SHARED_MEM_FS_MIN_SIZE:
+                    # Try to see if we have write access to the shared mem
+                    # folder only if it is reasonably large (that is 2GB or
+                    # more).
+                    temp_folder = SYSTEM_SHARED_MEM_FS
+                    pool_folder = os.path.join(temp_folder, pool_folder_name)
+                    if not os.path.exists(pool_folder):
+                        os.makedirs(pool_folder)
+                    use_shared_mem = True
+            except (IOError, OSError):
+                # Missing rights in the /dev/shm partition, fallback to
+                # regular temp folder.
+                temp_folder = None
+    if temp_folder is None:
+        # Fallback to the default tmp folder, typically /tmp
+        temp_folder = tempfile.gettempdir()
+    temp_folder = os.path.abspath(os.path.expanduser(temp_folder))
+    pool_folder = os.path.join(temp_folder, pool_folder_name)
+    return pool_folder, use_shared_mem
+
+
+def has_shareable_memory(a):
+    """Return True if a is backed by some mmap buffer, directly or not."""
+    return _get_backing_memmap(a) is not None
+
+
+def _strided_from_memmap(filename, dtype, mode, offset, order, shape, strides,
+                         total_buffer_len, unlink_on_gc_collect):
+    """Reconstruct an array view on a memory mapped file."""
+    if mode == 'w+':
+        # Do not zero the original data when unpickling
+        mode = 'r+'
+
+    if strides is None:
+        # Simple, contiguous memmap
+        return make_memmap(
+            filename, dtype=dtype, shape=shape, mode=mode, offset=offset,
+            order=order, unlink_on_gc_collect=unlink_on_gc_collect
+        )
+    else:
+        # For non-contiguous data, memmap the total enclosing buffer and then
+        # extract the non-contiguous view with the stride-tricks API
+        base = make_memmap(
+            filename, dtype=dtype, shape=total_buffer_len, offset=offset,
+            mode=mode, order=order, unlink_on_gc_collect=unlink_on_gc_collect
+        )
+        return as_strided(base, shape=shape, strides=strides)
+
+
+def _reduce_memmap_backed(a, m):
+    """Pickling reduction for memmap backed arrays.
+
+    a is expected to be an instance of np.ndarray (or np.memmap)
+    m is expected to be an instance of np.memmap on the top of the ``base``
+    attribute ancestry of a. ``m.base`` should be the real python mmap object.
+    """
+    # offset that comes from the striding differences between a and m
+    util.debug('[MEMMAP REDUCE] reducing a memmap-backed array '
+               '(shape, {}, pid: {})'.format(a.shape, os.getpid()))
+    try:
+        from numpy.lib.array_utils import byte_bounds
+    except (ModuleNotFoundError, ImportError):
+        # Backward-compat for numpy < 2.0
+        from numpy import byte_bounds
+    a_start, a_end = byte_bounds(a)
+    m_start = byte_bounds(m)[0]
+    offset = a_start - m_start
+
+    # offset from the backing memmap
+    offset += m.offset
+
+    if m.flags['F_CONTIGUOUS']:
+        order = 'F'
+    else:
+        # The backing memmap buffer is necessarily contiguous hence C if not
+        # Fortran
+        order = 'C'
+
+    if a.flags['F_CONTIGUOUS'] or a.flags['C_CONTIGUOUS']:
+        # If the array is a contiguous view, no need to pass the strides
+        strides = None
+        total_buffer_len = None
+    else:
+        # Compute the total number of items to map from which the strided
+        # view will be extracted.
+        strides = a.strides
+        total_buffer_len = (a_end - a_start) // a.itemsize
+
+    return (_strided_from_memmap,
+            (m.filename, a.dtype, m.mode, offset, order, a.shape, strides,
+             total_buffer_len, False))
+
+
+def reduce_array_memmap_backward(a):
+    """Reduce a np.array or a np.memmap from a child process."""
+    m = _get_backing_memmap(a)
+    if isinstance(m, np.memmap) and m.filename not in JOBLIB_MMAPS:
+        # if a is backed by a memmapped file, reconstruct a using the
+        # memmapped file.
+        return _reduce_memmap_backed(a, m)
+    else:
+        # a is either a regular (not memmap-backed) numpy array, or an array
+        # backed by a shared temporary file created by joblib. In the latter
+        # case, in order to limit the lifespan of these temporary files, we
+        # serialize the memmap as a regular numpy array, and decref the
+        # file backing the memmap (done implicitly in a previously registered
+        # finalizer, see ``unlink_on_gc_collect`` for more details)
+        return (
+            loads, (dumps(np.asarray(a), protocol=HIGHEST_PROTOCOL), )
+        )
+
+
+class ArrayMemmapForwardReducer(object):
+    """Reducer callable to dump large arrays to memmap files.
+
+    Parameters
+    ----------
+    max_nbytes: int
+        Threshold to trigger memmapping of large arrays to files created
+        in a folder.
+    temp_folder_resolver: callable
+        A callable in charge of resolving a temporary folder name where files
+        for backing memmapped arrays are created.
+    mmap_mode: 'r', 'r+' or 'c'
+        Mode for the created memmap datastructure. See the documentation of
+        numpy.memmap for more details. Note: 'w+' is coerced to 'r+'
+        automatically to avoid zeroing the data on unpickling.
+    verbose: int, optional, 0 by default
+        If verbose > 0, memmap creations are logged.
+        If verbose > 1, both memmap creations, reuse and array pickling are
+        logged.
+    prewarm: bool, optional, False by default.
+        Force a read on newly memmapped arrays to make sure that the OS
+        pre-caches them in memory. This can be useful to avoid concurrent
+        disk access when the same data array is passed to different worker
+        processes.
+    """
+
+    def __init__(self, max_nbytes, temp_folder_resolver, mmap_mode,
+                 unlink_on_gc_collect, verbose=0, prewarm=True):
+        self._max_nbytes = max_nbytes
+        self._temp_folder_resolver = temp_folder_resolver
+        self._mmap_mode = mmap_mode
+        self.verbose = int(verbose)
+        if prewarm == "auto":
+            self._prewarm = not self._temp_folder.startswith(
+                SYSTEM_SHARED_MEM_FS
+            )
+        else:
+            self._prewarm = prewarm
+        self._memmaped_arrays = _WeakArrayKeyMap()
+        self._temporary_memmaped_filenames = set()
+        self._unlink_on_gc_collect = unlink_on_gc_collect
+
+    @property
+    def _temp_folder(self):
+        return self._temp_folder_resolver()
+
+    def __reduce__(self):
+        # The ArrayMemmapForwardReducer is passed to the children processes:
+        # it needs to be pickled but the _WeakArrayKeyMap needs to be skipped
+        # as it's only guaranteed to be consistent with the parent process
+        # memory garbage collection.
+        # Although this reducer is pickled, it is not needed in its
+        # destination process (child processes), as we only use this reducer
+        # to send memmaps from the parent process to the children processes.
+        # For this reason, we can afford skipping the resolver (which would
+        # otherwise be unpicklable), and pass it as None instead.
+        args = (self._max_nbytes, None, self._mmap_mode,
+                self._unlink_on_gc_collect)
+        kwargs = {
+            'verbose': self.verbose,
+            'prewarm': self._prewarm,
+        }
+        return ArrayMemmapForwardReducer, args, kwargs
+
+    def __call__(self, a):
+        m = _get_backing_memmap(a)
+        if m is not None and isinstance(m, np.memmap):
+            # a is already backed by a memmap file, let's reuse it directly
+            return _reduce_memmap_backed(a, m)
+
+        if (not a.dtype.hasobject and self._max_nbytes is not None and
+                a.nbytes > self._max_nbytes):
+            # check that the folder exists (lazily create the pool temp folder
+            # if required)
+            try:
+                os.makedirs(self._temp_folder)
+                os.chmod(self._temp_folder, FOLDER_PERMISSIONS)
+            except OSError as e:
+                if e.errno != errno.EEXIST:
+                    raise e
+
+            try:
+                basename = self._memmaped_arrays.get(a)
+            except KeyError:
+                # Generate a new unique random filename. The process and
+                # thread ids are only useful for debugging purposes and to
+                # make it easier to clean up orphaned files in case of hard
+                # process kill (e.g. by "kill -9" or segfault).
+                basename = "{}-{}-{}.pkl".format(
+                    os.getpid(), id(threading.current_thread()), uuid4().hex)
+                self._memmaped_arrays.set(a, basename)
+            filename = os.path.join(self._temp_folder, basename)
+
+            # In case the same array with the same content is passed several
+            # times to the pool subprocess children, serialize it only once
+
+            is_new_memmap = filename not in self._temporary_memmaped_filenames
+
+            # add the memmap to the list of temporary memmaps created by
+            # joblib
+            self._temporary_memmaped_filenames.add(filename)
+
+            if self._unlink_on_gc_collect:
+                # Bump reference count of the memmap by 1 to account for
+                # shared usage of the memmap by a child process. The
+                # corresponding decref call will be executed upon calling
+                # resource_tracker.maybe_unlink, registered as a finalizer in
+                # the child.
+                # The incref/decref calls here are only possible when the
+                # child and the parent share the same resource_tracker. It is
+                # not the case for the multiprocessing backend, but it does
+                # not matter because unlinking a memmap from a child process
+                # is only useful to control the memory usage of long-lasting
+                # child processes, while the multiprocessing-based pools
+                # terminate their workers at the end of a map() call.
+                resource_tracker.register(filename, "file")
+
+            if is_new_memmap:
+                # Incref each temporary memmap created by joblib one extra
+                # time. This means that these memmaps will only be deleted
+                # once an extra maybe_unlink() is called, which is done once
+                # all the jobs have completed (or been canceled) in the
+                # Parallel._terminate_backend() method.
+                resource_tracker.register(filename, "file")
+
+            if not os.path.exists(filename):
+                util.debug(
+                    "[ARRAY DUMP] Pickling new array (shape={}, dtype={}) "
+                    "creating a new memmap at {}".format(
+                        a.shape, a.dtype, filename))
+                for dumped_filename in dump(a, filename):
+                    os.chmod(dumped_filename, FILE_PERMISSIONS)
+
+                if self._prewarm:
+                    # Warm up the data by accessing it. This operation ensures
+                    # that the disk accesses required to create the memmapping
+                    # file are performed in the reducing process and avoids
+                    # concurrent memmap creation in multiple children
+                    # processes.
+                    load(filename, mmap_mode=self._mmap_mode).max()
+
+            else:
+                util.debug(
+                    "[ARRAY DUMP] Pickling known array (shape={}, dtype={}) "
+                    "reusing memmap file: {}".format(
+                        a.shape, a.dtype, os.path.basename(filename)))
+
+            # The worker process will use joblib.load to memmap the data
+            return (
+                (load_temporary_memmap, (filename, self._mmap_mode,
+                                         self._unlink_on_gc_collect))
+            )
+        else:
+            # do not convert a into memmap, let the pickler do its usual copy
+            # with the default system pickler
+            util.debug(
+                '[ARRAY DUMP] Pickling array (NO MEMMAPPING) (shape={}, '
+                ' dtype={}).'.format(a.shape, a.dtype))
+            return (loads, (dumps(a, protocol=HIGHEST_PROTOCOL),))
+
+
+def get_memmapping_reducers(
+        forward_reducers=None, backward_reducers=None,
+        temp_folder_resolver=None, max_nbytes=1e6, mmap_mode='r', verbose=0,
+        prewarm=False, unlink_on_gc_collect=True, **kwargs):
+    """Construct a pair of memmapping reducers linked to a tmpdir.
+
+    This function manages the creation and the cleanup of the temporary
+    folders underlying the memory maps and should be used to get the reducers
+    necessary to construct a joblib pool or executor.
+    """
+    if forward_reducers is None:
+        forward_reducers = dict()
+    if backward_reducers is None:
+        backward_reducers = dict()
+
+    if np is not None:
+        # Register smart numpy.ndarray reducers that detect memmap backed
+        # arrays and that are also able to dump large in-memory arrays over
+        # the max_nbytes threshold to memmap
+        forward_reduce_ndarray = ArrayMemmapForwardReducer(
+            max_nbytes, temp_folder_resolver, mmap_mode, unlink_on_gc_collect,
+            verbose, prewarm=prewarm)
+        forward_reducers[np.ndarray] = forward_reduce_ndarray
+        forward_reducers[np.memmap] = forward_reduce_ndarray
+
+        # Communication from child process to the parent process always
+        # pickles in-memory numpy.ndarray without dumping them as memmap
+        # to avoid confusing the caller and making it tricky to collect the
+        # temporary folder
+        backward_reducers[np.ndarray] = reduce_array_memmap_backward
+        backward_reducers[np.memmap] = reduce_array_memmap_backward
+
+    return forward_reducers, backward_reducers
+
+
+class TemporaryResourcesManager(object):
+    """Stateful object able to manage temporary folders and pickles.
+
+    It exposes:
+    - a per-context folder name resolving API that memmap-based reducers will
+      rely on to know where to pickle the temporary memmaps
+    - a temporary file/folder management API that internally uses the
+      resource_tracker.
+    """
+
+    def __init__(self, temp_folder_root=None, context_id=None):
+        self._current_temp_folder = None
+        self._temp_folder_root = temp_folder_root
+        self._use_shared_mem = None
+        self._cached_temp_folders = dict()
+        self._id = uuid4().hex
+        self._finalizers = {}
+        if context_id is None:
+            # It would be safer to not assign a default context id (less
+            # silent bugs), but doing this while maintaining backward
+            # compatibility with the previous, context-unaware version of
+            # get_memmapping_executor exposes too many low-level details.
+            context_id = uuid4().hex
+        self.set_current_context(context_id)
+
+    def set_current_context(self, context_id):
+        self._current_context_id = context_id
+        self.register_new_context(context_id)
+
+    def register_new_context(self, context_id):
+        # Prepare a sub-folder name specific to a context (usually a unique id
+        # generated by each instance of the Parallel class). Do not create it
+        # in advance to spare FS write access if no array is to be dumped.
+        if context_id in self._cached_temp_folders:
+            return
+        else:
+            # During its lifecycle, one Parallel object can have several
+            # executors associated to it (for instance, if a loky worker
+            # raises an exception, joblib shuts down the executor and
+            # instantly recreates a new one before raising the error - see
+            # ``ensure_ready``). Because we don't want two executors tied to
+            # the same Parallel object (and thus the same context id) to
+            # register/use/delete the same folder, we also add an id specific
+            # to the current Manager (and thus specific to its associated
+            # executor) to the folder name.
+            new_folder_name = (
+                "joblib_memmapping_folder_{}_{}_{}".format(
+                    os.getpid(), self._id, context_id)
+            )
+            new_folder_path, _ = _get_temp_dir(
+                new_folder_name, self._temp_folder_root
+            )
+            self.register_folder_finalizer(new_folder_path, context_id)
+            self._cached_temp_folders[context_id] = new_folder_path
+
+    def resolve_temp_folder_name(self):
+        """Return a folder name specific to the currently activated context"""
+        return self._cached_temp_folders[self._current_context_id]
+
+    # resource management API
+
+    def register_folder_finalizer(self, pool_subfolder, context_id):
+        # Register the garbage collector at program exit in case the caller
+        # forgets to call terminate explicitly: note we do not pass any
+        # reference to ensure that this callback won't prevent garbage
+        # collection of the parallel instance and related file handler
+        # resources such as POSIX semaphores and pipes
+        pool_module_name = whichmodule(delete_folder, 'delete_folder')
+        resource_tracker.register(pool_subfolder, "folder")
+
+        def _cleanup():
+            # In some cases the Python runtime seems to set delete_folder to
+            # None just before exiting when accessing the delete_folder
+            # function from the closure namespace. So instead we reimport
+            # the delete_folder function explicitly.
+            # https://github.com/joblib/joblib/issues/328
+            # We cannot just use from 'joblib.pool import delete_folder'
+            # because joblib should only use relative imports to allow
+            # easy vendoring.
+            delete_folder = __import__(
+                pool_module_name, fromlist=['delete_folder']
+            ).delete_folder
+            try:
+                delete_folder(pool_subfolder, allow_non_empty=True)
+                resource_tracker.unregister(pool_subfolder, "folder")
+            except OSError:
+                warnings.warn("Failed to delete temporary folder: {}"
+                              .format(pool_subfolder))
+
+        self._finalizers[context_id] = atexit.register(_cleanup)
+
+    def _clean_temporary_resources(self, context_id=None, force=False,
+                                   allow_non_empty=False):
+        """Clean temporary resources created by a process-based pool"""
+        if context_id is None:
+            # Iterates over a copy of the cache keys to avoid errors due to
+            # iterating over a dictionary that changes size.
+            for context_id in list(self._cached_temp_folders):
+                self._clean_temporary_resources(
+                    context_id, force=force, allow_non_empty=allow_non_empty
+                )
+        else:
+            temp_folder = self._cached_temp_folders.get(context_id)
+            if temp_folder and os.path.exists(temp_folder):
+                for filename in os.listdir(temp_folder):
+                    if force:
+                        # Some workers have failed and the ref count might
+                        # be off. The workers should have shut down by this
+                        # time so forcefully clean up the files.
+                        resource_tracker.unregister(
+                            os.path.join(temp_folder, filename), "file"
+                        )
+                    else:
+                        resource_tracker.maybe_unlink(
+                            os.path.join(temp_folder, filename), "file"
+                        )
+
+                # When forcing clean-up, try to delete the folder even if some
+                # files are still in it. Otherwise, try to delete the folder
+                allow_non_empty |= force
+
+                # Clean up the folder if possible, either if it is empty or
+                # if none of the files in it are in use and allow_non_empty.
+                try:
+                    delete_folder(
+                        temp_folder, allow_non_empty=allow_non_empty
+                    )
+                    # Forget the folder once it has been deleted
+                    self._cached_temp_folders.pop(context_id, None)
+                    resource_tracker.unregister(temp_folder, "folder")
+
+                    # Also cancel the finalizers that get triggered at gc.
+                    finalizer = self._finalizers.pop(context_id, None)
+                    if finalizer is not None:
+                        atexit.unregister(finalizer)
+
+                except OSError:
+                    # Temporary folder cannot be deleted right now.
+                    # This folder will be cleaned up by an atexit
+                    # finalizer registered by the memmapping_reducer.
+                    pass
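
The practical effect of the reducers above: with a process-based backend, input arrays larger than max_nbytes are dumped once to a shared temporary folder (preferring /dev/shm when it is large enough) and the workers receive np.memmap views instead of copies. A sketch of how this can be observed, using has_shareable_memory from this module; the array size, worker count and variable names are illustrative assumptions:

import numpy as np
from joblib import Parallel, delayed
from joblib._memmapping_reducer import has_shareable_memory

# ~16 MB of data, well above the 1 MB memmapping threshold set below.
data = np.random.rand(int(2e6))

if __name__ == "__main__":
    # Each worker should receive a memmap backed by the same temporary
    # file rather than a pickled copy of the array.
    flags = Parallel(n_jobs=2, max_nbytes="1M")(
        delayed(has_shareable_memory)(data) for _ in range(4)
    )
    print(flags)  # expected: [True, True, True, True]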
llmeval-env/lib/python3.10/site-packages/joblib/_multiprocessing_helpers.py ADDED
@@ -0,0 +1,53 @@
+"""Helper module to factorize the conditional multiprocessing import logic
+
+We use a distinct module to simplify import statements and avoid introducing
+circular dependencies (for instance for the assert_spawning name).
+"""
+import os
+import warnings
+
+
+# Obtain the possible configuration from the environment, assuming 1 (on)
+# by default; a value of 0 is turned into None (off). This fails explicitly
+# if some non-0/1 value is set.
+mp = int(os.environ.get('JOBLIB_MULTIPROCESSING', 1)) or None
+if mp:
+    try:
+        import multiprocessing as mp
+        import _multiprocessing  # noqa
+    except ImportError:
+        mp = None
+
+# 2nd stage: validate that locking is available on the system and
+# issue a warning if not
+if mp is not None:
+    try:
+        # try to create a named semaphore using SemLock to make sure they are
+        # available on this platform. We use the low level object
+        # _multiprocessing.SemLock to avoid spawning a resource tracker on
+        # Unix systems or changing the default backend.
+        import tempfile
+        from _multiprocessing import SemLock
+
+        _rand = tempfile._RandomNameSequence()
+        for i in range(100):
+            try:
+                name = '/joblib-{}-{}'.format(
+                    os.getpid(), next(_rand))
+                _sem = SemLock(0, 0, 1, name=name, unlink=True)
+                del _sem  # cleanup
+                break
+            except FileExistsError as e:  # pragma: no cover
+                if i >= 99:
+                    raise FileExistsError(
+                        'cannot find name for semaphore') from e
+    except (FileExistsError, AttributeError, ImportError, OSError) as e:
+        mp = None
+        warnings.warn('%s. joblib will operate in serial mode' % (e,))
+
+
+# 3rd stage: backward compat for the assert_spawning helper
+if mp is not None:
+    from multiprocessing.context import assert_spawning
+else:
+    assert_spawning = None
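
Downstream joblib modules import the `mp` sentinel defined above instead of importing multiprocessing directly, and fall back to serial behavior when it is None. A minimal sketch of that guard pattern; the variable name is an illustrative assumption:

from joblib._multiprocessing_helpers import mp

if mp is not None:
    # multiprocessing imported successfully and named semaphores work here,
    # so mp is the multiprocessing module itself.
    n_workers = mp.cpu_count()
else:
    # e.g. JOBLIB_MULTIPROCESSING=0, or a platform without SemLock support
    n_workers = 1
print(n_workers)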
llmeval-env/lib/python3.10/site-packages/joblib/_parallel_backends.py ADDED
@@ -0,0 +1,649 @@
+"""
+Backends for embarrassingly parallel code.
+"""
+
+import gc
+import os
+import warnings
+import threading
+import contextlib
+from abc import ABCMeta, abstractmethod
+
+from ._utils import (
+    _TracebackCapturingWrapper,
+    _retrieve_traceback_capturing_wrapped_call
+)
+
+from ._multiprocessing_helpers import mp
+
+if mp is not None:
+    from .pool import MemmappingPool
+    from multiprocessing.pool import ThreadPool
+    from .executor import get_memmapping_executor
+
+    # Import loky only if multiprocessing is present
+    from .externals.loky import process_executor, cpu_count
+    from .externals.loky.process_executor import ShutdownExecutorError
+
+
+class ParallelBackendBase(metaclass=ABCMeta):
+    """Helper abc which defines all methods a ParallelBackend must implement"""
+
+    supports_inner_max_num_threads = False
+    supports_retrieve_callback = False
+    default_n_jobs = 1
+
+    @property
+    def supports_return_generator(self):
+        return self.supports_retrieve_callback
+
+    @property
+    def supports_timeout(self):
+        return self.supports_retrieve_callback
+
+    nesting_level = None
+
+    def __init__(self, nesting_level=None, inner_max_num_threads=None,
+                 **kwargs):
+        super().__init__(**kwargs)
+        self.nesting_level = nesting_level
+        self.inner_max_num_threads = inner_max_num_threads
+
+    MAX_NUM_THREADS_VARS = [
+        'OMP_NUM_THREADS', 'OPENBLAS_NUM_THREADS', 'MKL_NUM_THREADS',
+        'BLIS_NUM_THREADS', 'VECLIB_MAXIMUM_THREADS', 'NUMBA_NUM_THREADS',
+        'NUMEXPR_NUM_THREADS',
+    ]
+
+    TBB_ENABLE_IPC_VAR = "ENABLE_IPC"
+
+    @abstractmethod
+    def effective_n_jobs(self, n_jobs):
+        """Determine the number of jobs that can actually run in parallel
+
+        n_jobs is the number of workers requested by the callers. Passing
+        n_jobs=-1 means requesting all available workers, for instance
+        matching the number of CPU cores on the worker host(s).
+
+        This method should return a guesstimate of the number of workers that
+        can actually perform work concurrently. The primary use case is to
+        make it possible for the caller to know in how many chunks to slice
+        the work.
+
+        In general working on larger data chunks is more efficient (less
+        scheduling overhead and better use of CPU cache prefetching
+        heuristics) as long as all the workers have enough work to do.
+        """
+
+    @abstractmethod
+    def apply_async(self, func, callback=None):
+        """Schedule a func to be run"""
+
+    def retrieve_result_callback(self, out):
+        """Called within the callback function passed to apply_async.
+
+        The argument of this function is the argument given to the callback in
+        the considered backend. It is supposed to return the outcome of a task
+        if it succeeded or raise the exception if it failed.
+        """
+
+    def configure(self, n_jobs=1, parallel=None, prefer=None, require=None,
+                  **backend_args):
+        """Reconfigure the backend and return the number of workers.
+
+        This makes it possible to reuse an existing backend instance for
+        successive independent calls to Parallel with different parameters.
+        """
+        self.parallel = parallel
+        return self.effective_n_jobs(n_jobs)
+
+    def start_call(self):
+        """Call-back method called at the beginning of a Parallel call"""
+
+    def stop_call(self):
+        """Call-back method called at the end of a Parallel call"""
+
+    def terminate(self):
+        """Shutdown the workers and free the shared memory."""
+
+    def compute_batch_size(self):
+        """Determine the optimal batch size"""
+        return 1
+
+    def batch_completed(self, batch_size, duration):
+        """Callback indicating how long it took to run a batch"""
+
+    def get_exceptions(self):
+        """List of exception types to be captured."""
+        return []
+
+    def abort_everything(self, ensure_ready=True):
+        """Abort any running tasks
+
+        This is called when an exception has been raised when executing a task
+        and all the remaining tasks will be ignored and can therefore be
+        aborted to spare computation resources.
+
+        If ensure_ready is True, the backend should be left in an operating
+        state as future tasks might be re-submitted via that same backend
+        instance.
+
+        If ensure_ready is False, the implementer of this method can decide
+        to leave the backend in a closed / terminated state as no new tasks
+        are expected to be submitted to this backend.
+
+        Setting ensure_ready to False is an optimization that can be leveraged
+        when aborting tasks via killing processes from a local process pool
+        managed by the backend itself: if we expect no new tasks, there is no
+        point in re-creating new workers.
+        """
+        # Does nothing by default: to be overridden in subclasses when
+        # canceling tasks is possible.
+        pass
+
+    def get_nested_backend(self):
+        """Backend instance to be used by nested Parallel calls.
+
+        By default a thread-based backend is used for the first level of
+        nesting. Beyond that, switch to the sequential backend to avoid
+        spawning too many threads on the host.
+        """
+        nesting_level = getattr(self, 'nesting_level', 0) + 1
+        if nesting_level > 1:
+            return SequentialBackend(nesting_level=nesting_level), None
+        else:
+            return ThreadingBackend(nesting_level=nesting_level), None
+
+    @contextlib.contextmanager
+    def retrieval_context(self):
+        """Context manager to manage an execution context.
+
+        Calls to Parallel.retrieve will be made inside this context.
+
+        By default, this does nothing. It may be useful for subclasses to
+        handle nested parallelism. In particular, it may be required to avoid
+        deadlocks if a backend manages a fixed number of workers, when those
+        workers may be asked to do nested Parallel calls. Without
+        'retrieval_context' this could lead to deadlock, as all the workers
+        managed by the backend may be "busy" waiting for the nested parallel
+        calls to finish, but the backend has no free workers to execute those
+        tasks.
+        """
+        yield
+
+    def _prepare_worker_env(self, n_jobs):
+        """Return environment variables limiting threadpools in external libs.
+
+        This function returns a dict containing environment variables to pass
+        when creating a pool of processes. These environment variables limit
+        the number of threads to `n_threads` for the OpenMP, MKL, Accelerate
+        and OpenBLAS libraries in the child processes.
+        """
+        explicit_n_threads = self.inner_max_num_threads
+        default_n_threads = max(cpu_count() // n_jobs, 1)
+
+        # Set the inner environment variables to self.inner_max_num_threads
+        # if it is given. Else, default to cpu_count // n_jobs unless the
+        # variable is already present in the parent process environment.
+        env = {}
+        for var in self.MAX_NUM_THREADS_VARS:
+            if explicit_n_threads is None:
+                var_value = os.environ.get(var, default_n_threads)
+            else:
+                var_value = explicit_n_threads
+
+            env[var] = str(var_value)
+
+        if self.TBB_ENABLE_IPC_VAR not in os.environ:
+            # To avoid over-subscription when using TBB, let the TBB
+            # schedulers use Inter Process Communication to coordinate:
+            env[self.TBB_ENABLE_IPC_VAR] = "1"
+        return env
+
+    @staticmethod
+    def in_main_thread():
+        return isinstance(threading.current_thread(), threading._MainThread)
+
+
+class SequentialBackend(ParallelBackendBase):
+    """A ParallelBackend which will execute all batches sequentially.
+
+    Does not use/create any threading objects, and hence has minimal
+    overhead. Used when n_jobs == 1.
+    """
+
+    uses_threads = True
+    supports_timeout = False
+    supports_retrieve_callback = False
+    supports_sharedmem = True
+
+    def effective_n_jobs(self, n_jobs):
+        """Determine the number of jobs which are going to run in parallel"""
+        if n_jobs == 0:
+            raise ValueError('n_jobs == 0 in Parallel has no meaning')
+        return 1
+
+    def apply_async(self, func, callback=None):
+        """Schedule a func to be run"""
+        raise RuntimeError("Should never be called for SequentialBackend.")
+
+    def retrieve_result_callback(self, out):
+        raise RuntimeError("Should never be called for SequentialBackend.")
+
+    def get_nested_backend(self):
+        # import is not top level to avoid cyclic import errors.
+        from .parallel import get_active_backend
+
+        # SequentialBackend should not change the nesting level, the
+        # default backend or the number of jobs. Just return the current one.
+        return get_active_backend()
+
+
+class PoolManagerMixin(object):
+    """A helper class for managing a pool of workers."""
+
+    _pool = None
+
+    def effective_n_jobs(self, n_jobs):
+        """Determine the number of jobs which are going to run in parallel"""
+        if n_jobs == 0:
+            raise ValueError('n_jobs == 0 in Parallel has no meaning')
+        elif mp is None or n_jobs is None:
+            # multiprocessing is not available or disabled, fallback
+            # to sequential mode
+            return 1
+        elif n_jobs < 0:
+            n_jobs = max(cpu_count() + 1 + n_jobs, 1)
+        return n_jobs
+
+    def terminate(self):
+        """Shutdown the process or thread pool"""
+        if self._pool is not None:
+            self._pool.close()
+            self._pool.terminate()  # terminate does a join()
+            self._pool = None
+
+    def _get_pool(self):
+        """Used by apply_async to make it possible to implement lazy init"""
+        return self._pool
+
+    def apply_async(self, func, callback=None):
+        """Schedule a func to be run"""
+        # Here, we need a wrapper to avoid crashes on KeyboardInterruptErrors.
+        # We also call the callback on error, to make sure the pool does not
+        # wait on crashed jobs.
+        return self._get_pool().apply_async(
+            _TracebackCapturingWrapper(func), (),
+            callback=callback, error_callback=callback
+        )
+
+    def retrieve_result_callback(self, out):
+        """Mimic concurrent.futures results, raising an error if needed."""
+        return _retrieve_traceback_capturing_wrapped_call(out)
+
+    def abort_everything(self, ensure_ready=True):
+        """Shutdown the pool and restart a new one with the same parameters"""
+        self.terminate()
+        if ensure_ready:
+            self.configure(n_jobs=self.parallel.n_jobs,
+                           parallel=self.parallel,
+                           **self.parallel._backend_args)
+
+
+class AutoBatchingMixin(object):
+    """A helper class for automagically batching jobs."""
+
+    # In seconds, should be big enough to hide multiprocessing dispatching
+    # overhead.
+    # This setting was found by running benchmarks/bench_auto_batching.py
+    # with various parameters on various platforms.
+    MIN_IDEAL_BATCH_DURATION = .2
+
+    # Should not be too high to avoid stragglers: long jobs running alone
+    # on a single worker while other workers have no work to process any
+    # more.
+    MAX_IDEAL_BATCH_DURATION = 2
+
+    # Batching counters default values
+    _DEFAULT_EFFECTIVE_BATCH_SIZE = 1
+    _DEFAULT_SMOOTHED_BATCH_DURATION = 0.0
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self._effective_batch_size = self._DEFAULT_EFFECTIVE_BATCH_SIZE
+        self._smoothed_batch_duration = self._DEFAULT_SMOOTHED_BATCH_DURATION
+
+    def compute_batch_size(self):
+        """Determine the optimal batch size"""
+        old_batch_size = self._effective_batch_size
+        batch_duration = self._smoothed_batch_duration
+        if (batch_duration > 0 and
+                batch_duration < self.MIN_IDEAL_BATCH_DURATION):
+            # The current batch size is too small: the duration of the
+            # processing of a batch of tasks is not large enough to hide
+            # the scheduling overhead.
+            ideal_batch_size = int(old_batch_size *
+                                   self.MIN_IDEAL_BATCH_DURATION /
+                                   batch_duration)
+            # Multiply by two to limit oscillations between min and max.
+            ideal_batch_size *= 2
+
+            # Don't increase the batch size too fast, to avoid huge batch
+            # sizes potentially leading to starving workers
+            batch_size = min(2 * old_batch_size, ideal_batch_size)
+
+            batch_size = max(batch_size, 1)
+
+            self._effective_batch_size = batch_size
+            if self.parallel.verbose >= 10:
+                self.parallel._print(
+                    f"Batch computation too fast ({batch_duration}s.) "
+                    f"Setting batch_size={batch_size}."
+                )
+        elif (batch_duration > self.MAX_IDEAL_BATCH_DURATION and
+                old_batch_size >= 2):
+            # The current batch size is too big. If we schedule overly long
+            # running batches some CPUs might wait with nothing left to do
+            # while a couple of CPUs are left processing a few long running
+            # batches. Better reduce the batch size a bit to limit the
+            # likelihood of scheduling such stragglers.
+
+            # decrease the batch size quickly to limit potential starving
+            ideal_batch_size = int(
+                old_batch_size * self.MIN_IDEAL_BATCH_DURATION /
+                batch_duration
+            )
+            # Multiply by two to limit oscillations between min and max.
+            batch_size = max(2 * ideal_batch_size, 1)
+            self._effective_batch_size = batch_size
+            if self.parallel.verbose >= 10:
+                self.parallel._print(
+                    f"Batch computation too slow ({batch_duration}s.) "
+                    f"Setting batch_size={batch_size}."
+                )
+        else:
+            # No batch size adjustment
+            batch_size = old_batch_size
+
+        if batch_size != old_batch_size:
+            # Reset the estimation of the smoothed mean batch duration: this
+            # estimate is updated in the multiprocessing apply_async
+            # CallBack as long as the batch_size is constant. Therefore
+            # we need to reset the estimate whenever we re-tune the batch
+            # size.
+            self._smoothed_batch_duration = \
+                self._DEFAULT_SMOOTHED_BATCH_DURATION
+
+        return batch_size
+
+    def batch_completed(self, batch_size, duration):
+        """Callback indicating how long it took to run a batch"""
+        if batch_size == self._effective_batch_size:
+            # Update the smoothed streaming estimate of the duration of a
+            # batch from dispatch to completion
+            old_duration = self._smoothed_batch_duration
+            if old_duration == self._DEFAULT_SMOOTHED_BATCH_DURATION:
+                # First record of duration for this batch size after the last
+                # reset.
+                new_duration = duration
+            else:
+                # Update the exponentially weighted average of the duration
+                # of batches for the current effective size.
+                new_duration = 0.8 * old_duration + 0.2 * duration
+            self._smoothed_batch_duration = new_duration
+
+    def reset_batch_stats(self):
+        """Reset batch statistics to default values.
+
+        This avoids interferences with future jobs.
+        """
+        self._effective_batch_size = self._DEFAULT_EFFECTIVE_BATCH_SIZE
+        self._smoothed_batch_duration = self._DEFAULT_SMOOTHED_BATCH_DURATION
+
+
+class ThreadingBackend(PoolManagerMixin, ParallelBackendBase):
+    """A ParallelBackend which will use a thread pool to execute batches in.
+
+    This is a low-overhead backend but it suffers from the Python Global
+    Interpreter Lock if the called function relies a lot on Python objects.
+    It is mostly useful when the execution bottleneck is a compiled extension
+    that explicitly releases the GIL (for instance a Cython loop wrapped in a
+    "with nogil" block or an expensive call to a library such as NumPy).
+
+    The actual thread pool is lazily initialized: the actual thread pool
+    construction is delayed to the first call to apply_async.
+
+    ThreadingBackend is used as the default backend for nested calls.
+    """
+
+    supports_retrieve_callback = True
+    uses_threads = True
+    supports_sharedmem = True
+
+    def configure(self, n_jobs=1, parallel=None, **backend_args):
+        """Build a process or thread pool and return the number of workers"""
+        n_jobs = self.effective_n_jobs(n_jobs)
+        if n_jobs == 1:
+            # Avoid unnecessary overhead and use the sequential backend
+            # instead.
+            raise FallbackToBackend(
+                SequentialBackend(nesting_level=self.nesting_level))
+        self.parallel = parallel
+        self._n_jobs = n_jobs
+        return n_jobs
+
+    def _get_pool(self):
+        """Lazily initialize the thread pool
+
+        The actual pool of worker threads is only initialized at the first
+        call to apply_async.
+        """
+        if self._pool is None:
+            self._pool = ThreadPool(self._n_jobs)
+        return self._pool
+
+
+class MultiprocessingBackend(PoolManagerMixin, AutoBatchingMixin,
+                             ParallelBackendBase):
+    """A ParallelBackend which will use a multiprocessing.Pool.
+
+    Will introduce some communication and memory overhead when exchanging
+    input and output data with the worker Python processes.
+    However, it does not suffer from the Python Global Interpreter Lock.
+    """
+
+    supports_retrieve_callback = True
+    supports_return_generator = False
+
+    def effective_n_jobs(self, n_jobs):
+        """Determine the number of jobs which are going to run in parallel.
+
+        This also checks if we are attempting to create a nested parallel
+        loop.
+        """
+        if mp is None:
+            return 1
+
+        if mp.current_process().daemon:
+            # Daemonic processes cannot have children
+            if n_jobs != 1:
+                if inside_dask_worker():
+                    msg = (
+                        "Inside a Dask worker with daemon=True, "
+                        "setting n_jobs=1.\nPossible work-arounds:\n"
+                        "- dask.config.set("
+                        "{'distributed.worker.daemon': False})\n"
+                        "- set the environment variable "
+                        "DASK_DISTRIBUTED__WORKER__DAEMON=False\n"
+                        "before creating your Dask cluster."
+                    )
+                else:
+                    msg = (
+                        'Multiprocessing-backed parallel loops '
+                        'cannot be nested, setting n_jobs=1'
+                    )
+                warnings.warn(msg, stacklevel=3)
+            return 1
+
+        if process_executor._CURRENT_DEPTH > 0:
+            # Mixing loky and multiprocessing in a nested loop is not
+            # supported
+            if n_jobs != 1:
+                warnings.warn(
+                    'Multiprocessing-backed parallel loops cannot be nested,'
+                    ' below loky, setting n_jobs=1',
+                    stacklevel=3)
+            return 1
+
+        elif not (self.in_main_thread() or self.nesting_level == 0):
+            # Prevent posix fork inside non-main posix threads
+            if n_jobs != 1:
+                warnings.warn(
+                    'Multiprocessing-backed parallel loops cannot be nested'
+                    ' below threads, setting n_jobs=1',
+                    stacklevel=3)
+            return 1
+
+        return super(MultiprocessingBackend, self).effective_n_jobs(n_jobs)
+
+    def configure(self, n_jobs=1, parallel=None, prefer=None, require=None,
+                  **memmappingpool_args):
+        """Build a process or thread pool and return the number of workers"""
+        n_jobs = self.effective_n_jobs(n_jobs)
+        if n_jobs == 1:
+            raise FallbackToBackend(
+                SequentialBackend(nesting_level=self.nesting_level))
+
+        # Make sure to free as much memory as possible before forking
+        gc.collect()
+        self._pool = MemmappingPool(n_jobs, **memmappingpool_args)
+        self.parallel = parallel
+        return n_jobs
+
+    def terminate(self):
+        """Shutdown the process or thread pool"""
+        super(MultiprocessingBackend, self).terminate()
+        self.reset_batch_stats()
+
+
+class LokyBackend(AutoBatchingMixin, ParallelBackendBase):
+    """Managing pool of workers with loky instead of multiprocessing."""
+
+    supports_retrieve_callback = True
+    supports_inner_max_num_threads = True
+
+    def configure(self, n_jobs=1, parallel=None, prefer=None, require=None,
+                  idle_worker_timeout=300, **memmappingexecutor_args):
+        """Build a process executor and return the number of workers"""
+        n_jobs = self.effective_n_jobs(n_jobs)
+        if n_jobs == 1:
+            raise FallbackToBackend(
+                SequentialBackend(nesting_level=self.nesting_level))
+
+        self._workers = get_memmapping_executor(
+            n_jobs, timeout=idle_worker_timeout,
+            env=self._prepare_worker_env(n_jobs=n_jobs),
+            context_id=parallel._id, **memmappingexecutor_args)
+        self.parallel = parallel
+        return n_jobs
+
+    def effective_n_jobs(self, n_jobs):
+        """Determine the number of jobs which are going to run in parallel"""
547
+ if n_jobs == 0:
548
+ raise ValueError('n_jobs == 0 in Parallel has no meaning')
549
+ elif mp is None or n_jobs is None:
550
+ # multiprocessing is not available or disabled, fallback
551
+ # to sequential mode
552
+ return 1
553
+ elif mp.current_process().daemon:
554
+ # Daemonic processes cannot have children
555
+ if n_jobs != 1:
556
+ if inside_dask_worker():
557
+ msg = (
558
+ "Inside a Dask worker with daemon=True, "
559
+ "setting n_jobs=1.\nPossible work-arounds:\n"
560
+ "- dask.config.set("
561
+ "{'distributed.worker.daemon': False})\n"
562
+ "- set the environment variable "
563
+ "DASK_DISTRIBUTED__WORKER__DAEMON=False\n"
564
+ "before creating your Dask cluster."
565
+ )
566
+ else:
567
+ msg = (
568
+ 'Loky-backed parallel loops cannot be called in a'
569
+ ' multiprocessing, setting n_jobs=1'
570
+ )
571
+ warnings.warn(msg, stacklevel=3)
572
+
573
+ return 1
574
+ elif not (self.in_main_thread() or self.nesting_level == 0):
575
+ # Prevent posix fork inside in non-main posix threads
576
+ if n_jobs != 1:
577
+ warnings.warn(
578
+ 'Loky-backed parallel loops cannot be nested below '
579
+ 'threads, setting n_jobs=1',
580
+ stacklevel=3)
581
+ return 1
582
+ elif n_jobs < 0:
583
+ n_jobs = max(cpu_count() + 1 + n_jobs, 1)
584
+ return n_jobs
585
+
586
+ def apply_async(self, func, callback=None):
587
+ """Schedule a func to be run"""
588
+ future = self._workers.submit(func)
589
+ if callback is not None:
590
+ future.add_done_callback(callback)
591
+ return future
592
+
593
+ def retrieve_result_callback(self, out):
594
+ try:
595
+ return out.result()
596
+ except ShutdownExecutorError:
597
+ raise RuntimeError(
598
+ "The executor underlying Parallel has been shutdown. "
599
+ "This is likely due to the garbage collection of a previous "
600
+ "generator from a call to Parallel with return_as='generator'."
601
+ " Make sure the generator is not garbage collected when "
602
+ "submitting a new job or that it is first properly exhausted."
603
+ )
604
+
605
+ def terminate(self):
606
+ if self._workers is not None:
607
+ # Don't terminate the workers as we want to reuse them in later
608
+ # calls, but cleanup the temporary resources that the Parallel call
609
+ # created. This 'hack' requires a private, low-level operation.
610
+ self._workers._temp_folder_manager._clean_temporary_resources(
611
+ context_id=self.parallel._id, force=False
612
+ )
613
+ self._workers = None
614
+
615
+ self.reset_batch_stats()
616
+
617
+ def abort_everything(self, ensure_ready=True):
618
+ """Shutdown the workers and restart a new one with the same parameters
619
+ """
620
+ self._workers.terminate(kill_workers=True)
621
+ self._workers = None
622
+
623
+ if ensure_ready:
624
+ self.configure(n_jobs=self.parallel.n_jobs, parallel=self.parallel)
625
+
626
+
627
+ class FallbackToBackend(Exception):
628
+ """Raised when configuration should fallback to another backend"""
629
+
630
+ def __init__(self, backend):
631
+ self.backend = backend
632
+
633
+
634
+ def inside_dask_worker():
635
+ """Check whether the current function is executed inside a Dask worker.
636
+ """
637
+ # This function can not be in joblib._dask because there would be a
638
+ # circular import:
639
+ # _dask imports _parallel_backend that imports _dask ...
640
+ try:
641
+ from distributed import get_worker
642
+ except ImportError:
643
+ return False
644
+
645
+ try:
646
+ get_worker()
647
+ return True
648
+ except ValueError:
649
+ return False
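Editor's note: the backends above are normally selected through joblib's public API rather than instantiated directly. A minimal sketch using only the documented `joblib.Parallel` interface (the `slow_sqrt` workload is a made-up example):

    # Sketch: how these backends are reached via the public API.
    from math import sqrt
    from joblib import Parallel, delayed

    def slow_sqrt(x):  # hypothetical toy workload
        return sqrt(x)

    # prefer="threads" routes to ThreadingBackend; the default
    # process-based execution uses LokyBackend. n_jobs=-1 maps to one
    # worker per CPU via the effective_n_jobs() logic shown above
    # (max(cpu_count() + 1 + n_jobs, 1)).
    results = Parallel(n_jobs=2, prefer="threads")(
        delayed(slow_sqrt)(i) for i in range(8)
    )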
llmeval-env/lib/python3.10/site-packages/joblib/_utils.py ADDED
@@ -0,0 +1,83 @@
+# Adapted from https://stackoverflow.com/a/9558001/2536294
+
+import ast
+from dataclasses import dataclass
+import operator as op
+
+
+from ._multiprocessing_helpers import mp
+
+if mp is not None:
+    from .externals.loky.process_executor import _ExceptionWithTraceback
+
+
+# supported operators
+operators = {
+    ast.Add: op.add,
+    ast.Sub: op.sub,
+    ast.Mult: op.mul,
+    ast.Div: op.truediv,
+    ast.FloorDiv: op.floordiv,
+    ast.Mod: op.mod,
+    ast.Pow: op.pow,
+    ast.USub: op.neg,
+}
+
+
+def eval_expr(expr):
+    """
+    >>> eval_expr('2*6')
+    12
+    >>> eval_expr('2**6')
+    64
+    >>> eval_expr('1 + 2*3**(4) / (6 + -7)')
+    -161.0
+    """
+    try:
+        return eval_(ast.parse(expr, mode="eval").body)
+    except (TypeError, SyntaxError, KeyError) as e:
+        raise ValueError(
+            f"{expr!r} is not a valid or supported arithmetic expression."
+        ) from e
+
+
+def eval_(node):
+    if isinstance(node, ast.Constant):  # <constant>
+        return node.value
+    elif isinstance(node, ast.BinOp):  # <left> <operator> <right>
+        return operators[type(node.op)](eval_(node.left), eval_(node.right))
+    elif isinstance(node, ast.UnaryOp):  # <operator> <operand> e.g., -1
+        return operators[type(node.op)](eval_(node.operand))
+    else:
+        raise TypeError(node)
+
+
+@dataclass(frozen=True)
+class _Sentinel:
+    """A sentinel to mark a parameter as not explicitly set"""
+    default_value: object
+
+    def __repr__(self):
+        return f"default({self.default_value!r})"
+
+
+class _TracebackCapturingWrapper:
+    """Protect function call and return error with traceback."""
+
+    def __init__(self, func):
+        self.func = func
+
+    def __call__(self, **kwargs):
+        try:
+            return self.func(**kwargs)
+        except BaseException as e:
+            return _ExceptionWithTraceback(e)
+
+
+def _retrieve_traceback_capturing_wrapped_call(out):
+    if isinstance(out, _ExceptionWithTraceback):
+        rebuild, args = out.__reduce__()
+        out = rebuild(*args)
+    if isinstance(out, BaseException):
+        raise out
+    return out
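Editor's note: the AST walker above only dispatches on the operators listed in `operators`, so anything else (calls, attribute access, names) raises and is re-wrapped as a ValueError instead of being handed to `eval`. A short usage sketch, assuming the in-tree module path `joblib._utils`:

    from joblib._utils import eval_expr

    print(eval_expr("2**6 / (1 + 3)"))  # 16.0, plain arithmetic is fine
    try:
        eval_expr("__import__('os').getcwd()")  # ast.Call has no handler
    except ValueError as e:
        print(e)  # "... is not a valid or supported arithmetic expression."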
llmeval-env/lib/python3.10/site-packages/joblib/backports.py ADDED
@@ -0,0 +1,177 @@
+"""
+Backports of fixes for joblib dependencies
+"""
+import os
+import re
+import time
+
+from os.path import basename
+from multiprocessing import util
+
+
+class Version:
+    """Backport from deprecated distutils
+
+    We maintain this backport to avoid introducing a new dependency on
+    `packaging`.
+
+    We might re-explore this choice in the future if all major Python projects
+    introduce a dependency on packaging anyway.
+    """
+
+    def __init__(self, vstring=None):
+        if vstring:
+            self.parse(vstring)
+
+    def __repr__(self):
+        return "%s ('%s')" % (self.__class__.__name__, str(self))
+
+    def __eq__(self, other):
+        c = self._cmp(other)
+        if c is NotImplemented:
+            return c
+        return c == 0
+
+    def __lt__(self, other):
+        c = self._cmp(other)
+        if c is NotImplemented:
+            return c
+        return c < 0
+
+    def __le__(self, other):
+        c = self._cmp(other)
+        if c is NotImplemented:
+            return c
+        return c <= 0
+
+    def __gt__(self, other):
+        c = self._cmp(other)
+        if c is NotImplemented:
+            return c
+        return c > 0
+
+    def __ge__(self, other):
+        c = self._cmp(other)
+        if c is NotImplemented:
+            return c
+        return c >= 0
+
+
+class LooseVersion(Version):
+    """Backport from deprecated distutils
+
+    We maintain this backport to avoid introducing a new dependency on
+    `packaging`.
+
+    We might re-explore this choice in the future if all major Python projects
+    introduce a dependency on packaging anyway.
+    """
+
+    component_re = re.compile(r'(\d+ | [a-z]+ | \.)', re.VERBOSE)
+
+    def __init__(self, vstring=None):
+        if vstring:
+            self.parse(vstring)
+
+    def parse(self, vstring):
+        # I've given up on thinking I can reconstruct the version string
+        # from the parsed tuple -- so I just store the string here for
+        # use by __str__
+        self.vstring = vstring
+        components = [x for x in self.component_re.split(vstring)
+                      if x and x != '.']
+        for i, obj in enumerate(components):
+            try:
+                components[i] = int(obj)
+            except ValueError:
+                pass
+
+        self.version = components
+
+    def __str__(self):
+        return self.vstring
+
+    def __repr__(self):
+        return "LooseVersion ('%s')" % str(self)
+
+    def _cmp(self, other):
+        if isinstance(other, str):
+            other = LooseVersion(other)
+        elif not isinstance(other, LooseVersion):
+            return NotImplemented
+
+        if self.version == other.version:
+            return 0
+        if self.version < other.version:
+            return -1
+        if self.version > other.version:
+            return 1
+
+
+try:
+    import numpy as np
+
+    def make_memmap(filename, dtype='uint8', mode='r+', offset=0,
+                    shape=None, order='C', unlink_on_gc_collect=False):
+        """Custom memmap constructor compatible with numpy.memmap.
+
+        This function:
+        - is a backport of the numpy memmap offset fix (See
+          https://github.com/numpy/numpy/pull/8443 for more details.
+          The numpy fix is available starting numpy 1.13)
+        - adds ``unlink_on_gc_collect``, which specifies explicitly whether
+          the process re-constructing the memmap owns a reference to the
+          underlying file. If set to True, it adds a finalizer to the
+          newly-created memmap that sends a maybe_unlink request for the
+          memmapped file to resource_tracker.
+        """
+        util.debug(
+            "[MEMMAP READ] creating a memmap (shape {}, filename {}, "
+            "pid {})".format(shape, basename(filename), os.getpid())
+        )
+
+        mm = np.memmap(filename, dtype=dtype, mode=mode, offset=offset,
+                       shape=shape, order=order)
+        if LooseVersion(np.__version__) < '1.13':
+            mm.offset = offset
+        if unlink_on_gc_collect:
+            from ._memmapping_reducer import add_maybe_unlink_finalizer
+            add_maybe_unlink_finalizer(mm)
+        return mm
+except ImportError:
+    def make_memmap(filename, dtype='uint8', mode='r+', offset=0,
+                    shape=None, order='C', unlink_on_gc_collect=False):
+        raise NotImplementedError(
+            "'joblib.backports.make_memmap' should not be used "
+            'if numpy is not installed.')
+
+
+if os.name == 'nt':
+    # https://github.com/joblib/joblib/issues/540
+    access_denied_errors = (5, 13)
+    from os import replace
+
+    def concurrency_safe_rename(src, dst):
+        """Renames ``src`` into ``dst`` overwriting ``dst`` if it exists.
+
+        On Windows os.replace can yield permission errors if executed by two
+        different processes.
+        """
+        max_sleep_time = 1
+        total_sleep_time = 0
+        sleep_time = 0.001
+        while total_sleep_time < max_sleep_time:
+            try:
+                replace(src, dst)
+                break
+            except Exception as exc:
+                if getattr(exc, 'winerror', None) in access_denied_errors:
+                    time.sleep(sleep_time)
+                    total_sleep_time += sleep_time
+                    sleep_time *= 2
+                else:
+                    raise
+        else:
+            raise
+else:
+    from os import replace as concurrency_safe_rename  # noqa
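Editor's note: a quick illustration of the two backported helpers above (file names are placeholders):

    from joblib.backports import LooseVersion, concurrency_safe_rename

    assert LooseVersion("1.13.0") < LooseVersion("1.21")
    assert LooseVersion("1.21") > "1.13"  # mixed str comparison also works

    # On POSIX this is just os.replace; on Windows it retries with
    # exponential backoff (0.001s, 0.002s, ...) for up to ~1s when hitting
    # access-denied errors from a concurrent process.
    with open("tmp_src.txt", "w") as f:
        f.write("data")
    concurrency_safe_rename("tmp_src.txt", "tmp_dst.txt")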
llmeval-env/lib/python3.10/site-packages/joblib/externals/__init__.py ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/__init__.py ADDED
@@ -0,0 +1,44 @@
+r"""The :mod:`loky` module manages a pool of workers that can be re-used
+across time. It provides a robust and dynamic implementation of the
+:class:`ProcessPoolExecutor` and a function :func:`get_reusable_executor`
+which hides the pool management under the hood.
+"""
+from concurrent.futures import (
+    ALL_COMPLETED,
+    FIRST_COMPLETED,
+    FIRST_EXCEPTION,
+    CancelledError,
+    Executor,
+    TimeoutError,
+    as_completed,
+    wait,
+)
+
+from ._base import Future
+from .backend.context import cpu_count
+from .backend.reduction import set_loky_pickler
+from .reusable_executor import get_reusable_executor
+from .cloudpickle_wrapper import wrap_non_picklable_objects
+from .process_executor import BrokenProcessPool, ProcessPoolExecutor
+
+
+__all__ = [
+    "get_reusable_executor",
+    "cpu_count",
+    "wait",
+    "as_completed",
+    "Future",
+    "Executor",
+    "ProcessPoolExecutor",
+    "BrokenProcessPool",
+    "CancelledError",
+    "TimeoutError",
+    "FIRST_COMPLETED",
+    "FIRST_EXCEPTION",
+    "ALL_COMPLETED",
+    "wrap_non_picklable_objects",
+    "set_loky_pickler",
+]
+
+
+__version__ = "3.4.1"
llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/_base.py ADDED
@@ -0,0 +1,28 @@
+###############################################################################
+# Modification of concurrent.futures.Future
+#
+# author: Thomas Moreau and Olivier Grisel
+#
+# adapted from concurrent/futures/_base.py (17/02/2017)
+# * Do not use yield from
+# * Use old super syntax
+#
+# Copyright 2009 Brian Quinlan. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+from concurrent.futures import Future as _BaseFuture
+from concurrent.futures._base import LOGGER
+
+
+# To make loky._base.Future instances awaitable by concurrent.futures.wait,
+# derive our custom Future class from _BaseFuture. _invoke_callbacks is the
+# only modification made to this class in loky.
+# TODO investigate why using `concurrent.futures.Future` directly does not
+# always work in our test suite.
+class Future(_BaseFuture):
+    def _invoke_callbacks(self):
+        for callback in self._done_callbacks:
+            try:
+                callback(self)
+            except BaseException:
+                LOGGER.exception(f"exception calling callback for {self!r}")
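Editor's note: the only behavioral change is that callback exceptions are logged rather than propagating (and BaseException is caught, not just Exception). The callback contract itself is the stdlib one that loky mirrors; a sketch with the standard library:

    from concurrent.futures import ThreadPoolExecutor

    def on_done(future):  # receives the finished future itself
        print("result:", future.result())

    with ThreadPoolExecutor(max_workers=1) as ex:
        fut = ex.submit(pow, 2, 10)
        fut.add_done_callback(on_done)  # prints "result: 1024"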
llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/_posix_reduction.py ADDED
@@ -0,0 +1,67 @@
+###############################################################################
+# Extra reducers for Unix-based systems and connection objects
+#
+# author: Thomas Moreau and Olivier Grisel
+#
+# adapted from multiprocessing/reduction.py (17/02/2017)
+# * Add adapted reduction for LokyProcesses and socket/Connection
+#
+import os
+import socket
+import _socket
+from multiprocessing.connection import Connection
+from multiprocessing.context import get_spawning_popen
+
+from .reduction import register
+
+HAVE_SEND_HANDLE = (
+    hasattr(socket, "CMSG_LEN")
+    and hasattr(socket, "SCM_RIGHTS")
+    and hasattr(socket.socket, "sendmsg")
+)
+
+
+def _mk_inheritable(fd):
+    os.set_inheritable(fd, True)
+    return fd
+
+
+def DupFd(fd):
+    """Return a wrapper for an fd."""
+    popen_obj = get_spawning_popen()
+    if popen_obj is not None:
+        return popen_obj.DupFd(popen_obj.duplicate_for_child(fd))
+    elif HAVE_SEND_HANDLE:
+        from multiprocessing import resource_sharer
+
+        return resource_sharer.DupFd(fd)
+    else:
+        raise TypeError(
+            "Cannot pickle connection object. This object can only be "
+            "passed when spawning a new process"
+        )
+
+
+def _reduce_socket(s):
+    df = DupFd(s.fileno())
+    return _rebuild_socket, (df, s.family, s.type, s.proto)
+
+
+def _rebuild_socket(df, family, type, proto):
+    fd = df.detach()
+    return socket.fromfd(fd, family, type, proto)
+
+
+def rebuild_connection(df, readable, writable):
+    fd = df.detach()
+    return Connection(fd, readable, writable)
+
+
+def reduce_connection(conn):
+    df = DupFd(conn.fileno())
+    return rebuild_connection, (df, conn.readable, conn.writable)
+
+
+register(socket.socket, _reduce_socket)
+register(_socket.socket, _reduce_socket)
+register(Connection, reduce_connection)
llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/_win_reduction.py ADDED
@@ -0,0 +1,18 @@
+###############################################################################
+# Extra reducers for Windows systems and connection objects
+#
+# author: Thomas Moreau and Olivier Grisel
+#
+# adapted from multiprocessing/reduction.py (17/02/2017)
+# * Add adapted reduction for LokyProcesses and socket/PipeConnection
+#
+import socket
+from multiprocessing import connection
+from multiprocessing.reduction import _reduce_socket
+
+from .reduction import register
+
+# register reduction for win32 communication objects
+register(socket.socket, _reduce_socket)
+register(connection.Connection, connection.reduce_connection)
+register(connection.PipeConnection, connection.reduce_pipe_connection)
llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/fork_exec.py ADDED
@@ -0,0 +1,43 @@
+###############################################################################
+# Launch a subprocess using fork+exec and make sure only the needed fds are
+# shared between the two processes.
+#
+# author: Thomas Moreau and Olivier Grisel
+#
+import os
+import sys
+
+
+def close_fds(keep_fds):  # pragma: no cover
+    """Close all the file descriptors except those in keep_fds."""
+
+    # Make sure to keep stdout and stderr open for logging purposes
+    keep_fds = {*keep_fds, 1, 2}
+
+    # We try to retrieve all the open fds
+    try:
+        open_fds = {int(fd) for fd in os.listdir("/proc/self/fd")}
+    except FileNotFoundError:
+        import resource
+
+        max_nfds = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
+        open_fds = {*range(max_nfds)}
+
+    for i in open_fds - keep_fds:
+        try:
+            os.close(i)
+        except OSError:
+            pass
+
+
+def fork_exec(cmd, keep_fds, env=None):
+    # copy the environment variables to set in the child process
+    env = env or {}
+    child_env = {**os.environ, **env}
+
+    pid = os.fork()
+    if pid == 0:  # pragma: no cover
+        close_fds(keep_fds)
+        os.execve(sys.executable, cmd, child_env)
+    else:
+        return pid
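Editor's note: the `/proc/self/fd` listing used in `close_fds` is Linux-specific; the fallback pessimistically enumerates every descriptor up to the RLIMIT_NOFILE soft limit. The same probe in isolation:

    import os
    import resource

    try:
        open_fds = {int(fd) for fd in os.listdir("/proc/self/fd")}  # Linux
    except FileNotFoundError:  # e.g. macOS: fall back to the soft limit
        soft_limit = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
        open_fds = set(range(soft_limit))
    print(sorted(open_fds)[:5])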
llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/popen_loky_posix.py ADDED
@@ -0,0 +1,193 @@
+###############################################################################
+# Popen for LokyProcess.
+#
+# author: Thomas Moreau and Olivier Grisel
+#
+import os
+import sys
+import signal
+import pickle
+from io import BytesIO
+from multiprocessing import util, process
+from multiprocessing.connection import wait
+from multiprocessing.context import set_spawning_popen
+
+from . import reduction, resource_tracker, spawn
+
+
+__all__ = ["Popen"]
+
+
+#
+# Wrapper for an fd used while launching a process
+#
+
+
+class _DupFd:
+    def __init__(self, fd):
+        self.fd = reduction._mk_inheritable(fd)
+
+    def detach(self):
+        return self.fd
+
+
+#
+# Start child process using subprocess.Popen
+#
+
+
+class Popen:
+    method = "loky"
+    DupFd = _DupFd
+
+    def __init__(self, process_obj):
+        sys.stdout.flush()
+        sys.stderr.flush()
+        self.returncode = None
+        self._fds = []
+        self._launch(process_obj)
+
+    def duplicate_for_child(self, fd):
+        self._fds.append(fd)
+        return reduction._mk_inheritable(fd)
+
+    def poll(self, flag=os.WNOHANG):
+        if self.returncode is None:
+            while True:
+                try:
+                    pid, sts = os.waitpid(self.pid, flag)
+                except OSError:
+                    # Child process not yet created. See #1731717
+                    # e.errno == errno.ECHILD == 10
+                    return None
+                else:
+                    break
+            if pid == self.pid:
+                if os.WIFSIGNALED(sts):
+                    self.returncode = -os.WTERMSIG(sts)
+                else:
+                    assert os.WIFEXITED(sts)
+                    self.returncode = os.WEXITSTATUS(sts)
+        return self.returncode
+
+    def wait(self, timeout=None):
+        if self.returncode is None:
+            if timeout is not None:
+                if not wait([self.sentinel], timeout):
+                    return None
+            # This shouldn't block if wait() returned successfully.
+            return self.poll(os.WNOHANG if timeout == 0.0 else 0)
+        return self.returncode
+
+    def terminate(self):
+        if self.returncode is None:
+            try:
+                os.kill(self.pid, signal.SIGTERM)
+            except ProcessLookupError:
+                pass
+            except OSError:
+                if self.wait(timeout=0.1) is None:
+                    raise
+
+    def _launch(self, process_obj):
+
+        tracker_fd = resource_tracker._resource_tracker.getfd()
+
+        fp = BytesIO()
+        set_spawning_popen(self)
+        try:
+            prep_data = spawn.get_preparation_data(
+                process_obj._name,
+                getattr(process_obj, "init_main_module", True),
+            )
+            reduction.dump(prep_data, fp)
+            reduction.dump(process_obj, fp)
+
+        finally:
+            set_spawning_popen(None)
+
+        try:
+            parent_r, child_w = os.pipe()
+            child_r, parent_w = os.pipe()
+            # for fd in self._fds:
+            #     _mk_inheritable(fd)
+
+            cmd_python = [sys.executable]
+            cmd_python += ["-m", self.__module__]
+            cmd_python += ["--process-name", str(process_obj.name)]
+            cmd_python += ["--pipe", str(reduction._mk_inheritable(child_r))]
+            reduction._mk_inheritable(child_w)
+            reduction._mk_inheritable(tracker_fd)
+            self._fds += [child_r, child_w, tracker_fd]
+            if sys.version_info >= (3, 8) and os.name == "posix":
+                mp_tracker_fd = prep_data["mp_tracker_args"]["fd"]
+                self.duplicate_for_child(mp_tracker_fd)
+
+            from .fork_exec import fork_exec
+
+            pid = fork_exec(cmd_python, self._fds, env=process_obj.env)
+            util.debug(
+                f"launched python with pid {pid} and cmd:\n{cmd_python}"
+            )
+            self.sentinel = parent_r
+
+            method = "getbuffer"
+            if not hasattr(fp, method):
+                method = "getvalue"
+            with os.fdopen(parent_w, "wb") as f:
+                f.write(getattr(fp, method)())
+            self.pid = pid
+        finally:
+            if parent_r is not None:
+                util.Finalize(self, os.close, (parent_r,))
+            for fd in (child_r, child_w):
+                if fd is not None:
+                    os.close(fd)
+
+    @staticmethod
+    def thread_is_spawning():
+        return True
+
+
+if __name__ == "__main__":
+    import argparse
+
+    parser = argparse.ArgumentParser("Command line parser")
+    parser.add_argument(
+        "--pipe", type=int, required=True, help="File handle for the pipe"
+    )
+    parser.add_argument(
+        "--process-name",
+        type=str,
+        default=None,
+        help="Identifier for debugging purpose",
+    )
+
+    args = parser.parse_args()
+
+    info = {}
+    exitcode = 1
+    try:
+        with os.fdopen(args.pipe, "rb") as from_parent:
+            process.current_process()._inheriting = True
+            try:
+                prep_data = pickle.load(from_parent)
+                spawn.prepare(prep_data)
+                process_obj = pickle.load(from_parent)
+            finally:
+                del process.current_process()._inheriting
+
+        exitcode = process_obj._bootstrap()
+    except Exception:
+        print("\n\n" + "-" * 80)
+        print(f"{args.process_name} failed with traceback: ")
+        print("-" * 80)
+        import traceback
+
+        print(traceback.format_exc())
+        print("\n" + "-" * 80)
+    finally:
+        if from_parent is not None:
+            from_parent.close()
+
+        sys.exit(exitcode)
llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/process.py ADDED
@@ -0,0 +1,85 @@
+###############################################################################
+# LokyProcess implementation
+#
+# authors: Thomas Moreau and Olivier Grisel
+#
+# based on multiprocessing/process.py (17/02/2017)
+#
+import sys
+from multiprocessing.context import assert_spawning
+from multiprocessing.process import BaseProcess
+
+
+class LokyProcess(BaseProcess):
+    _start_method = "loky"
+
+    def __init__(
+        self,
+        group=None,
+        target=None,
+        name=None,
+        args=(),
+        kwargs={},
+        daemon=None,
+        init_main_module=False,
+        env=None,
+    ):
+        super().__init__(
+            group=group,
+            target=target,
+            name=name,
+            args=args,
+            kwargs=kwargs,
+            daemon=daemon,
+        )
+        self.env = {} if env is None else env
+        self.authkey = self.authkey
+        self.init_main_module = init_main_module
+
+    @staticmethod
+    def _Popen(process_obj):
+        if sys.platform == "win32":
+            from .popen_loky_win32 import Popen
+        else:
+            from .popen_loky_posix import Popen
+        return Popen(process_obj)
+
+
+class LokyInitMainProcess(LokyProcess):
+    _start_method = "loky_init_main"
+
+    def __init__(
+        self,
+        group=None,
+        target=None,
+        name=None,
+        args=(),
+        kwargs={},
+        daemon=None,
+    ):
+        super().__init__(
+            group=group,
+            target=target,
+            name=name,
+            args=args,
+            kwargs=kwargs,
+            daemon=daemon,
+            init_main_module=True,
+        )
+
+
+#
+# We subclass bytes to avoid accidental transmission of auth keys over network
+#
+
+
+class AuthenticationKey(bytes):
+    def __reduce__(self):
+        try:
+            assert_spawning(self)
+        except RuntimeError:
+            raise TypeError(
+                "Pickling an AuthenticationKey object is "
+                "disallowed for security reasons"
+            )
+        return AuthenticationKey, (bytes(self),)
llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/resource_tracker.py ADDED
@@ -0,0 +1,378 @@
+###############################################################################
+# Server process to keep track of unlinked resources, like folders and
+# semaphores, and clean them up.
+#
+# author: Thomas Moreau
+#
+# adapted from multiprocessing/semaphore_tracker.py (17/02/2017)
+# * include custom spawnv_passfds to start the process
+# * add some VERBOSE logging
+#
+# TODO: multiprocessing.resource_tracker was contributed to Python 3.8 so
+# once loky drops support for Python 3.7 it might be possible to stop
+# maintaining this loky-specific fork. As a consequence, it might also be
+# possible to stop maintaining the loky.backend.synchronize fork of
+# multiprocessing.synchronize.
+
+#
+# On Unix we run a server process which keeps track of unlinked
+# resources. The server ignores SIGINT and SIGTERM and reads from a
+# pipe. The resource_tracker implements a reference counting scheme: each time
+# a Python process anticipates the shared usage of a resource by another
+# process, it signals the resource_tracker of this shared usage, and in return,
+# the resource_tracker increments the resource's reference count by 1.
+# Similarly, when access to a resource is closed by a Python process, the
+# process notifies the resource_tracker by asking it to decrement the
+# resource's reference count by 1. When the reference count drops to 0, the
+# resource_tracker attempts to clean up the underlying resource.
+
+# Finally, every other process connected to the resource tracker has a copy of
+# the writable end of the pipe used to communicate with it, so the resource
+# tracker gets EOF when all other processes have exited. Then the
+# resource_tracker process unlinks any remaining leaked resources (with
+# reference count above 0)
+
+# For semaphores, this is important because the system only supports a limited
+# number of named semaphores, and they will not be automatically removed till
+# the next reboot. Without this resource tracker process, "killall python"
+# would probably leave unlinked semaphores.
+
+# Note that this behavior differs from CPython's resource_tracker, which only
+# implements a list of shared resources, and not a proper refcounting scheme.
+# Also, CPython's resource tracker will only attempt to clean up those shared
+# resources once all processes connected to the resource tracker have exited.
+
+
+import os
+import shutil
+import sys
+import signal
+import warnings
+import threading
+from _multiprocessing import sem_unlink
+from multiprocessing import util
+
+from . import spawn
+
+if sys.platform == "win32":
+    import _winapi
+    import msvcrt
+    from multiprocessing.reduction import duplicate
+
+
+__all__ = ["ensure_running", "register", "unregister"]
+
+_HAVE_SIGMASK = hasattr(signal, "pthread_sigmask")
+_IGNORED_SIGNALS = (signal.SIGINT, signal.SIGTERM)
+
+_CLEANUP_FUNCS = {"folder": shutil.rmtree, "file": os.unlink}
+
+if os.name == "posix":
+    _CLEANUP_FUNCS["semlock"] = sem_unlink
+
+
+VERBOSE = False
+
+
+class ResourceTracker:
+    def __init__(self):
+        self._lock = threading.Lock()
+        self._fd = None
+        self._pid = None
+
+    def getfd(self):
+        self.ensure_running()
+        return self._fd
+
+    def ensure_running(self):
+        """Make sure that resource tracker process is running.
+
+        This can be run from any process. Usually a child process will use
+        the resource created by its parent."""
+        with self._lock:
+            if self._fd is not None:
+                # resource tracker was launched before, is it still running?
+                if self._check_alive():
+                    # => still alive
+                    return
+                # => dead, launch it again
+                os.close(self._fd)
+                if os.name == "posix":
+                    try:
+                        # At this point, the resource_tracker process has been
+                        # killed or crashed. Let's remove the process entry
+                        # from the process table to avoid zombie processes.
+                        os.waitpid(self._pid, 0)
+                    except OSError:
+                        # The process was terminated or is a child from an
+                        # ancestor of the current process.
+                        pass
+                self._fd = None
+                self._pid = None
+
+                warnings.warn(
+                    "resource_tracker: process died unexpectedly, "
+                    "relaunching. Some folders/semaphores might "
+                    "leak."
+                )
+
+            fds_to_pass = []
+            try:
+                fds_to_pass.append(sys.stderr.fileno())
+            except Exception:
+                pass
+
+            r, w = os.pipe()
+            if sys.platform == "win32":
+                _r = duplicate(msvcrt.get_osfhandle(r), inheritable=True)
+                os.close(r)
+                r = _r
+
+            cmd = f"from {main.__module__} import main; main({r}, {VERBOSE})"
+            try:
+                fds_to_pass.append(r)
+                # process will outlive us, so no need to wait on pid
+                exe = spawn.get_executable()
+                args = [exe, *util._args_from_interpreter_flags(), "-c", cmd]
+                util.debug(f"launching resource tracker: {args}")
+                # bpo-33613: Register a signal mask that will block the
+                # signals. This signal mask will be inherited by the child
+                # that is going to be spawned and will protect the child from a
+                # race condition that can make the child die before it
+                # registers signal handlers for SIGINT and SIGTERM. The mask is
+                # unregistered after spawning the child.
+                try:
+                    if _HAVE_SIGMASK:
+                        signal.pthread_sigmask(
+                            signal.SIG_BLOCK, _IGNORED_SIGNALS
+                        )
+                    pid = spawnv_passfds(exe, args, fds_to_pass)
+                finally:
+                    if _HAVE_SIGMASK:
+                        signal.pthread_sigmask(
+                            signal.SIG_UNBLOCK, _IGNORED_SIGNALS
+                        )
+            except BaseException:
+                os.close(w)
+                raise
+            else:
+                self._fd = w
+                self._pid = pid
+            finally:
+                if sys.platform == "win32":
+                    _winapi.CloseHandle(r)
+                else:
+                    os.close(r)
+
+    def _check_alive(self):
+        """Check for the existence of the resource tracker process."""
+        try:
+            self._send("PROBE", "", "")
+        except BrokenPipeError:
+            return False
+        else:
+            return True
+
+    def register(self, name, rtype):
+        """Register a named resource, and increment its refcount."""
+        self.ensure_running()
+        self._send("REGISTER", name, rtype)
+
+    def unregister(self, name, rtype):
+        """Unregister a named resource with resource tracker."""
+        self.ensure_running()
+        self._send("UNREGISTER", name, rtype)
+
+    def maybe_unlink(self, name, rtype):
+        """Decrement the refcount of a resource, and delete it if it hits 0"""
+        self.ensure_running()
+        self._send("MAYBE_UNLINK", name, rtype)
+
+    def _send(self, cmd, name, rtype):
+        if len(name) > 512:
+            # posix guarantees that writes to a pipe of less than PIPE_BUF
+            # bytes are atomic, and that PIPE_BUF >= 512
+            raise ValueError("name too long")
+        msg = f"{cmd}:{name}:{rtype}\n".encode("ascii")
+        nbytes = os.write(self._fd, msg)
+        assert nbytes == len(msg)
+
+
+_resource_tracker = ResourceTracker()
+ensure_running = _resource_tracker.ensure_running
+register = _resource_tracker.register
+maybe_unlink = _resource_tracker.maybe_unlink
+unregister = _resource_tracker.unregister
+getfd = _resource_tracker.getfd
+
+
+def main(fd, verbose=0):
+    """Run resource tracker."""
+    # protect the process from ^C and "killall python" etc
+    if verbose:
+        util.log_to_stderr(level=util.DEBUG)
+
+    signal.signal(signal.SIGINT, signal.SIG_IGN)
+    signal.signal(signal.SIGTERM, signal.SIG_IGN)
+
+    if _HAVE_SIGMASK:
+        signal.pthread_sigmask(signal.SIG_UNBLOCK, _IGNORED_SIGNALS)
+
+    for f in (sys.stdin, sys.stdout):
+        try:
+            f.close()
+        except Exception:
+            pass
+
+    if verbose:
+        util.debug("Main resource tracker is running")
+
+    registry = {rtype: {} for rtype in _CLEANUP_FUNCS.keys()}
+    try:
+        # keep track of registered/unregistered resources
+        if sys.platform == "win32":
+            fd = msvcrt.open_osfhandle(fd, os.O_RDONLY)
+        with open(fd, "rb") as f:
+            while True:
+                line = f.readline()
+                if line == b"":  # EOF
+                    break
+                try:
+                    splitted = line.strip().decode("ascii").split(":")
+                    # name can potentially contain separator symbols (for
+                    # instance folders on Windows)
+                    cmd, name, rtype = (
+                        splitted[0],
+                        ":".join(splitted[1:-1]),
+                        splitted[-1],
+                    )
+
+                    if cmd == "PROBE":
+                        continue
+
+                    if rtype not in _CLEANUP_FUNCS:
+                        raise ValueError(
+                            f"Cannot register {name} for automatic cleanup: "
+                            f"unknown resource type ({rtype}). Resource type "
+                            "should be one of the following: "
+                            f"{list(_CLEANUP_FUNCS.keys())}"
+                        )
+
+                    if cmd == "REGISTER":
+                        if name not in registry[rtype]:
+                            registry[rtype][name] = 1
+                        else:
+                            registry[rtype][name] += 1
+
+                        if verbose:
+                            util.debug(
+                                "[ResourceTracker] incremented refcount of "
+                                f"{rtype} {name} "
+                                f"(current {registry[rtype][name]})"
+                            )
+                    elif cmd == "UNREGISTER":
+                        del registry[rtype][name]
+                        if verbose:
+                            util.debug(
+                                f"[ResourceTracker] unregister {name} {rtype}: "
+                                f"registry({len(registry)})"
+                            )
+                    elif cmd == "MAYBE_UNLINK":
+                        registry[rtype][name] -= 1
+                        if verbose:
+                            util.debug(
+                                "[ResourceTracker] decremented refcount of "
+                                f"{rtype} {name} "
+                                f"(current {registry[rtype][name]})"
+                            )
+
+                        if registry[rtype][name] == 0:
+                            del registry[rtype][name]
+                            try:
+                                if verbose:
+                                    util.debug(
+                                        f"[ResourceTracker] unlink {name}"
+                                    )
+                                _CLEANUP_FUNCS[rtype](name)
+                            except Exception as e:
+                                warnings.warn(
+                                    f"resource_tracker: {name}: {e!r}"
+                                )
+
+                    else:
+                        raise RuntimeError(f"unrecognized command {cmd!r}")
+                except BaseException:
+                    try:
+                        sys.excepthook(*sys.exc_info())
+                    except BaseException:
+                        pass
+    finally:
+        # all processes have terminated; cleanup any remaining resources
+        def _unlink_resources(rtype_registry, rtype):
+            if rtype_registry:
+                try:
+                    warnings.warn(
+                        "resource_tracker: There appear to be "
+                        f"{len(rtype_registry)} leaked {rtype} objects to "
+                        "clean up at shutdown"
+                    )
+                except Exception:
+                    pass
+            for name in rtype_registry:
+                # For some reason the process which created and registered this
+                # resource has failed to unregister it. Presumably it has
+                # died. We therefore clean it up.
+                try:
+                    _CLEANUP_FUNCS[rtype](name)
+                    if verbose:
+                        util.debug(f"[ResourceTracker] unlink {name}")
+                except Exception as e:
+                    warnings.warn(f"resource_tracker: {name}: {e!r}")
+
+        for rtype, rtype_registry in registry.items():
+            if rtype == "folder":
+                continue
+            else:
+                _unlink_resources(rtype_registry, rtype)
+
+        # The default cleanup routine for folders deletes everything inside
+        # those folders recursively, which can include other resources tracked
+        # by the resource tracker. To limit the risk of the resource tracker
+        # attempting to delete twice a resource (once as part of a tracked
+        # folder, and once as a resource), we delete the folders after all
+        # other resource types.
+        if "folder" in registry:
+            _unlink_resources(registry["folder"], "folder")
+
+    if verbose:
+        util.debug("resource tracker shut down")
+
+
+#
+# Start a program with only specified fds kept open
+#
+
+
+def spawnv_passfds(path, args, passfds):
+    passfds = sorted(passfds)
+    if sys.platform != "win32":
+        errpipe_read, errpipe_write = os.pipe()
+        try:
+            from .reduction import _mk_inheritable
+            from .fork_exec import fork_exec
+
+            _pass = [_mk_inheritable(fd) for fd in passfds]
+            return fork_exec(args, _pass)
+        finally:
+            os.close(errpipe_read)
+            os.close(errpipe_write)
+    else:
+        cmd = " ".join(f'"{x}"' for x in args)
+        try:
+            _, ht, pid, _ = _winapi.CreateProcess(
+                path, cmd, None, None, True, 0, None, None, None
+            )
+            _winapi.CloseHandle(ht)
+        except BaseException:
+            pass
+        return pid
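Editor's note: the wire protocol over the pipe is line-based text of the form `CMD:name:rtype`. A sketch of the refcounting lifecycle from the client side, using the module-level helpers exported above (the temp folder is a placeholder):

    import tempfile
    from joblib.externals.loky.backend import resource_tracker

    folder = tempfile.mkdtemp()
    resource_tracker.register(folder, "folder")      # refcount -> 1
    resource_tracker.register(folder, "folder")      # refcount -> 2
    resource_tracker.maybe_unlink(folder, "folder")  # refcount -> 1, kept
    resource_tracker.maybe_unlink(folder, "folder")  # refcount -> 0, rmtree'd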
llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/spawn.py ADDED
@@ -0,0 +1,250 @@
+###############################################################################
+# Prepares and processes the data to set up the new process environment
+#
+# author: Thomas Moreau and Olivier Grisel
+#
+# adapted from multiprocessing/spawn.py (17/02/2017)
+# * Improve logging data
+#
+import os
+import sys
+import runpy
+import textwrap
+import types
+from multiprocessing import process, util
+
+
+if sys.platform != "win32":
+    WINEXE = False
+    WINSERVICE = False
+else:
+    import msvcrt
+    from multiprocessing.reduction import duplicate
+
+    WINEXE = sys.platform == "win32" and getattr(sys, "frozen", False)
+    WINSERVICE = sys.executable.lower().endswith("pythonservice.exe")
+
+if WINSERVICE:
+    _python_exe = os.path.join(sys.exec_prefix, "python.exe")
+else:
+    _python_exe = sys.executable
+
+
+def get_executable():
+    return _python_exe
+
+
+def _check_not_importing_main():
+    if getattr(process.current_process(), "_inheriting", False):
+        raise RuntimeError(
+            textwrap.dedent(
+                """\
+                An attempt has been made to start a new process before the
+                current process has finished its bootstrapping phase.
+
+                This probably means that you are not using fork to start your
+                child processes and you have forgotten to use the proper idiom
+                in the main module:
+
+                    if __name__ == '__main__':
+                        freeze_support()
+                        ...
+
+                The "freeze_support()" line can be omitted if the program
+                is not going to be frozen to produce an executable."""
+            )
+        )
+
+
+def get_preparation_data(name, init_main_module=True):
+    """Return info about parent needed by child to unpickle process object."""
+    _check_not_importing_main()
+    d = dict(
+        log_to_stderr=util._log_to_stderr,
+        authkey=bytes(process.current_process().authkey),
+        name=name,
+        sys_argv=sys.argv,
+        orig_dir=process.ORIGINAL_DIR,
+        dir=os.getcwd(),
+    )
+
+    # Send sys_path and make sure the current directory will not be changed
+    d["sys_path"] = [p if p != "" else process.ORIGINAL_DIR for p in sys.path]
+
+    # Make sure to pass the information if the multiprocessing logger is active
+    if util._logger is not None:
+        d["log_level"] = util._logger.getEffectiveLevel()
+        if util._logger.handlers:
+            h = util._logger.handlers[0]
+            d["log_fmt"] = h.formatter._fmt
+
+    # Tell the child how to communicate with the resource_tracker
+    from .resource_tracker import _resource_tracker
+
+    _resource_tracker.ensure_running()
+    d["tracker_args"] = {"pid": _resource_tracker._pid}
+    if sys.platform == "win32":
+        d["tracker_args"]["fh"] = msvcrt.get_osfhandle(_resource_tracker._fd)
+    else:
+        d["tracker_args"]["fd"] = _resource_tracker._fd
+
+    if sys.version_info >= (3, 8) and os.name == "posix":
+        # joblib/loky#242: allow loky processes to retrieve the resource
+        # tracker of their parent in case the child processes unpickle
+        # shared_memory objects, that are still tracked by multiprocessing's
+        # resource_tracker by default.
+        # XXX: this is a workaround that may be error prone: in the future, it
+        # would be better to have loky subclass multiprocessing's shared_memory
+        # to force registration of shared_memory segments via loky's
+        # resource_tracker.
+        from multiprocessing.resource_tracker import (
+            _resource_tracker as mp_resource_tracker,
+        )
+
+        # multiprocessing's resource_tracker must be running before loky
+        # process is created (otherwise the child won't be able to use it if it
+        # is created later on)
+        mp_resource_tracker.ensure_running()
+        d["mp_tracker_args"] = {
+            "fd": mp_resource_tracker._fd,
+            "pid": mp_resource_tracker._pid,
+        }
+
+    # Figure out whether to initialise main in the subprocess as a module
+    # or through direct execution (or to leave it alone entirely)
+    if init_main_module:
+        main_module = sys.modules["__main__"]
+        try:
+            main_mod_name = getattr(main_module.__spec__, "name", None)
+        except BaseException:
+            main_mod_name = None
+        if main_mod_name is not None:
+            d["init_main_from_name"] = main_mod_name
+        elif sys.platform != "win32" or (not WINEXE and not WINSERVICE):
+            main_path = getattr(main_module, "__file__", None)
+            if main_path is not None:
+                if (
+                    not os.path.isabs(main_path)
+                    and process.ORIGINAL_DIR is not None
+                ):
+                    main_path = os.path.join(process.ORIGINAL_DIR, main_path)
+                d["init_main_from_path"] = os.path.normpath(main_path)
+
+    return d
+
+
+#
+# Prepare current process
+#
+old_main_modules = []
+
+
+def prepare(data, parent_sentinel=None):
+    """Try to get current process ready to unpickle process object."""
+    if "name" in data:
+        process.current_process().name = data["name"]
+
+    if "authkey" in data:
+        process.current_process().authkey = data["authkey"]
+
+    if "log_to_stderr" in data and data["log_to_stderr"]:
+        util.log_to_stderr()
+
+    if "log_level" in data:
+        util.get_logger().setLevel(data["log_level"])
+
+    if "log_fmt" in data:
+        import logging
+
+        util.get_logger().handlers[0].setFormatter(
+            logging.Formatter(data["log_fmt"])
+        )
+
+    if "sys_path" in data:
+        sys.path = data["sys_path"]
+
+    if "sys_argv" in data:
+        sys.argv = data["sys_argv"]
+
+    if "dir" in data:
+        os.chdir(data["dir"])
+
+    if "orig_dir" in data:
+        process.ORIGINAL_DIR = data["orig_dir"]
+
+    if "mp_tracker_args" in data:
+        from multiprocessing.resource_tracker import (
+            _resource_tracker as mp_resource_tracker,
+        )
+
+        mp_resource_tracker._fd = data["mp_tracker_args"]["fd"]
+        mp_resource_tracker._pid = data["mp_tracker_args"]["pid"]
+    if "tracker_args" in data:
+        from .resource_tracker import _resource_tracker
+
+        _resource_tracker._pid = data["tracker_args"]["pid"]
+        if sys.platform == "win32":
+            handle = data["tracker_args"]["fh"]
+            handle = duplicate(handle, source_process=parent_sentinel)
+            _resource_tracker._fd = msvcrt.open_osfhandle(handle, os.O_RDONLY)
+        else:
+            _resource_tracker._fd = data["tracker_args"]["fd"]
+
+    if "init_main_from_name" in data:
+        _fixup_main_from_name(data["init_main_from_name"])
+    elif "init_main_from_path" in data:
+        _fixup_main_from_path(data["init_main_from_path"])
+
+
+# Multiprocessing module helpers to fix up the main module in
+# spawned subprocesses
+def _fixup_main_from_name(mod_name):
+    # __main__.py files for packages, directories, zip archives, etc, run
+    # their "main only" code unconditionally, so we don't even try to
+    # populate anything in __main__, nor do we make any changes to
+    # __main__ attributes
+    current_main = sys.modules["__main__"]
+    if mod_name == "__main__" or mod_name.endswith(".__main__"):
+        return
+
+    # If this process was forked, __main__ may already be populated
+    if getattr(current_main.__spec__, "name", None) == mod_name:
+        return
+
+    # Otherwise, __main__ may contain some non-main code where we need to
+    # support unpickling it properly. We rerun it as __mp_main__ and make
+    # the normal __main__ an alias to that
+    old_main_modules.append(current_main)
+    main_module = types.ModuleType("__mp_main__")
+    main_content = runpy.run_module(
+        mod_name, run_name="__mp_main__", alter_sys=True
+    )
+    main_module.__dict__.update(main_content)
+    sys.modules["__main__"] = sys.modules["__mp_main__"] = main_module
+
+
+def _fixup_main_from_path(main_path):
+    # If this process was forked, __main__ may already be populated
+    current_main = sys.modules["__main__"]
+
+    # Unfortunately, the main ipython launch script historically had no
+    # "if __name__ == '__main__'" guard, so we work around that
+    # by treating it like a __main__.py file
+    # See https://github.com/ipython/ipython/issues/4698
+    main_name = os.path.splitext(os.path.basename(main_path))[0]
+    if main_name == "ipython":
+        return
+
+    # Otherwise, if __file__ already has the setting we expect,
+    # there's nothing more to do
+    if getattr(current_main, "__file__", None) == main_path:
+        return
+
+    # If the parent process has sent a path through rather than a module
+    # name we assume it is an executable script that may contain
+    # non-main code that needs to be executed
+    old_main_modules.append(current_main)
+    main_module = types.ModuleType("__mp_main__")
+    main_content = runpy.run_path(main_path, run_name="__mp_main__")
+    main_module.__dict__.update(main_content)
+    sys.modules["__main__"] = sys.modules["__mp_main__"] = main_module
llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/synchronize.py ADDED
@@ -0,0 +1,409 @@
+ ###############################################################################
+ # Synchronization primitives based on our SemLock implementation
+ #
+ # author: Thomas Moreau and Olivier Grisel
+ #
+ # adapted from multiprocessing/synchronize.py (17/02/2017)
+ #  * Remove ctx argument for compatibility reasons
+ #  * Registers a cleanup function with the loky resource_tracker to remove the
+ #    semaphore when the process dies instead.
+ #
+ # TODO: investigate which Python version is required to be able to use
+ # multiprocessing.resource_tracker and therefore multiprocessing.synchronize
+ # instead of a loky-specific fork.
+
+ import os
+ import sys
+ import tempfile
+ import threading
+ import _multiprocessing
+ from time import time as _time
+ from multiprocessing import process, util
+ from multiprocessing.context import assert_spawning
+
+ from . import resource_tracker
+
+ __all__ = [
+     "Lock",
+     "RLock",
+     "Semaphore",
+     "BoundedSemaphore",
+     "Condition",
+     "Event",
+ ]
+ # Try to import the mp.synchronize module cleanly, if it fails
+ # raise ImportError for platforms lacking a working sem_open implementation.
+ # See issue 3770
+ try:
+     from _multiprocessing import SemLock as _SemLock
+     from _multiprocessing import sem_unlink
+ except ImportError:
+     raise ImportError(
+         "This platform lacks a functioning sem_open"
+         " implementation, therefore, the required"
+         " synchronization primitives needed will not"
+         " function, see issue 3770."
+     )
+
+ #
+ # Constants
+ #
+
+ RECURSIVE_MUTEX, SEMAPHORE = range(2)
+ SEM_VALUE_MAX = _multiprocessing.SemLock.SEM_VALUE_MAX
+
+
+ #
+ # Base class for semaphores and mutexes; wraps `_multiprocessing.SemLock`
+ #
+
+
+ class SemLock:
+
+     _rand = tempfile._RandomNameSequence()
+
+     def __init__(self, kind, value, maxvalue, name=None):
+         # unlink_now is only used on win32 or when we are using fork.
+         unlink_now = False
+         if name is None:
+             # Try to find an unused name for the SemLock instance.
+             for _ in range(100):
+                 try:
+                     self._semlock = _SemLock(
+                         kind, value, maxvalue, SemLock._make_name(), unlink_now
+                     )
+                 except FileExistsError:  # pragma: no cover
+                     pass
+                 else:
+                     break
+             else:  # pragma: no cover
+                 raise FileExistsError("cannot find name for semaphore")
+         else:
+             self._semlock = _SemLock(kind, value, maxvalue, name, unlink_now)
+         self.name = name
+         util.debug(
+             f"created semlock with handle {self._semlock.handle} and name "
+             f'"{self.name}"'
+         )
+
+         self._make_methods()
+
+         def _after_fork(obj):
+             obj._semlock._after_fork()
+
+         util.register_after_fork(self, _after_fork)
+
+         # When the object is garbage collected or the
+         # process shuts down we unlink the semaphore name
+         resource_tracker.register(self._semlock.name, "semlock")
+         util.Finalize(
+             self, SemLock._cleanup, (self._semlock.name,), exitpriority=0
+         )
+
+     @staticmethod
+     def _cleanup(name):
+         try:
+             sem_unlink(name)
+         except FileNotFoundError:
+             # Already unlinked, possibly by user code: ignore and make sure to
+             # unregister the semaphore from the resource tracker.
+             pass
+         finally:
+             resource_tracker.unregister(name, "semlock")
+
+     def _make_methods(self):
+         self.acquire = self._semlock.acquire
+         self.release = self._semlock.release
+
+     def __enter__(self):
+         return self._semlock.acquire()
+
+     def __exit__(self, *args):
+         return self._semlock.release()
+
+     def __getstate__(self):
+         assert_spawning(self)
+         sl = self._semlock
+         h = sl.handle
+         return (h, sl.kind, sl.maxvalue, sl.name)
+
+     def __setstate__(self, state):
+         self._semlock = _SemLock._rebuild(*state)
+         util.debug(
+             f'recreated blocker with handle {state[0]!r} and name "{state[3]}"'
+         )
+         self._make_methods()
+
+     @staticmethod
+     def _make_name():
+         # OSX does not support long names for semaphores
+         return f"/loky-{os.getpid()}-{next(SemLock._rand)}"
+
+
+ #
+ # Semaphore
+ #
+
+
+ class Semaphore(SemLock):
+     def __init__(self, value=1):
+         SemLock.__init__(self, SEMAPHORE, value, SEM_VALUE_MAX)
+
+     def get_value(self):
+         if sys.platform == "darwin":
+             raise NotImplementedError("OSX does not implement sem_getvalue")
+         return self._semlock._get_value()
+
+     def __repr__(self):
+         try:
+             value = self._semlock._get_value()
+         except Exception:
+             value = "unknown"
+         return f"<{self.__class__.__name__}(value={value})>"
+
+
+ #
+ # Bounded semaphore
+ #
+
+
+ class BoundedSemaphore(Semaphore):
+     def __init__(self, value=1):
+         SemLock.__init__(self, SEMAPHORE, value, value)
+
+     def __repr__(self):
+         try:
+             value = self._semlock._get_value()
+         except Exception:
+             value = "unknown"
+         return (
+             f"<{self.__class__.__name__}(value={value}, "
+             f"maxvalue={self._semlock.maxvalue})>"
+         )
+
+
+ #
+ # Non-recursive lock
+ #
+
+
+ class Lock(SemLock):
+     def __init__(self):
+         super().__init__(SEMAPHORE, 1, 1)
+
+     def __repr__(self):
+         try:
+             if self._semlock._is_mine():
+                 name = process.current_process().name
+                 if threading.current_thread().name != "MainThread":
+                     name = f"{name}|{threading.current_thread().name}"
+             elif self._semlock._get_value() == 1:
+                 name = "None"
+             elif self._semlock._count() > 0:
+                 name = "SomeOtherThread"
+             else:
+                 name = "SomeOtherProcess"
+         except Exception:
+             name = "unknown"
+         return f"<{self.__class__.__name__}(owner={name})>"
+
+
+ #
+ # Recursive lock
+ #
+
+
+ class RLock(SemLock):
+     def __init__(self):
+         super().__init__(RECURSIVE_MUTEX, 1, 1)
+
+     def __repr__(self):
+         try:
+             if self._semlock._is_mine():
+                 name = process.current_process().name
+                 if threading.current_thread().name != "MainThread":
+                     name = f"{name}|{threading.current_thread().name}"
+                 count = self._semlock._count()
+             elif self._semlock._get_value() == 1:
+                 name, count = "None", 0
+             elif self._semlock._count() > 0:
+                 name, count = "SomeOtherThread", "nonzero"
+             else:
+                 name, count = "SomeOtherProcess", "nonzero"
+         except Exception:
+             name, count = "unknown", "unknown"
+         return f"<{self.__class__.__name__}({name}, {count})>"
+
+
+ #
+ # Condition variable
+ #
+
+
+ class Condition:
+     def __init__(self, lock=None):
+         self._lock = lock or RLock()
+         self._sleeping_count = Semaphore(0)
+         self._woken_count = Semaphore(0)
+         self._wait_semaphore = Semaphore(0)
+         self._make_methods()
+
+     def __getstate__(self):
+         assert_spawning(self)
+         return (
+             self._lock,
+             self._sleeping_count,
+             self._woken_count,
+             self._wait_semaphore,
+         )
+
+     def __setstate__(self, state):
+         (
+             self._lock,
+             self._sleeping_count,
+             self._woken_count,
+             self._wait_semaphore,
+         ) = state
+         self._make_methods()
+
+     def __enter__(self):
+         return self._lock.__enter__()
+
+     def __exit__(self, *args):
+         return self._lock.__exit__(*args)
+
+     def _make_methods(self):
+         self.acquire = self._lock.acquire
+         self.release = self._lock.release
+
+     def __repr__(self):
+         try:
+             num_waiters = (
+                 self._sleeping_count._semlock._get_value()
+                 - self._woken_count._semlock._get_value()
+             )
+         except Exception:
+             num_waiters = "unknown"
+         return f"<{self.__class__.__name__}({self._lock}, {num_waiters})>"
+
+     def wait(self, timeout=None):
+         assert (
+             self._lock._semlock._is_mine()
+         ), "must acquire() condition before using wait()"
+
+         # indicate that this thread is going to sleep
+         self._sleeping_count.release()
+
+         # release lock
+         count = self._lock._semlock._count()
+         for _ in range(count):
+             self._lock.release()
+
+         try:
+             # wait for notification or timeout
+             return self._wait_semaphore.acquire(True, timeout)
+         finally:
+             # indicate that this thread has woken
+             self._woken_count.release()
+
+             # reacquire lock
+             for _ in range(count):
+                 self._lock.acquire()
+
+     def notify(self):
+         assert self._lock._semlock._is_mine(), "lock is not owned"
+         assert not self._wait_semaphore.acquire(False)
+
+         # to take account of timeouts since last notify() we subtract
+         # woken_count from sleeping_count and rezero woken_count
+         while self._woken_count.acquire(False):
+             res = self._sleeping_count.acquire(False)
+             assert res
+
+         if self._sleeping_count.acquire(False):  # try grabbing a sleeper
+             self._wait_semaphore.release()  # wake up one sleeper
+             self._woken_count.acquire()  # wait for the sleeper to wake
+
+             # rezero _wait_semaphore in case a timeout just happened
+             self._wait_semaphore.acquire(False)
+
+     def notify_all(self):
+         assert self._lock._semlock._is_mine(), "lock is not owned"
+         assert not self._wait_semaphore.acquire(False)
+
+         # to take account of timeouts since last notify*() we subtract
+         # woken_count from sleeping_count and rezero woken_count
+         while self._woken_count.acquire(False):
+             res = self._sleeping_count.acquire(False)
+             assert res
+
+         sleepers = 0
+         while self._sleeping_count.acquire(False):
+             self._wait_semaphore.release()  # wake up one sleeper
+             sleepers += 1
+
+         if sleepers:
+             for _ in range(sleepers):
+                 self._woken_count.acquire()  # wait for a sleeper to wake
+
+             # rezero wait_semaphore in case some timeouts just happened
+             while self._wait_semaphore.acquire(False):
+                 pass
+
+     def wait_for(self, predicate, timeout=None):
+         result = predicate()
+         if result:
+             return result
+         if timeout is not None:
+             endtime = _time() + timeout
+         else:
+             endtime = None
+             waittime = None
+         while not result:
+             if endtime is not None:
+                 waittime = endtime - _time()
+                 if waittime <= 0:
+                     break
+             self.wait(waittime)
+             result = predicate()
+         return result
+
+
+ #
+ # Event
+ #
+
+
+ class Event:
+     def __init__(self):
+         self._cond = Condition(Lock())
+         self._flag = Semaphore(0)
+
+     def is_set(self):
+         with self._cond:
+             if self._flag.acquire(False):
+                 self._flag.release()
+                 return True
+             return False
+
+     def set(self):
+         with self._cond:
+             self._flag.acquire(False)
+             self._flag.release()
+             self._cond.notify_all()
+
+     def clear(self):
+         with self._cond:
+             self._flag.acquire(False)
+
+     def wait(self, timeout=None):
+         with self._cond:
+             if self._flag.acquire(False):
+                 self._flag.release()
+             else:
+                 self._cond.wait(timeout)
+
+             if self._flag.acquire(False):
+                 self._flag.release()
+                 return True
+             return False
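
The module above mirrors the `multiprocessing.synchronize` API, but every named semaphore is registered with loky's resource_tracker so it is unlinked even if the owning process dies abruptly. A minimal sketch of the Event semantics built on these primitives (POSIX only, since they rely on `sem_open`; the import path is an assumption based on the file location in this diff):

    # Sketch: Event is a Condition(Lock()) plus a Semaphore(0) used as a flag.
    from joblib.externals.loky.backend.synchronize import Event

    event = Event()
    assert not event.is_set()
    event.set()                          # release the flag, then notify_all()
    assert event.wait(timeout=1.0)       # True immediately: the flag is set
    event.clear()
    assert not event.wait(timeout=0.01)  # condition wait times out: False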
llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/cloudpickle_wrapper.py ADDED
@@ -0,0 +1,102 @@
+ import inspect
+ from functools import partial
+ from joblib.externals.cloudpickle import dumps, loads
+
+
+ WRAP_CACHE = {}
+
+
+ class CloudpickledObjectWrapper:
+     def __init__(self, obj, keep_wrapper=False):
+         self._obj = obj
+         self._keep_wrapper = keep_wrapper
+
+     def __reduce__(self):
+         _pickled_object = dumps(self._obj)
+         if not self._keep_wrapper:
+             return loads, (_pickled_object,)
+
+         return _reconstruct_wrapper, (_pickled_object, self._keep_wrapper)
+
+     def __getattr__(self, attr):
+         # Ensure that the wrapped object can be used seamlessly as the
+         # previous object.
+         if attr not in ["_obj", "_keep_wrapper"]:
+             return getattr(self._obj, attr)
+         return getattr(self, attr)
+
+
+ # Make sure the wrapped object conserves the callable property
+ class CallableObjectWrapper(CloudpickledObjectWrapper):
+     def __call__(self, *args, **kwargs):
+         return self._obj(*args, **kwargs)
+
+
+ def _wrap_non_picklable_objects(obj, keep_wrapper):
+     if callable(obj):
+         return CallableObjectWrapper(obj, keep_wrapper=keep_wrapper)
+     return CloudpickledObjectWrapper(obj, keep_wrapper=keep_wrapper)
+
+
+ def _reconstruct_wrapper(_pickled_object, keep_wrapper):
+     obj = loads(_pickled_object)
+     return _wrap_non_picklable_objects(obj, keep_wrapper)
+
+
+ def _wrap_objects_when_needed(obj):
+     # Function to introspect an object and decide if it should be wrapped or
+     # not.
+     need_wrap = "__main__" in getattr(obj, "__module__", "")
+     if isinstance(obj, partial):
+         return partial(
+             _wrap_objects_when_needed(obj.func),
+             *[_wrap_objects_when_needed(a) for a in obj.args],
+             **{
+                 k: _wrap_objects_when_needed(v)
+                 for k, v in obj.keywords.items()
+             }
+         )
+     if callable(obj):
+         # Need wrap if the object is a function defined in a local scope of
+         # another function.
+         func_code = getattr(obj, "__code__", "")
+         need_wrap |= getattr(func_code, "co_flags", 0) & inspect.CO_NESTED
+
+         # Need wrap if the obj is a lambda expression
+         func_name = getattr(obj, "__name__", "")
+         need_wrap |= "<lambda>" in func_name
+
+     if not need_wrap:
+         return obj
+
+     wrapped_obj = WRAP_CACHE.get(obj)
+     if wrapped_obj is None:
+         wrapped_obj = _wrap_non_picklable_objects(obj, keep_wrapper=False)
+         WRAP_CACHE[obj] = wrapped_obj
+     return wrapped_obj
+
+
+ def wrap_non_picklable_objects(obj, keep_wrapper=True):
+     """Wrapper for non-picklable object to use cloudpickle to serialize them.
+
+     Note that this wrapper tends to slow down the serialization process as it
+     is done with cloudpickle which is typically slower compared to pickle. The
+     proper way to solve serialization issues is to avoid defining functions and
+     objects in the main scripts and to implement __reduce__ functions for
+     complex classes.
+     """
+     # If obj is a class, create a CloudpickledClassWrapper which instantiates
+     # the object internally and wrap it directly in a CloudpickledObjectWrapper
+     if inspect.isclass(obj):
+
+         class CloudpickledClassWrapper(CloudpickledObjectWrapper):
+             def __init__(self, *args, **kwargs):
+                 self._obj = obj(*args, **kwargs)
+                 self._keep_wrapper = keep_wrapper
+
+         CloudpickledClassWrapper.__name__ = obj.__name__
+         return CloudpickledClassWrapper
+
+     # If obj is an instance of a class, just wrap it in a regular
+     # CloudpickledObjectWrapper
+     return _wrap_non_picklable_objects(obj, keep_wrapper=keep_wrapper)
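
`wrap_non_picklable_objects` is the public entry point of this module (joblib also re-exports it at the package level). A short sketch of the round-trip it enables for objects the standard pickler rejects:

    # Sketch: a lambda cannot go through the standard pickler, but the wrapper
    # defined above reduces it via cloudpickle and re-wraps it on load.
    import pickle
    from joblib.externals.loky.cloudpickle_wrapper import wrap_non_picklable_objects

    square = wrap_non_picklable_objects(lambda x: x * x)  # CallableObjectWrapper
    clone = pickle.loads(pickle.dumps(square))            # cloudpickle round-trip
    assert clone(4) == 16                                 # wrapper stays callable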
llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/initializers.py ADDED
@@ -0,0 +1,80 @@
+ import warnings
+
+
+ def _viztracer_init(init_kwargs):
+     """Initialize viztracer's profiler in worker processes"""
+     from viztracer import VizTracer
+
+     tracer = VizTracer(**init_kwargs)
+     tracer.register_exit()
+     tracer.start()
+
+
+ def _make_viztracer_initializer_and_initargs():
+     try:
+         import viztracer
+
+         tracer = viztracer.get_tracer()
+         if tracer is not None and getattr(tracer, "enable", False):
+             # Profiler is active: introspect its configuration to
+             # initialize the workers with the same configuration.
+             return _viztracer_init, (tracer.init_kwargs,)
+     except ImportError:
+         # viztracer is not installed: nothing to do
+         pass
+     except Exception as e:
+         # In case viztracer's API evolves, we do not want to crash loky but
+         # we want to know about it to be able to update loky.
+         warnings.warn(f"Unable to introspect viztracer state: {e}")
+     return None, ()
+
+
+ class _ChainedInitializer:
+     """Compound worker initializer
+
+     This is meant to be used in conjunction with _chain_initializers to
+     produce the necessary chained_args list to be passed to __call__.
+     """
+
+     def __init__(self, initializers):
+         self._initializers = initializers
+
+     def __call__(self, *chained_args):
+         for initializer, args in zip(self._initializers, chained_args):
+             initializer(*args)
+
+
+ def _chain_initializers(initializer_and_args):
+     """Convenience helper to combine a sequence of initializers.
+
+     If some initializers are None, they are filtered out.
+     """
+     filtered_initializers = []
+     filtered_initargs = []
+     for initializer, initargs in initializer_and_args:
+         if initializer is not None:
+             filtered_initializers.append(initializer)
+             filtered_initargs.append(initargs)
+
+     if not filtered_initializers:
+         return None, ()
+     elif len(filtered_initializers) == 1:
+         return filtered_initializers[0], filtered_initargs[0]
+     else:
+         return _ChainedInitializer(filtered_initializers), filtered_initargs
+
+
+ def _prepare_initializer(initializer, initargs):
+     if initializer is not None and not callable(initializer):
+         raise TypeError(
+             f"initializer must be a callable, got: {initializer!r}"
+         )
+
+     # Introspect runtime to determine if we need to propagate the viztracer
+     # profiler information to the workers:
+     return _chain_initializers(
+         [
+             (initializer, initargs),
+             _make_viztracer_initializer_and_initargs(),
+         ]
+     )
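
`_prepare_initializer` is what the process executor calls to merge a user-supplied initializer with the optional viztracer one. A sketch of the filtering behaviour of `_chain_initializers` (the `_set_env` helper is hypothetical, for illustration only; these are private helpers of the vendored module):

    # Sketch: None entries are dropped; a single surviving initializer is
    # returned as-is, several would be wrapped in a _ChainedInitializer.
    import os
    from joblib.externals.loky.initializers import _chain_initializers

    def _set_env(key, value):  # hypothetical worker initializer
        os.environ[key] = value

    initializer, initargs = _chain_initializers(
        [(_set_env, ("OMP_NUM_THREADS", "1")), (None, ())]
    )
    assert initializer is _set_env        # only one initializer survived
    initializer(*initargs)                # what each worker would run at startup
    assert os.environ["OMP_NUM_THREADS"] == "1"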
llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/process_executor.py ADDED
@@ -0,0 +1,1314 @@
+ ###############################################################################
+ # Re-implementation of the ProcessPoolExecutor more robust to faults
+ #
+ # author: Thomas Moreau and Olivier Grisel
+ #
+ # adapted from concurrent/futures/process_pool_executor.py (17/02/2017)
+ #  * Add an extra management thread to detect executor_manager_thread failures,
+ #  * Improve the shutdown process to avoid deadlocks,
+ #  * Add timeout for workers,
+ #  * More robust pickling process.
+ #
+ # Copyright 2009 Brian Quinlan. All Rights Reserved.
+ # Licensed to PSF under a Contributor Agreement.
+
+ """Implements ProcessPoolExecutor.
+
+ The following diagram and text describe the data-flow through the system:
+
+ |======================= In-process =====================|== Out-of-process ==|
+
+ +----------+     +----------+       +--------+     +-----------+    +---------+
+ |          |  => | Work Ids |       |        |     | Call Q    |    | Process |
+ |          |     +----------+       |        |     +-----------+    |  Pool   |
+ |          |     | ...      |       |        |     | ...       |    +---------+
+ |          |     | 6        |    => |        |  => | 5, call() | => |         |
+ |          |     | 7        |       |        |     | ...       |    |         |
+ | Process  |     | ...      |       | Local  |     +-----------+    | Process |
+ |  Pool    |     +----------+       | Worker |                      |  #1..n  |
+ | Executor |                        | Thread |                      |         |
+ |          |     +----------- +     |        |     +-----------+    |         |
+ |          | <=> | Work Items | <=> |        | <=  | Result Q  | <= |         |
+ |          |     +------------+     |        |     +-----------+    |         |
+ |          |     | 6: call() |      |        |     | ...       |    |         |
+ |          |     |    future |      +--------+     | 4, result |    |         |
+ |          |     | ...       |                     | 3, except |    |         |
+ +----------+     +------------+                    +-----------+    +---------+
+
+ Executor.submit() called:
+ - creates a uniquely numbered _WorkItem and adds it to the "Work Items" dict
+ - adds the id of the _WorkItem to the "Work Ids" queue
+
+ Local worker thread:
+ - reads work ids from the "Work Ids" queue and looks up the corresponding
+   WorkItem from the "Work Items" dict: if the work item has been cancelled then
+   it is simply removed from the dict, otherwise it is repackaged as a
+   _CallItem and put in the "Call Q". New _CallItems are put in the "Call Q"
+   until "Call Q" is full. NOTE: the size of the "Call Q" is kept small because
+   calls placed in the "Call Q" can no longer be cancelled with Future.cancel().
+ - reads _ResultItems from "Result Q", updates the future stored in the
+   "Work Items" dict and deletes the dict entry
+
+ Process #1..n:
+ - reads _CallItems from "Call Q", executes the calls, and puts the resulting
+   _ResultItems in "Result Q"
+ """
+
+
+ __author__ = "Thomas Moreau ([email protected])"
+
+
+ import os
+ import gc
+ import sys
+ import queue
+ import struct
+ import weakref
+ import warnings
+ import itertools
+ import traceback
+ import threading
+ from time import time, sleep
+ import multiprocessing as mp
+ from functools import partial
+ from pickle import PicklingError
+ from concurrent.futures import Executor
+ from concurrent.futures._base import LOGGER
+ from concurrent.futures.process import BrokenProcessPool as _BPPException
+ from multiprocessing.connection import wait
+
+ from ._base import Future
+ from .backend import get_context
+ from .backend.context import cpu_count, _MAX_WINDOWS_WORKERS
+ from .backend.queues import Queue, SimpleQueue
+ from .backend.reduction import set_loky_pickler, get_loky_pickler_name
+ from .backend.utils import kill_process_tree, get_exitcodes_terminated_worker
+ from .initializers import _prepare_initializer
+
+
+ # Mechanism to prevent infinite process spawning. When a worker of a
+ # ProcessPoolExecutor nested in MAX_DEPTH Executor tries to create a new
+ # Executor, a LokyRecursionError is raised
+ MAX_DEPTH = int(os.environ.get("LOKY_MAX_DEPTH", 10))
+ _CURRENT_DEPTH = 0
+
+ # Minimum time interval between two consecutive memory leak protection checks.
+ _MEMORY_LEAK_CHECK_DELAY = 1.0
+
+ # Number of bytes of memory usage allowed over the reference process size.
+ _MAX_MEMORY_LEAK_SIZE = int(3e8)
+
+
+ try:
+     from psutil import Process
+
+     _USE_PSUTIL = True
+
+     def _get_memory_usage(pid, force_gc=False):
+         if force_gc:
+             gc.collect()
+
+         mem_size = Process(pid).memory_info().rss
+         mp.util.debug(f"psutil return memory size: {mem_size}")
+         return mem_size
+
+ except ImportError:
+     _USE_PSUTIL = False
+
+
+ class _ThreadWakeup:
+     def __init__(self):
+         self._closed = False
+         self._reader, self._writer = mp.Pipe(duplex=False)
+
+     def close(self):
+         if not self._closed:
+             self._closed = True
+             self._writer.close()
+             self._reader.close()
+
+     def wakeup(self):
+         if not self._closed:
+             self._writer.send_bytes(b"")
+
+     def clear(self):
+         if not self._closed:
+             while self._reader.poll():
+                 self._reader.recv_bytes()
+
+
+ class _ExecutorFlags:
+     """necessary references to maintain executor states without preventing gc
+
+     It permits keeping the information needed by executor_manager_thread
+     and crash_detection_thread to maintain the pool without preventing the
+     garbage collection of unreferenced executors.
+     """
+
+     def __init__(self, shutdown_lock):
+
+         self.shutdown = False
+         self.broken = None
+         self.kill_workers = False
+         self.shutdown_lock = shutdown_lock
+
+     def flag_as_shutting_down(self, kill_workers=None):
+         with self.shutdown_lock:
+             self.shutdown = True
+             if kill_workers is not None:
+                 self.kill_workers = kill_workers
+
+     def flag_as_broken(self, broken):
+         with self.shutdown_lock:
+             self.shutdown = True
+             self.broken = broken
+
+
+ # Prior to 3.9, executor_manager_thread is created as daemon thread. This means
+ # that it is not joined automatically when the interpreter is shutting down.
+ # To work around this problem, an exit handler is installed to tell the
+ # thread to exit when the interpreter is shutting down and then waits until
+ # it finishes. The thread needs to be daemonized because the atexit hooks are
+ # called after all non daemonized threads are joined.
+ #
+ # Starting 3.9, there exists a specific atexit hook to be called before joining
+ # the threads so the executor_manager_thread does not need to be daemonized
+ # anymore.
+ #
+ # The atexit hooks are registered when starting the first ProcessPoolExecutor
+ # to avoid import having an effect on the interpreter.
+
+ _global_shutdown = False
+ _global_shutdown_lock = threading.Lock()
+ _threads_wakeups = weakref.WeakKeyDictionary()
+
+
+ def _python_exit():
+     global _global_shutdown
+     _global_shutdown = True
+
+     # Materialize the list of items to avoid error due to iterating over
+     # changing size dictionary.
+     items = list(_threads_wakeups.items())
+     if len(items) > 0:
+         mp.util.debug(
+             f"Interpreter shutting down. Waking up {len(items)} "
+             f"executor_manager_thread:\n{items}"
+         )
+
+     # Wake up the executor_manager_thread's so they can detect the interpreter
+     # is shutting down and exit.
+     for _, (shutdown_lock, thread_wakeup) in items:
+         with shutdown_lock:
+             thread_wakeup.wakeup()
+
+     # Collect the executor_manager_thread's to make sure we exit cleanly.
+     for thread, _ in items:
+         # This lock is to prevent situations where an executor is gc'ed in one
+         # thread while the atexit finalizer is running in another thread. This
+         # can happen when joblib is used in pypy for instance.
+         with _global_shutdown_lock:
+             thread.join()
+
+
+ # With the fork context, _thread_wakeups is propagated to children.
+ # Clear it after fork to avoid situations that can cause a freeze when
+ # joining the workers.
+ mp.util.register_after_fork(_threads_wakeups, lambda obj: obj.clear())
+
+
+ # Module variable to register the at_exit call
+ process_pool_executor_at_exit = None
+
+ # Controls how many more calls than processes will be queued in the call queue.
+ # A smaller number will mean that processes spend more time idle waiting for
+ # work while a larger number will make Future.cancel() succeed less frequently
+ # (Futures in the call queue cannot be cancelled).
+ EXTRA_QUEUED_CALLS = 1
+
+
+ class _RemoteTraceback(Exception):
+     """Embed stringification of remote traceback in local traceback"""
+
+     def __init__(self, tb=None):
+         self.tb = f'\n"""\n{tb}"""'
+
+     def __str__(self):
+         return self.tb
+
+
+ # Do not inherit from BaseException to mirror
+ # concurrent.futures.process._ExceptionWithTraceback
+ class _ExceptionWithTraceback:
+     def __init__(self, exc):
+         tb = getattr(exc, "__traceback__", None)
+         if tb is None:
+             _, _, tb = sys.exc_info()
+         tb = traceback.format_exception(type(exc), exc, tb)
+         tb = "".join(tb)
+         self.exc = exc
+         self.tb = tb
+
+     def __reduce__(self):
+         return _rebuild_exc, (self.exc, self.tb)
+
+
+ def _rebuild_exc(exc, tb):
+     exc.__cause__ = _RemoteTraceback(tb)
+     return exc
+
+
+ class _WorkItem:
+
+     __slots__ = ["future", "fn", "args", "kwargs"]
+
+     def __init__(self, future, fn, args, kwargs):
+         self.future = future
+         self.fn = fn
+         self.args = args
+         self.kwargs = kwargs
+
+
+ class _ResultItem:
+     def __init__(self, work_id, exception=None, result=None):
+         self.work_id = work_id
+         self.exception = exception
+         self.result = result
+
+
+ class _CallItem:
+     def __init__(self, work_id, fn, args, kwargs):
+         self.work_id = work_id
+         self.fn = fn
+         self.args = args
+         self.kwargs = kwargs
+
+         # Store the current loky_pickler so it is correctly set in the worker
+         self.loky_pickler = get_loky_pickler_name()
+
+     def __call__(self):
+         set_loky_pickler(self.loky_pickler)
+         return self.fn(*self.args, **self.kwargs)
+
+     def __repr__(self):
+         return (
+             f"CallItem({self.work_id}, {self.fn}, {self.args}, {self.kwargs})"
+         )
+
+
+ class _SafeQueue(Queue):
+     """Safe Queue that sets an exception on the future object linked to a job"""
+
+     def __init__(
+         self,
+         max_size=0,
+         ctx=None,
+         pending_work_items=None,
+         running_work_items=None,
+         thread_wakeup=None,
+         reducers=None,
+     ):
+         self.thread_wakeup = thread_wakeup
+         self.pending_work_items = pending_work_items
+         self.running_work_items = running_work_items
+         super().__init__(max_size, reducers=reducers, ctx=ctx)
+
+     def _on_queue_feeder_error(self, e, obj):
+         if isinstance(obj, _CallItem):
+             # format traceback only works on python3
+             if isinstance(e, struct.error):
+                 raised_error = RuntimeError(
+                     "The task could not be sent to the workers as it is too "
+                     "large for `send_bytes`."
+                 )
+             else:
+                 raised_error = PicklingError(
+                     "Could not pickle the task to send it to the workers."
+                 )
+             tb = traceback.format_exception(
+                 type(e), e, getattr(e, "__traceback__", None)
+             )
+             raised_error.__cause__ = _RemoteTraceback("".join(tb))
+             work_item = self.pending_work_items.pop(obj.work_id, None)
+             self.running_work_items.remove(obj.work_id)
+             # work_item can be None if another process terminated. In this
+             # case, the executor_manager_thread fails all work_items with
+             # BrokenProcessPool
+             if work_item is not None:
+                 work_item.future.set_exception(raised_error)
+                 del work_item
+             self.thread_wakeup.wakeup()
+         else:
+             super()._on_queue_feeder_error(e, obj)
+
+
+ def _get_chunks(chunksize, *iterables):
+     """Iterates over zip()ed iterables in chunks."""
+     it = zip(*iterables)
+     while True:
+         chunk = tuple(itertools.islice(it, chunksize))
+         if not chunk:
+             return
+         yield chunk
+
+
+ def _process_chunk(fn, chunk):
+     """Processes a chunk of an iterable passed to map.
+
+     Runs the function passed to map() on a chunk of the
+     iterable passed to map.
+
+     This function is run in a separate process.
+
+     """
+     return [fn(*args) for args in chunk]
+
+
+ def _sendback_result(result_queue, work_id, result=None, exception=None):
+     """Safely send back the given result or exception"""
+     try:
+         result_queue.put(
+             _ResultItem(work_id, result=result, exception=exception)
+         )
+     except BaseException as e:
+         exc = _ExceptionWithTraceback(e)
+         result_queue.put(_ResultItem(work_id, exception=exc))
+
+
+ def _process_worker(
+     call_queue,
+     result_queue,
+     initializer,
+     initargs,
+     processes_management_lock,
+     timeout,
+     worker_exit_lock,
+     current_depth,
+ ):
+     """Evaluates calls from call_queue and places the results in result_queue.
+
+     This worker is run in a separate process.
+
+     Args:
+         call_queue: A ctx.Queue of _CallItems that will be read and
+             evaluated by the worker.
+         result_queue: A ctx.Queue of _ResultItems that will be written
+             to by the worker.
+         initializer: A callable initializer, or None
+         initargs: A tuple of args for the initializer
+         processes_management_lock: A ctx.Lock avoiding worker timeout while
+             some workers are being spawned.
+         timeout: maximum time to wait for a new item in the call_queue. If that
+             time expires, the worker will shut down.
+         worker_exit_lock: Lock to avoid flagging the executor as broken on
+             workers timeout.
+         current_depth: Nested parallelism level, to avoid infinite spawning.
+     """
+     if initializer is not None:
+         try:
+             initializer(*initargs)
+         except BaseException:
+             LOGGER.critical("Exception in initializer:", exc_info=True)
+             # The parent will notice that the process stopped and
+             # mark the pool broken
+             return
+
+     # set the global _CURRENT_DEPTH mechanism to limit recursive call
+     global _CURRENT_DEPTH
+     _CURRENT_DEPTH = current_depth
+     _process_reference_size = None
+     _last_memory_leak_check = None
+     pid = os.getpid()
+
+     mp.util.debug(f"Worker started with timeout={timeout}")
+     while True:
+         try:
+             call_item = call_queue.get(block=True, timeout=timeout)
+             if call_item is None:
+                 mp.util.info("Shutting down worker on sentinel")
+         except queue.Empty:
+             mp.util.info(f"Shutting down worker after timeout {timeout:0.3f}s")
+             if processes_management_lock.acquire(block=False):
+                 processes_management_lock.release()
+                 call_item = None
+             else:
+                 mp.util.info("Could not acquire processes_management_lock")
+                 continue
+         except BaseException:
+             previous_tb = traceback.format_exc()
+             try:
+                 result_queue.put(_RemoteTraceback(previous_tb))
+             except BaseException:
+                 # If we cannot format correctly the exception, at least print
+                 # the traceback.
+                 print(previous_tb)
+             mp.util.debug("Exiting with code 1")
+             sys.exit(1)
+         if call_item is None:
+             # Notify queue management thread about worker shutdown
+             result_queue.put(pid)
+             is_clean = worker_exit_lock.acquire(True, timeout=30)
+
+             # Early notify any loky executor running in this worker process
+             # (nested parallelism) that this process is about to shutdown to
+             # avoid a deadlock waiting indefinitely for the worker to finish.
+             _python_exit()
+
+             if is_clean:
+                 mp.util.debug("Exited cleanly")
+             else:
+                 mp.util.info("Main process did not release worker_exit")
+             return
+         try:
+             r = call_item()
+         except BaseException as e:
+             exc = _ExceptionWithTraceback(e)
+             result_queue.put(_ResultItem(call_item.work_id, exception=exc))
+         else:
+             _sendback_result(result_queue, call_item.work_id, result=r)
+             del r
+
+         # Free the resource as soon as possible, to avoid holding onto
+         # open files or shared memory that is not needed anymore
+         del call_item
+
+         if _USE_PSUTIL:
+             if _process_reference_size is None:
+                 # Make reference measurement after the first call
+                 _process_reference_size = _get_memory_usage(pid, force_gc=True)
+                 _last_memory_leak_check = time()
+                 continue
+             if time() - _last_memory_leak_check > _MEMORY_LEAK_CHECK_DELAY:
+                 mem_usage = _get_memory_usage(pid)
+                 _last_memory_leak_check = time()
+                 if mem_usage - _process_reference_size < _MAX_MEMORY_LEAK_SIZE:
+                     # Memory usage stays within bounds: everything is fine.
+                     continue
+
+                 # Check again memory usage; this time take the measurement
+                 # after a forced garbage collection to break any reference
+                 # cycles.
+                 mem_usage = _get_memory_usage(pid, force_gc=True)
+                 _last_memory_leak_check = time()
+                 if mem_usage - _process_reference_size < _MAX_MEMORY_LEAK_SIZE:
+                     # The GC managed to free the memory: everything is fine.
+                     continue
+
+                 # The process is leaking memory: let the main process
+                 # know that we need to start a new worker.
+                 mp.util.info("Memory leak detected: shutting down worker")
+                 result_queue.put(pid)
+                 with worker_exit_lock:
+                     mp.util.debug("Exit due to memory leak")
+                     return
+         else:
+             # if psutil is not installed, trigger gc.collect events
+             # regularly to limit potential memory leaks due to reference cycles
+             if _last_memory_leak_check is None or (
+                 time() - _last_memory_leak_check > _MEMORY_LEAK_CHECK_DELAY
+             ):
+                 gc.collect()
+                 _last_memory_leak_check = time()
+
+
+ class _ExecutorManagerThread(threading.Thread):
+     """Manages the communication between this process and the worker processes.
+
+     The manager is run in a local thread.
+
+     Args:
+         executor: A reference to the ProcessPoolExecutor that owns
+             this thread. A weakref will be owned by the manager as well as
+             references to internal objects used to introspect the state of
+             the executor.
+     """
+
+     def __init__(self, executor):
+         # Store references to necessary internals of the executor.
+
+         # A _ThreadWakeup to allow waking up the executor_manager_thread from
+         # the main Thread and avoid deadlocks caused by permanently
+         # locked queues.
+         self.thread_wakeup = executor._executor_manager_thread_wakeup
+         self.shutdown_lock = executor._shutdown_lock
+
+         # A weakref.ref to the ProcessPoolExecutor that owns this thread. Used
+         # to determine if the ProcessPoolExecutor has been garbage collected
+         # and that the manager can exit.
+         # When the executor gets garbage collected, the weakref callback
+         # will wake up the queue management thread so that it can terminate
+         # if there is no pending work item.
+         def weakref_cb(
+             _,
+             thread_wakeup=self.thread_wakeup,
+             shutdown_lock=self.shutdown_lock,
+         ):
+             if mp is not None:
+                 # At this point, the multiprocessing module can already be
+                 # garbage collected. We only log debug info when still
+                 # possible.
+                 mp.util.debug(
+                     "Executor collected: triggering callback for"
+                     " QueueManager wakeup"
+                 )
+             with shutdown_lock:
+                 thread_wakeup.wakeup()
+
+         self.executor_reference = weakref.ref(executor, weakref_cb)
+
+         # The flags of the executor
+         self.executor_flags = executor._flags
+
+         # A list of the ctx.Process instances used as workers.
+         self.processes = executor._processes
+
+         # A ctx.Queue that will be filled with _CallItems derived from
+         # _WorkItems for processing by the process workers.
+         self.call_queue = executor._call_queue
+
+         # A ctx.SimpleQueue of _ResultItems generated by the process workers.
+         self.result_queue = executor._result_queue
+
+         # A queue.Queue of work ids e.g. Queue([5, 6, ...]).
+         self.work_ids_queue = executor._work_ids
+
+         # A dict mapping work ids to _WorkItems e.g.
+         #     {5: <_WorkItem...>, 6: <_WorkItem...>, ...}
+         self.pending_work_items = executor._pending_work_items
+
+         # A list of the work_ids that are currently running
+         self.running_work_items = executor._running_work_items
+
+         # A lock to avoid concurrent shutdown of workers on timeout and spawn
+         # of new processes or shut down
+         self.processes_management_lock = executor._processes_management_lock
+
+         super().__init__(name="ExecutorManagerThread")
+         if sys.version_info < (3, 9):
+             self.daemon = True
+
+     def run(self):
+         # Main loop for the executor manager thread.
+
+         while True:
+             self.add_call_item_to_queue()
+
+             result_item, is_broken, bpe = self.wait_result_broken_or_wakeup()
+
+             if is_broken:
+                 self.terminate_broken(bpe)
+                 return
+             if result_item is not None:
+                 self.process_result_item(result_item)
+                 # Delete reference to result_item to avoid keeping references
+                 # while waiting on new results.
+                 del result_item
+
+             if self.is_shutting_down():
+                 self.flag_executor_shutting_down()
+
+                 # Since no new work items can be added, it is safe to shutdown
+                 # this thread if there are no pending work items.
+                 if not self.pending_work_items:
+                     self.join_executor_internals()
+                     return
+
+     def add_call_item_to_queue(self):
+         # Fills call_queue with _WorkItems from pending_work_items.
+         # This function never blocks.
+         while True:
+             if self.call_queue.full():
+                 return
+             try:
+                 work_id = self.work_ids_queue.get(block=False)
+             except queue.Empty:
+                 return
+             else:
+                 work_item = self.pending_work_items[work_id]
+
+                 if work_item.future.set_running_or_notify_cancel():
+                     self.running_work_items += [work_id]
+                     self.call_queue.put(
+                         _CallItem(
+                             work_id,
+                             work_item.fn,
+                             work_item.args,
+                             work_item.kwargs,
+                         ),
+                         block=True,
+                     )
+                 else:
+                     del self.pending_work_items[work_id]
+                     continue
+
+     def wait_result_broken_or_wakeup(self):
+         # Wait for a result to be ready in the result_queue while checking
+         # that all worker processes are still running, or for a wake-up
+         # signal to be sent. The wake-up signals come either from new tasks
+         # being submitted, from the executor being shutdown/gc-ed, or from
+         # the shutdown of the python interpreter.
+         result_reader = self.result_queue._reader
+         wakeup_reader = self.thread_wakeup._reader
+         readers = [result_reader, wakeup_reader]
+         worker_sentinels = [p.sentinel for p in list(self.processes.values())]
+         ready = wait(readers + worker_sentinels)
+
+         bpe = None
+         is_broken = True
+         result_item = None
+         if result_reader in ready:
+             try:
+                 result_item = result_reader.recv()
+                 if isinstance(result_item, _RemoteTraceback):
+                     bpe = BrokenProcessPool(
+                         "A task has failed to un-serialize. Please ensure that"
+                         " the arguments of the function are all picklable."
+                     )
+                     bpe.__cause__ = result_item
+                 else:
+                     is_broken = False
+             except BaseException as e:
+                 bpe = BrokenProcessPool(
+                     "A result has failed to un-serialize. Please ensure that "
+                     "the objects returned by the function are always "
+                     "picklable."
+                 )
+                 tb = traceback.format_exception(
+                     type(e), e, getattr(e, "__traceback__", None)
+                 )
+                 bpe.__cause__ = _RemoteTraceback("".join(tb))
+
+         elif wakeup_reader in ready:
+             # This is simply a wake-up event that might either trigger putting
+             # more tasks in the queue or trigger the clean up of resources.
+             is_broken = False
+         else:
+             # A worker has terminated and we don't know why, set the state of
+             # the executor as broken
+             exit_codes = ""
+             if sys.platform != "win32":
+                 # On Windows, introspecting terminated workers exitcodes seems
+                 # unstable, therefore they are not appended in the exception
+                 # message.
+                 exit_codes = (
+                     "\nThe exit codes of the workers are "
+                     f"{get_exitcodes_terminated_worker(self.processes)}"
+                 )
+             mp.util.debug(
+                 "A worker unexpectedly terminated. Workers that "
+                 "might have caused the breakage: "
+                 + str(
+                     {
+                         p.name: p.exitcode
+                         for p in list(self.processes.values())
+                         if p is not None and p.sentinel in ready
+                     }
+                 )
+             )
+             bpe = TerminatedWorkerError(
+                 "A worker process managed by the executor was unexpectedly "
+                 "terminated. This could be caused by a segmentation fault "
+                 "while calling the function or by an excessive memory usage "
+                 "causing the Operating System to kill the worker.\n"
+                 f"{exit_codes}"
+             )
+
+         self.thread_wakeup.clear()
+
+         return result_item, is_broken, bpe
+
+     def process_result_item(self, result_item):
+         # Process the received result_item. This can be either the PID of a
+         # worker that exited gracefully or a _ResultItem
+
+         if isinstance(result_item, int):
+             # Clean shutdown of a worker using its PID, either on request
+             # by the executor.shutdown method or by the timeout of the worker
+             # itself: we should not mark the executor as broken.
+             with self.processes_management_lock:
+                 p = self.processes.pop(result_item, None)
+
+             # p can be None if the executor is concurrently shutting down.
+             if p is not None:
+                 p._worker_exit_lock.release()
+                 mp.util.debug(
+                     f"joining {p.name} when processing {p.pid} as result_item"
+                 )
+                 p.join()
+                 del p
+
+             # Make sure the executor has the right number of workers, even if
+             # a worker timed out while some jobs were submitted. If some work
+             # is pending or there are fewer processes than running items, we
+             # need to start a new Process and raise a warning.
+             n_pending = len(self.pending_work_items)
+             n_running = len(self.running_work_items)
+             if n_pending - n_running > 0 or n_running > len(self.processes):
+                 executor = self.executor_reference()
+                 if (
+                     executor is not None
+                     and len(self.processes) < executor._max_workers
+                 ):
+                     warnings.warn(
+                         "A worker stopped while some jobs were given to the "
+                         "executor. This can be caused by a too short worker "
+                         "timeout or by a memory leak.",
+                         UserWarning,
+                     )
+                     with executor._processes_management_lock:
+                         executor._adjust_process_count()
+                     executor = None
+         else:
+             # Received a _ResultItem so mark the future as completed.
+             work_item = self.pending_work_items.pop(result_item.work_id, None)
+             # work_item can be None if another process terminated (see above)
+             if work_item is not None:
+                 if result_item.exception:
+                     work_item.future.set_exception(result_item.exception)
+                 else:
+                     work_item.future.set_result(result_item.result)
+             self.running_work_items.remove(result_item.work_id)
+
+     def is_shutting_down(self):
+         # Check whether we should start shutting down the executor.
+         executor = self.executor_reference()
+         # No more work items can be added if:
+         #   - The interpreter is shutting down OR
+         #   - The executor that owns this thread is not broken AND
+         #        * The executor that owns this worker has been collected OR
+         #        * The executor that owns this worker has been shutdown.
+         # If the executor is broken, it should be detected in the next loop.
+         return _global_shutdown or (
+             (executor is None or self.executor_flags.shutdown)
+             and not self.executor_flags.broken
+         )
+
+     def terminate_broken(self, bpe):
+         # Terminate the executor because it is in a broken state. The bpe
+         # argument can be used to display more information on the error that
+         # led the executor into becoming broken.
+
+         # Mark the process pool broken so that submits fail right now.
+         self.executor_flags.flag_as_broken(bpe)
+
+         # Mark pending tasks as failed.
+         for work_item in self.pending_work_items.values():
+             work_item.future.set_exception(bpe)
+             # Delete references to object. See issue16284
+             del work_item
+         self.pending_work_items.clear()
+
+         # Terminate remaining workers forcibly: the queues or their
+         # locks may be in a dirty state and block forever.
+         self.kill_workers(reason="broken executor")
+
+         # clean up resources
+         self.join_executor_internals()
+
+     def flag_executor_shutting_down(self):
+         # Flag the executor as shutting down and cancel remaining tasks if
+         # requested as early as possible if it is not gc-ed yet.
+         self.executor_flags.flag_as_shutting_down()
+
+         # Cancel pending work items if requested.
+         if self.executor_flags.kill_workers:
+             while self.pending_work_items:
+                 _, work_item = self.pending_work_items.popitem()
+                 work_item.future.set_exception(
+                     ShutdownExecutorError(
+                         "The Executor was shutdown with `kill_workers=True` "
+                         "before this job could complete."
+                     )
+                 )
+                 del work_item
+
+             # Kill the remaining workers forcibly so as not to waste time
+             # joining them
+             self.kill_workers(reason="executor shutting down")
+
+     def kill_workers(self, reason=""):
+         # Terminate the remaining workers using SIGKILL. This function also
+         # terminates descendant workers of the children in case there is some
+         # nested parallelism.
+         while self.processes:
+             _, p = self.processes.popitem()
+             mp.util.debug(f"terminate process {p.name}, reason: {reason}")
+             try:
+                 kill_process_tree(p)
+             except ProcessLookupError:  # pragma: no cover
+                 pass
+
+     def shutdown_workers(self):
+         # shutdown all workers in self.processes
+
+         # Create a list to avoid RuntimeError due to concurrent modification of
+         # processes. nb_children_alive is thus an upper bound. Also release the
+         # processes' _worker_exit_lock to accelerate the shutdown procedure, as
+         # there is no need for hand-shake here.
+         with self.processes_management_lock:
+             n_children_to_stop = 0
+             for p in list(self.processes.values()):
+                 mp.util.debug(f"releasing worker exit lock on {p.name}")
+                 p._worker_exit_lock.release()
+                 n_children_to_stop += 1
+
+         mp.util.debug(f"found {n_children_to_stop} processes to stop")
+
+         # Send the right number of sentinels, to make sure all children are
+         # properly terminated. Do it with a mechanism that avoids hanging on
+         # a full queue when all workers have already been shut down.
+         n_sentinels_sent = 0
+         cooldown_time = 0.001
+         while (
+             n_sentinels_sent < n_children_to_stop
+             and self.get_n_children_alive() > 0
+         ):
+             for _ in range(n_children_to_stop - n_sentinels_sent):
+                 try:
+                     self.call_queue.put_nowait(None)
+                     n_sentinels_sent += 1
+                 except queue.Full as e:
+                     if cooldown_time > 5.0:
+                         mp.util.info(
+                             "failed to send all sentinels and exit with error."
+                             f"\ncall_queue size={self.call_queue._maxsize}; "
+                             f" full is {self.call_queue.full()}; "
+                         )
+                         raise e
+                     mp.util.info(
+                         "full call_queue prevented sending all sentinels at "
+                         "once, waiting..."
+                     )
+                     sleep(cooldown_time)
+                     cooldown_time *= 1.2
+                     break
+
+         mp.util.debug(f"sent {n_sentinels_sent} sentinels to the call queue")
+
+     def join_executor_internals(self):
+         self.shutdown_workers()
+
+         # Release the queue's resources as soon as possible. Flag the feeder
+         # thread for clean exit to avoid having the crash detection thread flag
+         # the Executor as broken during the shutdown. This is safe as either:
+         # * We don't need to communicate with the workers anymore
+         # * There is nothing left in the Queue buffer except None sentinels
+         mp.util.debug("closing call_queue")
+         self.call_queue.close()
+         self.call_queue.join_thread()
+
+         # Closing result_queue
+         mp.util.debug("closing result_queue")
+         self.result_queue.close()
+
+         mp.util.debug("closing thread_wakeup")
+         with self.shutdown_lock:
+             self.thread_wakeup.close()
+
+         # If .join() is not called on the created processes then
+         # some ctx.Queue methods may deadlock on macOS.
+         with self.processes_management_lock:
+             mp.util.debug(f"joining {len(self.processes)} processes")
+             n_joined_processes = 0
+             while True:
+                 try:
+                     pid, p = self.processes.popitem()
+                     mp.util.debug(f"joining process {p.name} with pid {pid}")
+                     p.join()
+                     n_joined_processes += 1
+                 except KeyError:
+                     break
+
+         mp.util.debug(
+             "executor management thread clean shutdown of "
+             f"{n_joined_processes} workers"
+         )
+
+     def get_n_children_alive(self):
+         # This is an upper bound on the number of children alive.
+         with self.processes_management_lock:
+             return sum(p.is_alive() for p in list(self.processes.values()))
+
+
+ _system_limits_checked = False
+ _system_limited = None
+
+
+ def _check_system_limits():
+     global _system_limits_checked, _system_limited
+     if _system_limits_checked and _system_limited:
+         raise NotImplementedError(_system_limited)
+     _system_limits_checked = True
+     try:
+         nsems_max = os.sysconf("SC_SEM_NSEMS_MAX")
+     except (AttributeError, ValueError):
+         # sysconf not available or setting not available
+         return
+     if nsems_max == -1:
+         # undetermined limit, assume that limit is determined
+         # by available memory only
+         return
+     if nsems_max >= 256:
+         # minimum number of semaphores available
+         # according to POSIX
+         return
+     _system_limited = (
+         f"system provides too few semaphores ({nsems_max} available, "
+         "256 necessary)"
+     )
+     raise NotImplementedError(_system_limited)
+
+
+ def _chain_from_iterable_of_lists(iterable):
+     """
+     Specialized implementation of itertools.chain.from_iterable.
+     Each item in *iterable* should be a list. This function is
+     careful not to keep references to yielded objects.
+     """
+     for element in iterable:
+         element.reverse()
+         while element:
+             yield element.pop()
+
+
+ def _check_max_depth(context):
+     # Limit the maximal recursion level
+     global _CURRENT_DEPTH
+     if context.get_start_method() == "fork" and _CURRENT_DEPTH > 0:
+         raise LokyRecursionError(
+             "Could not spawn extra nested processes at depth superior to "
+             "MAX_DEPTH=1. It is not possible to increase this limit when "
+             "using the 'fork' start method."
+         )
+
+     if 0 < MAX_DEPTH and _CURRENT_DEPTH + 1 > MAX_DEPTH:
+         raise LokyRecursionError(
+             "Could not spawn extra nested processes at depth superior to "
+             f"MAX_DEPTH={MAX_DEPTH}. If this is intended, you can change "
+             "this limit with the LOKY_MAX_DEPTH environment variable."
+         )
+
+
+ class LokyRecursionError(RuntimeError):
+     """A process tries to spawn too many levels of nested processes."""
+
+
+ class BrokenProcessPool(_BPPException):
+     """
+     Raised when the executor is broken while a future was in the running state.
+     The cause can be an error raised when unpickling the task in the worker
+     process or when unpickling the result value in the parent process. It can
+     also be caused by a worker process being terminated unexpectedly.
+     """
+
+
+ class TerminatedWorkerError(BrokenProcessPool):
+     """
+     Raised when a process in a ProcessPoolExecutor terminated abruptly
+     while a future was in the running state.
+     """
+
+
+ # Alias for backward compat (for code written for loky 1.1.4 and earlier). Do
+ # not use in new code.
+ BrokenExecutor = BrokenProcessPool
+
+
+ class ShutdownExecutorError(RuntimeError):
+
+     """
+     Raised when a ProcessPoolExecutor is shutdown while a future was in the
+     running or pending state.
+     """
+
+
1024
+ class ProcessPoolExecutor(Executor):
1025
+
1026
+ _at_exit = None
1027
+
1028
+ def __init__(
1029
+ self,
1030
+ max_workers=None,
1031
+ job_reducers=None,
1032
+ result_reducers=None,
1033
+ timeout=None,
1034
+ context=None,
1035
+ initializer=None,
1036
+ initargs=(),
1037
+ env=None,
1038
+ ):
1039
+ """Initializes a new ProcessPoolExecutor instance.
1040
+
1041
+ Args:
1042
+ max_workers: int, optional (default: cpu_count())
1043
+ The maximum number of processes that can be used to execute the
1044
+ given calls. If None or not given then as many worker processes
1045
+ will be created as the number of CPUs the current process
1046
+ can use.
1047
+ job_reducers, result_reducers: dict(type: reducer_func)
1048
+ Custom reducer for pickling the jobs and the results from the
1049
+ Executor. If only `job_reducers` is provided, `result_reducer`
1050
+ will use the same reducers
1051
+ timeout: int, optional (default: None)
1052
+ Idle workers exit after timeout seconds. If a new job is
1053
+ submitted after the timeout, the executor will start enough
1054
+ new Python processes to make sure the pool of workers is full.
1055
+ context: A multiprocessing context to launch the workers. This
1056
+ object should provide SimpleQueue, Queue and Process.
1057
+ initializer: An callable used to initialize worker processes.
1058
+ initargs: A tuple of arguments to pass to the initializer.
1059
+ env: A dict of environment variable to overwrite in the child
1060
+ process. The environment variables are set before any module is
1061
+ loaded. Note that this only works with the loky context.
1062
+ """
1063
+ _check_system_limits()
1064
+
1065
+ if max_workers is None:
1066
+ self._max_workers = cpu_count()
1067
+ else:
1068
+ if max_workers <= 0:
1069
+ raise ValueError("max_workers must be greater than 0")
1070
+ self._max_workers = max_workers
1071
+
1072
+ if (
1073
+ sys.platform == "win32"
1074
+ and self._max_workers > _MAX_WINDOWS_WORKERS
1075
+ ):
1076
+ warnings.warn(
1077
+ f"On Windows, max_workers cannot exceed {_MAX_WINDOWS_WORKERS} "
1078
+ "due to limitations of the operating system."
1079
+ )
1080
+ self._max_workers = _MAX_WINDOWS_WORKERS
1081
+
1082
+ if context is None:
1083
+ context = get_context()
1084
+ self._context = context
1085
+ self._env = env
1086
+
1087
+ self._initializer, self._initargs = _prepare_initializer(
1088
+ initializer, initargs
1089
+ )
1090
+ _check_max_depth(self._context)
1091
+
1092
+ if result_reducers is None:
1093
+ result_reducers = job_reducers
1094
+
1095
+ # Timeout
1096
+ self._timeout = timeout
1097
+
1098
+ # Management thread
1099
+ self._executor_manager_thread = None
1100
+
1101
+ # Map of pids to processes
1102
+ self._processes = {}
1103
+
1104
+ # Internal variables of the ProcessPoolExecutor
1105
+ self._processes = {}
1106
+ self._queue_count = 0
1107
+ self._pending_work_items = {}
1108
+ self._running_work_items = []
1109
+ self._work_ids = queue.Queue()
1110
+ self._processes_management_lock = self._context.Lock()
1111
+ self._executor_manager_thread = None
1112
+ self._shutdown_lock = threading.Lock()
1113
+
1114
+ # _ThreadWakeup is a communication channel used to interrupt the wait
1115
+ # of the main loop of executor_manager_thread from another thread (e.g.
1116
+ # when calling executor.submit or executor.shutdown). We do not use the
1117
+ # _result_queue to send wakeup signals to the executor_manager_thread
1118
+ # as it could result in a deadlock if a worker process dies with the
1119
+ # _result_queue write lock still acquired.
1120
+ #
1121
+ # _shutdown_lock must be locked to access _ThreadWakeup.wakeup.
1122
+ self._executor_manager_thread_wakeup = _ThreadWakeup()
1123
+
1124
+ # Flag to hold the state of the Executor. This makes it possible to
1125
+ # introspect the Executor state even once it has been garbage collected.
1126
+ self._flags = _ExecutorFlags(self._shutdown_lock)
1127
+
1128
+ # Finally setup the queues for interprocess communication
1129
+ self._setup_queues(job_reducers, result_reducers)
1130
+
1131
+ mp.util.debug("ProcessPoolExecutor is setup")
1132
+
1133
+ def _setup_queues(self, job_reducers, result_reducers, queue_size=None):
1134
+ # Make the call queue slightly larger than the number of processes to
1135
+ # prevent the worker processes from idling. But don't make it too big
1136
+ # because futures in the call queue cannot be cancelled.
1137
+ if queue_size is None:
1138
+ queue_size = 2 * self._max_workers + EXTRA_QUEUED_CALLS
1139
+ self._call_queue = _SafeQueue(
1140
+ max_size=queue_size,
1141
+ pending_work_items=self._pending_work_items,
1142
+ running_work_items=self._running_work_items,
1143
+ thread_wakeup=self._executor_manager_thread_wakeup,
1144
+ reducers=job_reducers,
1145
+ ctx=self._context,
1146
+ )
1147
+ # Killed worker processes can produce spurious "broken pipe"
1148
+ # tracebacks in the queue's own worker thread. But we detect killed
1149
+ # processes anyway, so silence the tracebacks.
1150
+ self._call_queue._ignore_epipe = True
1151
+
1152
+ self._result_queue = SimpleQueue(
1153
+ reducers=result_reducers, ctx=self._context
1154
+ )
1155
+
1156
+ def _start_executor_manager_thread(self):
1157
+ if self._executor_manager_thread is None:
1158
+ mp.util.debug("_start_executor_manager_thread called")
1159
+
1160
+ # Start the processes so that their sentinels are known.
1161
+ self._executor_manager_thread = _ExecutorManagerThread(self)
1162
+ self._executor_manager_thread.start()
1163
+
1164
+ # Register this executor in a mechanism that ensures it will wake up
1165
+ # when the interpreter is exiting.
1166
+ _threads_wakeups[self._executor_manager_thread] = (
1167
+ self._shutdown_lock,
1168
+ self._executor_manager_thread_wakeup,
1169
+ )
1170
+
1171
+ global process_pool_executor_at_exit
1172
+ if process_pool_executor_at_exit is None:
1173
+ # Ensure that the _python_exit function will be called before
1174
+ # the multiprocessing.Queue._close finalizers which have an
1175
+ # exitpriority of 10.
1176
+
1177
+ if sys.version_info < (3, 9):
1178
+ process_pool_executor_at_exit = mp.util.Finalize(
1179
+ None, _python_exit, exitpriority=20
1180
+ )
1181
+ else:
1182
+ process_pool_executor_at_exit = threading._register_atexit(
1183
+ _python_exit
1184
+ )
1185
+
1186
+ def _adjust_process_count(self):
1187
+ while len(self._processes) < self._max_workers:
1188
+ worker_exit_lock = self._context.BoundedSemaphore(1)
1189
+ args = (
1190
+ self._call_queue,
1191
+ self._result_queue,
1192
+ self._initializer,
1193
+ self._initargs,
1194
+ self._processes_management_lock,
1195
+ self._timeout,
1196
+ worker_exit_lock,
1197
+ _CURRENT_DEPTH + 1,
1198
+ )
1199
+ worker_exit_lock.acquire()
1200
+ try:
1201
+ # Try to spawn the process with some environment variables to
1202
+ # overwrite but it only works with the loky context for now.
1203
+ p = self._context.Process(
1204
+ target=_process_worker, args=args, env=self._env
1205
+ )
1206
+ except TypeError:
1207
+ p = self._context.Process(target=_process_worker, args=args)
1208
+ p._worker_exit_lock = worker_exit_lock
1209
+ p.start()
1210
+ self._processes[p.pid] = p
1211
+ mp.util.debug(
1212
+ f"Adjusted process count to {self._max_workers}: "
1213
+ f"{[(p.name, pid) for pid, p in self._processes.items()]}"
1214
+ )
1215
+
1216
+ def _ensure_executor_running(self):
1217
+ """ensures all workers and management thread are running"""
1218
+ with self._processes_management_lock:
1219
+ if len(self._processes) != self._max_workers:
1220
+ self._adjust_process_count()
1221
+ self._start_executor_manager_thread()
1222
+
1223
+ def submit(self, fn, *args, **kwargs):
1224
+ with self._flags.shutdown_lock:
1225
+ if self._flags.broken is not None:
1226
+ raise self._flags.broken
1227
+ if self._flags.shutdown:
1228
+ raise ShutdownExecutorError(
1229
+ "cannot schedule new futures after shutdown"
1230
+ )
1231
+
1232
+ # Cannot submit new calls once the interpreter is shutting down.
1233
+ # This check avoids spawning new processes at exit.
1234
+ if _global_shutdown:
1235
+ raise RuntimeError(
1236
+ "cannot schedule new futures after " "interpreter shutdown"
1237
+ )
1238
+
1239
+ f = Future()
1240
+ w = _WorkItem(f, fn, args, kwargs)
1241
+
1242
+ self._pending_work_items[self._queue_count] = w
1243
+ self._work_ids.put(self._queue_count)
1244
+ self._queue_count += 1
1245
+ # Wake up queue management thread
1246
+ self._executor_manager_thread_wakeup.wakeup()
1247
+
1248
+ self._ensure_executor_running()
1249
+ return f
1250
+
1251
+ submit.__doc__ = Executor.submit.__doc__
1252
+
1253
+ def map(self, fn, *iterables, **kwargs):
1254
+ """Returns an iterator equivalent to map(fn, iter).
1255
+
1256
+ Args:
1257
+ fn: A callable that will take as many arguments as there are
1258
+ passed iterables.
1259
+ timeout: The maximum number of seconds to wait. If None, then there
1260
+ is no limit on the wait time.
1261
+ chunksize: If greater than one, the iterables will be chopped into
1262
+ chunks of size chunksize and submitted to the process pool.
1263
+ If set to one, the items in the list will be sent one at a
1264
+ time.
1265
+
1266
+ Returns:
1267
+ An iterator equivalent to: map(func, *iterables) but the calls may
1268
+ be evaluated out-of-order.
1269
+
1270
+ Raises:
1271
+ TimeoutError: If the entire result iterator could not be generated
1272
+ before the given timeout.
1273
+ Exception: If fn(*args) raises for any values.
1274
+ """
1275
+ timeout = kwargs.get("timeout", None)
1276
+ chunksize = kwargs.get("chunksize", 1)
1277
+ if chunksize < 1:
1278
+ raise ValueError("chunksize must be >= 1.")
1279
+
1280
+ results = super().map(
1281
+ partial(_process_chunk, fn),
1282
+ _get_chunks(chunksize, *iterables),
1283
+ timeout=timeout,
1284
+ )
1285
+ return _chain_from_iterable_of_lists(results)
1286
+
1287
+ def shutdown(self, wait=True, kill_workers=False):
1288
+ mp.util.debug(f"shutting down executor {self}")
1289
+
1290
+ self._flags.flag_as_shutting_down(kill_workers)
1291
+ executor_manager_thread = self._executor_manager_thread
1292
+ executor_manager_thread_wakeup = self._executor_manager_thread_wakeup
1293
+
1294
+ if executor_manager_thread_wakeup is not None:
1295
+ # Wake up queue management thread
1296
+ with self._shutdown_lock:
1297
+ self._executor_manager_thread_wakeup.wakeup()
1298
+
1299
+ if executor_manager_thread is not None and wait:
1300
+ # This lock avoids a concurrent join if the interpreter
1301
+ # is shutting down.
1302
+ with _global_shutdown_lock:
1303
+ executor_manager_thread.join()
1304
+ _threads_wakeups.pop(executor_manager_thread, None)
1305
+
1306
+ # To reduce the risk of opening too many files, remove references to
1307
+ # objects that use file descriptors.
1308
+ self._executor_manager_thread = None
1309
+ self._executor_manager_thread_wakeup = None
1310
+ self._call_queue = None
1311
+ self._result_queue = None
1312
+ self._processes_management_lock = None
1313
+
1314
+ shutdown.__doc__ = Executor.shutdown.__doc__
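
For orientation, a minimal usage sketch of the executor defined above (assumptions: the standalone `loky` package re-exports ProcessPoolExecutor from its root, while inside joblib the same class lives under `joblib.externals.loky`; `square` and the parameter values are illustrative only):

from loky import ProcessPoolExecutor

def square(x):
    return x * x

if __name__ == "__main__":
    # Workers are spawned lazily on the first submit(); idle workers
    # exit after `timeout` seconds and are respawned on demand. Nested
    # spawning is capped by LOKY_MAX_DEPTH (see _check_max_depth above).
    with ProcessPoolExecutor(max_workers=2, timeout=10) as executor:
        print(executor.submit(square, 3).result())                # 9
        print(list(executor.map(square, range(4), chunksize=2)))  # [0, 1, 4, 9]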
llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/reusable_executor.py ADDED
@@ -0,0 +1,285 @@
1
+ ###############################################################################
2
+ # Reusable ProcessPoolExecutor
3
+ #
4
+ # author: Thomas Moreau and Olivier Grisel
5
+ #
6
+ import time
7
+ import warnings
8
+ import threading
9
+ import multiprocessing as mp
10
+
11
+ from .process_executor import ProcessPoolExecutor, EXTRA_QUEUED_CALLS
12
+ from .backend.context import cpu_count
13
+ from .backend import get_context
14
+
15
+ __all__ = ["get_reusable_executor"]
16
+
17
+ # Singleton executor and id management
18
+ _executor_lock = threading.RLock()
19
+ _next_executor_id = 0
20
+ _executor = None
21
+ _executor_kwargs = None
22
+
23
+
24
+ def _get_next_executor_id():
25
+ """Ensure that each successive executor instance has a unique, monotonic id.
26
+
27
+ The purpose of this monotonic id is to help debug and test automated
28
+ instance creation.
29
+ """
30
+ global _next_executor_id
31
+ with _executor_lock:
32
+ executor_id = _next_executor_id
33
+ _next_executor_id += 1
34
+ return executor_id
35
+
36
+
37
+ def get_reusable_executor(
38
+ max_workers=None,
39
+ context=None,
40
+ timeout=10,
41
+ kill_workers=False,
42
+ reuse="auto",
43
+ job_reducers=None,
44
+ result_reducers=None,
45
+ initializer=None,
46
+ initargs=(),
47
+ env=None,
48
+ ):
49
+ """Return the current ReusableExectutor instance.
50
+
51
+ Start a new instance if it has not been started already or if the previous
52
+ instance was left in a broken state.
53
+
54
+ If the previous instance does not have the requested number of workers, the
55
+ executor is dynamically resized to adjust the number of workers prior to
56
+ returning.
57
+
58
+ Reusing a singleton instance spares the overhead of starting new worker
59
+ processes and importing common python packages each time.
60
+
61
+ ``max_workers`` controls the maximum number of tasks that can be running in
62
+ parallel in worker processes. By default this is set to the number of
63
+ CPUs on the host.
64
+
65
+ Setting ``timeout`` (in seconds) makes idle workers automatically shut down
66
+ so as to release system resources. New workers are respawned upon submission
67
+ of new tasks so that ``max_workers`` are available to accept the newly
68
+ submitted tasks. Setting ``timeout`` to around 100 times the time required
69
+ to spawn new processes and import packages in them (on the order of 100ms)
70
+ ensures that the overhead of spawning workers is negligible.
71
+
72
+ Setting ``kill_workers=True`` makes it possible to forcibly interrupt
73
+ previously spawned jobs to get a new instance of the reusable executor
74
+ with new constructor argument values.
75
+
76
+ The ``job_reducers`` and ``result_reducers`` are used to customize the
77
+ pickling of tasks and results sent to the executor.
78
+
79
+ When provided, the ``initializer`` is run first in newly spawned
80
+ processes with argument ``initargs``.
81
+
82
+ The environment variables in the child processes are a copy of the values in
83
+ the main process. One can provide a dict ``{ENV: VAL}`` where ``ENV`` and
84
+ ``VAL`` are string literals to overwrite the environment variable ``ENV``
85
+ in the child processes to value ``VAL``. The environment variables are set
86
+ in the children before any module is loaded. This only works with the
87
+ ``loky`` context.
88
+ """
89
+ _executor, _ = _ReusablePoolExecutor.get_reusable_executor(
90
+ max_workers=max_workers,
91
+ context=context,
92
+ timeout=timeout,
93
+ kill_workers=kill_workers,
94
+ reuse=reuse,
95
+ job_reducers=job_reducers,
96
+ result_reducers=result_reducers,
97
+ initializer=initializer,
98
+ initargs=initargs,
99
+ env=env,
100
+ )
101
+ return _executor
102
+
103
+
104
+ class _ReusablePoolExecutor(ProcessPoolExecutor):
105
+ def __init__(
106
+ self,
107
+ submit_resize_lock,
108
+ max_workers=None,
109
+ context=None,
110
+ timeout=None,
111
+ executor_id=0,
112
+ job_reducers=None,
113
+ result_reducers=None,
114
+ initializer=None,
115
+ initargs=(),
116
+ env=None,
117
+ ):
118
+ super().__init__(
119
+ max_workers=max_workers,
120
+ context=context,
121
+ timeout=timeout,
122
+ job_reducers=job_reducers,
123
+ result_reducers=result_reducers,
124
+ initializer=initializer,
125
+ initargs=initargs,
126
+ env=env,
127
+ )
128
+ self.executor_id = executor_id
129
+ self._submit_resize_lock = submit_resize_lock
130
+
131
+ @classmethod
132
+ def get_reusable_executor(
133
+ cls,
134
+ max_workers=None,
135
+ context=None,
136
+ timeout=10,
137
+ kill_workers=False,
138
+ reuse="auto",
139
+ job_reducers=None,
140
+ result_reducers=None,
141
+ initializer=None,
142
+ initargs=(),
143
+ env=None,
144
+ ):
145
+ with _executor_lock:
146
+ global _executor, _executor_kwargs
147
+ executor = _executor
148
+
149
+ if max_workers is None:
150
+ if reuse is True and executor is not None:
151
+ max_workers = executor._max_workers
152
+ else:
153
+ max_workers = cpu_count()
154
+ elif max_workers <= 0:
155
+ raise ValueError(
156
+ f"max_workers must be greater than 0, got {max_workers}."
157
+ )
158
+
159
+ if isinstance(context, str):
160
+ context = get_context(context)
161
+ if context is not None and context.get_start_method() == "fork":
162
+ raise ValueError(
163
+ "Cannot use reusable executor with the 'fork' context"
164
+ )
165
+
166
+ kwargs = dict(
167
+ context=context,
168
+ timeout=timeout,
169
+ job_reducers=job_reducers,
170
+ result_reducers=result_reducers,
171
+ initializer=initializer,
172
+ initargs=initargs,
173
+ env=env,
174
+ )
175
+ if executor is None:
176
+ is_reused = False
177
+ mp.util.debug(
178
+ f"Create a executor with max_workers={max_workers}."
179
+ )
180
+ executor_id = _get_next_executor_id()
181
+ _executor_kwargs = kwargs
182
+ _executor = executor = cls(
183
+ _executor_lock,
184
+ max_workers=max_workers,
185
+ executor_id=executor_id,
186
+ **kwargs,
187
+ )
188
+ else:
189
+ if reuse == "auto":
190
+ reuse = kwargs == _executor_kwargs
191
+ if (
192
+ executor._flags.broken
193
+ or executor._flags.shutdown
194
+ or not reuse
195
+ ):
196
+ if executor._flags.broken:
197
+ reason = "broken"
198
+ elif executor._flags.shutdown:
199
+ reason = "shutdown"
200
+ else:
201
+ reason = "arguments have changed"
202
+ mp.util.debug(
203
+ "Creating a new executor with max_workers="
204
+ f"{max_workers} as the previous instance cannot be "
205
+ f"reused ({reason})."
206
+ )
207
+ executor.shutdown(wait=True, kill_workers=kill_workers)
208
+ _executor = executor = _executor_kwargs = None
209
+ # Recursive call to build a new instance
210
+ return cls.get_reusable_executor(
211
+ max_workers=max_workers, **kwargs
212
+ )
213
+ else:
214
+ mp.util.debug(
215
+ "Reusing existing executor with "
216
+ f"max_workers={executor._max_workers}."
217
+ )
218
+ is_reused = True
219
+ executor._resize(max_workers)
220
+
221
+ return executor, is_reused
222
+
223
+ def submit(self, fn, *args, **kwargs):
224
+ with self._submit_resize_lock:
225
+ return super().submit(fn, *args, **kwargs)
226
+
227
+ def _resize(self, max_workers):
228
+ with self._submit_resize_lock:
229
+ if max_workers is None:
230
+ raise ValueError("Trying to resize with max_workers=None")
231
+ elif max_workers == self._max_workers:
232
+ return
233
+
234
+ if self._executor_manager_thread is None:
235
+ # If the executor_manager_thread has not been started
236
+ # then no processes have been spawned and we can just
237
+ # update _max_workers and return
238
+ self._max_workers = max_workers
239
+ return
240
+
241
+ self._wait_job_completion()
242
+
243
+ # Some processes might have exited due to the timeout, so check how
244
+ # many children are still alive. Use _processes_management_lock to
245
+ # ensure that no processes are spawned or time out during the resize.
246
+ with self._processes_management_lock:
247
+ processes = list(self._processes.values())
248
+ nb_children_alive = sum(p.is_alive() for p in processes)
249
+ self._max_workers = max_workers
250
+ for _ in range(max_workers, nb_children_alive):
251
+ self._call_queue.put(None)
252
+ while (
253
+ len(self._processes) > max_workers and not self._flags.broken
254
+ ):
255
+ time.sleep(1e-3)
256
+
257
+ self._adjust_process_count()
258
+ processes = list(self._processes.values())
259
+ while not all(p.is_alive() for p in processes):
260
+ time.sleep(1e-3)
261
+
262
+ def _wait_job_completion(self):
263
+ """Wait for the cache to be empty before resizing the pool."""
264
+ # Issue a warning to the user about the bad effect of this usage.
265
+ if self._pending_work_items:
266
+ warnings.warn(
267
+ "Trying to resize an executor with running jobs: "
268
+ "waiting for jobs completion before resizing.",
269
+ UserWarning,
270
+ )
271
+ mp.util.debug(
272
+ f"Executor {self.executor_id} waiting for jobs completion "
273
+ "before resizing"
274
+ )
275
+ # Wait for the completion of the jobs
276
+ while self._pending_work_items:
277
+ time.sleep(1e-3)
278
+
279
+ def _setup_queues(self, job_reducers, result_reducers):
280
+ # As this executor can be resized, use a large queue size to avoid
281
+ # underestimating capacity and introducing overhead
282
+ queue_size = 2 * cpu_count() + EXTRA_QUEUED_CALLS
283
+ super()._setup_queues(
284
+ job_reducers, result_reducers, queue_size=queue_size
285
+ )
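
A hedged sketch of the singleton semantics implemented above (`work` and the argument values are made up; the import path assumes the standalone `loky` package):

from loky import get_reusable_executor

def work(x):
    return x ** 2

if __name__ == "__main__":
    executor = get_reusable_executor(max_workers=2, timeout=10)
    print(list(executor.map(work, range(4))))  # [0, 1, 4, 9]
    # A second call with identical arguments returns the same singleton,
    # sparing the worker spawn/import cost.
    assert get_reusable_executor(max_workers=2, timeout=10) is executor
    # Asking for a different worker count resizes the existing
    # executor in place instead of creating a new one.
    assert get_reusable_executor(max_workers=4) is executor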
llmeval-env/lib/python3.10/site-packages/joblib/func_inspect.py ADDED
@@ -0,0 +1,369 @@
1
+ """
2
+ My own variation on function-specific inspect-like features.
3
+ """
4
+
5
+ # Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
6
+ # Copyright (c) 2009 Gael Varoquaux
7
+ # License: BSD Style, 3 clauses.
8
+
9
+ import inspect
10
+ import warnings
11
+ import re
12
+ import os
13
+ import collections
14
+
15
+ from itertools import islice
16
+ from tokenize import open as open_py_source
17
+
18
+ from .logger import pformat
19
+
20
+ full_argspec_fields = ('args varargs varkw defaults kwonlyargs '
21
+ 'kwonlydefaults annotations')
22
+ full_argspec_type = collections.namedtuple('FullArgSpec', full_argspec_fields)
23
+
24
+
25
+ def get_func_code(func):
26
+ """ Attempts to retrieve a reliable function code hash.
27
+
28
+ The reason we don't use inspect.getsource is that it caches the
29
+ source, whereas we want this to be modified on the fly when the
30
+ function is modified.
31
+
32
+ Returns
33
+ -------
34
+ func_code: string
35
+ The function code
36
+ source_file: string
37
+ The path to the file in which the function is defined.
38
+ first_line: int
39
+ The first line of the code in the source file.
40
+
41
+ Notes
42
+ ------
43
+ This function does a bit more magic than inspect, and is thus
44
+ more robust.
45
+ """
46
+ source_file = None
47
+ try:
48
+ code = func.__code__
49
+ source_file = code.co_filename
50
+ if not os.path.exists(source_file):
51
+ # Use inspect for lambda functions and functions defined in an
52
+ # interactive shell, or in doctests
53
+ source_code = ''.join(inspect.getsourcelines(func)[0])
54
+ line_no = 1
55
+ if source_file.startswith('<doctest '):
56
+ source_file, line_no = re.match(
57
+ r'\<doctest (.*\.rst)\[(.*)\]\>', source_file).groups()
58
+ line_no = int(line_no)
59
+ source_file = '<doctest %s>' % source_file
60
+ return source_code, source_file, line_no
61
+ # Try to retrieve the source code.
62
+ with open_py_source(source_file) as source_file_obj:
63
+ first_line = code.co_firstlineno
64
+ # All the lines after the function definition:
65
+ source_lines = list(islice(source_file_obj, first_line - 1, None))
66
+ return ''.join(inspect.getblock(source_lines)), source_file, first_line
67
+ except: # noqa: E722
68
+ # If the source code fails, we use the hash. This is fragile and
69
+ # might change from one session to another.
70
+ if hasattr(func, '__code__'):
71
+ # Python 3.X
72
+ return str(func.__code__.__hash__()), source_file, -1
73
+ else:
74
+ # Weird objects like numpy ufunc don't have __code__
75
+ # This is fragile, as quite often the id of the object is
76
+ # in the repr, so it might not persist across sessions,
77
+ # however it will work for ufuncs.
78
+ return repr(func), source_file, -1
79
+
80
+
81
+ def _clean_win_chars(string):
82
+ """Windows cannot encode some characters in filename."""
83
+ import urllib
84
+ if hasattr(urllib, 'quote'):
85
+ quote = urllib.quote
86
+ else:
87
+ # In Python 3, quote is elsewhere
88
+ import urllib.parse
89
+ quote = urllib.parse.quote
90
+ for char in ('<', '>', '!', ':', '\\'):
91
+ string = string.replace(char, quote(char))
92
+ return string
93
+
94
+
95
+ def get_func_name(func, resolv_alias=True, win_characters=True):
96
+ """ Return the function import path (as a list of module names), and
97
+ a name for the function.
98
+
99
+ Parameters
100
+ ----------
101
+ func: callable
102
+ The func to inspect
103
+ resolv_alias: boolean, optional
104
+ If true, possible local aliases are indicated.
105
+ win_characters: boolean, optional
106
+ If true, substitute special characters using urllib.quote.
107
+ This is useful on Windows, as it cannot encode some filenames.
108
+ """
109
+ if hasattr(func, '__module__'):
110
+ module = func.__module__
111
+ else:
112
+ try:
113
+ module = inspect.getmodule(func)
114
+ except TypeError:
115
+ if hasattr(func, '__class__'):
116
+ module = func.__class__.__module__
117
+ else:
118
+ module = 'unknown'
119
+ if module is None:
120
+ # Happens in doctests, e.g.
121
+ module = ''
122
+ if module == '__main__':
123
+ try:
124
+ filename = os.path.abspath(inspect.getsourcefile(func))
125
+ except: # noqa: E722
126
+ filename = None
127
+ if filename is not None:
128
+ # mangling of full path to filename
129
+ parts = filename.split(os.sep)
130
+ if parts[-1].startswith('<ipython-input'):
131
+ # We're in an IPython (or notebook) session. parts[-1] comes
132
+ # from func.__code__.co_filename and is of the form
133
+ # <ipython-input-N-XYZ>, where:
134
+ # - N is the cell number where the function was defined
135
+ # - XYZ is a hash representing the function's code (and name).
136
+ # It will be consistent across sessions and kernel restarts,
137
+ # and will change if the function's code/name changes
138
+ # We remove N so that cache is properly hit if the cell where
139
+ # the func is defined is re-executed.
140
+ # The XYZ hash should avoid collisions between functions with
141
+ # the same name, both within the same notebook but also across
142
+ # notebooks
143
+ splitted = parts[-1].split('-')
144
+ parts[-1] = '-'.join(splitted[:2] + splitted[3:])
145
+ elif len(parts) > 2 and parts[-2].startswith('ipykernel_'):
146
+ # In a notebook session (ipykernel). Filename seems to be 'xyz'
147
+ # of above. parts[-2] has the structure ipykernel_XXXXXX where
148
+ # XXXXXX is a six-digit number identifying the current run (?).
149
+ # If we split it off, the function again has the same
150
+ # identifier across runs.
151
+ parts[-2] = 'ipykernel'
152
+ filename = '-'.join(parts)
153
+ if filename.endswith('.py'):
154
+ filename = filename[:-3]
155
+ module = module + '-' + filename
156
+ module = module.split('.')
157
+ if hasattr(func, 'func_name'):
158
+ name = func.func_name
159
+ elif hasattr(func, '__name__'):
160
+ name = func.__name__
161
+ else:
162
+ name = 'unknown'
163
+ # Hack to detect functions not defined at the module-level
164
+ if resolv_alias:
165
+ # TODO: Maybe add a warning here?
166
+ if hasattr(func, 'func_globals') and name in func.func_globals:
167
+ if not func.func_globals[name] is func:
168
+ name = '%s-alias' % name
169
+ if hasattr(func, '__qualname__') and func.__qualname__ != name:
170
+ # Extend the module name in case of nested functions to avoid
171
+ # (module, name) collisions
172
+ module.extend(func.__qualname__.split(".")[:-1])
173
+ if inspect.ismethod(func):
174
+ # We need to add the name of the class
175
+ if hasattr(func, 'im_class'):
176
+ klass = func.im_class
177
+ module.append(klass.__name__)
178
+ if os.name == 'nt' and win_characters:
179
+ # Windows can't encode certain characters in filenames
180
+ name = _clean_win_chars(name)
181
+ module = [_clean_win_chars(s) for s in module]
182
+ return module, name
183
+
184
+
185
+ def _signature_str(function_name, arg_sig):
186
+ """Helper function to output a function signature"""
187
+ return '{}{}'.format(function_name, arg_sig)
188
+
189
+
190
+ def _function_called_str(function_name, args, kwargs):
191
+ """Helper function to output a function call"""
192
+ template_str = '{0}({1}, {2})'
193
+
194
+ args_str = repr(args)[1:-1]
195
+ kwargs_str = ', '.join('%s=%s' % (k, v)
196
+ for k, v in kwargs.items())
197
+ return template_str.format(function_name, args_str,
198
+ kwargs_str)
199
+
200
+
201
+ def filter_args(func, ignore_lst, args=(), kwargs=dict()):
202
+ """ Filters the given args and kwargs using a list of arguments to
203
+ ignore, and a function specification.
204
+
205
+ Parameters
206
+ ----------
207
+ func: callable
208
+ Function giving the argument specification
209
+ ignore_lst: list of strings
210
+ List of arguments to ignore (either a name of an argument
211
+ in the function spec, or '*', or '**')
212
+ *args: list
213
+ Positional arguments passed to the function.
214
+ **kwargs: dict
215
+ Keyword arguments passed to the function
216
+
217
+ Returns
218
+ -------
219
+ filtered_args: list
220
+ List of filtered positional and keyword arguments.
221
+ """
222
+ args = list(args)
223
+ if isinstance(ignore_lst, str):
224
+ # Catch a common mistake
225
+ raise ValueError(
226
+ 'ignore_lst must be a list of parameters to ignore; '
227
+ '%s (type %s) was given' % (ignore_lst, type(ignore_lst)))
228
+ # Special case for functools.partial objects
229
+ if (not inspect.ismethod(func) and not inspect.isfunction(func)):
230
+ if ignore_lst:
231
+ warnings.warn('Cannot inspect object %s, ignore list will '
232
+ 'not work.' % func, stacklevel=2)
233
+ return {'*': args, '**': kwargs}
234
+ arg_sig = inspect.signature(func)
235
+ arg_names = []
236
+ arg_defaults = []
237
+ arg_kwonlyargs = []
238
+ arg_varargs = None
239
+ arg_varkw = None
240
+ for param in arg_sig.parameters.values():
241
+ if param.kind is param.POSITIONAL_OR_KEYWORD:
242
+ arg_names.append(param.name)
243
+ elif param.kind is param.KEYWORD_ONLY:
244
+ arg_names.append(param.name)
245
+ arg_kwonlyargs.append(param.name)
246
+ elif param.kind is param.VAR_POSITIONAL:
247
+ arg_varargs = param.name
248
+ elif param.kind is param.VAR_KEYWORD:
249
+ arg_varkw = param.name
250
+ if param.default is not param.empty:
251
+ arg_defaults.append(param.default)
252
+ if inspect.ismethod(func):
253
+ # The first argument is 'self'; it has been removed by Python, so
254
+ # we need to add it back:
255
+ args = [func.__self__, ] + args
256
+ # func is an instance method, inspect.signature(func) does not
257
+ # include self; we need to fetch it from the class method, i.e.
258
+ # func.__func__
259
+ class_method_sig = inspect.signature(func.__func__)
260
+ self_name = next(iter(class_method_sig.parameters))
261
+ arg_names = [self_name] + arg_names
262
+ # XXX: Maybe I need an inspect.isbuiltin to detect C-level methods, such
263
+ # as on ndarrays.
264
+
265
+ _, name = get_func_name(func, resolv_alias=False)
266
+ arg_dict = dict()
267
+ arg_position = -1
268
+ for arg_position, arg_name in enumerate(arg_names):
269
+ if arg_position < len(args):
270
+ # Positional argument or keyword argument given as positional
271
+ if arg_name not in arg_kwonlyargs:
272
+ arg_dict[arg_name] = args[arg_position]
273
+ else:
274
+ raise ValueError(
275
+ "Keyword-only parameter '%s' was passed as "
276
+ 'positional parameter for %s:\n'
277
+ ' %s was called.'
278
+ % (arg_name,
279
+ _signature_str(name, arg_sig),
280
+ _function_called_str(name, args, kwargs))
281
+ )
282
+
283
+ else:
284
+ position = arg_position - len(arg_names)
285
+ if arg_name in kwargs:
286
+ arg_dict[arg_name] = kwargs[arg_name]
287
+ else:
288
+ try:
289
+ arg_dict[arg_name] = arg_defaults[position]
290
+ except (IndexError, KeyError) as e:
291
+ # Missing argument
292
+ raise ValueError(
293
+ 'Wrong number of arguments for %s:\n'
294
+ ' %s was called.'
295
+ % (_signature_str(name, arg_sig),
296
+ _function_called_str(name, args, kwargs))
297
+ ) from e
298
+
299
+ varkwargs = dict()
300
+ for arg_name, arg_value in sorted(kwargs.items()):
301
+ if arg_name in arg_dict:
302
+ arg_dict[arg_name] = arg_value
303
+ elif arg_varkw is not None:
304
+ varkwargs[arg_name] = arg_value
305
+ else:
306
+ raise TypeError("Ignore list for %s() contains an unexpected "
307
+ "keyword argument '%s'" % (name, arg_name))
308
+
309
+ if arg_varkw is not None:
310
+ arg_dict['**'] = varkwargs
311
+ if arg_varargs is not None:
312
+ varargs = args[arg_position + 1:]
313
+ arg_dict['*'] = varargs
314
+
315
+ # Now remove the arguments to be ignored
316
+ for item in ignore_lst:
317
+ if item in arg_dict:
318
+ arg_dict.pop(item)
319
+ else:
320
+ raise ValueError("Ignore list: argument '%s' is not defined for "
321
+ "function %s"
322
+ % (item,
323
+ _signature_str(name, arg_sig))
324
+ )
325
+ # XXX: Return a sorted list of pairs?
326
+ return arg_dict
327
+
328
+
329
+ def _format_arg(arg):
330
+ formatted_arg = pformat(arg, indent=2)
331
+ if len(formatted_arg) > 1500:
332
+ formatted_arg = '%s...' % formatted_arg[:700]
333
+ return formatted_arg
334
+
335
+
336
+ def format_signature(func, *args, **kwargs):
337
+ # XXX: Should this use inspect.formatargvalues/formatargspec?
338
+ module, name = get_func_name(func)
339
+ module = [m for m in module if m]
340
+ if module:
341
+ module.append(name)
342
+ module_path = '.'.join(module)
343
+ else:
344
+ module_path = name
345
+ arg_str = list()
346
+ previous_length = 0
347
+ for arg in args:
348
+ formatted_arg = _format_arg(arg)
349
+ if previous_length > 80:
350
+ formatted_arg = '\n%s' % formatted_arg
351
+ previous_length = len(formatted_arg)
352
+ arg_str.append(formatted_arg)
353
+ arg_str.extend(['%s=%s' % (v, _format_arg(i)) for v, i in kwargs.items()])
354
+ arg_str = ', '.join(arg_str)
355
+
356
+ signature = '%s(%s)' % (name, arg_str)
357
+ return module_path, signature
358
+
359
+
360
+ def format_call(func, args, kwargs, object_name="Memory"):
361
+ """ Returns a nicely formatted statement displaying the function
362
+ call with the given arguments.
363
+ """
364
+ path, signature = format_signature(func, *args, **kwargs)
365
+ msg = '%s\n[%s] Calling %s...\n%s' % (80 * '_', object_name,
366
+ path, signature)
367
+ return msg
368
+ # XXX: Not using logging framework
369
+ # self.debug(msg)
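
A hedged illustration of how these helpers behave (`f` is made up for the example, and the module list returned by get_func_name depends on where the function is defined):

from joblib.func_inspect import filter_args, format_signature, get_func_name

def f(a, b=2, *args, **kwargs):
    return a + b

modules, name = get_func_name(f)
print(name)  # 'f'
# Drop `b` from the dict of effective arguments, e.g. so that it is
# ignored when joblib.Memory hashes a call:
print(filter_args(f, ['b'], (1,), {'b': 3}))  # {'a': 1, '**': {}, '*': []}
print(format_signature(f, 1, b=3)[1])         # f(1, b=3)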
llmeval-env/lib/python3.10/site-packages/joblib/logger.py ADDED
@@ -0,0 +1,162 @@
1
+ """
2
+ Helpers for logging.
3
+
4
+ This module needs much love to become useful.
5
+ """
6
+
7
+ # Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
8
+ # Copyright (c) 2008 Gael Varoquaux
9
+ # License: BSD Style, 3 clauses.
10
+
11
+ from __future__ import print_function
12
+
13
+ import time
14
+ import sys
15
+ import os
16
+ import shutil
17
+ import logging
18
+ import pprint
19
+
20
+ from .disk import mkdirp
21
+
22
+
23
+ def _squeeze_time(t):
24
+ """Remove .1s to the time under Windows: this is the time it take to
25
+ stat files. This is needed to make results similar to timings under
26
+ Unix, for tests
27
+ """
28
+ if sys.platform.startswith('win'):
29
+ return max(0, t - .1)
30
+ else:
31
+ return t
32
+
33
+
34
+ def format_time(t):
35
+ t = _squeeze_time(t)
36
+ return "%.1fs, %.1fmin" % (t, t / 60.)
37
+
38
+
39
+ def short_format_time(t):
40
+ t = _squeeze_time(t)
41
+ if t > 60:
42
+ return "%4.1fmin" % (t / 60.)
43
+ else:
44
+ return " %5.1fs" % (t)
45
+
46
+
47
+ def pformat(obj, indent=0, depth=3):
48
+ if 'numpy' in sys.modules:
49
+ import numpy as np
50
+ print_options = np.get_printoptions()
51
+ np.set_printoptions(precision=6, threshold=64, edgeitems=1)
52
+ else:
53
+ print_options = None
54
+ out = pprint.pformat(obj, depth=depth, indent=indent)
55
+ if print_options:
56
+ np.set_printoptions(**print_options)
57
+ return out
58
+
59
+
60
+ ###############################################################################
61
+ # class `Logger`
62
+ ###############################################################################
63
+ class Logger(object):
64
+ """ Base class for logging messages.
65
+ """
66
+
67
+ def __init__(self, depth=3, name=None):
68
+ """
69
+ Parameters
70
+ ----------
71
+ depth: int, optional
72
+ The depth of objects printed.
73
+ name: str, optional
74
+ The namespace to log to. If None, defaults to joblib.
75
+ """
76
+ self.depth = depth
77
+ self._name = name if name else 'joblib'
78
+
79
+ def warn(self, msg):
80
+ logging.getLogger(self._name).warning("[%s]: %s" % (self, msg))
81
+
82
+ def info(self, msg):
83
+ logging.info("[%s]: %s" % (self, msg))
84
+
85
+ def debug(self, msg):
86
+ # XXX: This conflicts with the debug flag used in children class
87
+ logging.getLogger(self._name).debug("[%s]: %s" % (self, msg))
88
+
89
+ def format(self, obj, indent=0):
90
+ """Return the formatted representation of the object."""
91
+ return pformat(obj, indent=indent, depth=self.depth)
92
+
93
+
94
+ ###############################################################################
95
+ # class `PrintTime`
96
+ ###############################################################################
97
+ class PrintTime(object):
98
+ """ Print and log messages while keeping track of time.
99
+ """
100
+
101
+ def __init__(self, logfile=None, logdir=None):
102
+ if logfile is not None and logdir is not None:
103
+ raise ValueError('Cannot specify both logfile and logdir')
104
+ # XXX: Need argument docstring
105
+ self.last_time = time.time()
106
+ self.start_time = self.last_time
107
+ if logdir is not None:
108
+ logfile = os.path.join(logdir, 'joblib.log')
109
+ self.logfile = logfile
110
+ if logfile is not None:
111
+ mkdirp(os.path.dirname(logfile))
112
+ if os.path.exists(logfile):
113
+ # Rotate the logs
114
+ for i in range(1, 9):
115
+ try:
116
+ shutil.move(logfile + '.%i' % i,
117
+ logfile + '.%i' % (i + 1))
118
+ except: # noqa: E722
119
+ "No reason failing here"
120
+ # Use a copy rather than a move, so that a process
121
+ # monitoring this file does not get lost.
122
+ try:
123
+ shutil.copy(logfile, logfile + '.1')
124
+ except: # noqa: E722
125
+ "No reason failing here"
126
+ try:
127
+ with open(logfile, 'w') as logfile:
128
+ logfile.write('\nLogging joblib python script\n')
129
+ logfile.write('\n---%s---\n' % time.ctime(self.last_time))
130
+ except: # noqa: E722
131
+ """ Multiprocessing writing to files can create race
132
+ conditions. Rather fail silently than crash the
133
+ computation.
134
+ """
135
+ # XXX: We actually need a debug flag to disable this
136
+ # silent failure.
137
+
138
+ def __call__(self, msg='', total=False):
139
+ """ Print the time elapsed between the last call and the current
140
+ call, with an optional message.
141
+ """
142
+ if not total:
143
+ time_lapse = time.time() - self.last_time
144
+ full_msg = "%s: %s" % (msg, format_time(time_lapse))
145
+ else:
146
+ # FIXME: Too much logic duplicated
147
+ time_lapse = time.time() - self.start_time
148
+ full_msg = "%s: %.2fs, %.1f min" % (msg, time_lapse,
149
+ time_lapse / 60)
150
+ print(full_msg, file=sys.stderr)
151
+ if self.logfile is not None:
152
+ try:
153
+ with open(self.logfile, 'a') as f:
154
+ print(full_msg, file=f)
155
+ except: # noqa: E722
156
+ """ Multiprocessing writing to files can create race
157
+ conditions. Rather fail silently than crash the
158
+ calculation.
159
+ """
160
+ # XXX: We actually need a debug flag to disable this
161
+ # silent failure.
162
+ self.last_time = time.time()
llmeval-env/lib/python3.10/site-packages/joblib/memory.py ADDED
@@ -0,0 +1,1172 @@
 
1
+ """
2
+ A context object for caching a function's return value each time it
3
+ is called with the same input arguments.
4
+
5
+ """
6
+
7
+ # Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
8
+ # Copyright (c) 2009 Gael Varoquaux
9
+ # License: BSD Style, 3 clauses.
10
+
11
+
12
+ import asyncio
13
+ import datetime
14
+ import functools
15
+ import inspect
16
+ import logging
17
+ import os
18
+ import pathlib
19
+ import pydoc
20
+ import re
21
+ import textwrap
22
+ import time
23
+ import tokenize
24
+ import traceback
25
+ import warnings
26
+ import weakref
27
+
28
+ from . import hashing
29
+ from ._store_backends import CacheWarning # noqa
30
+ from ._store_backends import FileSystemStoreBackend, StoreBackendBase
31
+ from .func_inspect import (filter_args, format_call, format_signature,
32
+ get_func_code, get_func_name)
33
+ from .logger import Logger, format_time, pformat
34
+
35
+ FIRST_LINE_TEXT = "# first line:"
36
+
37
+ # TODO: The following object should have a data store object as a sub
38
+ # object, and the interface to persist and query should be separated in
39
+ # the data store.
40
+ #
41
+ # This would enable creating 'Memory' objects with a different logic for
42
+ # pickling that would simply span a MemorizedFunc with the same
43
+ # store (or do we want to copy it to avoid cross-talks?), for instance to
44
+ # implement HDF5 pickling.
45
+
46
+ # TODO: Same remark for the logger, and probably use the Python logging
47
+ # mechanism.
48
+
49
+
50
+ def extract_first_line(func_code):
51
+ """ Extract the first line information from the function code
52
+ text if available.
53
+ """
54
+ if func_code.startswith(FIRST_LINE_TEXT):
55
+ func_code = func_code.split('\n')
56
+ first_line = int(func_code[0][len(FIRST_LINE_TEXT):])
57
+ func_code = '\n'.join(func_code[1:])
58
+ else:
59
+ first_line = -1
60
+ return func_code, first_line
61
+
62
+
63
+ class JobLibCollisionWarning(UserWarning):
64
+ """ Warn that there might be a collision between names of functions.
65
+ """
66
+
67
+
68
+ _STORE_BACKENDS = {'local': FileSystemStoreBackend}
69
+
70
+
71
+ def register_store_backend(backend_name, backend):
72
+ """Extend available store backends.
73
+
74
+ The Memory, MemorizeResult and MemorizeFunc objects are designed to be
75
+ agnostic to the type of store used behind. By default, the local file
76
+ system is used but this function gives the possibility to extend joblib's
77
+ memory pattern with other types of storage such as cloud storage (S3, GCS,
78
+ OpenStack, HadoopFS, etc) or blob DBs.
79
+
80
+ Parameters
81
+ ----------
82
+ backend_name: str
83
+ The name identifying the store backend being registered. For example,
84
+ 'local' is used with FileSystemStoreBackend.
85
+ backend: StoreBackendBase subclass
86
+ The name of a class that implements the StoreBackendBase interface.
87
+
88
+ """
89
+ if not isinstance(backend_name, str):
90
+ raise ValueError("Store backend name should be a string, "
91
+ "'{0}' given.".format(backend_name))
92
+ if backend is None or not issubclass(backend, StoreBackendBase):
93
+ raise ValueError("Store backend should inherit "
94
+ "StoreBackendBase, "
95
+ "'{0}' given.".format(backend))
96
+
97
+ _STORE_BACKENDS[backend_name] = backend
98
+
99
+
100
+ def _store_backend_factory(backend, location, verbose=0, backend_options=None):
101
+ """Return the correct store object for the given location."""
102
+ if backend_options is None:
103
+ backend_options = {}
104
+
105
+ if isinstance(location, pathlib.Path):
106
+ location = str(location)
107
+
108
+ if isinstance(location, StoreBackendBase):
109
+ return location
110
+ elif isinstance(location, str):
111
+ obj = None
112
+ location = os.path.expanduser(location)
113
+ # The location is a string, so we look in the
114
+ # registered backends for one matching the given backend
115
+ # name.
116
+ for backend_key, backend_obj in _STORE_BACKENDS.items():
117
+ if backend == backend_key:
118
+ obj = backend_obj()
119
+
120
+ # If no matching backend could be found, fail with an explicit
121
+ # error rather than silently picking a default.
122
+ if obj is None:
123
+ raise TypeError('Unknown location {0} or backend {1}'.format(
124
+ location, backend))
125
+
126
+ # The store backend is configured with the extra named parameters,
127
+ # some of them are specific to the underlying store backend.
128
+ obj.configure(location, verbose=verbose,
129
+ backend_options=backend_options)
130
+ return obj
131
+ elif location is not None:
132
+ warnings.warn(
133
+ "Instantiating a backend using a {} as a location is not "
134
+ "supported by joblib. Returning None instead.".format(
135
+ location.__class__.__name__), UserWarning)
136
+
137
+ return None
138
+
139
+
140
+ def _build_func_identifier(func):
141
+ """Build a roughly unique identifier for the cached function."""
142
+ modules, funcname = get_func_name(func)
143
+ # We reuse the historical fs-like way of building a function identifier
144
+ return os.path.join(*modules, funcname)
145
+
146
+
147
+ # An in-memory store to avoid looking at the disk-based function
148
+ # source code to check if a function definition has changed
149
+ _FUNCTION_HASHES = weakref.WeakKeyDictionary()
150
+
151
+
152
+ ###############################################################################
153
+ # class `MemorizedResult`
154
+ ###############################################################################
155
+ class MemorizedResult(Logger):
156
+ """Object representing a cached value.
157
+
158
+ Attributes
159
+ ----------
160
+ location: str
161
+ The location of joblib cache. Depends on the store backend used.
162
+
163
+ func: function or str
164
+ function whose output is cached. The string case is intended only for
165
+ instantiation based on the output of repr() on another instance.
166
+ (namely eval(repr(memorized_instance)) works).
167
+
168
+ argument_hash: str
169
+ hash of the function arguments.
170
+
171
+ backend: str
172
+ Type of store backend for reading/writing cache files.
173
+ Default is 'local'.
174
+
175
+ mmap_mode: {None, 'r+', 'r', 'w+', 'c'}
176
+ The memmapping mode used when loading from cache numpy arrays. See
177
+ numpy.load for the meaning of the different values.
178
+
179
+ verbose: int
180
+ verbosity level (0 means no message).
181
+
182
+ timestamp, metadata: string
183
+ for internal use only.
184
+ """
185
+ def __init__(self, location, call_id, backend='local', mmap_mode=None,
186
+ verbose=0, timestamp=None, metadata=None):
187
+ Logger.__init__(self)
188
+ self._call_id = call_id
189
+ self.store_backend = _store_backend_factory(backend, location,
190
+ verbose=verbose)
191
+ self.mmap_mode = mmap_mode
192
+
193
+ if metadata is not None:
194
+ self.metadata = metadata
195
+ else:
196
+ self.metadata = self.store_backend.get_metadata(self._call_id)
197
+
198
+ self.duration = self.metadata.get('duration', None)
199
+ self.verbose = verbose
200
+ self.timestamp = timestamp
201
+
202
+ @property
203
+ def func(self):
204
+ return self.func_id
205
+
206
+ @property
207
+ def func_id(self):
208
+ return self._call_id[0]
209
+
210
+ @property
211
+ def args_id(self):
212
+ return self._call_id[1]
213
+
214
+ @property
215
+ def argument_hash(self):
216
+ warnings.warn(
217
+ "The 'argument_hash' attribute has been deprecated in version "
218
+ "0.12 and will be removed in version 0.14.\n"
219
+ "Use `args_id` attribute instead.",
220
+ DeprecationWarning, stacklevel=2)
221
+ return self.args_id
222
+
223
+ def get(self):
224
+ """Read value from cache and return it."""
225
+ try:
226
+ return self.store_backend.load_item(
227
+ self._call_id,
228
+ timestamp=self.timestamp,
229
+ metadata=self.metadata,
230
+ verbose=self.verbose
231
+ )
232
+ except ValueError as exc:
233
+ new_exc = KeyError(
234
+ "Error while trying to load a MemorizedResult's value. "
235
+ "It seems that this folder is corrupted : {}".format(
236
+ os.path.join(self.store_backend.location, *self._call_id)))
237
+ raise new_exc from exc
238
+
239
+ def clear(self):
240
+ """Clear value from cache"""
241
+ self.store_backend.clear_item(self._call_id)
242
+
243
+ def __repr__(self):
244
+ return '{}(location="{}", func="{}", args_id="{}")'.format(
245
+ self.__class__.__name__, self.store_backend.location,
246
+ *self._call_id
247
+ )
248
+
249
+ def __getstate__(self):
250
+ state = self.__dict__.copy()
251
+ state['timestamp'] = None
252
+ return state
253
+
254
+
255
+ class NotMemorizedResult(object):
256
+ """Class representing an arbitrary value.
257
+
258
+ This class is a replacement for MemorizedResult when there is no cache.
259
+ """
260
+ __slots__ = ('value', 'valid')
261
+
262
+ def __init__(self, value):
263
+ self.value = value
264
+ self.valid = True
265
+
266
+ def get(self):
267
+ if self.valid:
268
+ return self.value
269
+ else:
270
+ raise KeyError("No value stored.")
271
+
272
+ def clear(self):
273
+ self.valid = False
274
+ self.value = None
275
+
276
+ def __repr__(self):
277
+ if self.valid:
278
+ return ('{class_name}({value})'
279
+ .format(class_name=self.__class__.__name__,
280
+ value=pformat(self.value)))
281
+ else:
282
+ return self.__class__.__name__ + ' with no value'
283
+
284
+ # __getstate__ and __setstate__ are required because of __slots__
285
+ def __getstate__(self):
286
+ return {"valid": self.valid, "value": self.value}
287
+
288
+ def __setstate__(self, state):
289
+ self.valid = state["valid"]
290
+ self.value = state["value"]
291
+
292
+
293
+ ###############################################################################
294
+ # class `NotMemorizedFunc`
295
+ ###############################################################################
296
+ class NotMemorizedFunc(object):
297
+ """No-op object decorating a function.
298
+
299
+ This class replaces MemorizedFunc when there is no cache. It provides an
300
+ identical API but does not write anything on disk.
301
+
302
+ Attributes
303
+ ----------
304
+ func: callable
305
+ Original undecorated function.
306
+ """
307
+ # Should be as light as possible (for speed)
308
+ def __init__(self, func):
309
+ self.func = func
310
+
311
+ def __call__(self, *args, **kwargs):
312
+ return self.func(*args, **kwargs)
313
+
314
+ def call_and_shelve(self, *args, **kwargs):
315
+ return NotMemorizedResult(self.func(*args, **kwargs))
316
+
317
+ def __repr__(self):
318
+ return '{0}(func={1})'.format(self.__class__.__name__, self.func)
319
+
320
+ def clear(self, warn=True):
321
+ # Argument "warn" is for compatibility with MemorizedFunc.clear
322
+ pass
323
+
324
+ def call(self, *args, **kwargs):
325
+ return self.func(*args, **kwargs), {}
326
+
327
+ def check_call_in_cache(self, *args, **kwargs):
328
+ return False
329
+
330
+
331
+ ###############################################################################
332
+ # class `AsyncNotMemorizedFunc`
333
+ ###############################################################################
334
+ class AsyncNotMemorizedFunc(NotMemorizedFunc):
335
+ async def call_and_shelve(self, *args, **kwargs):
336
+ return NotMemorizedResult(await self.func(*args, **kwargs))
337
+
338
+
339
+ ###############################################################################
340
+ # class `MemorizedFunc`
341
+ ###############################################################################
342
+ class MemorizedFunc(Logger):
343
+ """Callable object decorating a function for caching its return value
344
+ each time it is called.
345
+
346
+ Methods are provided to inspect the cache or clean it.
347
+
348
+ Attributes
349
+ ----------
350
+ func: callable
351
+ The original, undecorated, function.
352
+
353
+ location: string
354
+ The location of joblib cache. Depends on the store backend used.
355
+
356
+ backend: str
357
+ Type of store backend for reading/writing cache files.
358
+ Default is 'local', in which case the location is the path to a
359
+ disk storage.
360
+
361
+ ignore: list or None
362
+ List of variable names to ignore when choosing whether to
363
+ recompute.
364
+
365
+ mmap_mode: {None, 'r+', 'r', 'w+', 'c'}
366
+ The memmapping mode used when loading from cache
367
+ numpy arrays. See numpy.load for the meaning of the different
368
+ values.
369
+
370
+ compress: boolean, or integer
371
+ Whether to zip the stored data on disk. If an integer is
372
+ given, it should be between 1 and 9, and sets the amount
373
+ of compression. Note that compressed arrays cannot be
374
+ read by memmapping.
375
+
376
+ verbose: int, optional
377
+ The verbosity flag, controls messages that are issued as
378
+ the function is evaluated.
379
+
380
+ cache_validation_callback: callable, optional
381
+ Callable to check if a result in cache is valid or is to be recomputed.
382
+ When the function is called with arguments for which a cache exists,
383
+ the callback is called with the cache entry's metadata as its sole
384
+ argument. If it returns True, the cached result is returned, else the
385
+ cache for these arguments is cleared and the result is recomputed.
386
+ """
387
+ # ------------------------------------------------------------------------
388
+ # Public interface
389
+ # ------------------------------------------------------------------------
390
+
391
+ def __init__(self, func, location, backend='local', ignore=None,
392
+ mmap_mode=None, compress=False, verbose=1, timestamp=None,
393
+ cache_validation_callback=None):
394
+ Logger.__init__(self)
395
+ self.mmap_mode = mmap_mode
396
+ self.compress = compress
397
+ self.func = func
398
+ self.cache_validation_callback = cache_validation_callback
399
+ self.func_id = _build_func_identifier(func)
400
+ self.ignore = ignore if ignore is not None else []
401
+ self._verbose = verbose
402
+
403
+ # retrieve store object from backend type and location.
404
+ self.store_backend = _store_backend_factory(backend, location,
405
+ verbose=verbose,
406
+ backend_options=dict(
407
+ compress=compress,
408
+ mmap_mode=mmap_mode),
409
+ )
410
+ if self.store_backend is not None:
411
+ # Create func directory on demand.
412
+ self.store_backend.store_cached_func_code([self.func_id])
413
+
414
+ self.timestamp = timestamp if timestamp is not None else time.time()
415
+ try:
416
+ functools.update_wrapper(self, func)
417
+ except Exception:
418
+ pass # Objects like ufunc don't like that
419
+ if inspect.isfunction(func):
420
+ doc = pydoc.TextDoc().document(func)
421
+ # Remove blank line
422
+ doc = doc.replace('\n', '\n\n', 1)
423
+ # Strip backspace-overprints for compatibility with autodoc
424
+ doc = re.sub('\x08.', '', doc)
425
+ else:
426
+ # Pydoc does a poor job on other objects
427
+ doc = func.__doc__
428
+ self.__doc__ = 'Memoized version of %s' % doc
429
+
430
+ self._func_code_info = None
431
+ self._func_code_id = None
432
+
433
+ def _is_in_cache_and_valid(self, call_id):
434
+ """Check if the function call is cached and valid for given arguments.
435
+
436
+ - Compare the function code with the one from the cached function,
437
+ asserting if it has changed.
438
+ - Check if the function call is present in the cache.
439
+ - Call `cache_validation_callback` for user-defined cache validation.
440
+
441
+ Returns True if the function call is in cache and can be used, and
442
+ returns False otherwise.
443
+ """
444
+ # Check if the code of the function has changed
445
+ if not self._check_previous_func_code(stacklevel=4):
446
+ return False
447
+
448
+ # Check if this specific call is in the cache
449
+ if not self.store_backend.contains_item(call_id):
450
+ return False
451
+
452
+ # Call the user defined cache validation callback
453
+ metadata = self.store_backend.get_metadata(call_id)
454
+ if (self.cache_validation_callback is not None and
455
+ not self.cache_validation_callback(metadata)):
456
+ self.store_backend.clear_item(call_id)
457
+ return False
458
+
459
+ return True
460
+
461
+ def _cached_call(self, args, kwargs, shelving):
462
+ """Call wrapped function and cache result, or read cache if available.
463
+
464
+ This function returns the wrapped function output or a reference to
465
+ the cached result.
466
+
467
+ Arguments:
468
+ ----------
469
+
470
+ args, kwargs: list and dict
471
+ input arguments for wrapped function
472
+
473
+ shelving: bool
474
+ True when called via the call_and_shelve function.
475
+
476
+
477
+ Returns
478
+ -------
479
+ output: Output of the wrapped function if shelving is false, or a
480
+ MemorizedResult reference to the value if shelving is true.
481
+ metadata: dict containing the metadata associated with the call.
482
+ """
483
+ args_id = self._get_args_id(*args, **kwargs)
484
+ call_id = (self.func_id, args_id)
485
+ _, func_name = get_func_name(self.func)
486
+ func_info = self.store_backend.get_cached_func_info([self.func_id])
487
+ location = func_info['location']
488
+
489
+ if self._verbose >= 20:
490
+ logging.basicConfig(level=logging.INFO)
491
+ _, signature = format_signature(self.func, *args, **kwargs)
492
+ self.info(
493
+ textwrap.dedent(
494
+ f"""
495
+ Querying {func_name} with signature
496
+ {signature}.
497
+
498
+ (argument hash {args_id})
499
+
500
+ The store location is {location}.
501
+ """
502
+ )
503
+ )
504
+
505
+ # Compare the function code with the previous to see if the
506
+ # function code has changed and check if the results are present in
507
+ # the cache.
508
+ if self._is_in_cache_and_valid(call_id):
509
+ if shelving:
510
+ return self._get_memorized_result(call_id), {}
511
+
512
+ try:
513
+ start_time = time.time()
514
+ output = self._load_item(call_id)
515
+ if self._verbose > 4:
516
+ self._print_duration(time.time() - start_time,
517
+ context='cache loaded ')
518
+ return output, {}
519
+ except Exception:
520
+ # XXX: Should use an exception logger
521
+ _, signature = format_signature(self.func, *args, **kwargs)
522
+ self.warn('Exception while loading results for '
523
+ '{}\n {}'.format(signature, traceback.format_exc()))
524
+
525
+ if self._verbose > 10:
526
+ self.warn(
527
+ f"Computing func {func_name}, argument hash {args_id} "
528
+ f"in location {location}"
529
+ )
530
+
531
+ # Returns the output and the metadata
532
+ return self._call(call_id, args, kwargs, shelving)
533
+
534
+ @property
535
+ def func_code_info(self):
536
+ # 3-tuple property containing: the function source code, source file,
537
+ # and first line of the code inside the source file
538
+ if hasattr(self.func, '__code__'):
539
+ if self._func_code_id is None:
540
+ self._func_code_id = id(self.func.__code__)
541
+ elif id(self.func.__code__) != self._func_code_id:
542
+ # Be robust to dynamic reassignments of self.func.__code__
543
+ self._func_code_info = None
544
+
545
+ if self._func_code_info is None:
546
+ # Cache the source code of self.func . Provided that get_func_code
547
+ # (which should be called once on self) gets called in the process
548
+ # in which self.func was defined, this caching mechanism prevents
549
+ # undesired cache clearing when the cached function is called in
550
+ # an environment where the introspection utilities get_func_code
551
+ # relies on do not work (typically, in joblib child processes).
552
+ # See #1035 for more info
553
+ # TODO (pierreglaser): do the same with get_func_name?
554
+ self._func_code_info = get_func_code(self.func)
555
+ return self._func_code_info
556
+
557
+ def call_and_shelve(self, *args, **kwargs):
558
+ """Call wrapped function, cache result and return a reference.
559
+
560
+ This method returns a reference to the cached result instead of the
561
+ result itself. The reference object is small and picklable, allowing
562
+ it to be sent or stored easily. Call .get() on the reference object to
563
+ retrieve the result.
564
+
565
+ Returns
566
+ -------
567
+ cached_result: MemorizedResult or NotMemorizedResult
568
+ reference to the value returned by the wrapped function. The
569
+ class "NotMemorizedResult" is used when there is no cache
570
+ activated (e.g. location=None in Memory).
571
+ """
572
+ # Return the wrapped output, without the metadata
573
+ return self._cached_call(args, kwargs, shelving=True)[0]
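For illustration, a minimal sketch of shelving in use (the cache directory below is an assumption):

    from joblib import Memory

    memory = Memory('/tmp/joblib_cache', verbose=0)  # hypothetical location

    @memory.cache
    def square(x):
        return x ** 2

    ref = square.call_and_shelve(4)  # small, picklable MemorizedResult
    print(ref.get())                 # loads the cached value: 16
    ref.clear()                      # removes this entry from the cache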
574
+
575
+ def __call__(self, *args, **kwargs):
576
+ # Return the output, without the metadata
577
+ return self._cached_call(args, kwargs, shelving=False)[0]
578
+
579
+ def __getstate__(self):
580
+ # Make sure self.func's source is introspected prior to being pickled -
581
+ # code introspection utilities typically do not work inside child
582
+ # processes
583
+ _ = self.func_code_info
584
+
585
+ # We don't store the timestamp when pickling, to avoid the hash
586
+ # depending on it.
587
+ state = self.__dict__.copy()
588
+ state['timestamp'] = None
589
+
590
+ # Invalidate the code id as id(obj) will be different in the child
591
+ state['_func_code_id'] = None
592
+
593
+ return state
594
+
595
+ def check_call_in_cache(self, *args, **kwargs):
596
+ """Check if function call is in the memory cache.
597
+
598
+ Does not call the function or do any work besides func inspection
599
+ and arg hashing.
600
+
601
+ Returns
602
+ -------
603
+ is_call_in_cache: bool
604
+ Whether or not the result of the function has been cached
605
+ for the input arguments that have been passed.
606
+ """
607
+ call_id = (self.func_id, self._get_args_id(*args, **kwargs))
608
+ return self.store_backend.contains_item(call_id)
609
+
610
+ # ------------------------------------------------------------------------
611
+ # Private interface
612
+ # ------------------------------------------------------------------------
613
+
614
+ def _get_args_id(self, *args, **kwargs):
615
+ """Return the input parameter hash of a result."""
616
+ return hashing.hash(filter_args(self.func, self.ignore, args, kwargs),
617
+ coerce_mmap=self.mmap_mode is not None)
618
+
619
+ def _hash_func(self):
620
+ """Hash a function to key the online cache"""
621
+ func_code_h = hash(getattr(self.func, '__code__', None))
622
+ return id(self.func), hash(self.func), func_code_h
623
+
624
+ def _write_func_code(self, func_code, first_line):
625
+ """ Write the function code and the filename to a file.
626
+ """
627
+ # We store the first line because the filename and the function
628
+ # name is not always enough to identify a function: people
629
+ # sometimes have several functions named the same way in a
630
+ # file. This is bad practice, but joblib should be robust to bad
631
+ # practice.
632
+ func_code = u'%s %i\n%s' % (FIRST_LINE_TEXT, first_line, func_code)
633
+ self.store_backend.store_cached_func_code([self.func_id], func_code)
634
+
635
+ # Also store in the in-memory store of function hashes
636
+ is_named_callable = (hasattr(self.func, '__name__') and
637
+ self.func.__name__ != '<lambda>')
638
+ if is_named_callable:
639
+ # Don't do this for lambda functions or strange callable
640
+ # objects, as it ends up being too fragile
641
+ func_hash = self._hash_func()
642
+ try:
643
+ _FUNCTION_HASHES[self.func] = func_hash
644
+ except TypeError:
645
+ # Some callables are not hashable
646
+ pass
647
+
648
+ def _check_previous_func_code(self, stacklevel=2):
649
+ """
650
+ stacklevel is the depth at which this function is called, to
651
+ issue useful warnings to the user.
652
+ """
653
+ # First check if our function is in the in-memory store.
654
+ # Using the in-memory store not only makes things faster, but it
655
+ # also renders us robust to variations of the files when the
656
+ # in-memory version of the code does not vary
657
+ try:
658
+ if self.func in _FUNCTION_HASHES:
659
+ # We use as an identifier the id of the function and its
660
+ # hash. This is more likely to falsely change than have hash
661
+ # collisions, thus we are on the safe side.
662
+ func_hash = self._hash_func()
663
+ if func_hash == _FUNCTION_HASHES[self.func]:
664
+ return True
665
+ except TypeError:
666
+ # Some callables are not hashable
667
+ pass
668
+
669
+ # Here, we go through some effort to be robust to dynamically
670
+ # changing code and collision. We cannot inspect.getsource
671
+ # because it is not reliable when using IPython's magic "%run".
672
+ func_code, source_file, first_line = self.func_code_info
673
+ try:
674
+ old_func_code, old_first_line = extract_first_line(
675
+ self.store_backend.get_cached_func_code([self.func_id]))
676
+ except (IOError, OSError): # some backend can also raise OSError
677
+ self._write_func_code(func_code, first_line)
678
+ return False
679
+ if old_func_code == func_code:
680
+ return True
681
+
682
+ # We have differing code, is this because we are referring to
683
+ # different functions, or because the function we are referring to has
684
+ # changed?
685
+
686
+ _, func_name = get_func_name(self.func, resolv_alias=False,
687
+ win_characters=False)
688
+ if old_first_line == first_line == -1 or func_name == '<lambda>':
689
+ if not first_line == -1:
690
+ func_description = ("{0} ({1}:{2})"
691
+ .format(func_name, source_file,
692
+ first_line))
693
+ else:
694
+ func_description = func_name
695
+ warnings.warn(JobLibCollisionWarning(
696
+ "Cannot detect name collisions for function '{0}'"
697
+ .format(func_description)), stacklevel=stacklevel)
698
+
699
+ # Fetch the code at the old location and compare it. If it is the
700
+ # same as the stored code, we have a collision: the code in the
701
+ # file has not changed, but the name we have is pointing to a new
702
+ # code block.
703
+ if not old_first_line == first_line and source_file is not None:
704
+ if os.path.exists(source_file):
705
+ _, func_name = get_func_name(self.func, resolv_alias=False)
706
+ num_lines = len(func_code.split('\n'))
707
+ with tokenize.open(source_file) as f:
708
+ on_disk_func_code = f.readlines()[
709
+ old_first_line - 1:old_first_line - 1 + num_lines - 1]
710
+ on_disk_func_code = ''.join(on_disk_func_code)
711
+ possible_collision = (on_disk_func_code.rstrip() ==
712
+ old_func_code.rstrip())
713
+ else:
714
+ possible_collision = source_file.startswith('<doctest ')
715
+ if possible_collision:
716
+ warnings.warn(JobLibCollisionWarning(
717
+ 'Possible name collisions between functions '
718
+ "'%s' (%s:%i) and '%s' (%s:%i)" %
719
+ (func_name, source_file, old_first_line,
720
+ func_name, source_file, first_line)),
721
+ stacklevel=stacklevel)
722
+
723
+ # The function has changed, wipe the cache directory.
724
+ # XXX: Should be using warnings, and giving stacklevel
725
+ if self._verbose > 10:
726
+ _, func_name = get_func_name(self.func, resolv_alias=False)
727
+ self.warn("Function {0} (identified by {1}) has changed"
728
+ ".".format(func_name, self.func_id))
729
+ self.clear(warn=True)
730
+ return False
731
+
732
+ def clear(self, warn=True):
733
+ """Empty the function's cache."""
734
+ func_id = self.func_id
735
+ if self._verbose > 0 and warn:
736
+ self.warn("Clearing function cache identified by %s" % func_id)
737
+ self.store_backend.clear_path([func_id, ])
738
+
739
+ func_code, _, first_line = self.func_code_info
740
+ self._write_func_code(func_code, first_line)
741
+
742
+ def call(self, *args, **kwargs):
743
+ """Force the execution of the function with the given arguments.
744
+
745
+ The output values will be persisted, i.e., the cache will be updated
746
+ with any new values.
747
+
748
+ Parameters
749
+ ----------
750
+ *args: arguments
751
+ The arguments.
752
+ **kwargs: keyword arguments
753
+ Keyword arguments.
754
+
755
+ Returns
756
+ -------
757
+ output : object
758
+ The output of the function call.
759
+ metadata : dict
760
+ The metadata associated with the call.
761
+ """
762
+ call_id = (self.func_id, self._get_args_id(*args, **kwargs))
763
+
764
+ # Return the output and the metadata
765
+ return self._call(call_id, args, kwargs)
766
+
767
+ def _call(self, call_id, args, kwargs, shelving=False):
768
+ # Return the output and the metadata
769
+ self._before_call(args, kwargs)
770
+ start_time = time.time()
771
+ output = self.func(*args, **kwargs)
772
+ return self._after_call(call_id, args, kwargs, shelving,
773
+ output, start_time)
774
+
775
+ def _before_call(self, args, kwargs):
776
+ if self._verbose > 0:
777
+ print(format_call(self.func, args, kwargs))
778
+
779
+ def _after_call(self, call_id, args, kwargs, shelving, output, start_time):
780
+ self.store_backend.dump_item(call_id, output, verbose=self._verbose)
781
+ duration = time.time() - start_time
782
+ if self._verbose > 0:
783
+ self._print_duration(duration)
784
+ metadata = self._persist_input(duration, call_id, args, kwargs)
785
+ if shelving:
786
+ return self._get_memorized_result(call_id, metadata), metadata
787
+
788
+ if self.mmap_mode is not None:
789
+ # Memmap the output at the first call to be consistent with
790
+ # later calls
791
+ output = self._load_item(call_id, metadata)
792
+ return output, metadata
793
+
794
+ def _persist_input(self, duration, call_id, args, kwargs,
795
+ this_duration_limit=0.5):
796
+ """ Save a small summary of the call using json format in the
797
+ output directory.
798
+
799
+ call_id: tuple
800
+ identifier (func_id, args_id) of the call whose metadata is stored.
801
+
802
+ duration: float
803
+ time taken by hashing input arguments, calling the wrapped
804
+ function and persisting its output.
805
+
806
+ args, kwargs: list and dict
807
+ input arguments for wrapped function
808
+
809
+ this_duration_limit: float
810
+ Max execution time for this function before issuing a warning.
811
+ """
812
+ start_time = time.time()
813
+ argument_dict = filter_args(self.func, self.ignore,
814
+ args, kwargs)
815
+
816
+ input_repr = dict((k, repr(v)) for k, v in argument_dict.items())
817
+ # This can fail due to race-conditions with multiple
818
+ # concurrent joblibs removing the file or the directory
819
+ metadata = {
820
+ "duration": duration, "input_args": input_repr, "time": start_time,
821
+ }
822
+
823
+ self.store_backend.store_metadata(call_id, metadata)
824
+
825
+ this_duration = time.time() - start_time
826
+ if this_duration > this_duration_limit:
827
+ # This persistence should be fast. It will not be if repr() takes
828
+ # time and its output is large, because json.dump will have to
829
+ # write a large file. This should not be an issue with numpy arrays
830
+ # for which repr() always output a short representation, but can
831
+ # be with complex dictionaries. Fixing the problem should be a
832
+ # matter of replacing repr() above by something smarter.
833
+ warnings.warn("Persisting input arguments took %.2fs to run."
834
+ "If this happens often in your code, it can cause "
835
+ "performance problems "
836
+ "(results will be correct in all cases). "
837
+ "The reason for this is probably some large input "
838
+ "arguments for a wrapped function."
839
+ % this_duration, stacklevel=5)
840
+ return metadata
841
+
842
+ def _get_memorized_result(self, call_id, metadata=None):
843
+ return MemorizedResult(self.store_backend, call_id,
844
+ metadata=metadata, timestamp=self.timestamp,
845
+ verbose=self._verbose - 1)
846
+
847
+ def _load_item(self, call_id, metadata=None):
848
+ return self.store_backend.load_item(call_id, metadata=metadata,
849
+ timestamp=self.timestamp,
850
+ verbose=self._verbose)
851
+
852
+ def _print_duration(self, duration, context=''):
853
+ _, name = get_func_name(self.func)
854
+ msg = f"{name} {context}- {format_time(duration)}"
855
+ print(max(0, (80 - len(msg))) * '_' + msg)
856
+
857
+ # ------------------------------------------------------------------------
858
+ # Private `object` interface
859
+ # ------------------------------------------------------------------------
860
+
861
+ def __repr__(self):
862
+ return '{class_name}(func={func}, location={location})'.format(
863
+ class_name=self.__class__.__name__,
864
+ func=self.func,
865
+ location=self.store_backend.location,)
866
+
867
+
868
+ ###############################################################################
869
+ # class `AsyncMemorizedFunc`
870
+ ###############################################################################
871
+ class AsyncMemorizedFunc(MemorizedFunc):
872
+ async def __call__(self, *args, **kwargs):
873
+ out = self._cached_call(args, kwargs, shelving=False)
874
+ out = await out if asyncio.iscoroutine(out) else out
875
+ return out[0] # Don't return metadata
876
+
877
+ async def call_and_shelve(self, *args, **kwargs):
878
+ out = self._cached_call(args, kwargs, shelving=True)
879
+ out = await out if asyncio.iscoroutine(out) else out
880
+ return out[0] # Don't return metadata
881
+
882
+ async def call(self, *args, **kwargs):
883
+ out = super().call(*args, **kwargs)
884
+ return await out if asyncio.iscoroutine(out) else out
885
+
886
+ async def _call(self, call_id, args, kwargs, shelving=False):
887
+ self._before_call(args, kwargs)
888
+ start_time = time.time()
889
+ output = await self.func(*args, **kwargs)
890
+ return self._after_call(
891
+ call_id, args, kwargs, shelving, output, start_time
892
+ )
893
+
894
+
895
+ ###############################################################################
896
+ # class `Memory`
897
+ ###############################################################################
898
+ class Memory(Logger):
899
+ """ A context object for caching a function's return value each time it
900
+ is called with the same input arguments.
901
+
902
+ All values are cached on the filesystem, in a deep directory
903
+ structure.
904
+
905
+ Read more in the :ref:`User Guide <memory>`.
906
+
907
+ Parameters
908
+ ----------
909
+ location: str, pathlib.Path or None
910
+ The path of the base directory to use as a data store
911
+ or None. If None is given, no caching is done and
912
+ the Memory object is completely transparent. This option
913
+ replaces cachedir since version 0.12.
914
+
915
+ backend: str, optional
916
+ Type of store backend for reading/writing cache files.
917
+ Default: 'local'.
918
+ The 'local' backend uses regular filesystem operations to
919
+ manipulate data (open, mv, etc) in the backend.
920
+
921
+ mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, optional
922
+ The memmapping mode used when loading from cache
923
+ numpy arrays. See numpy.load for the meaning of the
924
+ arguments.
925
+
926
+ compress: boolean, or integer, optional
927
+ Whether to zip the stored data on disk. If an integer is
928
+ given, it should be between 1 and 9, and sets the amount
929
+ of compression. Note that compressed arrays cannot be
930
+ read by memmapping.
931
+
932
+ verbose: int, optional
933
+ Verbosity flag, controls the debug messages that are issued
934
+ as functions are evaluated.
935
+
936
+ bytes_limit: int | str, optional
937
+ Limit in bytes of the size of the cache. By default, the size of
938
+ the cache is unlimited. When reducing the size of the cache,
939
+ ``joblib`` keeps the most recently accessed items first. If a
940
+ str is passed, it is converted to a number of bytes using units
941
+ { K | M | G} for kilo, mega, giga.
942
+
943
+ **Note:** You need to call :meth:`joblib.Memory.reduce_size` to
944
+ actually reduce the cache size to be less than ``bytes_limit``.
945
+
946
+ **Note:** This argument has been deprecated. One should give the
947
+ value of ``bytes_limit`` directly in
948
+ :meth:`joblib.Memory.reduce_size`.
949
+
950
+ backend_options: dict, optional
951
+ Contains a dictionary of named parameters used to configure
952
+ the store backend.
953
+ """
954
+ # ------------------------------------------------------------------------
955
+ # Public interface
956
+ # ------------------------------------------------------------------------
957
+
958
+ def __init__(self, location=None, backend='local',
959
+ mmap_mode=None, compress=False, verbose=1, bytes_limit=None,
960
+ backend_options=None):
961
+ Logger.__init__(self)
962
+ self._verbose = verbose
963
+ self.mmap_mode = mmap_mode
964
+ self.timestamp = time.time()
965
+ if bytes_limit is not None:
966
+ warnings.warn(
967
+ "bytes_limit argument has been deprecated. It will be removed "
968
+ "in version 1.5. Please pass its value directly to "
969
+ "Memory.reduce_size.",
970
+ category=DeprecationWarning
971
+ )
972
+ self.bytes_limit = bytes_limit
973
+ self.backend = backend
974
+ self.compress = compress
975
+ if backend_options is None:
976
+ backend_options = {}
977
+ self.backend_options = backend_options
978
+
979
+ if compress and mmap_mode is not None:
980
+ warnings.warn('Compressed results cannot be memmapped',
981
+ stacklevel=2)
982
+
983
+ self.location = location
984
+ if isinstance(location, str):
985
+ location = os.path.join(location, 'joblib')
986
+
987
+ self.store_backend = _store_backend_factory(
988
+ backend, location, verbose=self._verbose,
989
+ backend_options=dict(compress=compress, mmap_mode=mmap_mode,
990
+ **backend_options))
991
+
992
+ def cache(self, func=None, ignore=None, verbose=None, mmap_mode=False,
993
+ cache_validation_callback=None):
994
+ """ Decorates the given function func to only compute its return
995
+ value for input arguments not cached on disk.
996
+
997
+ Parameters
998
+ ----------
999
+ func: callable, optional
1000
+ The function to be decorated
1001
+ ignore: list of strings
1002
+ A list of arguments name to ignore in the hashing
1003
+ verbose: integer, optional
1004
+ The verbosity mode of the function. By default that
1005
+ of the memory object is used.
1006
+ mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, optional
1007
+ The memmapping mode used when loading from cache
1008
+ numpy arrays. See numpy.load for the meaning of the
1009
+ arguments. By default that of the memory object is used.
1010
+ cache_validation_callback: callable, optional
1011
+ Callable to validate whether or not the cache is valid. When
1012
+ the cached function is called with arguments for which a cache
1013
+ exists, this callable is called with the metadata of the cached
1014
+ result as its sole argument. If it returns True, then the
1015
+ cached result is returned, else the cache for these arguments
1016
+ is cleared and recomputed.
1017
+
1018
+ Returns
1019
+ -------
1020
+ decorated_func: MemorizedFunc object
1021
+ The returned object is a MemorizedFunc object, that is
1022
+ callable (behaves like a function), but offers extra
1023
+ methods for cache lookup and management. See the
1024
+ documentation for :class:`joblib.memory.MemorizedFunc`.
1025
+ """
1026
+ if (cache_validation_callback is not None and
1027
+ not callable(cache_validation_callback)):
1028
+ raise ValueError(
1029
+ "cache_validation_callback needs to be callable. "
1030
+ f"Got {cache_validation_callback}."
1031
+ )
1032
+ if func is None:
1033
+ # Partial application, to be able to specify extra keyword
1034
+ # arguments in decorators
1035
+ return functools.partial(
1036
+ self.cache, ignore=ignore,
1037
+ mmap_mode=mmap_mode,
1038
+ verbose=verbose,
1039
+ cache_validation_callback=cache_validation_callback
1040
+ )
1041
+ if self.store_backend is None:
1042
+ cls = (AsyncNotMemorizedFunc
1043
+ if asyncio.iscoroutinefunction(func)
1044
+ else NotMemorizedFunc)
1045
+ return cls(func)
1046
+ if verbose is None:
1047
+ verbose = self._verbose
1048
+ if mmap_mode is False:
1049
+ mmap_mode = self.mmap_mode
1050
+ if isinstance(func, MemorizedFunc):
1051
+ func = func.func
1052
+ cls = (AsyncMemorizedFunc
1053
+ if asyncio.iscoroutinefunction(func)
1054
+ else MemorizedFunc)
1055
+ return cls(
1056
+ func, location=self.store_backend, backend=self.backend,
1057
+ ignore=ignore, mmap_mode=mmap_mode, compress=self.compress,
1058
+ verbose=verbose, timestamp=self.timestamp,
1059
+ cache_validation_callback=cache_validation_callback
1060
+ )
1061
+
1062
+ def clear(self, warn=True):
1063
+ """ Erase the complete cache directory.
1064
+ """
1065
+ if warn:
1066
+ self.warn('Flushing completely the cache')
1067
+ if self.store_backend is not None:
1068
+ self.store_backend.clear()
1069
+
1070
+ # As the cache is completely clear, make sure the _FUNCTION_HASHES
1071
+ # cache is also reset. Else, for a function that is present in this
1072
+ # table, results cached after this clear will be have cache miss
1073
+ # as the function code is not re-written.
1074
+ _FUNCTION_HASHES.clear()
1075
+
1076
+ def reduce_size(self, bytes_limit=None, items_limit=None, age_limit=None):
1077
+ """Remove cache elements to make the cache fit its limits.
1078
+
1079
+ The limitation can impose that the cache size fits in ``bytes_limit``,
1080
+ that the number of cache items is no more than ``items_limit``, and
1081
+ that all files in cache are not older than ``age_limit``.
1082
+
1083
+ Parameters
1084
+ ----------
1085
+ bytes_limit: int | str, optional
1086
+ Limit in bytes of the size of the cache. By default, the size of
1087
+ the cache is unlimited. When reducing the size of the cache,
1088
+ ``joblib`` keeps the most recently accessed items first. If a
1089
+ str is passed, it is converted to a number of bytes using units
1090
+ { K | M | G} for kilo, mega, giga.
1091
+
1092
+ items_limit: int, optional
1093
+ Number of items to limit the cache to. By default, the number of
1094
+ items in the cache is unlimited. When reducing the size of the
1095
+ cache, ``joblib`` keeps the most recently accessed items first.
1096
+
1097
+ age_limit: datetime.timedelta, optional
1098
+ Maximum age of items to limit the cache to. When reducing the size
1099
+ of the cache, any items last accessed more than the given length of
1100
+ time ago are deleted.
1101
+ """
1102
+ if bytes_limit is None:
1103
+ bytes_limit = self.bytes_limit
1104
+
1105
+ if self.store_backend is None:
1106
+ # No cached results, this function does nothing.
1107
+ return
1108
+
1109
+ if bytes_limit is None and items_limit is None and age_limit is None:
1110
+ # No limitation to impose, returning
1111
+ return
1112
+
1113
+ # Defers the actual limits enforcing to the store backend.
1114
+ self.store_backend.enforce_store_limits(
1115
+ bytes_limit, items_limit, age_limit
1116
+ )
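A short sketch of these limits in use (the ``memory`` instance is assumed to exist, e.g. from ``Memory('/tmp/joblib_cache')``):

    import datetime

    memory.reduce_size(bytes_limit='100M', items_limit=1000,
                       age_limit=datetime.timedelta(days=7))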
1117
+
1118
+ def eval(self, func, *args, **kwargs):
1119
+ """ Eval function func with arguments `*args` and `**kwargs`,
1120
+ in the context of the memory.
1121
+
1122
+ This method works similarly to the builtin `apply`, except
1123
+ that the function is called only if the cache is not
1124
+ up to date.
1125
+
1126
+ """
1127
+ if self.store_backend is None:
1128
+ return func(*args, **kwargs)
1129
+ return self.cache(func)(*args, **kwargs)
1130
+
1131
+ # ------------------------------------------------------------------------
1132
+ # Private `object` interface
1133
+ # ------------------------------------------------------------------------
1134
+
1135
+ def __repr__(self):
1136
+ return '{class_name}(location={location})'.format(
1137
+ class_name=self.__class__.__name__,
1138
+ location=(None if self.store_backend is None
1139
+ else self.store_backend.location))
1140
+
1141
+ def __getstate__(self):
1142
+ """ We don't store the timestamp when pickling, to avoid the hash
1143
+ depending from it.
1144
+ """
1145
+ state = self.__dict__.copy()
1146
+ state['timestamp'] = None
1147
+ return state
1148
+
1149
+
1150
+ ###############################################################################
1151
+ # cache_validation_callback helpers
1152
+ ###############################################################################
1153
+
1154
+ def expires_after(days=0, seconds=0, microseconds=0, milliseconds=0, minutes=0,
1155
+ hours=0, weeks=0):
1156
+ """Helper cache_validation_callback to force recompute after a duration.
1157
+
1158
+ Parameters
1159
+ ----------
1160
+ days, seconds, microseconds, milliseconds, minutes, hours, weeks: numbers
1161
+ arguments passed to a timedelta.
1162
+ """
1163
+ delta = datetime.timedelta(
1164
+ days=days, seconds=seconds, microseconds=microseconds,
1165
+ milliseconds=milliseconds, minutes=minutes, hours=hours, weeks=weeks
1166
+ )
1167
+
1168
+ def cache_validation_callback(metadata):
1169
+ computation_age = time.time() - metadata['time']
1170
+ return computation_age < delta.total_seconds()
1171
+
1172
+ return cache_validation_callback
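For illustration, a sketch combining ``Memory.cache`` with this helper, assuming a joblib version that exports ``expires_after`` and a writable cache directory:

    import time
    from joblib import Memory, expires_after

    memory = Memory('/tmp/joblib_cache', verbose=0)

    @memory.cache(cache_validation_callback=expires_after(seconds=0.5))
    def stamp():
        return time.time()

    first = stamp()
    assert stamp() == first  # entry still valid, served from the cache
    time.sleep(0.6)
    assert stamp() != first  # entry expired, result recomputed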
llmeval-env/lib/python3.10/site-packages/joblib/numpy_pickle.py ADDED
@@ -0,0 +1,659 @@
1
+ """Utilities for fast persistence of big data, with optional compression."""
2
+
3
+ # Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
4
+ # Copyright (c) 2009 Gael Varoquaux
5
+ # License: BSD Style, 3 clauses.
6
+
7
+ import pickle
8
+ import os
9
+ import warnings
10
+ import io
11
+ from pathlib import Path
12
+
13
+ from .compressor import lz4, LZ4_NOT_INSTALLED_ERROR
14
+ from .compressor import _COMPRESSORS, register_compressor, BinaryZlibFile
15
+ from .compressor import (ZlibCompressorWrapper, GzipCompressorWrapper,
16
+ BZ2CompressorWrapper, LZMACompressorWrapper,
17
+ XZCompressorWrapper, LZ4CompressorWrapper)
18
+ from .numpy_pickle_utils import Unpickler, Pickler
19
+ from .numpy_pickle_utils import _read_fileobject, _write_fileobject
20
+ from .numpy_pickle_utils import _read_bytes, BUFFER_SIZE
21
+ from .numpy_pickle_utils import _ensure_native_byte_order
22
+ from .numpy_pickle_compat import load_compatibility
23
+ from .numpy_pickle_compat import NDArrayWrapper
24
+ # For compatibility with old versions of joblib, we need ZNDArrayWrapper
25
+ # to be visible in the current namespace.
26
+ # Explicitly skipping next line from flake8 as it triggers an F401 warning
27
+ # which we don't care about.
28
+ from .numpy_pickle_compat import ZNDArrayWrapper # noqa
29
+ from .backports import make_memmap
30
+
31
+ # Register supported compressors
32
+ register_compressor('zlib', ZlibCompressorWrapper())
33
+ register_compressor('gzip', GzipCompressorWrapper())
34
+ register_compressor('bz2', BZ2CompressorWrapper())
35
+ register_compressor('lzma', LZMACompressorWrapper())
36
+ register_compressor('xz', XZCompressorWrapper())
37
+ register_compressor('lz4', LZ4CompressorWrapper())
38
+
39
+
40
+ ###############################################################################
41
+ # Utility objects for persistence.
42
+
43
+ # For convenience, 16 bytes are used to be sure to cover all the possible
44
+ # dtypes' alignments. For reference, see:
45
+ # https://numpy.org/devdocs/dev/alignment.html
46
+ NUMPY_ARRAY_ALIGNMENT_BYTES = 16
47
+
48
+
49
+ class NumpyArrayWrapper(object):
50
+ """An object to be persisted instead of numpy arrays.
51
+
52
+ This object is used to hack into the pickle machinery and read numpy
53
+ array data from our custom persistence format.
54
+ More precisely, this object is used for:
55
+ * carrying the information of the persisted array: subclass, shape, order,
56
+ dtype. Those ndarray metadata are used to correctly reconstruct the array
57
+ with low level numpy functions.
58
+ * determining if memmap is allowed on the array.
59
+ * reading the array bytes from a file.
60
+ * reading the array from a file using a memory map.
61
+ * writing the array bytes to a file.
62
+
63
+ Attributes
64
+ ----------
65
+ subclass: numpy.ndarray subclass
66
+ Determine the subclass of the wrapped array.
67
+ shape: numpy.ndarray shape
68
+ Determine the shape of the wrapped array.
69
+ order: {'C', 'F'}
70
+ Determine the order of wrapped array data. 'C' is for C order, 'F' is
71
+ for fortran order.
72
+ dtype: numpy.ndarray dtype
73
+ Determine the data type of the wrapped array.
74
+ allow_mmap: bool
75
+ Determine if memory mapping is allowed on the wrapped array.
76
+ Default: False.
77
+ """
78
+
79
+ def __init__(self, subclass, shape, order, dtype, allow_mmap=False,
80
+ numpy_array_alignment_bytes=NUMPY_ARRAY_ALIGNMENT_BYTES):
81
+ """Constructor. Store the useful information for later."""
82
+ self.subclass = subclass
83
+ self.shape = shape
84
+ self.order = order
85
+ self.dtype = dtype
86
+ self.allow_mmap = allow_mmap
87
+ # We make numpy_array_alignment_bytes an instance attribute to allow us
88
+ # to change our mind about the default alignment and still load the old
89
+ # pickles (with the previous alignment) correctly
90
+ self.numpy_array_alignment_bytes = numpy_array_alignment_bytes
91
+
92
+ def safe_get_numpy_array_alignment_bytes(self):
93
+ # NumpyArrayWrapper instances loaded from joblib <= 1.1 pickles don't
94
+ # have an numpy_array_alignment_bytes attribute
95
+ return getattr(self, 'numpy_array_alignment_bytes', None)
96
+
97
+ def write_array(self, array, pickler):
98
+ """Write array bytes to pickler file handle.
99
+
100
+ This function is an adaptation of the numpy write_array function
101
+ available in version 1.10.1 in numpy/lib/format.py.
102
+ """
103
+ # Set buffer size to 16 MiB to hide the Python loop overhead.
104
+ buffersize = max(16 * 1024 ** 2 // array.itemsize, 1)
105
+ if array.dtype.hasobject:
106
+ # We contain Python objects so we cannot write out the data
107
+ # directly. Instead, we will pickle it out with version 2 of the
108
+ # pickle protocol.
109
+ pickle.dump(array, pickler.file_handle, protocol=2)
110
+ else:
111
+ numpy_array_alignment_bytes = \
112
+ self.safe_get_numpy_array_alignment_bytes()
113
+ if numpy_array_alignment_bytes is not None:
114
+ current_pos = pickler.file_handle.tell()
115
+ pos_after_padding_byte = current_pos + 1
116
+ padding_length = numpy_array_alignment_bytes - (
117
+ pos_after_padding_byte % numpy_array_alignment_bytes)
118
+ # A single byte is written that contains the padding length in
119
+ # bytes
120
+ padding_length_byte = int.to_bytes(
121
+ padding_length, length=1, byteorder='little')
122
+ pickler.file_handle.write(padding_length_byte)
123
+
124
+ if padding_length != 0:
125
+ padding = b'\xff' * padding_length
126
+ pickler.file_handle.write(padding)
127
+
128
+ for chunk in pickler.np.nditer(array,
129
+ flags=['external_loop',
130
+ 'buffered',
131
+ 'zerosize_ok'],
132
+ buffersize=buffersize,
133
+ order=self.order):
134
+ pickler.file_handle.write(chunk.tobytes('C'))
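A worked example of the padding arithmetic above, with hypothetical offsets: if the cursor sits at offset 100, the padding byte occupies offset 100 itself, so the first data byte would land at 101 and 11 padding bytes push the array data to offset 112, a multiple of 16:

    current_pos = 100                                # cursor before the padding byte
    align = 16                                       # NUMPY_ARRAY_ALIGNMENT_BYTES
    padding = align - ((current_pos + 1) % align)    # 11 in this case
    assert (current_pos + 1 + padding) % align == 0  # data starts at offset 112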
135
+
136
+ def read_array(self, unpickler):
137
+ """Read array from unpickler file handle.
138
+
139
+ This function is an adaptation of the numpy read_array function
140
+ available in version 1.10.1 in numpy/lib/format.py.
141
+ """
142
+ if len(self.shape) == 0:
143
+ count = 1
144
+ else:
145
+ # joblib issue #859: we cast the elements of self.shape to int64 to
146
+ # prevent a potential overflow when computing their product.
147
+ shape_int64 = [unpickler.np.int64(x) for x in self.shape]
148
+ count = unpickler.np.multiply.reduce(shape_int64)
149
+ # Now read the actual data.
150
+ if self.dtype.hasobject:
151
+ # The array contained Python objects. We need to unpickle the data.
152
+ array = pickle.load(unpickler.file_handle)
153
+ else:
154
+ numpy_array_alignment_bytes = \
155
+ self.safe_get_numpy_array_alignment_bytes()
156
+ if numpy_array_alignment_bytes is not None:
157
+ padding_byte = unpickler.file_handle.read(1)
158
+ padding_length = int.from_bytes(
159
+ padding_byte, byteorder='little')
160
+ if padding_length != 0:
161
+ unpickler.file_handle.read(padding_length)
162
+
163
+ # This is not a real file. We have to read it the
164
+ # memory-intensive way.
165
+ # crc32 module fails on reads greater than 2 ** 32 bytes,
166
+ # breaking large reads from gzip streams. Chunk reads to
167
+ # BUFFER_SIZE bytes to avoid issue and reduce memory overhead
168
+ # of the read. In non-chunked case count < max_read_count, so
169
+ # only one read is performed.
170
+ max_read_count = BUFFER_SIZE // min(BUFFER_SIZE,
171
+ self.dtype.itemsize)
172
+
173
+ array = unpickler.np.empty(count, dtype=self.dtype)
174
+ for i in range(0, count, max_read_count):
175
+ read_count = min(max_read_count, count - i)
176
+ read_size = int(read_count * self.dtype.itemsize)
177
+ data = _read_bytes(unpickler.file_handle,
178
+ read_size, "array data")
179
+ array[i:i + read_count] = \
180
+ unpickler.np.frombuffer(data, dtype=self.dtype,
181
+ count=read_count)
182
+ del data
183
+
184
+ if self.order == 'F':
185
+ array.shape = self.shape[::-1]
186
+ array = array.transpose()
187
+ else:
188
+ array.shape = self.shape
189
+
190
+ # Detect byte order mismatch and swap as needed.
191
+ return _ensure_native_byte_order(array)
192
+
193
+ def read_mmap(self, unpickler):
194
+ """Read an array using numpy memmap."""
195
+ current_pos = unpickler.file_handle.tell()
196
+ offset = current_pos
197
+ numpy_array_alignment_bytes = \
198
+ self.safe_get_numpy_array_alignment_bytes()
199
+
200
+ if numpy_array_alignment_bytes is not None:
201
+ padding_byte = unpickler.file_handle.read(1)
202
+ padding_length = int.from_bytes(padding_byte, byteorder='little')
203
+ # + 1 is for the padding byte
204
+ offset += padding_length + 1
205
+
206
+ if unpickler.mmap_mode == 'w+':
207
+ unpickler.mmap_mode = 'r+'
208
+
209
+ marray = make_memmap(unpickler.filename,
210
+ dtype=self.dtype,
211
+ shape=self.shape,
212
+ order=self.order,
213
+ mode=unpickler.mmap_mode,
214
+ offset=offset)
215
+ # update the offset so that it corresponds to the end of the read array
216
+ unpickler.file_handle.seek(offset + marray.nbytes)
217
+
218
+ if (numpy_array_alignment_bytes is None and
219
+ current_pos % NUMPY_ARRAY_ALIGNMENT_BYTES != 0):
220
+ message = (
221
+ f'The memmapped array {marray} loaded from the file '
222
+ f'{unpickler.file_handle.name} is not byte aligned. '
223
+ 'This may cause segmentation faults if this memmapped array '
224
+ 'is used in some libraries like BLAS or PyTorch. '
225
+ 'To get rid of this warning, regenerate your pickle file '
226
+ 'with joblib >= 1.2.0. '
227
+ 'See https://github.com/joblib/joblib/issues/563 '
228
+ 'for more details'
229
+ )
230
+ warnings.warn(message)
231
+
232
+ return _ensure_native_byte_order(marray)
233
+
234
+ def read(self, unpickler):
235
+ """Read the array corresponding to this wrapper.
236
+
237
+ Use the unpickler to get all information to correctly read the array.
238
+
239
+ Parameters
240
+ ----------
241
+ unpickler: NumpyUnpickler
242
+
243
+ Returns
244
+ -------
245
+ array: numpy.ndarray
246
+
247
+ """
248
+ # When requested, only use memmap mode if allowed.
249
+ if unpickler.mmap_mode is not None and self.allow_mmap:
250
+ array = self.read_mmap(unpickler)
251
+ else:
252
+ array = self.read_array(unpickler)
253
+
254
+ # Manage array subclass case
255
+ if (hasattr(array, '__array_prepare__') and
256
+ self.subclass not in (unpickler.np.ndarray,
257
+ unpickler.np.memmap)):
258
+ # We need to reconstruct another subclass
259
+ new_array = unpickler.np.core.multiarray._reconstruct(
260
+ self.subclass, (0,), 'b')
261
+ return new_array.__array_prepare__(array)
262
+ else:
263
+ return array
264
+
265
+ ###############################################################################
266
+ # Pickler classes
267
+
268
+
269
+ class NumpyPickler(Pickler):
270
+ """A pickler to persist big data efficiently.
271
+
272
+ The main features of this object are:
273
+ * persistence of numpy arrays in a single file.
274
+ * optional compression with a special care on avoiding memory copies.
275
+
276
+ Attributes
277
+ ----------
278
+ fp: file
279
+ File object handle used for serializing the input object.
280
+ protocol: int, optional
281
+ Pickle protocol used. Default is pickle.DEFAULT_PROTOCOL.
282
+ """
283
+
284
+ dispatch = Pickler.dispatch.copy()
285
+
286
+ def __init__(self, fp, protocol=None):
287
+ self.file_handle = fp
288
+ self.buffered = isinstance(self.file_handle, BinaryZlibFile)
289
+
290
+ # By default we want a pickle protocol that only changes with
291
+ # the major python version and not the minor one
292
+ if protocol is None:
293
+ protocol = pickle.DEFAULT_PROTOCOL
294
+
295
+ Pickler.__init__(self, self.file_handle, protocol=protocol)
296
+ # delayed import of numpy, to avoid tight coupling
297
+ try:
298
+ import numpy as np
299
+ except ImportError:
300
+ np = None
301
+ self.np = np
302
+
303
+ def _create_array_wrapper(self, array):
304
+ """Create and returns a numpy array wrapper from a numpy array."""
305
+ order = 'F' if (array.flags.f_contiguous and
306
+ not array.flags.c_contiguous) else 'C'
307
+ allow_mmap = not self.buffered and not array.dtype.hasobject
308
+
309
+ kwargs = {}
310
+ try:
311
+ self.file_handle.tell()
312
+ except io.UnsupportedOperation:
313
+ kwargs = {'numpy_array_alignment_bytes': None}
314
+
315
+ wrapper = NumpyArrayWrapper(type(array),
316
+ array.shape, order, array.dtype,
317
+ allow_mmap=allow_mmap,
318
+ **kwargs)
319
+
320
+ return wrapper
321
+
322
+ def save(self, obj):
323
+ """Subclass the Pickler `save` method.
324
+
325
+ This is a total abuse of the Pickler class in order to use the numpy
326
+ persistence function `save` instead of the default pickle
327
+ implementation. The numpy array is replaced by a custom wrapper in the
328
+ pickle persistence stack and the serialized array is written right
329
+ after in the file. Warning: the file produced does not follow the
330
+ pickle format. As such it can not be read with `pickle.load`.
331
+ """
332
+ if self.np is not None and type(obj) in (self.np.ndarray,
333
+ self.np.matrix,
334
+ self.np.memmap):
335
+ if type(obj) is self.np.memmap:
336
+ # Pickling doesn't work with memmapped arrays
337
+ obj = self.np.asanyarray(obj)
338
+
339
+ # The array wrapper is pickled instead of the real array.
340
+ wrapper = self._create_array_wrapper(obj)
341
+ Pickler.save(self, wrapper)
342
+
343
+ # A framer was introduced with pickle protocol 4 and we want to
344
+ # ensure the wrapper object is written before the numpy array
345
+ # buffer in the pickle file.
346
+ # See https://www.python.org/dev/peps/pep-3154/#framing to get
347
+ # more information on the framer behavior.
348
+ if self.proto >= 4:
349
+ self.framer.commit_frame(force=True)
350
+
351
+ # And then array bytes are written right after the wrapper.
352
+ wrapper.write_array(obj, self)
353
+ return
354
+
355
+ return Pickler.save(self, obj)
356
+
357
+
358
+ class NumpyUnpickler(Unpickler):
359
+ """A subclass of the Unpickler to unpickle our numpy pickles.
360
+
361
+ Attributes
362
+ ----------
363
+ mmap_mode: str
364
+ The memory map mode to use for reading numpy arrays.
365
+ file_handle: file_like
366
+ File object to unpickle from.
367
+ filename: str
368
+ Name of the file to unpickle from. It should correspond to file_handle.
369
+ This parameter is required when using mmap_mode.
370
+ np: module
371
+ Reference to numpy module if numpy is installed else None.
372
+
373
+ """
374
+
375
+ dispatch = Unpickler.dispatch.copy()
376
+
377
+ def __init__(self, filename, file_handle, mmap_mode=None):
378
+ # The next line is for backward compatibility with pickle generated
379
+ # with joblib versions less than 0.10.
380
+ self._dirname = os.path.dirname(filename)
381
+
382
+ self.mmap_mode = mmap_mode
383
+ self.file_handle = file_handle
384
+ # filename is required for numpy mmap mode.
385
+ self.filename = filename
386
+ self.compat_mode = False
387
+ Unpickler.__init__(self, self.file_handle)
388
+ try:
389
+ import numpy as np
390
+ except ImportError:
391
+ np = None
392
+ self.np = np
393
+
394
+ def load_build(self):
395
+ """Called to set the state of a newly created object.
396
+
397
+ We capture it to replace our place-holder objects, NDArrayWrapper or
398
+ NumpyArrayWrapper, by the array we are interested in. We
399
+ replace them directly in the stack of pickler.
400
+ NDArrayWrapper is used for backward compatibility with joblib <= 0.9.
401
+ """
402
+ Unpickler.load_build(self)
403
+
404
+ # For backward compatibility, we support NDArrayWrapper objects.
405
+ if isinstance(self.stack[-1], (NDArrayWrapper, NumpyArrayWrapper)):
406
+ if self.np is None:
407
+ raise ImportError("Trying to unpickle an ndarray, "
408
+ "but numpy didn't import correctly")
409
+ array_wrapper = self.stack.pop()
410
+ # If any NDArrayWrapper is found, we switch to compatibility mode,
411
+ # this will be used to raise a DeprecationWarning to the user at
412
+ # the end of the unpickling.
413
+ if isinstance(array_wrapper, NDArrayWrapper):
414
+ self.compat_mode = True
415
+ self.stack.append(array_wrapper.read(self))
416
+
417
+ # Be careful to register our new method.
418
+ dispatch[pickle.BUILD[0]] = load_build
419
+
420
+
421
+ ###############################################################################
422
+ # Utility functions
423
+
424
+ def dump(value, filename, compress=0, protocol=None, cache_size=None):
425
+ """Persist an arbitrary Python object into one file.
426
+
427
+ Read more in the :ref:`User Guide <persistence>`.
428
+
429
+ Parameters
430
+ ----------
431
+ value: any Python object
432
+ The object to store to disk.
433
+ filename: str, pathlib.Path, or file object.
434
+ The file object or path of the file in which it is to be stored.
435
+ The compression method corresponding to one of the supported filename
436
+ extensions ('.z', '.gz', '.bz2', '.xz' or '.lzma') will be used
437
+ automatically.
438
+ compress: int from 0 to 9 or bool or 2-tuple, optional
439
+ Optional compression level for the data. 0 or False is no compression.
440
+ Higher value means more compression, but also slower read and
441
+ write times. Using a value of 3 is often a good compromise.
442
+ See the notes for more details.
443
+ If compress is True, the compression level used is 3.
444
+ If compress is a 2-tuple, the first element must correspond to a string
445
+ among the supported compressors (e.g. 'zlib', 'gzip', 'bz2', 'lzma',
446
+ 'xz'), the second element must be an integer from 0 to 9, corresponding
447
+ to the compression level.
448
+ protocol: int, optional
449
+ Pickle protocol, see pickle.dump documentation for more details.
450
+ cache_size: positive int, optional
451
+ This option is deprecated in 0.10 and has no effect.
452
+
453
+ Returns
454
+ -------
455
+ filenames: list of strings
456
+ The list of file names in which the data is stored. If
457
+ compress is false, each array is stored in a different file.
458
+
459
+ See Also
460
+ --------
461
+ joblib.load : corresponding loader
462
+
463
+ Notes
464
+ -----
465
+ Memmapping on load cannot be used for compressed files. Thus
466
+ using compression can significantly slow down loading. In
467
+ addition, compressed files take up extra memory during
468
+ dump and load.
469
+
470
+ """
471
+
472
+ if Path is not None and isinstance(filename, Path):
473
+ filename = str(filename)
474
+
475
+ is_filename = isinstance(filename, str)
476
+ is_fileobj = hasattr(filename, "write")
477
+
478
+ compress_method = 'zlib' # zlib is the default compression method.
479
+ if compress is True:
480
+ # By default, if compress is enabled, we want the default compress
481
+ # level of the compressor.
482
+ compress_level = None
483
+ elif isinstance(compress, tuple):
484
+ # a 2-tuple was set in compress
485
+ if len(compress) != 2:
486
+ raise ValueError(
487
+ 'Compress argument tuple should contain exactly 2 elements: '
488
+ '(compress method, compress level), you passed {}'
489
+ .format(compress))
490
+ compress_method, compress_level = compress
491
+ elif isinstance(compress, str):
492
+ compress_method = compress
493
+ compress_level = None # Use default compress level
494
+ compress = (compress_method, compress_level)
495
+ else:
496
+ compress_level = compress
497
+
498
+ if compress_method == 'lz4' and lz4 is None:
499
+ raise ValueError(LZ4_NOT_INSTALLED_ERROR)
500
+
501
+ if (compress_level is not None and
502
+ compress_level is not False and
503
+ compress_level not in range(10)):
504
+ # Raising an error if a non valid compress level is given.
505
+ raise ValueError(
506
+ 'Non valid compress level given: "{}". Possible values are '
507
+ '{}.'.format(compress_level, list(range(10))))
508
+
509
+ if compress_method not in _COMPRESSORS:
510
+ # Raising an error if an unsupported compression method is given.
511
+ raise ValueError(
512
+ 'Non valid compression method given: "{}". Possible values are '
513
+ '{}.'.format(compress_method, _COMPRESSORS))
514
+
515
+ if not is_filename and not is_fileobj:
516
+ # People keep inverting arguments, and the resulting error is
517
+ # incomprehensible
518
+ raise ValueError(
519
+ 'Second argument should be a filename or a file-like object, '
520
+ '%s (type %s) was given.'
521
+ % (filename, type(filename))
522
+ )
523
+
524
+ if is_filename and not isinstance(compress, tuple):
525
+ # In case no explicit compression was requested using both compression
526
+ # method and level in a tuple and the filename has an explicit
527
+ # extension, we select the corresponding compressor.
528
+
529
+ # unset the variable to be sure no compression level is set afterwards.
530
+ compress_method = None
531
+ for name, compressor in _COMPRESSORS.items():
532
+ if filename.endswith(compressor.extension):
533
+ compress_method = name
534
+
535
+ if compress_method in _COMPRESSORS and compress_level == 0:
536
+ # we choose the default compress_level in case it was not given
537
+ # as an argument (using compress).
538
+ compress_level = None
539
+
540
+ if cache_size is not None:
541
+ # Cache size is deprecated starting from version 0.10
542
+ warnings.warn("Please do not set 'cache_size' in joblib.dump, "
543
+ "this parameter has no effect and will be removed. "
544
+ "You used 'cache_size={}'".format(cache_size),
545
+ DeprecationWarning, stacklevel=2)
546
+
547
+ if compress_level != 0:
548
+ with _write_fileobject(filename, compress=(compress_method,
549
+ compress_level)) as f:
550
+ NumpyPickler(f, protocol=protocol).dump(value)
551
+ elif is_filename:
552
+ with open(filename, 'wb') as f:
553
+ NumpyPickler(f, protocol=protocol).dump(value)
554
+ else:
555
+ NumpyPickler(filename, protocol=protocol).dump(value)
556
+
557
+ # If the target container is a file object, nothing is returned.
558
+ if is_fileobj:
559
+ return
560
+
561
+ # For compatibility, the list of created filenames (e.g with one element
562
+ # after 0.10.0) is returned by default.
563
+ return [filename]
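For illustration, a few hedged uses of the ``compress`` argument (the file paths are assumptions):

    import numpy as np
    from joblib import dump

    data = {'weights': np.arange(10**6)}
    dump(data, '/tmp/data.joblib')                          # no compression
    dump(data, '/tmp/data.joblib.gz')                       # gzip inferred from the extension
    dump(data, '/tmp/data.joblib.z', compress=('zlib', 3))  # explicit method and level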
564
+
565
+
566
+ def _unpickle(fobj, filename="", mmap_mode=None):
567
+ """Internal unpickling function."""
568
+ # We are careful to open the file handle early and keep it open to
569
+ # avoid race-conditions on renames.
570
+ # That said, if data is stored in companion files, which can be
571
+ # the case with the old persistence format, moving the directory
572
+ # will create a race when joblib tries to access the companion
573
+ # files.
574
+ unpickler = NumpyUnpickler(filename, fobj, mmap_mode=mmap_mode)
575
+ obj = None
576
+ try:
577
+ obj = unpickler.load()
578
+ if unpickler.compat_mode:
579
+ warnings.warn("The file '%s' has been generated with a "
580
+ "joblib version less than 0.10. "
581
+ "Please regenerate this pickle file."
582
+ % filename,
583
+ DeprecationWarning, stacklevel=3)
584
+ except UnicodeDecodeError as exc:
585
+ # More user-friendly error message
586
+ new_exc = ValueError(
587
+ 'You may be trying to read with '
588
+ 'python 3 a joblib pickle generated with python 2. '
589
+ 'This feature is not supported by joblib.')
590
+ new_exc.__cause__ = exc
591
+ raise new_exc
592
+ return obj
593
+
594
+
595
+ def load_temporary_memmap(filename, mmap_mode, unlink_on_gc_collect):
596
+ from ._memmapping_reducer import JOBLIB_MMAPS, add_maybe_unlink_finalizer
597
+ obj = load(filename, mmap_mode)
598
+ JOBLIB_MMAPS.add(obj.filename)
599
+ if unlink_on_gc_collect:
600
+ add_maybe_unlink_finalizer(obj)
601
+ return obj
602
+
603
+
604
+ def load(filename, mmap_mode=None):
605
+ """Reconstruct a Python object from a file persisted with joblib.dump.
606
+
607
+ Read more in the :ref:`User Guide <persistence>`.
608
+
609
+ WARNING: joblib.load relies on the pickle module and can therefore
610
+ execute arbitrary Python code. It should therefore never be used
611
+ to load files from untrusted sources.
612
+
613
+ Parameters
614
+ ----------
615
+ filename: str, pathlib.Path, or file object.
616
+ The file object or path of the file from which to load the object
617
+ mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, optional
618
+ If not None, the arrays are memory-mapped from the disk. This
619
+ mode has no effect for compressed files. Note that in this
620
+ case the reconstructed object might no longer match exactly
621
+ the originally pickled object.
622
+
623
+ Returns
624
+ -------
625
+ result: any Python object
626
+ The object stored in the file.
627
+
628
+ See Also
629
+ --------
630
+ joblib.dump : function to save an object
631
+
632
+ Notes
633
+ -----
634
+
635
+ This function can load numpy array files saved separately during the
636
+ dump. If the mmap_mode argument is given, it is passed to np.load and
637
+ arrays are loaded as memmaps. As a consequence, the reconstructed
638
+ object might not match the original pickled object. Note that if the
639
+ file was saved with compression, the arrays cannot be memmapped.
640
+ """
641
+ if Path is not None and isinstance(filename, Path):
642
+ filename = str(filename)
643
+
644
+ if hasattr(filename, "read"):
645
+ fobj = filename
646
+ filename = getattr(fobj, 'name', '')
647
+ with _read_fileobject(fobj, filename, mmap_mode) as fobj:
648
+ obj = _unpickle(fobj)
649
+ else:
650
+ with open(filename, 'rb') as f:
651
+ with _read_fileobject(f, filename, mmap_mode) as fobj:
652
+ if isinstance(fobj, str):
653
+ # if the returned file object is a string, this means we
654
+ # try to load a pickle file generated with an version of
655
+ # Joblib so we load it with joblib compatibility function.
656
+ return load_compatibility(fobj)
657
+
658
+ obj = _unpickle(fobj, filename, mmap_mode)
659
+ return obj
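A minimal sketch of loading the files from the ``dump`` example back, with and without memory mapping (paths are assumptions):

    from joblib import load

    obj = load('/tmp/data.joblib')                # regular, in-memory load
    mm = load('/tmp/data.joblib', mmap_mode='r')  # uncompressed arrays come back memory-mapped read-only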
llmeval-env/lib/python3.10/site-packages/joblib/numpy_pickle_utils.py ADDED
@@ -0,0 +1,253 @@
+ """Utilities for fast persistence of big data, with optional compression."""
+
+ # Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
+ # Copyright (c) 2009 Gael Varoquaux
+ # License: BSD Style, 3 clauses.
+
+ import pickle
+ import io
+ import sys
+ import warnings
+ import contextlib
+
+ from .compressor import _ZFILE_PREFIX
+ from .compressor import _COMPRESSORS
+
+ try:
+     import numpy as np
+ except ImportError:
+     np = None
+
+ Unpickler = pickle._Unpickler
+ Pickler = pickle._Pickler
+ xrange = range
+
+
+ try:
+     # The python standard library can be built without bz2 so we make bz2
+     # usage optional.
+     # see https://github.com/scikit-learn/scikit-learn/issues/7526 for more
+     # details.
+     import bz2
+ except ImportError:
+     bz2 = None
+
+ # Buffer size used in io.BufferedReader and io.BufferedWriter
+ _IO_BUFFER_SIZE = 1024 ** 2
+
+
+ def _is_raw_file(fileobj):
+     """Check if fileobj is a raw file object, e.g. created with open."""
+     fileobj = getattr(fileobj, 'raw', fileobj)
+     return isinstance(fileobj, io.FileIO)
+
+
+ def _get_prefixes_max_len():
+     # Compute the max prefix len of registered compressors.
+     prefixes = [len(compressor.prefix) for compressor in _COMPRESSORS.values()]
+     prefixes += [len(_ZFILE_PREFIX)]
+     return max(prefixes)
+
+
+ def _is_numpy_array_byte_order_mismatch(array):
+     """Check if a numpy array has a byte order mismatch with the host."""
+     return ((sys.byteorder == 'big' and
+              (array.dtype.byteorder == '<' or
+               (array.dtype.byteorder == '|' and array.dtype.fields and
+                all(e[0].byteorder == '<'
+                    for e in array.dtype.fields.values())))) or
+             (sys.byteorder == 'little' and
+              (array.dtype.byteorder == '>' or
+               (array.dtype.byteorder == '|' and array.dtype.fields and
+                all(e[0].byteorder == '>'
+                    for e in array.dtype.fields.values())))))
+
+
+ def _ensure_native_byte_order(array):
+     """Use the byte order of the host while preserving values
+
+     Does nothing if the array already uses the system byte order.
+     """
+     if _is_numpy_array_byte_order_mismatch(array):
+         array = array.byteswap().view(array.dtype.newbyteorder('='))
+     return array
+
+
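
The byteswap-and-relabel idiom used by `_ensure_native_byte_order` can be checked in isolation with plain numpy (a standalone sketch, not part of the diffed file):

    import numpy as np

    native = np.arange(4, dtype=np.int32)
    foreign = native.astype(native.dtype.newbyteorder())  # non-native byte order
    # Swap the bytes in memory, then relabel the dtype as native ('='):
    fixed = foreign.byteswap().view(foreign.dtype.newbyteorder('='))
    assert fixed.dtype.isnative and (fixed == native).all()
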
+ ###############################################################################
+ # Cache file utilities
+ def _detect_compressor(fileobj):
+     """Return the compressor matching fileobj.
+
+     Parameters
+     ----------
+     fileobj: file object
+
+     Returns
+     -------
+     str in {'zlib', 'gzip', 'bz2', 'lzma', 'xz', 'compat', 'not-compressed'}
+     """
+     # Read the magic number in the first bytes of the file.
+     max_prefix_len = _get_prefixes_max_len()
+     if hasattr(fileobj, 'peek'):
+         # Peek allows reading those bytes without moving the cursor in the
+         # file.
+         first_bytes = fileobj.peek(max_prefix_len)
+     else:
+         # Fallback to seek if the fileobject is not peekable.
+         first_bytes = fileobj.read(max_prefix_len)
+         fileobj.seek(0)
+
+     if first_bytes.startswith(_ZFILE_PREFIX):
+         return "compat"
+     else:
+         for name, compressor in _COMPRESSORS.items():
+             if first_bytes.startswith(compressor.prefix):
+                 return name
+
+     return "not-compressed"
+
+
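
Magic-number sniffing of this kind can be reproduced with the standard library alone; the `sniff` helper below is a hypothetical illustration using the well-known gzip, bz2 and xz prefixes:

    import gzip, bz2, lzma

    MAGIC = {b'\x1f\x8b': 'gzip', b'BZh': 'bz2', b'\xfd7zXZ': 'xz'}

    def sniff(data: bytes) -> str:
        for prefix, name in MAGIC.items():
            if data.startswith(prefix):
                return name
        return 'not-compressed'

    assert sniff(gzip.compress(b'payload')) == 'gzip'
    assert sniff(bz2.compress(b'payload')) == 'bz2'
    assert sniff(lzma.compress(b'payload')) == 'xz'  # lzma defaults to the xz container
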
+ def _buffered_read_file(fobj):
+     """Return a buffered version of a read file object."""
+     return io.BufferedReader(fobj, buffer_size=_IO_BUFFER_SIZE)
+
+
+ def _buffered_write_file(fobj):
+     """Return a buffered version of a write file object."""
+     return io.BufferedWriter(fobj, buffer_size=_IO_BUFFER_SIZE)
+
+
+ @contextlib.contextmanager
+ def _read_fileobject(fileobj, filename, mmap_mode=None):
+     """Utility function opening the right fileobject from a filename.
+
+     The magic number is used to choose between the types of file object to
+     open:
+     * regular file object (default)
+     * zlib file object
+     * gzip file object
+     * bz2 file object
+     * lzma file object (for xz and lzma compressors)
+
+     Parameters
+     ----------
+     fileobj: file object
+     compressor: str in {'zlib', 'gzip', 'bz2', 'lzma', 'xz', 'compat',
+                         'not-compressed'}
+     filename: str
+         filename path corresponding to the fileobj parameter.
+     mmap_mode: str
+         memory map mode that should be used to open the pickle file. This
+         parameter is useful to verify that the user is not trying to
+         memory-map a compressed file. Default: None.
+
+     Returns
+     -------
+     a file like object
+
+     """
+     # Detect if the fileobj contains compressed data.
+     compressor = _detect_compressor(fileobj)
+
+     if compressor == 'compat':
+         # Compatibility with old pickle mode: simply return the input
+         # filename "as-is" and let the compatibility function be called by the
+         # caller.
+         warnings.warn("The file '%s' has been generated with a joblib "
+                       "version less than 0.10. "
+                       "Please regenerate this pickle file." % filename,
+                       DeprecationWarning, stacklevel=2)
+         yield filename
+     else:
+         if compressor in _COMPRESSORS:
+             # based on the compressor detected in the file, we open the
+             # correct decompressor file object, wrapped in a buffer.
+             compressor_wrapper = _COMPRESSORS[compressor]
+             inst = compressor_wrapper.decompressor_file(fileobj)
+             fileobj = _buffered_read_file(inst)
+
+         # Check for load parameters that are incompatible with the type of
+         # file: mmap_mode cannot be used with compressed files or in-memory
+         # buffers such as io.BytesIO.
+         if mmap_mode is not None:
+             if isinstance(fileobj, io.BytesIO):
+                 warnings.warn('In memory persistence is not compatible with '
+                               'mmap_mode "%(mmap_mode)s" flag passed. '
+                               'mmap_mode option will be ignored.'
+                               % locals(), stacklevel=2)
+             elif compressor != 'not-compressed':
+                 warnings.warn('mmap_mode "%(mmap_mode)s" is not compatible '
+                               'with compressed file %(filename)s. '
+                               '"%(mmap_mode)s" flag will be ignored.'
+                               % locals(), stacklevel=2)
+             elif not _is_raw_file(fileobj):
+                 warnings.warn('"%(fileobj)r" is not a raw file, mmap_mode '
+                               '"%(mmap_mode)s" flag will be ignored.'
+                               % locals(), stacklevel=2)
+
+         yield fileobj
+
+
+ def _write_fileobject(filename, compress=("zlib", 3)):
+     """Return the right compressor file object in write mode."""
+     compressmethod = compress[0]
+     compresslevel = compress[1]
+
+     if compressmethod in _COMPRESSORS.keys():
+         file_instance = _COMPRESSORS[compressmethod].compressor_file(
+             filename, compresslevel=compresslevel)
+         return _buffered_write_file(file_instance)
+     else:
+         file_instance = _COMPRESSORS['zlib'].compressor_file(
+             filename, compresslevel=compresslevel)
+         return _buffered_write_file(file_instance)
+
+
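
On the write side, this is what the `compress` argument of `joblib.dump` ultimately drives; a short usage sketch (file paths illustrative):

    import joblib

    data = {'weights': list(range(1000))}
    # (method, level) tuples select the compressor and its level:
    joblib.dump(data, '/tmp/data.pkl.gz', compress=('gzip', 3))
    joblib.dump(data, '/tmp/data.pkl.z', compress=('zlib', 9))
    assert joblib.load('/tmp/data.pkl.gz') == data
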
+ # Utility functions/variables from numpy required for writing arrays.
+ # We need at least the functions introduced in version 1.9 of numpy. Here,
+ # we use the ones from numpy 1.10.2.
+ BUFFER_SIZE = 2 ** 18  # size of buffer for reading npz files in bytes
+
+
+ def _read_bytes(fp, size, error_template="ran out of data"):
+     """Read from file-like object until size bytes are read.
+
+     TODO python2_drop: is it still needed? The docstring mentions python 2.6
+     and it looks like this can be at least simplified ...
+
+     Raises ValueError if EOF is encountered before size bytes are read.
+     Non-blocking objects are only supported if they derive from io objects.
+
+     Required as e.g. ZipExtFile in python 2.6 can return less data than
+     requested.
+
+     This function was taken from numpy/lib/format.py in version 1.10.2.
+
+     Parameters
+     ----------
+     fp: file-like object
+     size: int
+     error_template: str
+
+     Returns
+     -------
+     a bytes object
+         The data read in bytes.
+
+     """
+     data = bytes()
+     while True:
+         # io files (default in python3) return None or raise on
+         # would-block; python2 file will truncate, probably nothing can be
+         # done about that. Note that regular files can't be non-blocking.
+         try:
+             r = fp.read(size - len(data))
+             data += r
+             if len(r) == 0 or len(data) == size:
+                 break
+         except io.BlockingIOError:
+             pass
+     if len(data) != size:
+         msg = "EOF: reading %s, expected %d bytes got %d"
+         raise ValueError(msg % (error_template, size, len(data)))
+     else:
+         return data
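
A quick sketch of the contract, using the private helper from this module (the import path follows the diffed file but may change between joblib versions):

    import io
    from joblib.numpy_pickle_utils import _read_bytes

    buf = io.BytesIO(b'abcdef')
    assert _read_bytes(buf, 4) == b'abcd'  # loops until exactly 4 bytes are read
    try:
        _read_bytes(buf, 4)                # only 2 bytes remain
    except ValueError as e:
        print(e)                           # EOF: reading ran out of data, expected 4 bytes got 2
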
llmeval-env/lib/python3.10/site-packages/joblib/parallel.py ADDED
@@ -0,0 +1,2010 @@
+ """
+ Helpers for embarrassingly parallel code.
+ """
+ # Author: Gael Varoquaux < gael dot varoquaux at normalesup dot org >
+ # Copyright: 2010, Gael Varoquaux
+ # License: BSD 3 clause
+
+ from __future__ import division
+
+ import os
+ import sys
+ from math import sqrt
+ import functools
+ import collections
+ import time
+ import threading
+ import itertools
+ from uuid import uuid4
+ from numbers import Integral
+ import warnings
+ import queue
+ import weakref
+ from contextlib import nullcontext
+
+ from multiprocessing import TimeoutError
+
+ from ._multiprocessing_helpers import mp
+
+ from .logger import Logger, short_format_time
+ from .disk import memstr_to_bytes
+ from ._parallel_backends import (FallbackToBackend, MultiprocessingBackend,
+                                  ThreadingBackend, SequentialBackend,
+                                  LokyBackend)
+ from ._utils import eval_expr, _Sentinel
+
+ # Make sure that those two classes are part of the public joblib.parallel API
+ # so that 3rd party backend implementers can import them from here.
+ from ._parallel_backends import AutoBatchingMixin  # noqa
+ from ._parallel_backends import ParallelBackendBase  # noqa
+
+
+ IS_PYPY = hasattr(sys, "pypy_version_info")
+
+
+ BACKENDS = {
+     'threading': ThreadingBackend,
+     'sequential': SequentialBackend,
+ }
+ # name of the backend used by default by Parallel outside of any context
+ # managed by ``parallel_config`` or ``parallel_backend``.
+
+ # threading is the only backend that is always available everywhere
+ DEFAULT_BACKEND = 'threading'
+
+ MAYBE_AVAILABLE_BACKENDS = {'multiprocessing', 'loky'}
+
+ # if multiprocessing is available, so is loky, and we set it as the default
+ # backend
+ if mp is not None:
+     BACKENDS['multiprocessing'] = MultiprocessingBackend
+     from .externals import loky
+     BACKENDS['loky'] = LokyBackend
+     DEFAULT_BACKEND = 'loky'
+
+
+ DEFAULT_THREAD_BACKEND = 'threading'
+
+
+ # Thread local value that can be overridden by the ``parallel_config`` context
+ # manager
+ _backend = threading.local()
+
+
+ def _register_dask():
+     """Register Dask Backend if called with parallel_config(backend="dask")"""
+     try:
+         from ._dask import DaskDistributedBackend
+         register_parallel_backend('dask', DaskDistributedBackend)
+     except ImportError as e:
+         msg = ("To use the dask.distributed backend you must install both "
+                "the `dask` and `distributed` modules.\n\n"
+                "See https://dask.pydata.org/en/latest/install.html for more "
+                "information.")
+         raise ImportError(msg) from e
+
+
+ EXTERNAL_BACKENDS = {
+     'dask': _register_dask,
+ }
+
+
+ # Sentinels for the default values of the Parallel constructor and
+ # the parallel_config and parallel_backend context managers
+ default_parallel_config = {
+     "backend": _Sentinel(default_value=None),
+     "n_jobs": _Sentinel(default_value=None),
+     "verbose": _Sentinel(default_value=0),
+     "temp_folder": _Sentinel(default_value=None),
+     "max_nbytes": _Sentinel(default_value="1M"),
+     "mmap_mode": _Sentinel(default_value="r"),
+     "prefer": _Sentinel(default_value=None),
+     "require": _Sentinel(default_value=None),
+ }
+
+
+ VALID_BACKEND_HINTS = ('processes', 'threads', None)
+ VALID_BACKEND_CONSTRAINTS = ('sharedmem', None)
+
+
+ def _get_config_param(param, context_config, key):
+     """Return the value of a parallel config parameter
+
+     Explicitly setting it in Parallel has priority over setting it in a
+     parallel_(config/backend) context manager.
+     """
+     if param is not default_parallel_config[key]:
+         # param is explicitly set, return it
+         return param
+
+     if context_config[key] is not default_parallel_config[key]:
+         # there's a context manager and the key is set, return it
+         return context_config[key]
+
+     # Otherwise, we are in the default_parallel_config,
+     # return the default value
+     return param.default_value
+
+
+ def get_active_backend(
+     prefer=default_parallel_config["prefer"],
+     require=default_parallel_config["require"],
+     verbose=default_parallel_config["verbose"],
+ ):
+     """Return the active default backend"""
+     backend, config = _get_active_backend(prefer, require, verbose)
+     n_jobs = _get_config_param(
+         default_parallel_config['n_jobs'], config, "n_jobs"
+     )
+     return backend, n_jobs
+
+
+ def _get_active_backend(
+     prefer=default_parallel_config["prefer"],
+     require=default_parallel_config["require"],
+     verbose=default_parallel_config["verbose"],
+ ):
+     """Return the active default backend"""
+
+     backend_config = getattr(_backend, "config", default_parallel_config)
+
+     backend = _get_config_param(
+         default_parallel_config['backend'], backend_config, "backend"
+     )
+     prefer = _get_config_param(prefer, backend_config, "prefer")
+     require = _get_config_param(require, backend_config, "require")
+     verbose = _get_config_param(verbose, backend_config, "verbose")
+
+     if prefer not in VALID_BACKEND_HINTS:
+         raise ValueError(
+             f"prefer={prefer} is not a valid backend hint, "
+             f"expected one of {VALID_BACKEND_HINTS}"
+         )
+     if require not in VALID_BACKEND_CONSTRAINTS:
+         raise ValueError(
+             f"require={require} is not a valid backend constraint, "
+             f"expected one of {VALID_BACKEND_CONSTRAINTS}"
+         )
+     if prefer == 'processes' and require == 'sharedmem':
+         raise ValueError(
+             "prefer == 'processes' and require == 'sharedmem'"
+             " are inconsistent settings"
+         )
+
+     explicit_backend = True
+     if backend is None:
+
+         # We are either outside of the scope of any parallel_(config/backend)
+         # context manager or the context manager did not set a backend.
+         # Create the default backend instance now.
+         backend = BACKENDS[DEFAULT_BACKEND](nesting_level=0)
+         explicit_backend = False
+
+     # Try to use the backend set by the user with the context manager.
+
+     nesting_level = backend.nesting_level
+     uses_threads = getattr(backend, 'uses_threads', False)
+     supports_sharedmem = getattr(backend, 'supports_sharedmem', False)
+     # Force the use of a thread-based backend if the provided backend does
+     # not match the shared memory constraint, or if the backend is not
+     # explicitly given and threads are preferred.
+     force_threads = (require == 'sharedmem' and not supports_sharedmem)
+     force_threads |= (
+         not explicit_backend and prefer == 'threads' and not uses_threads
+     )
+     if force_threads:
+         # This backend does not match the shared memory constraint:
+         # fallback to the default thread-based backend.
+         sharedmem_backend = BACKENDS[DEFAULT_THREAD_BACKEND](
+             nesting_level=nesting_level
+         )
+         # Warn the user if we forced the backend to thread-based, while the
+         # user explicitly specified a non-thread-based backend.
+         if verbose >= 10 and explicit_backend:
+             print(
+                 f"Using {sharedmem_backend.__class__.__name__} as "
+                 f"joblib backend instead of {backend.__class__.__name__} "
+                 "as the latter does not provide shared memory semantics."
+             )
+         # Force to n_jobs=1 by default
+         thread_config = backend_config.copy()
+         thread_config['n_jobs'] = 1
+         return sharedmem_backend, thread_config
+
+     return backend, backend_config
+
+
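
The `require='sharedmem'` fallback implemented above is observable from the public API; a small sketch:

    from joblib import Parallel, delayed

    shared = []
    # 'sharedmem' forces a thread-based backend, so all workers can append
    # to the same list object even when the default backend is loky.
    Parallel(n_jobs=2, require='sharedmem')(
        delayed(shared.append)(i) for i in range(4))
    print(sorted(shared))  # [0, 1, 2, 3]
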
+ class parallel_config:
+     """Set the default backend or configuration for :class:`~joblib.Parallel`.
+
+     This is an alternative to directly passing keyword arguments to the
+     :class:`~joblib.Parallel` class constructor. It is particularly useful when
+     calling into library code that uses joblib internally but does not expose
+     the various parallel configuration arguments in its own API.
+
+     Parameters
+     ----------
+     backend: str or ParallelBackendBase instance, default=None
+         If ``backend`` is a string it must match a previously registered
+         implementation using the :func:`~register_parallel_backend` function.
+
+         By default the following backends are available:
+
+         - 'loky': single-host, process-based parallelism (used by default),
+         - 'threading': single-host, thread-based parallelism,
+         - 'multiprocessing': legacy single-host, process-based parallelism.
+
+         'loky' is recommended to run functions that manipulate Python objects.
+         'threading' is a low-overhead alternative that is most efficient for
+         functions that release the Global Interpreter Lock: e.g. I/O-bound
+         code or CPU-bound code in a few calls to native code that explicitly
+         releases the GIL. Note that on some rare systems (such as pyodide),
+         multiprocessing and loky may not be available, in which case joblib
+         defaults to threading.
+
+         In addition, if the ``dask`` and ``distributed`` Python packages are
+         installed, it is possible to use the 'dask' backend for better
+         scheduling of nested parallel calls without over-subscription and
+         potentially distribute parallel calls over a networked cluster of
+         several hosts.
+
+         It is also possible to use the distributed 'ray' backend for
+         distributing the workload to a cluster of nodes. See more details
+         in the Examples section below.
+
+         Alternatively the backend can be passed directly as an instance.
+
+     n_jobs: int, default=None
+         The maximum number of concurrently running jobs, such as the number
+         of Python worker processes when ``backend="loky"`` or the size of the
+         thread-pool when ``backend="threading"``.
+         This argument is converted to an integer, rounded below for float.
+         If -1 is given, `joblib` tries to use all CPUs. The number of CPUs
+         ``n_cpus`` is obtained with :func:`~cpu_count`.
+         For n_jobs below -1, (n_cpus + 1 + n_jobs) are used. For instance,
+         using ``n_jobs=-2`` will result in all CPUs but one being used.
+         This argument can also go above ``n_cpus``, which will cause
+         oversubscription. In some cases, slight oversubscription can be
+         beneficial, e.g., for tasks with large I/O operations.
+         If 1 is given, no parallel computing code is used at all, and the
+         behavior amounts to a simple python `for` loop. This mode is not
+         compatible with `timeout`.
+         None is a marker for 'unset' that will be interpreted as n_jobs=1
+         unless the call is performed under a :func:`~parallel_config`
+         context manager that sets another value for ``n_jobs``.
+         If n_jobs = 0 then a ValueError is raised.
+
+     verbose: int, default=0
+         The verbosity level: if non zero, progress messages are
+         printed. Above 50, the output is sent to stdout.
+         The frequency of the messages increases with the verbosity level.
+         If it is more than 10, all iterations are reported.
+
+     temp_folder: str or None, default=None
+         Folder to be used by the pool for memmapping large arrays
+         for sharing memory with worker processes. If None, this will try in
+         order:
+
+         - a folder pointed to by the ``JOBLIB_TEMP_FOLDER`` environment
+           variable,
+         - ``/dev/shm`` if the folder exists and is writable: this is a
+           RAM disk filesystem available by default on modern Linux
+           distributions,
+         - the default system temporary folder that can be
+           overridden with ``TMP``, ``TMPDIR`` or ``TEMP`` environment
+           variables, typically ``/tmp`` under Unix operating systems.
+
+     max_nbytes: int, str, or None, optional, default='1M'
+         Threshold on the size of arrays passed to the workers that
+         triggers automated memory mapping in temp_folder. Can be an int
+         in Bytes, or a human-readable string, e.g., '1M' for 1 megabyte.
+         Use None to disable memmapping of large arrays.
+
+     mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, default='r'
+         Memmapping mode for numpy arrays passed to workers. None will
+         disable memmapping, other modes defined in the numpy.memmap doc:
+         https://numpy.org/doc/stable/reference/generated/numpy.memmap.html
+         Also, see 'max_nbytes' parameter documentation for more details.
+
+     prefer: str in {'processes', 'threads'} or None, default=None
+         Soft hint to choose the default backend.
+         The default process-based backend is 'loky' and the default
+         thread-based backend is 'threading'. Ignored if the ``backend``
+         parameter is specified.
+
+     require: 'sharedmem' or None, default=None
+         Hard constraint to select the backend. If set to 'sharedmem',
+         the selected backend will be single-host and thread-based.
+
+     inner_max_num_threads: int, default=None
+         If not None, overwrites the limit set on the number of threads
+         usable in some third-party library threadpools like OpenBLAS,
+         MKL or OpenMP. This is only used with the ``loky`` backend.
+
+     backend_params: dict
+         Additional parameters to pass to the backend constructor when
+         backend is a string.
+
+     Notes
+     -----
+     Joblib tries to limit the oversubscription by limiting the number of
+     threads usable in some third-party library threadpools like OpenBLAS, MKL
+     or OpenMP. The default limit in each worker is set to
+     ``max(cpu_count() // effective_n_jobs, 1)`` but this limit can be
+     overwritten with the ``inner_max_num_threads`` argument which will be used
+     to set this limit in the child processes.
+
+     .. versionadded:: 1.3
+
+     Examples
+     --------
+     >>> from operator import neg
+     >>> with parallel_config(backend='threading'):
+     ...     print(Parallel()(delayed(neg)(i + 1) for i in range(5)))
+     ...
+     [-1, -2, -3, -4, -5]
+
+     To use the 'ray' joblib backend add the following lines:
+
+     >>> from ray.util.joblib import register_ray  # doctest: +SKIP
+     >>> register_ray()  # doctest: +SKIP
+     >>> with parallel_config(backend="ray"):  # doctest: +SKIP
+     ...     print(Parallel()(delayed(neg)(i + 1) for i in range(5)))
+     [-1, -2, -3, -4, -5]
+
+     """
+     def __init__(
+         self,
+         backend=default_parallel_config["backend"],
+         *,
+         n_jobs=default_parallel_config["n_jobs"],
+         verbose=default_parallel_config["verbose"],
+         temp_folder=default_parallel_config["temp_folder"],
+         max_nbytes=default_parallel_config["max_nbytes"],
+         mmap_mode=default_parallel_config["mmap_mode"],
+         prefer=default_parallel_config["prefer"],
+         require=default_parallel_config["require"],
+         inner_max_num_threads=None,
+         **backend_params
+     ):
+         # Save the parallel info and set the active parallel config
+         self.old_parallel_config = getattr(
+             _backend, "config", default_parallel_config
+         )
+
+         backend = self._check_backend(
+             backend, inner_max_num_threads, **backend_params
+         )
+
+         new_config = {
+             "n_jobs": n_jobs,
+             "verbose": verbose,
+             "temp_folder": temp_folder,
+             "max_nbytes": max_nbytes,
+             "mmap_mode": mmap_mode,
+             "prefer": prefer,
+             "require": require,
+             "backend": backend
+         }
+         self.parallel_config = self.old_parallel_config.copy()
+         self.parallel_config.update({
+             k: v for k, v in new_config.items()
+             if not isinstance(v, _Sentinel)
+         })
+
+         setattr(_backend, "config", self.parallel_config)
+
+     def _check_backend(self, backend, inner_max_num_threads, **backend_params):
+         if backend is default_parallel_config['backend']:
+             if inner_max_num_threads is not None or len(backend_params) > 0:
+                 raise ValueError(
+                     "inner_max_num_threads and other constructor "
+                     "parameters backend_params are only supported "
+                     "when backend is not None."
+                 )
+             return backend
+
+         if isinstance(backend, str):
+             # Handle non-registered or missing backends
+             if backend not in BACKENDS:
+                 if backend in EXTERNAL_BACKENDS:
+                     register = EXTERNAL_BACKENDS[backend]
+                     register()
+                 elif backend in MAYBE_AVAILABLE_BACKENDS:
+                     warnings.warn(
+                         f"joblib backend '{backend}' is not available on "
+                         f"your system, falling back to {DEFAULT_BACKEND}.",
+                         UserWarning,
+                         stacklevel=2
+                     )
+                     BACKENDS[backend] = BACKENDS[DEFAULT_BACKEND]
+                 else:
+                     raise ValueError(
+                         f"Invalid backend: {backend}, expected one of "
+                         f"{sorted(BACKENDS.keys())}"
+                     )
+
+             backend = BACKENDS[backend](**backend_params)
+
+         if inner_max_num_threads is not None:
+             msg = (
+                 f"{backend.__class__.__name__} does not accept setting the "
+                 "inner_max_num_threads argument."
+             )
+             assert backend.supports_inner_max_num_threads, msg
+             backend.inner_max_num_threads = inner_max_num_threads
+
+         # If the nesting_level of the backend is not set previously, use the
+         # nesting level from the previous active_backend to set it
+         if backend.nesting_level is None:
+             parent_backend = self.old_parallel_config['backend']
+             if parent_backend is default_parallel_config['backend']:
+                 nesting_level = 0
+             else:
+                 nesting_level = parent_backend.nesting_level
+             backend.nesting_level = nesting_level
+
+         return backend
+
+     def __enter__(self):
+         return self.parallel_config
+
+     def __exit__(self, type, value, traceback):
+         self.unregister()
+
+     def unregister(self):
+         setattr(_backend, "config", self.old_parallel_config)
+
+
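
A short sketch of the nested-threadpool cap described in the Notes above:

    from joblib import Parallel, delayed, parallel_config

    # Each loky worker will limit OpenBLAS/MKL/OpenMP pools to 2 threads,
    # so 4 workers never oversubscribe an 8-core machine.
    with parallel_config(backend='loky', inner_max_num_threads=2):
        results = Parallel(n_jobs=4)(delayed(pow)(i, 2) for i in range(8))
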
+ class parallel_backend(parallel_config):
+     """Change the default backend used by Parallel inside a with block.
+
+     .. warning::
+         It is advised to use the :class:`~joblib.parallel_config` context
+         manager instead, which allows more fine-grained control over the
+         backend configuration.
+
+     If ``backend`` is a string it must match a previously registered
+     implementation using the :func:`~register_parallel_backend` function.
+
+     By default the following backends are available:
+
+     - 'loky': single-host, process-based parallelism (used by default),
+     - 'threading': single-host, thread-based parallelism,
+     - 'multiprocessing': legacy single-host, process-based parallelism.
+
+     'loky' is recommended to run functions that manipulate Python objects.
+     'threading' is a low-overhead alternative that is most efficient for
+     functions that release the Global Interpreter Lock: e.g. I/O-bound code or
+     CPU-bound code in a few calls to native code that explicitly releases the
+     GIL. Note that on some rare systems (such as Pyodide),
+     multiprocessing and loky may not be available, in which case joblib
+     defaults to threading.
+
+     You can also use the `Dask <https://docs.dask.org/en/stable/>`_ joblib
+     backend to distribute work across machines. This works well with
+     scikit-learn estimators with the ``n_jobs`` parameter, for example::
+
+     >>> import joblib  # doctest: +SKIP
+     >>> from sklearn.model_selection import GridSearchCV  # doctest: +SKIP
+     >>> from dask.distributed import Client, LocalCluster  # doctest: +SKIP
+
+     >>> # create a local Dask cluster
+     >>> cluster = LocalCluster()  # doctest: +SKIP
+     >>> client = Client(cluster)  # doctest: +SKIP
+     >>> grid_search = GridSearchCV(estimator, param_grid, n_jobs=-1)
+     ...     # doctest: +SKIP
+     >>> with joblib.parallel_backend("dask", scatter=[X, y]):  # doctest: +SKIP
+     ...     grid_search.fit(X, y)
+
+     It is also possible to use the distributed 'ray' backend for distributing
+     the workload to a cluster of nodes. To use the 'ray' joblib backend add
+     the following lines::
+
+     >>> from ray.util.joblib import register_ray  # doctest: +SKIP
+     >>> register_ray()  # doctest: +SKIP
+     >>> with parallel_backend("ray"):  # doctest: +SKIP
+     ...     print(Parallel()(delayed(neg)(i + 1) for i in range(5)))
+     [-1, -2, -3, -4, -5]
+
+     Alternatively the backend can be passed directly as an instance.
+
+     By default all available workers will be used (``n_jobs=-1``) unless the
+     caller passes an explicit value for the ``n_jobs`` parameter.
+
+     This is an alternative to passing a ``backend='backend_name'`` argument to
+     the :class:`~Parallel` class constructor. It is particularly useful when
+     calling into library code that uses joblib internally but does not expose
+     the backend argument in its own API.
+
+     >>> from operator import neg
+     >>> with parallel_backend('threading'):
+     ...     print(Parallel()(delayed(neg)(i + 1) for i in range(5)))
+     ...
+     [-1, -2, -3, -4, -5]
+
+     Joblib also tries to limit the oversubscription by limiting the number of
+     threads usable in some third-party library threadpools like OpenBLAS, MKL
+     or OpenMP. The default limit in each worker is set to
+     ``max(cpu_count() // effective_n_jobs, 1)`` but this limit can be
+     overwritten with the ``inner_max_num_threads`` argument which will be used
+     to set this limit in the child processes.
+
+     .. versionadded:: 0.10
+
+     See Also
+     --------
+     joblib.parallel_config: context manager to change the backend
+         configuration.
+     """
+     def __init__(self, backend, n_jobs=-1, inner_max_num_threads=None,
+                  **backend_params):
+
+         super().__init__(
+             backend=backend,
+             n_jobs=n_jobs,
+             inner_max_num_threads=inner_max_num_threads,
+             **backend_params
+         )
+
+         if self.old_parallel_config is None:
+             self.old_backend_and_jobs = None
+         else:
+             self.old_backend_and_jobs = (
+                 self.old_parallel_config["backend"],
+                 self.old_parallel_config["n_jobs"],
+             )
+         self.new_backend_and_jobs = (
+             self.parallel_config["backend"],
+             self.parallel_config["n_jobs"],
+         )
+
+     def __enter__(self):
+         return self.new_backend_and_jobs
+
+
+ # Under Linux or OS X the default start method of multiprocessing
+ # can cause third party libraries to crash. Under Python 3.4+ it is possible
+ # to set an environment variable to switch the default start method from
+ # 'fork' to 'forkserver' or 'spawn' to avoid this issue albeit at the cost
+ # of causing semantic changes and some additional pool instantiation overhead.
+ DEFAULT_MP_CONTEXT = None
+ if hasattr(mp, 'get_context'):
+     method = os.environ.get('JOBLIB_START_METHOD', '').strip() or None
+     if method is not None:
+         DEFAULT_MP_CONTEXT = mp.get_context(method=method)
+
+
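
Since the environment variable above is read once at module import, it has to be set before joblib is imported; a sketch:

    import os
    os.environ['JOBLIB_START_METHOD'] = 'forkserver'  # read at import time

    import joblib  # multiprocessing workers now use the forkserver context
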
+ class BatchedCalls(object):
+     """Wrap a sequence of (func, args, kwargs) tuples as a single callable"""
+
+     def __init__(self, iterator_slice, backend_and_jobs, reducer_callback=None,
+                  pickle_cache=None):
+         self.items = list(iterator_slice)
+         self._size = len(self.items)
+         self._reducer_callback = reducer_callback
+         if isinstance(backend_and_jobs, tuple):
+             self._backend, self._n_jobs = backend_and_jobs
+         else:
+             # this is for backward compatibility purposes. Before 0.12.6,
+             # nested backends were returned without n_jobs indications.
+             self._backend, self._n_jobs = backend_and_jobs, None
+         self._pickle_cache = pickle_cache if pickle_cache is not None else {}
+
+     def __call__(self):
+         # Set the default nested backend to self._backend but do not
+         # change the default number of processes to -1
+         with parallel_config(backend=self._backend, n_jobs=self._n_jobs):
+             return [func(*args, **kwargs)
+                     for func, args, kwargs in self.items]
+
+     def __reduce__(self):
+         if self._reducer_callback is not None:
+             self._reducer_callback()
+         # no need to pickle the callback.
+         return (
+             BatchedCalls,
+             (self.items, (self._backend, self._n_jobs), None,
+              self._pickle_cache)
+         )
+
+     def __len__(self):
+         return self._size
+
+
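
Conceptually a batch is just "run these captured calls in order"; a sketch using the internal classes above (private API, subject to change between joblib versions):

    from joblib.parallel import BatchedCalls
    from joblib._parallel_backends import SequentialBackend

    batch = BatchedCalls([(abs, (-1,), {}), (pow, (2, 3), {})],
                         (SequentialBackend(nesting_level=0), 1))
    print(batch())     # [1, 8]
    print(len(batch))  # 2
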
+ # Possible exit status for a task
+ TASK_DONE = "Done"
+ TASK_ERROR = "Error"
+ TASK_PENDING = "Pending"
+
+
+ ###############################################################################
+ # CPU count that works also when multiprocessing has been disabled via
+ # the JOBLIB_MULTIPROCESSING environment variable
+ def cpu_count(only_physical_cores=False):
+     """Return the number of CPUs.
+
+     This delegates to loky.cpu_count that takes into account additional
+     constraints such as Linux CFS scheduler quotas (typically set by container
+     runtimes such as docker) and CPU affinity (for instance using the taskset
+     command on Linux).
+
+     If only_physical_cores is True, do not take hyperthreading / SMT logical
+     cores into account.
+     """
+     if mp is None:
+         return 1
+
+     return loky.cpu_count(only_physical_cores=only_physical_cores)
+
+
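
A quick comparison against the standard library counter (values are machine-dependent):

    import os
    import joblib

    print(os.cpu_count())                              # raw logical CPU count
    print(joblib.cpu_count())                          # respects quotas and affinity
    print(joblib.cpu_count(only_physical_cores=True))  # ignores SMT siblings
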
+ ###############################################################################
+ # For verbosity
+
+ def _verbosity_filter(index, verbose):
+     """Returns False for indices increasingly far apart, with a distance
+     that depends on the value of verbose.
+
+     We use a lag increasing as the square of index.
+     """
+     if not verbose:
+         return True
+     elif verbose > 10:
+         return False
+     if index == 0:
+         return False
+     verbose = .5 * (11 - verbose) ** 2
+     scale = sqrt(index / verbose)
+     next_scale = sqrt((index + 1) / verbose)
+     return (int(next_scale) == int(scale))
+
+
+ ###############################################################################
+ def delayed(function):
+     """Decorator used to capture the arguments of a function."""
+
+     def delayed_function(*args, **kwargs):
+         return function, args, kwargs
+     try:
+         delayed_function = functools.wraps(function)(delayed_function)
+     except AttributeError:
+         # functools.wraps fails on some callable objects
+         pass
+     return delayed_function
+
+
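
What `delayed` captures, concretely:

    from joblib import delayed

    def add(a, b):
        return a + b

    task = delayed(add)(1, b=2)
    print(task)  # (<function add at 0x...>, (1,), {'b': 2})
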
+ ###############################################################################
+ class BatchCompletionCallBack(object):
+     """Callback to keep track of completed results and schedule the next tasks.
+
+     This callable is executed by the parent process whenever a worker process
+     has completed a batch of tasks.
+
+     It is used for progress reporting, to update the estimate of the batch
+     processing duration and to schedule the next batch of tasks to be
+     processed.
+
+     It is assumed that this callback will always be triggered by the backend
+     right after the end of a task, in case of success as well as in case of
+     failure.
+     """
+
+     ##########################################################################
+     #                   METHODS CALLED BY THE MAIN THREAD                    #
+     ##########################################################################
+     def __init__(self, dispatch_timestamp, batch_size, parallel):
+         self.dispatch_timestamp = dispatch_timestamp
+         self.batch_size = batch_size
+         self.parallel = parallel
+         self.parallel_call_id = parallel._call_id
+
+         # Internals to keep track of the status and outcome of the task.
+
+         # Used to hold a reference to the future-like object returned by the
+         # backend after launching this task.
+         # This will be set later when calling `register_job`, as it is only
+         # created once the task has been submitted.
+         self.job = None
+
+         if not parallel._backend.supports_retrieve_callback:
+             # The status is only used for asynchronous result retrieval in the
+             # callback.
+             self.status = None
+         else:
+             # The initial status for the job is TASK_PENDING.
+             # Once it is done, it will be either TASK_DONE, or TASK_ERROR.
+             self.status = TASK_PENDING
+
+     def register_job(self, job):
+         """Register the object returned by `apply_async`."""
+         self.job = job
+
+     def get_result(self, timeout):
+         """Returns the raw result of the task that was submitted.
+
+         If the task raised an exception rather than returning, this same
+         exception will be raised instead.
+
+         If the backend supports the retrieval callback, it is assumed that
+         this method is only called after the result has been registered. This
+         is ensured by checking that `self.get_status(timeout)` does not
+         return TASK_PENDING. In this case, `get_result` directly returns the
+         registered result (or raises the registered exception).
+
+         For other backends, there are no such assumptions, but `get_result`
+         still needs to synchronously retrieve the result before it can
+         return it or raise. It will block at most `timeout` seconds waiting
+         for retrieval to complete, after that it raises a TimeoutError.
+         """
+
+         backend = self.parallel._backend
+
+         if backend.supports_retrieve_callback:
+             # We assume that the result has already been retrieved by the
+             # callback thread, and is stored internally. It's just waiting to
+             # be returned.
+             return self._return_or_raise()
+
+         # For other backends, the main thread needs to run the retrieval step.
+         try:
+             if backend.supports_timeout:
+                 result = self.job.get(timeout=timeout)
+             else:
+                 result = self.job.get()
+             outcome = dict(result=result, status=TASK_DONE)
+         except BaseException as e:
+             outcome = dict(result=e, status=TASK_ERROR)
+         self._register_outcome(outcome)
+
+         return self._return_or_raise()
+
+     def _return_or_raise(self):
+         try:
+             if self.status == TASK_ERROR:
+                 raise self._result
+             return self._result
+         finally:
+             del self._result
+
+     def get_status(self, timeout):
+         """Get the status of the task.
+
+         This function also checks if the timeout has been reached and
+         registers the TimeoutError outcome when it is the case.
+         """
+         if timeout is None or self.status != TASK_PENDING:
+             return self.status
+
+         # The computation is running and the status is pending.
+         # Check that we did not wait for this job more than `timeout`.
+         now = time.time()
+         if not hasattr(self, "_completion_timeout_counter"):
+             self._completion_timeout_counter = now
+
+         if (now - self._completion_timeout_counter) > timeout:
+             outcome = dict(result=TimeoutError(), status=TASK_ERROR)
+             self._register_outcome(outcome)
+
+         return self.status
+
+     ##########################################################################
+     #                   METHODS CALLED BY CALLBACK THREADS                   #
+     ##########################################################################
+     def __call__(self, out):
+         """Function called by the callback thread after a job is completed."""
+
+         # If the backend doesn't support callback retrievals, the next batch of
+         # tasks is dispatched regardless. The result will be retrieved by the
+         # main thread when calling `get_result`.
+         if not self.parallel._backend.supports_retrieve_callback:
+             self._dispatch_new()
+             return
+
+         # If the backend supports retrieving the result in the callback, it
+         # registers the task outcome (TASK_ERROR or TASK_DONE), and schedules
+         # the next batch if needed.
+         with self.parallel._lock:
+             # Edge case where while the task was processing, the `parallel`
+             # instance has been reset and a new call has been issued, but the
+             # worker managed to complete the task and trigger this callback
+             # call just before being aborted by the reset.
+             if self.parallel._call_id != self.parallel_call_id:
+                 return
+
+             # When aborting, stop as fast as possible and do not retrieve the
+             # result as it won't be returned by the Parallel call.
+             if self.parallel._aborting:
+                 return
+
+             # Retrieve the result of the task in the main process and dispatch
+             # a new batch if needed.
+             job_succeeded = self._retrieve_result(out)
+
+             if not self.parallel.return_ordered:
+                 # Append the job to the queue in the order of completion
+                 # instead of submission.
+                 self.parallel._jobs.append(self)
+
+         if job_succeeded:
+             self._dispatch_new()
+
+     def _dispatch_new(self):
+         """Schedule the next batch of tasks to be processed."""
+
+         # This step ensures that auto-batching works as expected.
+         this_batch_duration = time.time() - self.dispatch_timestamp
+         self.parallel._backend.batch_completed(self.batch_size,
+                                                this_batch_duration)
+
+         # Schedule the next batch of tasks.
+         with self.parallel._lock:
+             self.parallel.n_completed_tasks += self.batch_size
+             self.parallel.print_progress()
+             if self.parallel._original_iterator is not None:
+                 self.parallel.dispatch_next()
+
+     def _retrieve_result(self, out):
+         """Fetch and register the outcome of a task.
+
+         Return True if the task succeeded, False otherwise.
+         This function is only called by backends that support retrieving
+         the task result in the callback thread.
+         """
+         try:
+             result = self.parallel._backend.retrieve_result_callback(out)
+             outcome = dict(status=TASK_DONE, result=result)
+         except BaseException as e:
+             # Avoid keeping references to parallel in the error.
+             e.__traceback__ = None
+             outcome = dict(result=e, status=TASK_ERROR)
+
+         self._register_outcome(outcome)
+         return outcome['status'] != TASK_ERROR
+
+     ##########################################################################
+     #            This method can be called either in the main thread         #
+     #                       or in the callback thread.                       #
+     ##########################################################################
+     def _register_outcome(self, outcome):
+         """Register the outcome of a task.
+
+         This method can be called only once, future calls will be ignored.
+         """
+         # Covers the edge case where the main thread tries to register a
+         # `TimeoutError` while the callback thread tries to register a result
+         # at the same time.
+         with self.parallel._lock:
+             if self.status not in (TASK_PENDING, None):
+                 return
+             self.status = outcome["status"]
+
+         self._result = outcome["result"]
+
+         # Once the result and the status are extracted, the last reference to
+         # the job can be deleted.
+         self.job = None
+
+         # As soon as an error has been spotted, early stopping flags are sent
+         # to the `parallel` instance.
+         if self.status == TASK_ERROR:
+             self.parallel._exception = True
+             self.parallel._aborting = True
+
+
+ ###############################################################################
+ def register_parallel_backend(name, factory, make_default=False):
+     """Register a new Parallel backend factory.
+
+     The new backend can then be selected by passing its name as the backend
+     argument to the :class:`~Parallel` class. Moreover, the default backend can
+     be overwritten globally by setting make_default=True.
+
+     The factory can be any callable that takes no argument and returns an
+     instance of ``ParallelBackendBase``.
+
+     Warning: this function is experimental and subject to change in a future
+     version of joblib.
+
+     .. versionadded:: 0.10
+     """
+     BACKENDS[name] = factory
+     if make_default:
+         global DEFAULT_BACKEND
+         DEFAULT_BACKEND = name
+
+
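
A small registration sketch, here simply aliasing the built-in threading backend under a custom name (a real third-party backend would subclass ParallelBackendBase instead):

    from joblib import Parallel, delayed, parallel_backend, register_parallel_backend
    from joblib._parallel_backends import ThreadingBackend

    register_parallel_backend('my_threads', ThreadingBackend)
    with parallel_backend('my_threads'):
        print(Parallel()(delayed(abs)(-i) for i in range(3)))  # [0, 1, 2]
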
+ def effective_n_jobs(n_jobs=-1):
+     """Determine the number of jobs that can actually run in parallel.
+
+     n_jobs is the number of workers requested by the caller. Passing n_jobs=-1
+     means requesting all available workers, for instance matching the number
+     of CPU cores on the worker host(s).
+
+     This method should return a guesstimate of the number of workers that can
+     actually perform work concurrently with the currently enabled default
+     backend. The primary use case is to make it possible for the caller to
+     know in how many chunks to slice the work.
+
+     In general working on larger data chunks is more efficient (less
+     scheduling overhead and better use of CPU cache prefetching heuristics)
+     as long as all the workers have enough work to do.
+
+     Warning: this function is experimental and subject to change in a future
+     version of joblib.
+
+     .. versionadded:: 0.10
+     """
+     if n_jobs == 1:
+         return 1
+
+     backend, backend_n_jobs = get_active_backend()
+     if n_jobs is None:
+         n_jobs = backend_n_jobs
+     return backend.effective_n_jobs(n_jobs=n_jobs)
+
+
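
For instance (output depends on the machine and on the active backend):

    import joblib

    print(joblib.effective_n_jobs(-1))  # e.g. 8 on an 8-CPU host with loky
    print(joblib.effective_n_jobs(-2))  # all CPUs but one, e.g. 7
    print(joblib.effective_n_jobs(1))   # always 1, no backend consulted
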
945
+ ###############################################################################
946
+ class Parallel(Logger):
947
+ ''' Helper class for readable parallel mapping.
948
+
949
+ Read more in the :ref:`User Guide <parallel>`.
950
+
951
+ Parameters
952
+ ----------
953
+ n_jobs: int, default=None
954
+ The maximum number of concurrently running jobs, such as the number
955
+ of Python worker processes when ``backend="loky"`` or the size of
956
+ the thread-pool when ``backend="threading"``.
957
+ This argument is converted to an integer, rounded below for float.
958
+ If -1 is given, `joblib` tries to use all CPUs. The number of CPUs
959
+ ``n_cpus`` is obtained with :func:`~cpu_count`.
960
+ For n_jobs below -1, (n_cpus + 1 + n_jobs) are used. For instance,
961
+ using ``n_jobs=-2`` will result in all CPUs but one being used.
962
+ This argument can also go above ``n_cpus``, which will cause
963
+ oversubscription. In some cases, slight oversubscription can be
964
+ beneficial, e.g., for tasks with large I/O operations.
965
+ If 1 is given, no parallel computing code is used at all, and the
966
+ behavior amounts to a simple python `for` loop. This mode is not
967
+ compatible with ``timeout``.
968
+ None is a marker for 'unset' that will be interpreted as n_jobs=1
969
+ unless the call is performed under a :func:`~parallel_config`
970
+ context manager that sets another value for ``n_jobs``.
971
+ If n_jobs = 0 then a ValueError is raised.
972
+ backend: str, ParallelBackendBase instance or None, default='loky'
973
+ Specify the parallelization backend implementation.
974
+ Supported backends are:
975
+
976
+ - "loky" used by default, can induce some
977
+ communication and memory overhead when exchanging input and
978
+ output data with the worker Python processes. On some rare
979
+ systems (such as Pyiodide), the loky backend may not be
980
+ available.
981
+ - "multiprocessing" previous process-based backend based on
982
+ `multiprocessing.Pool`. Less robust than `loky`.
983
+ - "threading" is a very low-overhead backend but it suffers
984
+ from the Python Global Interpreter Lock if the called function
985
+ relies a lot on Python objects. "threading" is mostly useful
986
+ when the execution bottleneck is a compiled extension that
987
+ explicitly releases the GIL (for instance a Cython loop wrapped
988
+ in a "with nogil" block or an expensive call to a library such
989
+ as NumPy).
990
+ - finally, you can register backends by calling
991
+ :func:`~register_parallel_backend`. This will allow you to
992
+ implement a backend of your liking.
993
+
994
+ It is not recommended to hard-code the backend name in a call to
995
+ :class:`~Parallel` in a library. Instead it is recommended to set
996
+ soft hints (prefer) or hard constraints (require) so as to make it
997
+ possible for library users to change the backend from the outside
998
+ using the :func:`~parallel_config` context manager.
999
+ return_as: str in {'list', 'generator', 'generator_unordered'}, default='list'
1000
+ If 'list', calls to this instance will return a list, only when
1001
+ all results have been processed and retrieved.
1002
+ If 'generator', it will return a generator that yields the results
1003
+ as soon as they are available, in the order the tasks have been
1004
+ submitted with.
1005
+ If 'generator_unordered', the generator will immediately yield
1006
+ available results independently of the submission order. The output
1007
+ order is not deterministic in this case because it depends on the
1008
+ concurrency of the workers.
1009
+ prefer: str in {'processes', 'threads'} or None, default=None
1010
+ Soft hint to choose the default backend if no specific backend
1011
+ was selected with the :func:`~parallel_config` context manager.
1012
+ The default process-based backend is 'loky' and the default
1013
+ thread-based backend is 'threading'. Ignored if the ``backend``
1014
+ parameter is specified.
1015
+ require: 'sharedmem' or None, default=None
1016
+ Hard constraint to select the backend. If set to 'sharedmem',
1017
+ the selected backend will be single-host and thread-based even
1018
+ if the user asked for a non-thread based backend with
1019
+ :func:`~joblib.parallel_config`.
1020
+ verbose: int, default=0
1021
+ The verbosity level: if non zero, progress messages are
1022
+ printed. Above 50, the output is sent to stdout.
1023
+ The frequency of the messages increases with the verbosity level.
1024
+ If it more than 10, all iterations are reported.
1025
+ timeout: float or None, default=None
1026
+ Timeout limit for each task to complete. If any task takes longer
1027
+ a TimeOutError will be raised. Only applied when n_jobs != 1
1028
+ pre_dispatch: {'all', integer, or expression, as in '3*n_jobs'}, default='2*n_jobs'
1029
+ The number of batches (of tasks) to be pre-dispatched.
1030
+ Default is '2*n_jobs'. When batch_size="auto" this is reasonable
1031
+ default and the workers should never starve. Note that only basic
1032
+ arithmetics are allowed here and no modules can be used in this
1033
+ expression.
1034
+ batch_size: int or 'auto', default='auto'
1035
+ The number of atomic tasks to dispatch at once to each
1036
+ worker. When individual evaluations are very fast, dispatching
1037
+ calls to workers can be slower than sequential computation because
1038
+ of the overhead. Batching fast computations together can mitigate
1039
+ this.
1040
+ The ``'auto'`` strategy keeps track of the time it takes for a
1041
+ batch to complete, and dynamically adjusts the batch size to keep
1042
+ the time on the order of half a second, using a heuristic. The
1043
+ initial batch size is 1.
1044
+ ``batch_size="auto"`` with ``backend="threading"`` will dispatch
1045
+ batches of a single task at a time as the threading backend has
1046
+ very little overhead and using larger batch size has not proved to
1047
+ bring any gain in that case.
1048
+ temp_folder: str or None, default=None
1049
+ Folder to be used by the pool for memmapping large arrays
1050
+ for sharing memory with worker processes. If None, this will try in
1051
+ order:
1052
+
1053
+ - a folder pointed by the JOBLIB_TEMP_FOLDER environment
1054
+ variable,
1055
+ - /dev/shm if the folder exists and is writable: this is a
1056
+ RAM disk filesystem available by default on modern Linux
1057
+ distributions,
1058
+ - the default system temporary folder that can be
1059
+ overridden with TMP, TMPDIR or TEMP environment
1060
+ variables, typically /tmp under Unix operating systems.
1061
+
1062
+ Only active when ``backend="loky"`` or ``"multiprocessing"``.
1063
+ max_nbytes int, str, or None, optional, default='1M'
1064
+ Threshold on the size of arrays passed to the workers that
1065
+ triggers automated memory mapping in temp_folder. Can be an int
1066
+ in Bytes, or a human-readable string, e.g., '1M' for 1 megabyte.
1067
+ Use None to disable memmapping of large arrays.
1068
+ Only active when ``backend="loky"`` or ``"multiprocessing"``.
1069
+ mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, default='r'
1070
+ Memmapping mode for numpy arrays passed to workers. None will
1071
+ disable memmapping, other modes defined in the numpy.memmap doc:
1072
+ https://numpy.org/doc/stable/reference/generated/numpy.memmap.html
1073
+ Also, see 'max_nbytes' parameter documentation for more details.
1074
+
1075
+ Notes
1076
+ -----
1077
+
1078
+ This object uses workers to compute in parallel the application of a
1079
+ function to many different arguments. The main functionality it brings
1080
+ in addition to using the raw multiprocessing or concurrent.futures API
1081
+ are (see examples for details):
1082
+
1083
+ * More readable code, in particular since it avoids
1084
+ constructing list of arguments.
1085
+
1086
+ * Easier debugging:
1087
+ - informative tracebacks even when the error happens on
1088
+ the client side
1089
+ - using 'n_jobs=1' enables to turn off parallel computing
1090
+ for debugging without changing the codepath
1091
+ - early capture of pickling errors
1092
+
1093
+ * An optional progress meter.
1094
+
1095
+ * Interruption of multiprocesses jobs with 'Ctrl-C'
1096
+
1097
+ * Flexible pickling control for the communication to and from
1098
+ the worker processes.
1099
+
1100
+ * Ability to use shared memory efficiently with worker
1101
+ processes for large numpy-based datastructures.
1102
+
1103
+ Note that the intended usage is to run one call at a time. Multiple
1104
+ calls to the same Parallel object will result in a ``RuntimeError``.
1105
+
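+ To reuse the same pool of workers across several consecutive calls,
+ wrap the Parallel object in a ``with`` block instead:
+
+ >>> with Parallel(n_jobs=2) as parallel: #doctest: +SKIP
+ ... squares = parallel(delayed(pow)(i, 2) for i in range(4))
+ ... cubes = parallel(delayed(pow)(i, 3) for i in range(4))
+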
1106
+ Examples
1107
+ --------
1108
+
1109
+ A simple example:
1110
+
1111
+ >>> from math import sqrt
1112
+ >>> from joblib import Parallel, delayed
1113
+ >>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
1114
+ [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
1115
+
1116
+ Reshaping the output when the function has several return
1117
+ values:
1118
+
1119
+ >>> from math import modf
1120
+ >>> from joblib import Parallel, delayed
1121
+ >>> r = Parallel(n_jobs=1)(delayed(modf)(i/2.) for i in range(10))
1122
+ >>> res, i = zip(*r)
1123
+ >>> res
1124
+ (0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5)
1125
+ >>> i
1126
+ (0.0, 0.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0)
1127
+
1128
+ The progress meter: the higher the value of `verbose`, the more
1129
+ messages:
1130
+
1131
+ >>> from time import sleep
1132
+ >>> from joblib import Parallel, delayed
1133
+ >>> r = Parallel(n_jobs=2, verbose=10)(
1134
+ ... delayed(sleep)(.2) for _ in range(10)) #doctest: +SKIP
1135
+ [Parallel(n_jobs=2)]: Done 1 tasks | elapsed: 0.6s
1136
+ [Parallel(n_jobs=2)]: Done 4 tasks | elapsed: 0.8s
1137
+ [Parallel(n_jobs=2)]: Done 10 out of 10 | elapsed: 1.4s finished
1138
+
1139
+ Traceback example, note how the line of the error is indicated
1140
+ as well as the values of the parameters passed to the function that
1141
+ triggered the exception, even though the traceback happens in the
1142
+ child process:
1143
+
1144
+ >>> from heapq import nlargest
1145
+ >>> from joblib import Parallel, delayed
1146
+ >>> Parallel(n_jobs=2)(
1147
+ ... delayed(nlargest)(2, n) for n in (range(4), 'abcde', 3))
1148
+ ... # doctest: +SKIP
1149
+ -----------------------------------------------------------------------
1150
+ Sub-process traceback:
1151
+ -----------------------------------------------------------------------
1152
+ TypeError Mon Nov 12 11:37:46 2012
1153
+ PID: 12934 Python 2.7.3: /usr/bin/python
1154
+ ........................................................................
1155
+ /usr/lib/python2.7/heapq.pyc in nlargest(n=2, iterable=3, key=None)
1156
+ 419 if n >= size:
1157
+ 420 return sorted(iterable, key=key, reverse=True)[:n]
1158
+ 421
1159
+ 422 # When key is none, use simpler decoration
1160
+ 423 if key is None:
1161
+ --> 424 it = izip(iterable, count(0,-1)) # decorate
1162
+ 425 result = _nlargest(n, it)
1163
+ 426 return map(itemgetter(0), result) # undecorate
1164
+ 427
1165
+ 428 # General case, slowest method
1166
+ TypeError: izip argument #1 must support iteration
1167
+ _______________________________________________________________________
1168
+
1169
+
1170
+ Using pre_dispatch in a producer/consumer situation, where the
1171
+ data is generated on the fly. Note how the producer is first
1172
+ called 3 times before the parallel loop is initiated, and then
1173
+ called to generate new data on the fly:
1174
+
1175
+ >>> from math import sqrt
1176
+ >>> from joblib import Parallel, delayed
1177
+ >>> def producer():
1178
+ ... for i in range(6):
1179
+ ... print('Produced %s' % i)
1180
+ ... yield i
1181
+ >>> out = Parallel(n_jobs=2, verbose=100, pre_dispatch='1.5*n_jobs')(
1182
+ ... delayed(sqrt)(i) for i in producer()) #doctest: +SKIP
1183
+ Produced 0
1184
+ Produced 1
1185
+ Produced 2
1186
+ [Parallel(n_jobs=2)]: Done 1 jobs | elapsed: 0.0s
1187
+ Produced 3
1188
+ [Parallel(n_jobs=2)]: Done 2 jobs | elapsed: 0.0s
1189
+ Produced 4
1190
+ [Parallel(n_jobs=2)]: Done 3 jobs | elapsed: 0.0s
1191
+ Produced 5
1192
+ [Parallel(n_jobs=2)]: Done 4 jobs | elapsed: 0.0s
1193
+ [Parallel(n_jobs=2)]: Done 6 out of 6 | elapsed: 0.0s remaining: 0.0s
1194
+ [Parallel(n_jobs=2)]: Done 6 out of 6 | elapsed: 0.0s finished
1195
+
1196
+ ''' # noqa: E501
1197
+ def __init__(
1198
+ self,
1199
+ n_jobs=default_parallel_config["n_jobs"],
1200
+ backend=default_parallel_config['backend'],
1201
+ return_as="list",
1202
+ verbose=default_parallel_config["verbose"],
1203
+ timeout=None,
1204
+ pre_dispatch='2 * n_jobs',
1205
+ batch_size='auto',
1206
+ temp_folder=default_parallel_config["temp_folder"],
1207
+ max_nbytes=default_parallel_config["max_nbytes"],
1208
+ mmap_mode=default_parallel_config["mmap_mode"],
1209
+ prefer=default_parallel_config["prefer"],
1210
+ require=default_parallel_config["require"],
1211
+ ):
1212
+ # Initiate parent Logger class state
1213
+ super().__init__()
1214
+
1215
+ # Interpret n_jobs=None as 'unset'
1216
+ if n_jobs is None:
1217
+ n_jobs = default_parallel_config["n_jobs"]
1218
+
1219
+ active_backend, context_config = _get_active_backend(
1220
+ prefer=prefer, require=require, verbose=verbose
1221
+ )
1222
+
1223
+ nesting_level = active_backend.nesting_level
1224
+
1225
+ self.verbose = _get_config_param(verbose, context_config, "verbose")
1226
+ self.timeout = timeout
1227
+ self.pre_dispatch = pre_dispatch
1228
+
1229
+ if return_as not in {"list", "generator", "generator_unordered"}:
1230
+ raise ValueError(
1231
+ 'Expected `return_as` parameter to be a string equal to "list"'
1232
+ f',"generator" or "generator_unordered", but got {return_as} '
1233
+ "instead."
1234
+ )
1235
+ self.return_as = return_as
1236
+ self.return_generator = return_as != "list"
1237
+ self.return_ordered = return_as != "generator_unordered"
1238
+
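+ # Illustrative usage sketch: with return_as="generator", the results
+ # can be consumed lazily, as soon as the workers complete them:
+ #
+ # gen = Parallel(n_jobs=2, return_as="generator")(
+ # delayed(abs)(-i) for i in range(4))
+ # first = next(gen) # blocks only until the first task finishes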
1239
+ # Check if we are under a parallel_config or parallel_backend
1240
+ # context manager and use the config from the context manager
1241
+ # for arguments that are not explicitly set.
1242
+ self._backend_args = {
1243
+ k: _get_config_param(param, context_config, k) for param, k in [
1244
+ (max_nbytes, "max_nbytes"),
1245
+ (temp_folder, "temp_folder"),
1246
+ (mmap_mode, "mmap_mode"),
1247
+ (prefer, "prefer"),
1248
+ (require, "require"),
1249
+ (verbose, "verbose"),
1250
+ ]
1251
+ }
1252
+
1253
+ if isinstance(self._backend_args["max_nbytes"], str):
1254
+ self._backend_args["max_nbytes"] = memstr_to_bytes(
1255
+ self._backend_args["max_nbytes"]
1256
+ )
1257
+ self._backend_args["verbose"] = max(
1258
+ 0, self._backend_args["verbose"] - 50
1259
+ )
1260
+
1261
+ if DEFAULT_MP_CONTEXT is not None:
1262
+ self._backend_args['context'] = DEFAULT_MP_CONTEXT
1263
+ elif hasattr(mp, "get_context"):
1264
+ self._backend_args['context'] = mp.get_context()
1265
+
1266
+ if backend is default_parallel_config['backend'] or backend is None:
1267
+ backend = active_backend
1268
+
1269
+ elif isinstance(backend, ParallelBackendBase):
1270
+ # Use provided backend as is, with the current nesting_level if it
1271
+ # is not set yet.
1272
+ if backend.nesting_level is None:
1273
+ backend.nesting_level = nesting_level
1274
+
1275
+ elif hasattr(backend, 'Pool') and hasattr(backend, 'Lock'):
1276
+ # Make it possible to pass a custom multiprocessing context as
1277
+ # backend to change the start method to forkserver or spawn or
1278
+ # preload modules on the forkserver helper process.
1279
+ self._backend_args['context'] = backend
1280
+ backend = MultiprocessingBackend(nesting_level=nesting_level)
1281
+
1282
+ elif backend not in BACKENDS and backend in MAYBE_AVAILABLE_BACKENDS:
1283
+ warnings.warn(
1284
+ f"joblib backend '{backend}' is not available on "
1285
+ f"your system, falling back to {DEFAULT_BACKEND}.",
1286
+ UserWarning,
1287
+ stacklevel=2)
1288
+ BACKENDS[backend] = BACKENDS[DEFAULT_BACKEND]
1289
+ backend = BACKENDS[DEFAULT_BACKEND](nesting_level=nesting_level)
1290
+
1291
+ else:
1292
+ try:
1293
+ backend_factory = BACKENDS[backend]
1294
+ except KeyError as e:
1295
+ raise ValueError("Invalid backend: %s, expected one of %r"
1296
+ % (backend, sorted(BACKENDS.keys()))) from e
1297
+ backend = backend_factory(nesting_level=nesting_level)
1298
+
1299
+ n_jobs = _get_config_param(n_jobs, context_config, "n_jobs")
1300
+ if n_jobs is None:
1301
+ # No specific context override and no specific value request:
1302
+ # default to the default of the backend.
1303
+ n_jobs = backend.default_n_jobs
1304
+ try:
1305
+ n_jobs = int(n_jobs)
1306
+ except ValueError:
1307
+ raise ValueError("n_jobs could not be converted to int")
1308
+ self.n_jobs = n_jobs
1309
+
1310
+ if (require == 'sharedmem' and
1311
+ not getattr(backend, 'supports_sharedmem', False)):
1312
+ raise ValueError("Backend %s does not support shared memory"
1313
+ % backend)
1314
+
1315
+ if (batch_size == 'auto' or isinstance(batch_size, Integral) and
1316
+ batch_size > 0):
1317
+ self.batch_size = batch_size
1318
+ else:
1319
+ raise ValueError(
1320
+ "batch_size must be 'auto' or a positive integer, got: %r"
1321
+ % batch_size)
1322
+
1323
+ if not isinstance(backend, SequentialBackend):
1324
+ if self.return_generator and not backend.supports_return_generator:
1325
+ raise ValueError(
1326
+ "Backend {} does not support "
1327
+ "return_as={}".format(backend, return_as)
1328
+ )
1329
+ # This lock is used to coordinate the main thread of this process
1330
+ # with the async callback thread of the pool.
1331
+ self._lock = threading.RLock()
1332
+ self._jobs = collections.deque()
1333
+ self._pending_outputs = list()
1334
+ self._ready_batches = queue.Queue()
1335
+ self._reducer_callback = None
1336
+
1337
+ # Internal variables
1338
+ self._backend = backend
1339
+ self._running = False
1340
+ self._managed_backend = False
1341
+ self._id = uuid4().hex
1342
+ self._call_ref = None
1343
+
1344
+ def __enter__(self):
1345
+ self._managed_backend = True
1346
+ self._calling = False
1347
+ self._initialize_backend()
1348
+ return self
1349
+
1350
+ def __exit__(self, exc_type, exc_value, traceback):
1351
+ self._managed_backend = False
1352
+ if self.return_generator and self._calling:
1353
+ self._abort()
1354
+ self._terminate_and_reset()
1355
+
1356
+ def _initialize_backend(self):
1357
+ """Build a process or thread pool and return the number of workers"""
1358
+ try:
1359
+ n_jobs = self._backend.configure(n_jobs=self.n_jobs, parallel=self,
1360
+ **self._backend_args)
1361
+ if self.timeout is not None and not self._backend.supports_timeout:
1362
+ warnings.warn(
1363
+ 'The backend class {!r} does not support timeout. '
1364
+ "You have set 'timeout={}' in Parallel but "
1365
+ "the 'timeout' parameter will not be used.".format(
1366
+ self._backend.__class__.__name__,
1367
+ self.timeout))
1368
+
1369
+ except FallbackToBackend as e:
1370
+ # Recursively initialize the backend in case of requested fallback.
1371
+ self._backend = e.backend
1372
+ n_jobs = self._initialize_backend()
1373
+
1374
+ return n_jobs
1375
+
1376
+ def _effective_n_jobs(self):
1377
+ if self._backend:
1378
+ return self._backend.effective_n_jobs(self.n_jobs)
1379
+ return 1
1380
+
1381
+ def _terminate_and_reset(self):
1382
+ if hasattr(self._backend, 'stop_call') and self._calling:
1383
+ self._backend.stop_call()
1384
+ self._calling = False
1385
+ if not self._managed_backend:
1386
+ self._backend.terminate()
1387
+
1388
+ def _dispatch(self, batch):
1389
+ """Queue the batch for computing, with or without multiprocessing
1390
+
1391
+ WARNING: this method is not thread-safe: it should only be called
1392
+ indirectly via dispatch_one_batch.
1393
+
1394
+ """
1395
+ # If job.get() catches an exception, it closes the queue:
1396
+ if self._aborting:
1397
+ return
1398
+
1399
+ batch_size = len(batch)
1400
+
1401
+ self.n_dispatched_tasks += batch_size
1402
+ self.n_dispatched_batches += 1
1403
+
1404
+ dispatch_timestamp = time.time()
1405
+
1406
+ batch_tracker = BatchCompletionCallBack(
1407
+ dispatch_timestamp, batch_size, self
1408
+ )
1409
+
1410
+ if self.return_ordered:
1411
+ self._jobs.append(batch_tracker)
1412
+
1413
+ # If return_ordered is False, the batch_tracker is not stored in the
1414
+ # jobs queue at the time of submission. Instead, it will be appended to
1415
+ # the queue by itself as soon as the callback is triggered to be able
1416
+ # to return the results in the order of completion.
1417
+
1418
+ job = self._backend.apply_async(batch, callback=batch_tracker)
1419
+ batch_tracker.register_job(job)
1420
+
1421
+ def dispatch_next(self):
1422
+ """Dispatch more data for parallel processing
1423
+
1424
+ This method is meant to be called concurrently by the multiprocessing
1425
+ callback. We rely on the thread-safety of dispatch_one_batch to protect
1426
+ against concurrent consumption of the unprotected iterator.
1427
+
1428
+ """
1429
+ if not self.dispatch_one_batch(self._original_iterator):
1430
+ self._iterating = False
1431
+ self._original_iterator = None
1432
+
1433
+ def dispatch_one_batch(self, iterator):
1434
+ """Prefetch the tasks for the next batch and dispatch them.
1435
+
1436
+ The effective size of the batch is computed here.
1437
+ If there are no more jobs to dispatch, return False, else return True.
1438
+
1439
+ The iterator consumption and dispatching are protected by the same
1440
+ lock, so calling this function should be thread-safe.
1441
+
1442
+ """
1443
+
1444
+ if self._aborting:
1445
+ return False
1446
+
1447
+ batch_size = self._get_batch_size()
1448
+
1449
+ with self._lock:
1450
+ # to ensure an even distribution of the workload between workers,
1451
+ # we look ahead in the original iterators more than batch_size
1452
+ # tasks - However, we keep consuming only one batch at each
1453
+ # dispatch_one_batch call. The extra tasks are stored in a local
1454
+ # queue, _ready_batches, that is looked up prior to re-consuming
1455
+ # tasks from the original iterator.
1456
+ try:
1457
+ tasks = self._ready_batches.get(block=False)
1458
+ except queue.Empty:
1459
+ # slice the iterator n_jobs * batch_size items at a time. If the
1460
+ # slice returns less than that, then the current batch size puts
1461
+ # too much weight on a subset of workers, while others may end
1462
+ # up starving. So in this case, rescale the batch size
1463
+ # accordingly to distribute the last items evenly between all
1464
+ # workers.
1465
+ n_jobs = self._cached_effective_n_jobs
1466
+ big_batch_size = batch_size * n_jobs
1467
+
1468
+ try:
1469
+ islice = list(itertools.islice(iterator, big_batch_size))
1470
+ except Exception as e:
1471
+ # Handle the fact that the generator of tasks raised an
1472
+ # exception. As this part of the code can be executed in
1473
+ # a thread internal to the backend, register a task with
1474
+ # an error that will be raised in the user's thread.
1475
+ if isinstance(e.__context__, queue.Empty):
1476
+ # Suppress the cause of the exception if it is
1477
+ # queue.Empty to avoid cluttered traceback. Only do it
1478
+ # if the __context__ really is queue.Empty to avoid messing
1479
+ # with causes of the original error.
1480
+ e.__cause__ = None
1481
+ batch_tracker = BatchCompletionCallBack(
1482
+ 0, batch_size, self
1483
+ )
1484
+ self._jobs.append(batch_tracker)
1485
+ batch_tracker._register_outcome(dict(
1486
+ result=e, status=TASK_ERROR
1487
+ ))
1488
+ return True
1489
+
1490
+ if len(islice) == 0:
1491
+ return False
1492
+ elif (iterator is self._original_iterator and
1493
+ len(islice) < big_batch_size):
1494
+ # We reached the end of the original iterator (unless
1495
+ # iterator is the ``pre_dispatch``-long initial slice of
1496
+ # the original iterator) -- decrease the batch size to
1497
+ # account for potential variance in the batches running
1498
+ # time.
1499
+ final_batch_size = max(1, len(islice) // (10 * n_jobs))
1500
+ else:
1501
+ final_batch_size = max(1, len(islice) // n_jobs)
1502
+
1503
+ # enqueue n_jobs batches in a local queue
1504
+ for i in range(0, len(islice), final_batch_size):
1505
+ tasks = BatchedCalls(islice[i:i + final_batch_size],
1506
+ self._backend.get_nested_backend(),
1507
+ self._reducer_callback,
1508
+ self._pickle_cache)
1509
+ self._ready_batches.put(tasks)
1510
+
1511
+ # finally, get one task.
1512
+ tasks = self._ready_batches.get(block=False)
1513
+ if len(tasks) == 0:
1514
+ # No more tasks available in the iterator: tell caller to stop.
1515
+ return False
1516
+ else:
1517
+ self._dispatch(tasks)
1518
+ return True
1519
+
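+ # Worked example of the re-batching above (numbers are illustrative):
+ # with n_jobs=4 and batch_size=10, dispatch_one_batch slices up to
+ # 40 items from the iterator. If only 12 items remain in the original
+ # iterator, the slice is short and the final batch size becomes
+ # max(1, 12 // (10 * 4)) == 1, so the 12 items are spread over 12
+ # one-task batches instead of starving three of the four workers.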
1520
+ def _get_batch_size(self):
1521
+ """Returns the effective batch size for dispatch"""
1522
+ if self.batch_size == 'auto':
1523
+ return self._backend.compute_batch_size()
1524
+ else:
1525
+ # Fixed batch size strategy
1526
+ return self.batch_size
1527
+
1528
+ def _print(self, msg):
1529
+ """Display the message on stout or stderr depending on verbosity"""
1530
+ # XXX: Not using the logger framework: need to
1531
+ # learn to use logger better.
1532
+ if not self.verbose:
1533
+ return
1534
+ if self.verbose < 50:
1535
+ writer = sys.stderr.write
1536
+ else:
1537
+ writer = sys.stdout.write
1538
+ writer(f"[{self}]: {msg}\n")
1539
+
1540
+ def _is_completed(self):
1541
+ """Check if all tasks have been completed"""
1542
+ return self.n_completed_tasks == self.n_dispatched_tasks and not (
1543
+ self._iterating or self._aborting
1544
+ )
1545
+
1546
+ def print_progress(self):
1547
+ """Display the process of the parallel execution only a fraction
1548
+ of the time, controlled by self.verbose.
1549
+ """
1550
+
1551
+ if not self.verbose:
1552
+ return
1553
+
1554
+ elapsed_time = time.time() - self._start_time
1555
+
1556
+ if self._is_completed():
1557
+ # Make sure that we get a last message telling us we are done
1558
+ self._print(
1559
+ f"Done {self.n_completed_tasks:3d} out of "
1560
+ f"{self.n_completed_tasks:3d} | elapsed: "
1561
+ f"{short_format_time(elapsed_time)} finished"
1562
+ )
1563
+ return
1564
+
1565
+ # Original job iterator becomes None once it has been fully
1566
+ # consumed: at this point we know the total number of jobs and we are
1567
+ # able to display an estimation of the remaining time based on already
1568
+ # completed jobs. Otherwise, we simply display the number of completed
1569
+ # tasks.
1570
+ elif self._original_iterator is not None:
1571
+ if _verbosity_filter(self.n_dispatched_batches, self.verbose):
1572
+ return
1573
+ self._print(
1574
+ f"Done {self.n_completed_tasks:3d} tasks | elapsed: "
1575
+ f"{short_format_time(elapsed_time)}"
1576
+ )
1577
+ else:
1578
+ index = self.n_completed_tasks
1579
+ # We are finished dispatching
1580
+ total_tasks = self.n_dispatched_tasks
1581
+ # We always display the first loop
1582
+ if not index == 0:
1583
+ # Display depending on the number of remaining items
1584
+ # A message as soon as we finish dispatching, cursor is 0
1585
+ cursor = (total_tasks - index + 1 -
1586
+ self._pre_dispatch_amount)
1587
+ frequency = (total_tasks // self.verbose) + 1
1588
+ is_last_item = (index + 1 == total_tasks)
1589
+ if (is_last_item or cursor % frequency):
1590
+ return
1591
+ remaining_time = (elapsed_time / index) * \
1592
+ (self.n_dispatched_tasks - index * 1.0)
1593
+ # only display status if remaining time is greater or equal to 0
1594
+ self._print(
1595
+ f"Done {index:3d} out of {total_tasks:3d} | elapsed: "
1596
+ f"{short_format_time(elapsed_time)} remaining: "
1597
+ f"{short_format_time(remaining_time)}"
1598
+ )
1599
+
1600
+ def _abort(self):
1601
+ # Stop dispatching new jobs in the async callback thread
1602
+ self._aborting = True
1603
+
1604
+ # If the backend allows it, cancel or kill remaining running
1605
+ # tasks without waiting for the results as we will raise
1606
+ # the exception we got back to the caller instead of returning
1607
+ # any result.
1608
+ backend = self._backend
1609
+ if (not self._aborted and hasattr(backend, 'abort_everything')):
1610
+ # If the backend is managed externally we need to make sure
1611
+ # to leave it in a working state to allow for future jobs
1612
+ # scheduling.
1613
+ ensure_ready = self._managed_backend
1614
+ backend.abort_everything(ensure_ready=ensure_ready)
1615
+ self._aborted = True
1616
+
1617
+ def _start(self, iterator, pre_dispatch):
1618
+ # Only set self._iterating to True if at least a batch
1619
+ # was dispatched. In particular this covers the edge
1620
+ # case of Parallel used with an exhausted iterator. If
1621
+ # self._original_iterator is None, then this means either
1622
+ # that pre_dispatch == "all", n_jobs == 1 or that the first batch
1623
+ # was very quick and its callback already dispatched all the
1624
+ # remaining jobs.
1625
+ self._iterating = False
1626
+ if self.dispatch_one_batch(iterator):
1627
+ self._iterating = self._original_iterator is not None
1628
+
1629
+ while self.dispatch_one_batch(iterator):
1630
+ pass
1631
+
1632
+ if pre_dispatch == "all":
1633
+ # The iterable was consumed all at once by the above while loop.
1634
+ # No need to wait for async callbacks to trigger
1635
+ # consumption.
1636
+ self._iterating = False
1637
+
1638
+ def _get_outputs(self, iterator, pre_dispatch):
1639
+ """Iterator returning the tasks' output as soon as they are ready."""
1640
+ dispatch_thread_id = threading.get_ident()
1641
+ detach_generator_exit = False
1642
+ try:
1643
+ self._start(iterator, pre_dispatch)
1644
+ # first yield returns None, for internal use only. This ensures
1645
+ # that we enter the try/except block and start dispatching the
1646
+ # tasks.
1647
+ yield
1648
+
1649
+ with self._backend.retrieval_context():
1650
+ yield from self._retrieve()
1651
+
1652
+ except GeneratorExit:
1653
+ # The generator has been garbage collected before being fully
1654
+ # consumed. This aborts the remaining tasks if possible and warns
1655
+ # the user if necessary.
1656
+ self._exception = True
1657
+
1658
+ # In some interpreters such as PyPy, GeneratorExit can be raised in
1659
+ # a different thread than the one used to start the dispatch of the
1660
+ # parallel tasks. This can lead to a hang when a thread attempts to
1661
+ # join itself. As a workaround, we detach the execution of the
1662
+ # aborting code to a dedicated thread. We then need to make sure
1663
+ # the rest of the function does not call `_terminate_and_reset`
1664
+ # in finally.
1665
+ if dispatch_thread_id != threading.get_ident():
1666
+ if not IS_PYPY:
1667
+ warnings.warn(
1668
+ "A generator produced by joblib.Parallel has been "
1669
+ "gc'ed in an unexpected thread. This behavior should "
1670
+ "not cause major -issues but to make sure, please "
1671
+ "report this warning and your use case at "
1672
+ "https://github.com/joblib/joblib/issues so it can "
1673
+ "be investigated."
1674
+ )
1675
+
1676
+ detach_generator_exit = True
1677
+ _parallel = self
1678
+
1679
+ class _GeneratorExitThread(threading.Thread):
1680
+ def run(self):
1681
+ _parallel._abort()
1682
+ if _parallel.return_generator:
1683
+ _parallel._warn_exit_early()
1684
+ _parallel._terminate_and_reset()
1685
+
1686
+ _GeneratorExitThread(
1687
+ name="GeneratorExitThread"
1688
+ ).start()
1689
+ return
1690
+
1691
+ # Otherwise, we are in the thread that started the dispatch: we can
1692
+ # safely abort the execution and warn the user.
1693
+ self._abort()
1694
+ if self.return_generator:
1695
+ self._warn_exit_early()
1696
+
1697
+ raise
1698
+
1699
+ # Note: we catch any BaseException instead of just Exception instances
1700
+ # to also include KeyboardInterrupt
1701
+ except BaseException:
1702
+ self._exception = True
1703
+ self._abort()
1704
+ raise
1705
+ finally:
1706
+ # Store the unconsumed tasks and terminate the workers if necessary
1707
+ _remaining_outputs = ([] if self._exception else self._jobs)
1708
+ self._jobs = collections.deque()
1709
+ self._running = False
1710
+ if not detach_generator_exit:
1711
+ self._terminate_and_reset()
1712
+
1713
+ while len(_remaining_outputs) > 0:
1714
+ batched_results = _remaining_outputs.popleft()
1715
+ batched_results = batched_results.get_result(self.timeout)
1716
+ for result in batched_results:
1717
+ yield result
1718
+
1719
+ def _wait_retrieval(self):
1720
+ """Return True if we need to continue retrieving some tasks."""
1721
+
1722
+ # If the input load is still being iterated over, it means that tasks
1723
+ # are still on the dispatch waitlist and their results will need to
1724
+ # be retrieved later on.
1725
+ if self._iterating:
1726
+ return True
1727
+
1728
+ # If some of the dispatched tasks are still being processed by the
1729
+ # workers, wait for the compute to finish before starting retrieval
1730
+ if self.n_completed_tasks < self.n_dispatched_tasks:
1731
+ return True
1732
+
1733
+ # For backends that do not support asynchronously retrieving the
1734
+ # results in the main process, all results must be carefully retrieved
1735
+ # in the _retrieve loop in the main thread while the backend is alive.
1736
+ # For other backends, the actual retrieval is done asynchronously in
1737
+ # the callback thread, and we can terminate the backend before the
1738
+ # `self._jobs` result list has been emptied. The remaining results
1739
+ # will be collected in the `finally` step of the generator.
1740
+ if not self._backend.supports_retrieve_callback:
1741
+ if len(self._jobs) > 0:
1742
+ return True
1743
+
1744
+ return False
1745
+
1746
+ def _retrieve(self):
1747
+ while self._wait_retrieval():
1748
+
1749
+ # If the callback thread of a worker has signaled that its task
1750
+ # triggered an exception, or if the retrieval loop has raised an
1751
+ # exception (e.g. `GeneratorExit`), exit the loop and surface the
1752
+ # worker traceback.
1753
+ if self._aborting:
1754
+ self._raise_error_fast()
1755
+ break
1756
+
1757
+ # If the next job is not ready for retrieval yet, we just wait for
1758
+ # async callbacks to progress.
1759
+ if ((len(self._jobs) == 0) or
1760
+ (self._jobs[0].get_status(
1761
+ timeout=self.timeout) == TASK_PENDING)):
1762
+ time.sleep(0.01)
1763
+ continue
1764
+
1765
+ # We need to be careful: the job list can be filling up as
1766
+ # we empty it and Python list are not thread-safe by
1767
+ # default hence the use of the lock
1768
+ with self._lock:
1769
+ batched_results = self._jobs.popleft()
1770
+
1771
+ # Flatten the batched results to output one output at a time
1772
+ batched_results = batched_results.get_result(self.timeout)
1773
+ for result in batched_results:
1774
+ self._nb_consumed += 1
1775
+ yield result
1776
+
1777
+ def _raise_error_fast(self):
1778
+ """If we are aborting, raise if a job caused an error."""
1779
+
1780
+ # Find the first job whose status is TASK_ERROR if it exists.
1781
+ with self._lock:
1782
+ error_job = next((job for job in self._jobs
1783
+ if job.status == TASK_ERROR), None)
1784
+
1785
+ # If this error job exists, immediately raise the error by
1786
+ # calling get_result. This job might not exist if abort has been
1787
+ # called directly or if the generator is gc'ed.
1788
+ if error_job is not None:
1789
+ error_job.get_result(self.timeout)
1790
+
1791
+ def _warn_exit_early(self):
1792
+ """Warn the user if the generator is gc'ed before being consumned."""
1793
+ ready_outputs = self.n_completed_tasks - self._nb_consumed
1794
+ is_completed = self._is_completed()
1795
+ msg = ""
1796
+ if ready_outputs:
1797
+ msg += (
1798
+ f"{ready_outputs} tasks have been successfully executed "
1799
+ " but not used."
1800
+ )
1801
+ if not is_completed:
1802
+ msg += " Additionally, "
1803
+
1804
+ if not is_completed:
1805
+ msg += (
1806
+ f"{self.n_dispatched_tasks - self.n_completed_tasks} tasks "
1807
+ "which were still being processed by the workers have been "
1808
+ "cancelled."
1809
+ )
1810
+
1811
+ if msg:
1812
+ msg += (
1813
+ " You could benefit from adjusting the input task "
1814
+ "iterator to limit unnecessary computation time."
1815
+ )
1816
+
1817
+ warnings.warn(msg)
1818
+
1819
+ def _get_sequential_output(self, iterable):
1820
+ """Separate loop for sequential output.
1821
+
1822
+ This simplifies the traceback in case of errors and reduces the
1823
+ overhead of calling sequential tasks with `joblib`.
1824
+ """
1825
+ try:
1826
+ self._iterating = True
1827
+ self._original_iterator = iterable
1828
+ batch_size = self._get_batch_size()
1829
+
1830
+ if batch_size != 1:
1831
+ it = iter(iterable)
1832
+ iterable_batched = iter(
1833
+ lambda: tuple(itertools.islice(it, batch_size)), ()
1834
+ )
1835
+ iterable = (
1836
+ task for batch in iterable_batched for task in batch
1837
+ )
1838
+
1839
+ # first yield returns None, for internal use only. This ensures
1840
+ # that we enter the try/except block and set up the generator.
1841
+ yield None
1842
+
1843
+ # Sequentially call the tasks and yield the results.
1844
+ for func, args, kwargs in iterable:
1845
+ self.n_dispatched_batches += 1
1846
+ self.n_dispatched_tasks += 1
1847
+ res = func(*args, **kwargs)
1848
+ self.n_completed_tasks += 1
1849
+ self.print_progress()
1850
+ yield res
1851
+ self._nb_consumed += 1
1852
+ except BaseException:
1853
+ self._exception = True
1854
+ self._aborting = True
1855
+ self._aborted = True
1856
+ raise
1857
+ finally:
1858
+ self.print_progress()
1859
+ self._running = False
1860
+ self._iterating = False
1861
+ self._original_iterator = None
1862
+
1863
+ def _reset_run_tracking(self):
1864
+ """Reset the counters and flags used to track the execution."""
1865
+
1866
+ # Make sure the parallel instance was not previously running, in a
1867
+ # thread-safe way.
1868
+ with getattr(self, '_lock', nullcontext()):
1869
+ if self._running:
1870
+ msg = 'This Parallel instance is already running!'
1871
+ if self.return_generator is True:
1872
+ msg += (
1873
+ " Before submitting new tasks, you must wait for the "
1874
+ "completion of all the previous tasks, or clean all "
1875
+ "references to the output generator."
1876
+ )
1877
+ raise RuntimeError(msg)
1878
+ self._running = True
1879
+
1880
+ # Counters to keep track of the tasks dispatched and completed.
1881
+ self.n_dispatched_batches = 0
1882
+ self.n_dispatched_tasks = 0
1883
+ self.n_completed_tasks = 0
1884
+
1885
+ # The following count is incremented by one each time the user iterates
1886
+ # over the output generator. It is used to prepare an informative
1887
+ # warning message in case the generator is deleted before all the
1888
+ # dispatched tasks have been consumed.
1889
+ self._nb_consumed = 0
1890
+
1891
+ # The following flags are used to synchronize the threads in case one of
1892
+ # the tasks errors out, to ensure that all workers abort fast and that
1893
+ # the backend terminates properly.
1894
+
1895
+ # Set to True as soon as a worker signals that a task errors out
1896
+ self._exception = False
1897
+ # Set to True in case of early termination following an incident
1898
+ self._aborting = False
1899
+ # Set to True after abortion is complete
1900
+ self._aborted = False
1901
+
1902
+ def __call__(self, iterable):
1903
+ """Main function to dispatch parallel tasks."""
1904
+
1905
+ self._reset_run_tracking()
1906
+ self._start_time = time.time()
1907
+
1908
+ if not self._managed_backend:
1909
+ n_jobs = self._initialize_backend()
1910
+ else:
1911
+ n_jobs = self._effective_n_jobs()
1912
+
1913
+ if n_jobs == 1:
1914
+ # If n_jobs==1, run the computation sequentially and return
1915
+ # immediately to avoid overheads.
1916
+ output = self._get_sequential_output(iterable)
1917
+ next(output)
1918
+ return output if self.return_generator else list(output)
1919
+
1920
+ # Let's create an ID that uniquely identifies the current call. If the
1921
+ # call is interrupted early and the same instance is immediately
1922
+ # re-used, this id will be used to prevent workers that were
1923
+ # concurrently finalizing a task from the previous call to run the
1924
+ # callback.
1925
+ with self._lock:
1926
+ self._call_id = uuid4().hex
1927
+
1928
+ # self._effective_n_jobs should be called in the Parallel.__call__
1929
+ # thread only -- store its value in an attribute for further queries.
1930
+ self._cached_effective_n_jobs = n_jobs
1931
+
1932
+ if isinstance(self._backend, LokyBackend):
1933
+ # For the loky backend, we add a callback executed when reducing
1934
+ # BatchCalls, that makes the loky executor use a temporary folder
1935
+ # specific to this Parallel object when pickling temporary memmaps.
1936
+ # This callback is necessary to ensure that several Parallel
1937
+ # objects using the same reusable executor don't use the same
1938
+ # temporary resources.
1939
+
1940
+ def _batched_calls_reducer_callback():
1941
+ # Relevant implementation detail: the following lines, called
1942
+ # when reducing BatchedCalls, are called in a thread-safe
1943
+ # situation, meaning that the context of the temporary folder
1944
+ # manager will not be changed in between the callback execution
1945
+ # and the end of the BatchedCalls pickling. The reason is that
1946
+ # pickling (the only place where set_current_context is used)
1947
+ # is done from a single thread (the queue_feeder_thread).
1948
+ self._backend._workers._temp_folder_manager.set_current_context( # noqa
1949
+ self._id
1950
+ )
1951
+ self._reducer_callback = _batched_calls_reducer_callback
1952
+
1957
+ backend_name = self._backend.__class__.__name__
1958
+ if n_jobs == 0:
1959
+ raise RuntimeError("%s has no active worker." % backend_name)
1960
+
1961
+ self._print(
1962
+ f"Using backend {backend_name} with {n_jobs} concurrent workers."
1963
+ )
1964
+ if hasattr(self._backend, 'start_call'):
1965
+ self._backend.start_call()
1966
+
1967
+ # Following flag prevents double calls to `backend.stop_call`.
1968
+ self._calling = True
1969
+
1970
+ iterator = iter(iterable)
1971
+ pre_dispatch = self.pre_dispatch
1972
+
1973
+ if pre_dispatch == 'all':
1974
+ # prevent further dispatch via multiprocessing callback thread
1975
+ self._original_iterator = None
1976
+ self._pre_dispatch_amount = 0
1977
+ else:
1978
+ self._original_iterator = iterator
1979
+ if hasattr(pre_dispatch, 'endswith'):
1980
+ pre_dispatch = eval_expr(
1981
+ pre_dispatch.replace("n_jobs", str(n_jobs))
1982
+ )
1983
+ self._pre_dispatch_amount = pre_dispatch = int(pre_dispatch)
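+ # Worked example (illustrative): with n_jobs=4 and the default
+ # pre_dispatch='2 * n_jobs', the expression becomes '2 * 4' and
+ # eval_expr returns 8, so 8 tasks are dispatched up front.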
1984
+
1985
+ # The main thread will consume the first pre_dispatch items and
1986
+ # the remaining items will later be lazily dispatched by async
1987
+ # callbacks upon task completions.
1988
+
1989
+ # TODO: this iterator should be batch_size * n_jobs
1990
+ iterator = itertools.islice(iterator, self._pre_dispatch_amount)
1991
+
1992
+ # Use a caching dict for callables that are pickled with cloudpickle to
1993
+ # improve performance. This cache is used only in the case of
1994
+ # functions that are defined in the __main__ module, functions that
1995
+ # are defined locally (inside another function) and lambda expressions.
1996
+ self._pickle_cache = dict()
1997
+
1998
+ output = self._get_outputs(iterator, pre_dispatch)
1999
+ self._call_ref = weakref.ref(output)
2000
+
2001
+ # The first item from the output is blank, but it makes the interpreter
2002
+ # progress until it enters the try/except block of the generator and
2003
+ # reaches the first `yield` statement. This starts the asynchronous
2004
+ # dispatch of the tasks to the workers.
2005
+ next(output)
2006
+
2007
+ return output if self.return_generator else list(output)
2008
+
2009
+ def __repr__(self):
2010
+ return '%s(n_jobs=%s)' % (self.__class__.__name__, self.n_jobs)
llmeval-env/lib/python3.10/site-packages/joblib/pool.py ADDED
@@ -0,0 +1,354 @@
1
+ """Custom implementation of multiprocessing.Pool with custom pickler.
2
+
3
+ This module provides efficient ways of working with data stored in
4
+ shared memory with numpy.memmap arrays without inducing any memory
5
+ copy between the parent and child processes.
6
+
7
+ This module should not be imported if multiprocessing is not
8
+ available as it implements subclasses of multiprocessing Pool
9
+ that use a custom alternative to SimpleQueue.
10
+
11
+ """
12
+ # Author: Olivier Grisel <[email protected]>
13
+ # Copyright: 2012, Olivier Grisel
14
+ # License: BSD 3 clause
15
+
16
+ import copyreg
17
+ import sys
18
+ import warnings
19
+ from time import sleep
20
+
21
+ try:
22
+ WindowsError
23
+ except NameError:
24
+ WindowsError = type(None)
25
+
26
+ from pickle import Pickler
27
+
28
+ from pickle import HIGHEST_PROTOCOL
29
+ from io import BytesIO
30
+
31
+ from ._memmapping_reducer import get_memmapping_reducers
32
+ from ._memmapping_reducer import TemporaryResourcesManager
33
+ from ._multiprocessing_helpers import mp, assert_spawning
34
+
35
+ # We need the class definition to derive from it, not the multiprocessing.Pool
36
+ # factory function
37
+ from multiprocessing.pool import Pool
38
+
39
+ try:
40
+ import numpy as np
41
+ except ImportError:
42
+ np = None
43
+
44
+
45
+ ###############################################################################
46
+ # Enable custom pickling in Pool queues
47
+
48
+ class CustomizablePickler(Pickler):
49
+ """Pickler that accepts custom reducers.
50
+
51
+ TODO python2_drop: can this be simplified?
52
+
53
+ HIGHEST_PROTOCOL is selected by default as this pickler is used
54
+ to pickle ephemeral datastructures for interprocess communication
55
+ hence no backward compatibility is required.
56
+
57
+ `reducers` is expected to be a dictionary with key/values
58
+ being `(type, callable)` pairs where `callable` is a function that
59
+ given an instance of `type`, will return a tuple `(constructor,
60
+ tuple_of_objects)` to rebuild an instance out of the pickled
61
+ `tuple_of_objects`, as a `__reduce__` method would. See the
62
+ standard library documentation on pickling for more details.
63
+
64
+ """
65
+
66
+ # We override the pure Python pickler as it's the only way to be able to
67
+ # customize the dispatch table without side effects in Python 2.7
68
+ # to 3.2. For Python 3.3+ leverage the new dispatch_table
69
+ # feature from https://bugs.python.org/issue14166 that makes it possible
70
+ # to use the C implementation of the Pickler which is faster.
71
+
72
+ def __init__(self, writer, reducers=None, protocol=HIGHEST_PROTOCOL):
73
+ Pickler.__init__(self, writer, protocol=protocol)
74
+ if reducers is None:
75
+ reducers = {}
76
+ if hasattr(Pickler, 'dispatch'):
77
+ # Make the dispatch registry an instance level attribute instead of
78
+ # a reference to the class dictionary under Python 2
79
+ self.dispatch = Pickler.dispatch.copy()
80
+ else:
81
+ # Under Python 3 initialize the dispatch table with a copy of the
82
+ # default registry
83
+ self.dispatch_table = copyreg.dispatch_table.copy()
84
+ for type, reduce_func in reducers.items():
85
+ self.register(type, reduce_func)
86
+
87
+ def register(self, type, reduce_func):
88
+ """Attach a reducer function to a given type in the dispatch table."""
89
+ if hasattr(Pickler, 'dispatch'):
90
+ # Python 2 pickler dispatching is not explicitly customizable.
91
+ # Let us use a closure to workaround this limitation.
92
+ def dispatcher(self, obj):
93
+ reduced = reduce_func(obj)
94
+ self.save_reduce(obj=obj, *reduced)
95
+ self.dispatch[type] = dispatcher
96
+ else:
97
+ self.dispatch_table[type] = reduce_func
98
+
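+ # Illustrative sketch with a hypothetical type ``Point``: registering
+ # a reducer so that instances are rebuilt from their coordinates when
+ # pickled through this pickler:
+ #
+ # class Point:
+ # def __init__(self, x, y):
+ # self.x, self.y = x, y
+ #
+ # def reduce_point(p):
+ # return (Point, (p.x, p.y))
+ #
+ # buf = BytesIO()
+ # CustomizablePickler(buf, reducers={Point: reduce_point}).dump(
+ # Point(1, 2))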
99
+
100
+ class CustomizablePicklingQueue(object):
101
+ """Locked Pipe implementation that uses a customizable pickler.
102
+
103
+ This class is an alternative to the multiprocessing implementation
104
+ of SimpleQueue in order to make it possible to pass custom
105
+ pickling reducers, for instance to avoid memory copy when passing
106
+ memory mapped datastructures.
107
+
108
+ `reducers` is expected to be a dict with key / values being
109
+ `(type, callable)` pairs where `callable` is a function that, given an
110
+ instance of `type`, will return a tuple `(constructor, tuple_of_objects)`
111
+ to rebuild an instance out of the pickled `tuple_of_objects`, as a
112
+ `__reduce__` method would.
113
+
114
+ See the standard library documentation on pickling for more details.
115
+ """
116
+
117
+ def __init__(self, context, reducers=None):
118
+ self._reducers = reducers
119
+ self._reader, self._writer = context.Pipe(duplex=False)
120
+ self._rlock = context.Lock()
121
+ if sys.platform == 'win32':
122
+ self._wlock = None
123
+ else:
124
+ self._wlock = context.Lock()
125
+ self._make_methods()
126
+
127
+ def __getstate__(self):
128
+ assert_spawning(self)
129
+ return (self._reader, self._writer, self._rlock, self._wlock,
130
+ self._reducers)
131
+
132
+ def __setstate__(self, state):
133
+ (self._reader, self._writer, self._rlock, self._wlock,
134
+ self._reducers) = state
135
+ self._make_methods()
136
+
137
+ def empty(self):
138
+ return not self._reader.poll()
139
+
140
+ def _make_methods(self):
141
+ self._recv = recv = self._reader.recv
142
+ racquire, rrelease = self._rlock.acquire, self._rlock.release
143
+
144
+ def get():
145
+ racquire()
146
+ try:
147
+ return recv()
148
+ finally:
149
+ rrelease()
150
+
151
+ self.get = get
152
+
153
+ if self._reducers:
154
+ def send(obj):
155
+ buffer = BytesIO()
156
+ CustomizablePickler(buffer, self._reducers).dump(obj)
157
+ self._writer.send_bytes(buffer.getvalue())
158
+ self._send = send
159
+ else:
160
+ self._send = send = self._writer.send
161
+ if self._wlock is None:
162
+ # writes to a message oriented win32 pipe are atomic
163
+ self.put = send
164
+ else:
165
+ wlock_acquire, wlock_release = (
166
+ self._wlock.acquire, self._wlock.release)
167
+
168
+ def put(obj):
169
+ wlock_acquire()
170
+ try:
171
+ return send(obj)
172
+ finally:
173
+ wlock_release()
174
+
175
+ self.put = put
176
+
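+ # Illustrative sketch: exchanging an object through the queue within
+ # a single process (no custom reducers, so the plain pipe is used):
+ #
+ # import multiprocessing as mp
+ # q = CustomizablePicklingQueue(mp.get_context(), reducers=None)
+ # q.put({'answer': 42})
+ # assert q.get() == {'answer': 42}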
177
+
178
+ class PicklingPool(Pool):
179
+ """Pool implementation with customizable pickling reducers.
180
+
181
+ This is useful to control how data is shipped between processes
182
+ and makes it possible to use shared memory without useless
183
+ copies induced by the default pickling methods of the original
184
+ objects passed as arguments to dispatch.
185
+
186
+ `forward_reducers` and `backward_reducers` are expected to be
187
+ dictionaries with key/values being `(type, callable)` pairs where
188
+ `callable` is a function that, given an instance of `type`, will return a
189
+ tuple `(constructor, tuple_of_objects)` to rebuild an instance out of the
190
+ pickled `tuple_of_objects` as would return a `__reduce__` method.
191
+ See the standard library documentation about pickling for more details.
192
+
193
+ """
194
+
195
+ def __init__(self, processes=None, forward_reducers=None,
196
+ backward_reducers=None, **kwargs):
197
+ if forward_reducers is None:
198
+ forward_reducers = dict()
199
+ if backward_reducers is None:
200
+ backward_reducers = dict()
201
+ self._forward_reducers = forward_reducers
202
+ self._backward_reducers = backward_reducers
203
+ poolargs = dict(processes=processes)
204
+ poolargs.update(kwargs)
205
+ super(PicklingPool, self).__init__(**poolargs)
206
+
207
+ def _setup_queues(self):
208
+ context = getattr(self, '_ctx', mp)
209
+ self._inqueue = CustomizablePicklingQueue(context,
210
+ self._forward_reducers)
211
+ self._outqueue = CustomizablePicklingQueue(context,
212
+ self._backward_reducers)
213
+ self._quick_put = self._inqueue._send
214
+ self._quick_get = self._outqueue._recv
215
+
216
+
217
+ class MemmappingPool(PicklingPool):
218
+ """Process pool that shares large arrays to avoid memory copy.
219
+
220
+ This drop-in replacement for `multiprocessing.pool.Pool` makes
221
+ it possible to work efficiently with shared memory in a numpy
222
+ context.
223
+
224
+ Existing instances of numpy.memmap are preserved: the child
225
+ subprocesses will have access to the same shared memory in the
226
+ original mode, except for the 'w+' mode, which is automatically
227
+ transformed to 'r+' to avoid zeroing the original data upon
228
+ instantiation.
229
+
230
+ Furthermore, large arrays from the parent process are automatically
231
+ dumped to a temporary folder on the filesystem so that child
232
+ processes can access their content via memmapping (file system
233
+ backed shared memory).
234
+
235
+ Note: it is important to call the terminate method to collect
236
+ the temporary folder used by the pool.
237
+
238
+ Parameters
239
+ ----------
240
+ processes: int, optional
241
+ Number of worker processes running concurrently in the pool.
242
+ initializer: callable, optional
243
+ Callable executed on worker process creation.
244
+ initargs: tuple, optional
245
+ Arguments passed to the initializer callable.
246
+ temp_folder: (str, callable) optional
247
+ If str:
248
+ Folder to be used by the pool for memmapping large arrays
249
+ for sharing memory with worker processes. If None, this will try in
250
+ order:
251
+ - a folder pointed to by the JOBLIB_TEMP_FOLDER environment variable,
252
+ - /dev/shm if the folder exists and is writable: this is a RAMdisk
253
+ filesystem available by default on modern Linux distributions,
254
+ - the default system temporary folder that can be overridden
255
+ with TMP, TMPDIR or TEMP environment variables, typically /tmp
256
+ under Unix operating systems.
257
+ if callable:
258
+ A callable in charge of dynamically resolving a temporary folder
259
+ for memmapping large arrays.
260
+ max_nbytes: int or None, optional, 1e6 by default
261
+ Threshold on the size of arrays passed to the workers that
262
+ triggers automated memory mapping in temp_folder.
263
+ Use None to disable memmapping of large arrays.
264
+ mmap_mode: {'r+', 'r', 'w+', 'c'}
265
+ Memmapping mode for numpy arrays passed to workers.
266
+ See 'max_nbytes' parameter documentation for more details.
267
+ forward_reducers: dictionary, optional
268
+ Reducers used to pickle objects passed from main process to worker
269
+ processes: see below.
270
+ backward_reducers: dictionary, optional
271
+ Reducers used to pickle return values from workers back to the
272
+ main process.
273
+ verbose: int, optional
274
+ Make it possible to monitor how the communication of numpy arrays
275
+ with the subprocesses is handled (pickling or memmapping).
276
+ prewarm: bool or str, optional, "auto" by default.
277
+ If True, force a read on newly memmapped arrays to make sure the OS
278
+ pre-caches them in memory. This can be useful to avoid concurrent disk
279
+ access when the same data array is passed to different worker
280
+ processes. If "auto" (by default), prewarm is set to True, unless the
281
+ Linux shared memory partition /dev/shm is available and used as temp
282
+ folder.
283
+
284
+ `forward_reducers` and `backward_reducers` are expected to be
285
+ dictionaries with key/values being `(type, callable)` pairs where
286
+ `callable` is a function that, given an instance of `type`, will return
287
+ a tuple `(constructor, tuple_of_objects)` to rebuild an instance out
288
+ of the pickled `tuple_of_objects`, as a `__reduce__` method
289
+ would. See the standard library documentation on pickling for more
290
+ details.
291
+
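+ A minimal usage sketch (assuming numpy is installed; exiting the
+ ``with`` block calls terminate, which also cleans up the temporary
+ memmapping folder):
+
+ >>> import numpy as np #doctest: +SKIP
+ >>> data = np.zeros(int(1e7)) #doctest: +SKIP
+ >>> with MemmappingPool(processes=2) as pool: #doctest: +SKIP
+ ... results = pool.map(np.sum, [data] * 4)
+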
292
+ """
293
+
294
+ def __init__(self, processes=None, temp_folder=None, max_nbytes=1e6,
295
+ mmap_mode='r', forward_reducers=None, backward_reducers=None,
296
+ verbose=0, context_id=None, prewarm=False, **kwargs):
297
+
298
+ if context_id is not None:
299
+ warnings.warn('context_id is deprecated and ignored in joblib'
300
+ ' 0.9.4 and will be removed in 0.11',
301
+ DeprecationWarning)
302
+
303
+ manager = TemporaryResourcesManager(temp_folder)
304
+ self._temp_folder_manager = manager
305
+
306
+ # The usage of a temp_folder_resolver over a simple temp_folder is
307
+ # superfluous for multiprocessing pools, as they don't get reused, see
308
+ # get_memmapping_executor for more details. We still use it for code
309
+ # simplicity.
310
+ forward_reducers, backward_reducers = \
311
+ get_memmapping_reducers(
312
+ temp_folder_resolver=manager.resolve_temp_folder_name,
313
+ max_nbytes=max_nbytes, mmap_mode=mmap_mode,
314
+ forward_reducers=forward_reducers,
315
+ backward_reducers=backward_reducers, verbose=verbose,
316
+ unlink_on_gc_collect=False, prewarm=prewarm)
317
+
318
+ poolargs = dict(
319
+ processes=processes,
320
+ forward_reducers=forward_reducers,
321
+ backward_reducers=backward_reducers)
322
+ poolargs.update(kwargs)
323
+ super(MemmappingPool, self).__init__(**poolargs)
324
+
325
+ def terminate(self):
326
+ n_retries = 10
327
+ for i in range(n_retries):
328
+ try:
329
+ super(MemmappingPool, self).terminate()
330
+ break
331
+ except OSError as e:
332
+ if isinstance(e, WindowsError):
333
+ # Workaround occasional "[Error 5] Access is denied" issue
334
+ # when trying to terminate a process under windows.
335
+ sleep(0.1)
336
+ if i + 1 == n_retries:
337
+ warnings.warn("Failed to terminate worker processes in"
338
+ " multiprocessing pool: %r" % e)
339
+
340
+ # Clean up the temporary resources as the workers should now be off.
341
+ self._temp_folder_manager._clean_temporary_resources()
342
+
343
+ @property
344
+ def _temp_folder(self):
345
+ # Legacy property used in tests. Could be removed if we refactored the
346
+ # memmapping tests. SHOULD ONLY BE USED IN TESTS!
347
+ # We cache this property because it is called late in the tests - at
348
+ # this point, all contexts have been unregistered, and
349
+ # resolve_temp_folder_name raises an error.
350
+ if getattr(self, '_cached_temp_folder', None) is not None:
351
+ return self._cached_temp_folder
352
+ else:
353
+ self._cached_temp_folder = self._temp_folder_manager.resolve_temp_folder_name() # noqa
354
+ return self._cached_temp_folder