applied-ai-018 committed on
Commit 24e0940 · verified · 1 Parent(s): b349409

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. llmeval-env/lib/python3.10/site-packages/joblib/__init__.py +148 -0
  2. llmeval-env/lib/python3.10/site-packages/joblib/_dask.py +379 -0
  3. llmeval-env/lib/python3.10/site-packages/joblib/_store_backends.py +474 -0
  4. llmeval-env/lib/python3.10/site-packages/joblib/compressor.py +570 -0
  5. llmeval-env/lib/python3.10/site-packages/joblib/disk.py +136 -0
  6. llmeval-env/lib/python3.10/site-packages/joblib/executor.py +117 -0
  7. llmeval-env/lib/python3.10/site-packages/joblib/hashing.py +265 -0
  8. llmeval-env/lib/python3.10/site-packages/joblib/numpy_pickle_compat.py +244 -0
  9. llmeval-env/lib/python3.10/site-packages/joblib/test/__init__.py +0 -0
  10. llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_cloudpickle_wrapper.cpython-310.pyc +0 -0
  11. llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_config.cpython-310.pyc +0 -0
  12. llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_func_inspect.cpython-310.pyc +0 -0
  13. llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_func_inspect_special_encoding.cpython-310.pyc +0 -0
  14. llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_hashing.cpython-310.pyc +0 -0
  15. llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_logger.cpython-310.pyc +0 -0
  16. llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_memmapping.cpython-310.pyc +0 -0
  17. llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_memory_async.cpython-310.pyc +0 -0
  18. llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_missing_multiprocessing.cpython-310.pyc +0 -0
  19. llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_numpy_pickle.cpython-310.pyc +0 -0
  20. llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_numpy_pickle_compat.cpython-310.pyc +0 -0
  21. llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_numpy_pickle_utils.cpython-310.pyc +0 -0
  22. llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_store_backends.cpython-310.pyc +0 -0
  23. llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_testing.cpython-310.pyc +0 -0
  24. llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_utils.cpython-310.pyc +0 -0
  25. llmeval-env/lib/python3.10/site-packages/joblib/test/data/create_numpy_pickle.py +95 -0
  26. llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py27_np17.pkl.gzip +0 -0
  27. llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py27_np17.pkl.lzma +0 -0
  28. llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py33_np18.pkl.gzip +0 -0
  29. llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py33_np18.pkl.lzma +0 -0
  30. llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py34_np19.pkl.gzip +0 -0
  31. llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py34_np19.pkl.lzma +0 -0
  32. llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.11.0_pickle_py36_np111.pkl.lzma +0 -0
  33. llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.4.dev0_compressed_cache_size_pickle_py35_np19.gz_01.npy.z +0 -0
  34. llmeval-env/lib/python3.10/site-packages/joblib/test/test_backports.py +35 -0
  35. llmeval-env/lib/python3.10/site-packages/joblib/test/test_cloudpickle_wrapper.py +27 -0
  36. llmeval-env/lib/python3.10/site-packages/joblib/test/test_config.py +151 -0
  37. llmeval-env/lib/python3.10/site-packages/joblib/test/test_dask.py +499 -0
  38. llmeval-env/lib/python3.10/site-packages/joblib/test/test_disk.py +71 -0
  39. llmeval-env/lib/python3.10/site-packages/joblib/test/test_func_inspect.py +310 -0
  40. llmeval-env/lib/python3.10/site-packages/joblib/test/test_hashing.py +495 -0
  41. llmeval-env/lib/python3.10/site-packages/joblib/test/test_logger.py +31 -0
  42. llmeval-env/lib/python3.10/site-packages/joblib/test/test_memmapping.py +1191 -0
  43. llmeval-env/lib/python3.10/site-packages/joblib/test/test_memory_async.py +170 -0
  44. llmeval-env/lib/python3.10/site-packages/joblib/test/test_missing_multiprocessing.py +32 -0
  45. llmeval-env/lib/python3.10/site-packages/joblib/test/test_module.py +53 -0
  46. llmeval-env/lib/python3.10/site-packages/joblib/test/test_numpy_pickle_utils.py +9 -0
  47. llmeval-env/lib/python3.10/site-packages/joblib/test/test_parallel.py +2056 -0
  48. llmeval-env/lib/python3.10/site-packages/joblib/test/test_store_backends.py +94 -0
  49. llmeval-env/lib/python3.10/site-packages/joblib/test/test_testing.py +75 -0
  50. llmeval-env/lib/python3.10/site-packages/joblib/test/test_utils.py +27 -0
llmeval-env/lib/python3.10/site-packages/joblib/__init__.py ADDED
@@ -0,0 +1,148 @@
+ """Joblib is a set of tools to provide **lightweight pipelining in
+ Python**. In particular:
+
+ 1. transparent disk-caching of functions and lazy re-evaluation
+ (memoize pattern)
+
+ 2. easy simple parallel computing
+
+ Joblib is optimized to be **fast** and **robust** on large
+ data in particular and has specific optimizations for `numpy` arrays. It is
+ **BSD-licensed**.
+
+
+ ==================== ===============================================
+ **Documentation:** https://joblib.readthedocs.io
+
+ **Download:** https://pypi.python.org/pypi/joblib#downloads
+
+ **Source code:** https://github.com/joblib/joblib
+
+ **Report issues:** https://github.com/joblib/joblib/issues
+ ==================== ===============================================
+
+
+ Vision
+ --------
+
+ The vision is to provide tools to easily achieve better performance and
+ reproducibility when working with long running jobs.
+
+ * **Avoid computing the same thing twice**: code is often rerun again and
+ again, for instance when prototyping computational-heavy jobs (as in
+ scientific development), but hand-crafted solutions to alleviate this
+ issue are error-prone and often lead to unreproducible results.
+
+ * **Persist to disk transparently**: efficiently persisting
+ arbitrary objects containing large data is hard. Using
+ joblib's caching mechanism avoids hand-written persistence and
+ implicitly links the file on disk to the execution context of
+ the original Python object. As a result, joblib's persistence is
+ good for resuming an application status or computational job, eg
+ after a crash.
+
+ Joblib addresses these problems while **leaving your code and your flow
+ control as unmodified as possible** (no framework, no new paradigms).
+
+ Main features
+ ------------------
+
+ 1) **Transparent and fast disk-caching of output value:** a memoize or
+ make-like functionality for Python functions that works well for
+ arbitrary Python objects, including very large numpy arrays. Separate
+ persistence and flow-execution logic from domain logic or algorithmic
+ code by writing the operations as a set of steps with well-defined
+ inputs and outputs: Python functions. Joblib can save their
+ computation to disk and rerun it only if necessary::
+
+ >>> from joblib import Memory
+ >>> cachedir = 'your_cache_dir_goes_here'
+ >>> mem = Memory(cachedir)
+ >>> import numpy as np
+ >>> a = np.vander(np.arange(3)).astype(float)
+ >>> square = mem.cache(np.square)
+ >>> b = square(a) # doctest: +ELLIPSIS
+ ______________________________________________________________________...
+ [Memory] Calling square...
+ square(array([[0., 0., 1.],
+ [1., 1., 1.],
+ [4., 2., 1.]]))
+ _________________________________________________...square - ...s, 0.0min
+
+ >>> c = square(a)
+ >>> # The above call did not trigger an evaluation
+
+ 2) **Embarrassingly parallel helper:** to make it easy to write readable
+ parallel code and debug it quickly::
+
+ >>> from joblib import Parallel, delayed
+ >>> from math import sqrt
+ >>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
+ [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
+
+
+ 3) **Fast compressed Persistence**: a replacement for pickle to work
+ efficiently on Python objects containing large data (
+ *joblib.dump* & *joblib.load* ).
+
+ ..
+ >>> import shutil ; shutil.rmtree(cachedir)
+
+ """
+
+ # PEP0440 compatible formatted version, see:
+ # https://www.python.org/dev/peps/pep-0440/
+ #
+ # Generic release markers:
+ # X.Y
+ # X.Y.Z # For bugfix releases
+ #
+ # Admissible pre-release markers:
+ # X.YaN # Alpha release
+ # X.YbN # Beta release
+ # X.YrcN # Release Candidate
+ # X.Y # Final release
+ #
+ # Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
+ # 'X.Y.dev0' is the canonical version of 'X.Y.dev'
+ #
+ __version__ = '1.4.2'
+
+
+ import os
+
+ from .memory import Memory
+ from .memory import MemorizedResult
+ from .memory import register_store_backend
+ from .memory import expires_after
+
+ from .logger import PrintTime
+ from .logger import Logger
+
+ from .hashing import hash
+
+ from .numpy_pickle import dump
+ from .numpy_pickle import load
+
+ from .compressor import register_compressor
+
+ from .parallel import Parallel
+ from .parallel import delayed
+ from .parallel import cpu_count
+ from .parallel import register_parallel_backend
+ from .parallel import parallel_backend
+ from .parallel import parallel_config
+ from .parallel import effective_n_jobs
+ from ._cloudpickle_wrapper import wrap_non_picklable_objects
+
+
+ __all__ = ['Memory', 'MemorizedResult', 'PrintTime', 'Logger', 'hash', 'dump',
+ 'load', 'Parallel', 'delayed', 'cpu_count', 'effective_n_jobs',
+ 'register_parallel_backend', 'parallel_backend', 'expires_after',
+ 'register_store_backend', 'register_compressor',
+ 'wrap_non_picklable_objects', 'parallel_config']
+
+
+ # Workaround issue discovered in intel-openmp 2019.5:
+ # https://github.com/ContinuumIO/anaconda-issues/issues/11294
+ os.environ.setdefault("KMP_INIT_AT_FORK", "FALSE")
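The module docstring above already shows Memory and Parallel in doctest form; as a quick orientation to the API this __init__.py re-exports, here is a minimal usage sketch (the cache and file paths are arbitrary examples, not part of the commit):

    import numpy as np
    from joblib import Memory, Parallel, delayed, dump, load

    # Transparent disk caching: identical inputs are read back from the cache directory.
    memory = Memory("/tmp/joblib_cache_example", verbose=0)  # example path
    cached_square = memory.cache(np.square)
    a = np.vander(np.arange(3)).astype(float)
    b = cached_square(a)   # computed and persisted
    c = cached_square(a)   # loaded from disk, np.square is not called again

    # Embarrassingly parallel helper.
    squares = Parallel(n_jobs=2)(delayed(np.square)(i) for i in range(10))

    # Compressed persistence as a pickle replacement.
    dump(a, "/tmp/array.pkl.gz", compress=3)
    restored = load("/tmp/array.pkl.gz")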
llmeval-env/lib/python3.10/site-packages/joblib/_dask.py ADDED
@@ -0,0 +1,379 @@
1
+ from __future__ import print_function, division, absolute_import
2
+
3
+ import asyncio
4
+ import concurrent.futures
5
+ import contextlib
6
+
7
+ import time
8
+ from uuid import uuid4
9
+ import weakref
10
+
11
+ from .parallel import parallel_config
12
+ from .parallel import AutoBatchingMixin, ParallelBackendBase
13
+
14
+ from ._utils import (
15
+ _TracebackCapturingWrapper,
16
+ _retrieve_traceback_capturing_wrapped_call
17
+ )
18
+
19
+ try:
20
+ import dask
21
+ import distributed
22
+ except ImportError:
23
+ dask = None
24
+ distributed = None
25
+
26
+ if dask is not None and distributed is not None:
27
+ from dask.utils import funcname
28
+ from dask.sizeof import sizeof
29
+ from dask.distributed import (
30
+ Client,
31
+ as_completed,
32
+ get_client,
33
+ secede,
34
+ rejoin,
35
+ )
36
+ from distributed.utils import thread_state
37
+
38
+ try:
39
+ # asyncio.TimeoutError, Python3-only error thrown by recent versions of
40
+ # distributed
41
+ from distributed.utils import TimeoutError as _TimeoutError
42
+ except ImportError:
43
+ from tornado.gen import TimeoutError as _TimeoutError
44
+
45
+
46
+ def is_weakrefable(obj):
47
+ try:
48
+ weakref.ref(obj)
49
+ return True
50
+ except TypeError:
51
+ return False
52
+
53
+
54
+ class _WeakKeyDictionary:
55
+ """A variant of weakref.WeakKeyDictionary for unhashable objects.
56
+
57
+ This datastructure is used to store futures for broadcasted data objects
58
+ such as large numpy arrays or pandas dataframes that are not hashable and
59
+ therefore cannot be used as keys of traditional python dicts.
60
+
61
+ Furthermore using a dict with id(array) as key is not safe because the
62
+ Python is likely to reuse id of recently collected arrays.
63
+ """
64
+
65
+ def __init__(self):
66
+ self._data = {}
67
+
68
+ def __getitem__(self, obj):
69
+ ref, val = self._data[id(obj)]
70
+ if ref() is not obj:
71
+ # In case of a race condition with on_destroy.
72
+ raise KeyError(obj)
73
+ return val
74
+
75
+ def __setitem__(self, obj, value):
76
+ key = id(obj)
77
+ try:
78
+ ref, _ = self._data[key]
79
+ if ref() is not obj:
80
+ # In case of race condition with on_destroy.
81
+ raise KeyError(obj)
82
+ except KeyError:
83
+ # Insert the new entry in the mapping along with a weakref
84
+ # callback to automatically delete the entry from the mapping
85
+ # as soon as the object used as key is garbage collected.
86
+ def on_destroy(_):
87
+ del self._data[key]
88
+ ref = weakref.ref(obj, on_destroy)
89
+ self._data[key] = ref, value
90
+
91
+ def __len__(self):
92
+ return len(self._data)
93
+
94
+ def clear(self):
95
+ self._data.clear()
96
+
97
+
98
+ def _funcname(x):
99
+ try:
100
+ if isinstance(x, list):
101
+ x = x[0][0]
102
+ except Exception:
103
+ pass
104
+ return funcname(x)
105
+
106
+
107
+ def _make_tasks_summary(tasks):
108
+ """Summarize of list of (func, args, kwargs) function calls"""
109
+ unique_funcs = {func for func, args, kwargs in tasks}
110
+
111
+ if len(unique_funcs) == 1:
112
+ mixed = False
113
+ else:
114
+ mixed = True
115
+ return len(tasks), mixed, _funcname(tasks)
116
+
117
+
118
+ class Batch:
119
+ """dask-compatible wrapper that executes a batch of tasks"""
120
+ def __init__(self, tasks):
121
+ # collect some metadata from the tasks to ease Batch calls
122
+ # introspection when debugging
123
+ self._num_tasks, self._mixed, self._funcname = _make_tasks_summary(
124
+ tasks
125
+ )
126
+
127
+ def __call__(self, tasks=None):
128
+ results = []
129
+ with parallel_config(backend='dask'):
130
+ for func, args, kwargs in tasks:
131
+ results.append(func(*args, **kwargs))
132
+ return results
133
+
134
+ def __repr__(self):
135
+ descr = f"batch_of_{self._funcname}_{self._num_tasks}_calls"
136
+ if self._mixed:
137
+ descr = "mixed_" + descr
138
+ return descr
139
+
140
+
141
+ def _joblib_probe_task():
142
+ # Noop used by the joblib connector to probe when workers are ready.
143
+ pass
144
+
145
+
146
+ class DaskDistributedBackend(AutoBatchingMixin, ParallelBackendBase):
147
+ MIN_IDEAL_BATCH_DURATION = 0.2
148
+ MAX_IDEAL_BATCH_DURATION = 1.0
149
+ supports_retrieve_callback = True
150
+ default_n_jobs = -1
151
+
152
+ def __init__(self, scheduler_host=None, scatter=None,
153
+ client=None, loop=None, wait_for_workers_timeout=10,
154
+ **submit_kwargs):
155
+ super().__init__()
156
+
157
+ if distributed is None:
158
+ msg = ("You are trying to use 'dask' as a joblib parallel backend "
159
+ "but dask is not installed. Please install dask "
160
+ "to fix this error.")
161
+ raise ValueError(msg)
162
+
163
+ if client is None:
164
+ if scheduler_host:
165
+ client = Client(scheduler_host, loop=loop,
166
+ set_as_default=False)
167
+ else:
168
+ try:
169
+ client = get_client()
170
+ except ValueError as e:
171
+ msg = ("To use Joblib with Dask first create a Dask Client"
172
+ "\n\n"
173
+ " from dask.distributed import Client\n"
174
+ " client = Client()\n"
175
+ "or\n"
176
+ " client = Client('scheduler-address:8786')")
177
+ raise ValueError(msg) from e
178
+
179
+ self.client = client
180
+
181
+ if scatter is not None and not isinstance(scatter, (list, tuple)):
182
+ raise TypeError("scatter must be a list/tuple, got "
183
+ "`%s`" % type(scatter).__name__)
184
+
185
+ if scatter is not None and len(scatter) > 0:
186
+ # Keep a reference to the scattered data to keep the ids the same
187
+ self._scatter = list(scatter)
188
+ scattered = self.client.scatter(scatter, broadcast=True)
189
+ self.data_futures = {id(x): f for x, f in zip(scatter, scattered)}
190
+ else:
191
+ self._scatter = []
192
+ self.data_futures = {}
193
+ self.wait_for_workers_timeout = wait_for_workers_timeout
194
+ self.submit_kwargs = submit_kwargs
195
+ self.waiting_futures = as_completed(
196
+ [],
197
+ loop=client.loop,
198
+ with_results=True,
199
+ raise_errors=False
200
+ )
201
+ self._results = {}
202
+ self._callbacks = {}
203
+
204
+ async def _collect(self):
205
+ while self._continue:
206
+ async for future, result in self.waiting_futures:
207
+ cf_future = self._results.pop(future)
208
+ callback = self._callbacks.pop(future)
209
+ if future.status == "error":
210
+ typ, exc, tb = result
211
+ cf_future.set_exception(exc)
212
+ else:
213
+ cf_future.set_result(result)
214
+ callback(result)
215
+ await asyncio.sleep(0.01)
216
+
217
+ def __reduce__(self):
218
+ return (DaskDistributedBackend, ())
219
+
220
+ def get_nested_backend(self):
221
+ return DaskDistributedBackend(client=self.client), -1
222
+
223
+ def configure(self, n_jobs=1, parallel=None, **backend_args):
224
+ self.parallel = parallel
225
+ return self.effective_n_jobs(n_jobs)
226
+
227
+ def start_call(self):
228
+ self._continue = True
229
+ self.client.loop.add_callback(self._collect)
230
+ self.call_data_futures = _WeakKeyDictionary()
231
+
232
+ def stop_call(self):
233
+ # The explicit call to clear is required to break a cycling reference
234
+ # to the futures.
235
+ self._continue = False
236
+ # wait for the future collection routine (self._backend._collect) to
237
+ # finish in order to limit asyncio warnings due to aborting _collect
238
+ # during a following backend termination call
239
+ time.sleep(0.01)
240
+ self.call_data_futures.clear()
241
+
242
+ def effective_n_jobs(self, n_jobs):
243
+ effective_n_jobs = sum(self.client.ncores().values())
244
+ if effective_n_jobs != 0 or not self.wait_for_workers_timeout:
245
+ return effective_n_jobs
246
+
247
+ # If there is no worker, schedule a probe task to wait for the workers
248
+ # to come up and be available. If the dask cluster is in adaptive mode
249
+ # task might cause the cluster to provision some workers.
250
+ try:
251
+ self.client.submit(_joblib_probe_task).result(
252
+ timeout=self.wait_for_workers_timeout
253
+ )
254
+ except _TimeoutError as e:
255
+ error_msg = (
256
+ "DaskDistributedBackend has no worker after {} seconds. "
257
+ "Make sure that workers are started and can properly connect "
258
+ "to the scheduler and increase the joblib/dask connection "
259
+ "timeout with:\n\n"
260
+ "parallel_config(backend='dask', wait_for_workers_timeout={})"
261
+ ).format(self.wait_for_workers_timeout,
262
+ max(10, 2 * self.wait_for_workers_timeout))
263
+ raise TimeoutError(error_msg) from e
264
+ return sum(self.client.ncores().values())
265
+
266
+ async def _to_func_args(self, func):
267
+ itemgetters = dict()
268
+
269
+ # Futures that are dynamically generated during a single call to
270
+ # Parallel.__call__.
271
+ call_data_futures = getattr(self, 'call_data_futures', None)
272
+
273
+ async def maybe_to_futures(args):
274
+ out = []
275
+ for arg in args:
276
+ arg_id = id(arg)
277
+ if arg_id in itemgetters:
278
+ out.append(itemgetters[arg_id])
279
+ continue
280
+
281
+ f = self.data_futures.get(arg_id, None)
282
+ if f is None and call_data_futures is not None:
283
+ try:
284
+ f = await call_data_futures[arg]
285
+ except KeyError:
286
+ pass
287
+ if f is None:
288
+ if is_weakrefable(arg) and sizeof(arg) > 1e3:
289
+ # Automatically scatter large objects to some of
290
+ # the workers to avoid duplicated data transfers.
291
+ # Rely on automated inter-worker data stealing if
292
+ # more workers need to reuse this data
293
+ # concurrently.
294
+ # set hash=False - nested scatter calls (i.e
295
+ # calling client.scatter inside a dask worker)
296
+ # using hash=True often raise CancelledError,
297
+ # see dask/distributed#3703
298
+ _coro = self.client.scatter(
299
+ arg,
300
+ asynchronous=True,
301
+ hash=False
302
+ )
303
+ # Centralize the scattering of identical arguments
304
+ # between concurrent apply_async callbacks by
305
+ # exposing the running coroutine in
306
+ # call_data_futures before it completes.
307
+ t = asyncio.Task(_coro)
308
+ call_data_futures[arg] = t
309
+
310
+ f = await t
311
+
312
+ if f is not None:
313
+ out.append(f)
314
+ else:
315
+ out.append(arg)
316
+ return out
317
+
318
+ tasks = []
319
+ for f, args, kwargs in func.items:
320
+ args = list(await maybe_to_futures(args))
321
+ kwargs = dict(zip(kwargs.keys(),
322
+ await maybe_to_futures(kwargs.values())))
323
+ tasks.append((f, args, kwargs))
324
+
325
+ return (Batch(tasks), tasks)
326
+
327
+ def apply_async(self, func, callback=None):
328
+
329
+ cf_future = concurrent.futures.Future()
330
+ cf_future.get = cf_future.result # achieve AsyncResult API
331
+
332
+ async def f(func, callback):
333
+ batch, tasks = await self._to_func_args(func)
334
+ key = f'{repr(batch)}-{uuid4().hex}'
335
+
336
+ dask_future = self.client.submit(
337
+ _TracebackCapturingWrapper(batch),
338
+ tasks=tasks,
339
+ key=key,
340
+ **self.submit_kwargs
341
+ )
342
+ self.waiting_futures.add(dask_future)
343
+ self._callbacks[dask_future] = callback
344
+ self._results[dask_future] = cf_future
345
+
346
+ self.client.loop.add_callback(f, func, callback)
347
+
348
+ return cf_future
349
+
350
+ def retrieve_result_callback(self, out):
351
+ return _retrieve_traceback_capturing_wrapped_call(out)
352
+
353
+ def abort_everything(self, ensure_ready=True):
354
+ """ Tell the client to cancel any task submitted via this instance
355
+
356
+ joblib.Parallel will never access those results
357
+ """
358
+ with self.waiting_futures.lock:
359
+ self.waiting_futures.futures.clear()
360
+ while not self.waiting_futures.queue.empty():
361
+ self.waiting_futures.queue.get()
362
+
363
+ @contextlib.contextmanager
364
+ def retrieval_context(self):
365
+ """Override ParallelBackendBase.retrieval_context to avoid deadlocks.
366
+
367
+ This removes thread from the worker's thread pool (using 'secede').
368
+ Seceding avoids deadlock in nested parallelism settings.
369
+ """
370
+ # See 'joblib.Parallel.__call__' and 'joblib.Parallel.retrieve' for how
371
+ # this is used.
372
+ if hasattr(thread_state, 'execution_state'):
373
+ # we are in a worker. Secede to avoid deadlock.
374
+ secede()
375
+
376
+ yield
377
+
378
+ if hasattr(thread_state, 'execution_state'):
379
+ rejoin()
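The backend above is selected with parallel_config(backend='dask') and needs an active distributed Client, as its own error message explains. A minimal sketch, assuming dask and distributed are installed and a local cluster is acceptable:

    from dask.distributed import Client
    from joblib import Parallel, delayed, parallel_config

    client = Client()  # or Client('scheduler-address:8786')

    def work(i):
        return i ** 2

    # With the 'dask' backend active, Parallel dispatches batches of calls to the
    # distributed workers through DaskDistributedBackend.apply_async.
    with parallel_config(backend="dask"):
        results = Parallel()(delayed(work)(i) for i in range(100))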
llmeval-env/lib/python3.10/site-packages/joblib/_store_backends.py ADDED
@@ -0,0 +1,474 @@
1
+ """Storage providers backends for Memory caching."""
2
+
3
+ from pickle import PicklingError
4
+ import re
5
+ import os
6
+ import os.path
7
+ import datetime
8
+ import json
9
+ import shutil
10
+ import time
11
+ import warnings
12
+ import collections
13
+ import operator
14
+ import threading
15
+ from abc import ABCMeta, abstractmethod
16
+
17
+ from .backports import concurrency_safe_rename
18
+ from .disk import mkdirp, memstr_to_bytes, rm_subdirs
19
+ from .logger import format_time
20
+ from . import numpy_pickle
21
+
22
+ CacheItemInfo = collections.namedtuple('CacheItemInfo',
23
+ 'path size last_access')
24
+
25
+
26
+ class CacheWarning(Warning):
27
+ """Warning to capture dump failures except for PicklingError."""
28
+ pass
29
+
30
+
31
+ def concurrency_safe_write(object_to_write, filename, write_func):
32
+ """Writes an object into a unique file in a concurrency-safe way."""
33
+ thread_id = id(threading.current_thread())
34
+ temporary_filename = '{}.thread-{}-pid-{}'.format(
35
+ filename, thread_id, os.getpid())
36
+ write_func(object_to_write, temporary_filename)
37
+
38
+ return temporary_filename
39
+
40
+
41
+ class StoreBackendBase(metaclass=ABCMeta):
42
+ """Helper Abstract Base Class which defines all methods that
43
+ a StorageBackend must implement."""
44
+
45
+ location = None
46
+
47
+ @abstractmethod
48
+ def _open_item(self, f, mode):
49
+ """Opens an item on the store and return a file-like object.
50
+
51
+ This method is private and only used by the StoreBackendMixin object.
52
+
53
+ Parameters
54
+ ----------
55
+ f: a file-like object
56
+ The file-like object where an item is stored and retrieved
57
+ mode: string, optional
58
+ the mode in which the file-like object is opened allowed valued are
59
+ 'rb', 'wb'
60
+
61
+ Returns
62
+ -------
63
+ a file-like object
64
+ """
65
+
66
+ @abstractmethod
67
+ def _item_exists(self, location):
68
+ """Checks if an item location exists in the store.
69
+
70
+ This method is private and only used by the StoreBackendMixin object.
71
+
72
+ Parameters
73
+ ----------
74
+ location: string
75
+ The location of an item. On a filesystem, this corresponds to the
76
+ absolute path, including the filename, of a file.
77
+
78
+ Returns
79
+ -------
80
+ True if the item exists, False otherwise
81
+ """
82
+
83
+ @abstractmethod
84
+ def _move_item(self, src, dst):
85
+ """Moves an item from src to dst in the store.
86
+
87
+ This method is private and only used by the StoreBackendMixin object.
88
+
89
+ Parameters
90
+ ----------
91
+ src: string
92
+ The source location of an item
93
+ dst: string
94
+ The destination location of an item
95
+ """
96
+
97
+ @abstractmethod
98
+ def create_location(self, location):
99
+ """Creates a location on the store.
100
+
101
+ Parameters
102
+ ----------
103
+ location: string
104
+ The location in the store. On a filesystem, this corresponds to a
105
+ directory.
106
+ """
107
+
108
+ @abstractmethod
109
+ def clear_location(self, location):
110
+ """Clears a location on the store.
111
+
112
+ Parameters
113
+ ----------
114
+ location: string
115
+ The location in the store. On a filesystem, this corresponds to a
116
+ directory or a filename absolute path
117
+ """
118
+
119
+ @abstractmethod
120
+ def get_items(self):
121
+ """Returns the whole list of items available in the store.
122
+
123
+ Returns
124
+ -------
125
+ The list of items identified by their ids (e.g filename in a
126
+ filesystem).
127
+ """
128
+
129
+ @abstractmethod
130
+ def configure(self, location, verbose=0, backend_options=dict()):
131
+ """Configures the store.
132
+
133
+ Parameters
134
+ ----------
135
+ location: string
136
+ The base location used by the store. On a filesystem, this
137
+ corresponds to a directory.
138
+ verbose: int
139
+ The level of verbosity of the store
140
+ backend_options: dict
141
+ Contains a dictionary of named parameters used to configure the
142
+ store backend.
143
+ """
144
+
145
+
146
+ class StoreBackendMixin(object):
147
+ """Class providing all logic for managing the store in a generic way.
148
+
149
+ The StoreBackend subclass has to implement 3 methods: create_location,
150
+ clear_location and configure. The StoreBackend also has to provide
151
+ a private _open_item, _item_exists and _move_item methods. The _open_item
152
+ method has to have the same signature as the builtin open and return a
153
+ file-like object.
154
+ """
155
+
156
+ def load_item(self, call_id, verbose=1, timestamp=None, metadata=None):
157
+ """Load an item from the store given its id as a list of str."""
158
+ full_path = os.path.join(self.location, *call_id)
159
+
160
+ if verbose > 1:
161
+ ts_string = ('{: <16}'.format(format_time(time.time() - timestamp))
162
+ if timestamp is not None else '')
163
+ signature = os.path.basename(call_id[0])
164
+ if metadata is not None and 'input_args' in metadata:
165
+ kwargs = ', '.join('{}={}'.format(*item)
166
+ for item in metadata['input_args'].items())
167
+ signature += '({})'.format(kwargs)
168
+ msg = '[Memory]{}: Loading {}'.format(ts_string, signature)
169
+ if verbose < 10:
170
+ print('{0}...'.format(msg))
171
+ else:
172
+ print('{0} from {1}'.format(msg, full_path))
173
+
174
+ mmap_mode = (None if not hasattr(self, 'mmap_mode')
175
+ else self.mmap_mode)
176
+
177
+ filename = os.path.join(full_path, 'output.pkl')
178
+ if not self._item_exists(filename):
179
+ raise KeyError("Non-existing item (may have been "
180
+ "cleared).\nFile %s does not exist" % filename)
181
+
182
+ # file-like object cannot be used when mmap_mode is set
183
+ if mmap_mode is None:
184
+ with self._open_item(filename, "rb") as f:
185
+ item = numpy_pickle.load(f)
186
+ else:
187
+ item = numpy_pickle.load(filename, mmap_mode=mmap_mode)
188
+ return item
189
+
190
+ def dump_item(self, call_id, item, verbose=1):
191
+ """Dump an item in the store at the id given as a list of str."""
192
+ try:
193
+ item_path = os.path.join(self.location, *call_id)
194
+ if not self._item_exists(item_path):
195
+ self.create_location(item_path)
196
+ filename = os.path.join(item_path, 'output.pkl')
197
+ if verbose > 10:
198
+ print('Persisting in %s' % item_path)
199
+
200
+ def write_func(to_write, dest_filename):
201
+ with self._open_item(dest_filename, "wb") as f:
202
+ try:
203
+ numpy_pickle.dump(to_write, f, compress=self.compress)
204
+ except PicklingError as e:
205
+ # TODO(1.5) turn into error
206
+ warnings.warn(
207
+ "Unable to cache to disk: failed to pickle "
208
+ "output. In version 1.5 this will raise an "
209
+ f"exception. Exception: {e}.",
210
+ FutureWarning
211
+ )
212
+
213
+ self._concurrency_safe_write(item, filename, write_func)
214
+ except Exception as e: # noqa: E722
215
+ warnings.warn(
216
+ "Unable to cache to disk. Possibly a race condition in the "
217
+ f"creation of the directory. Exception: {e}.",
218
+ CacheWarning
219
+ )
220
+
221
+ def clear_item(self, call_id):
222
+ """Clear the item at the id, given as a list of str."""
223
+ item_path = os.path.join(self.location, *call_id)
224
+ if self._item_exists(item_path):
225
+ self.clear_location(item_path)
226
+
227
+ def contains_item(self, call_id):
228
+ """Check if there is an item at the id, given as a list of str."""
229
+ item_path = os.path.join(self.location, *call_id)
230
+ filename = os.path.join(item_path, 'output.pkl')
231
+
232
+ return self._item_exists(filename)
233
+
234
+ def get_item_info(self, call_id):
235
+ """Return information about item."""
236
+ return {'location': os.path.join(self.location, *call_id)}
237
+
238
+ def get_metadata(self, call_id):
239
+ """Return actual metadata of an item."""
240
+ try:
241
+ item_path = os.path.join(self.location, *call_id)
242
+ filename = os.path.join(item_path, 'metadata.json')
243
+ with self._open_item(filename, 'rb') as f:
244
+ return json.loads(f.read().decode('utf-8'))
245
+ except: # noqa: E722
246
+ return {}
247
+
248
+ def store_metadata(self, call_id, metadata):
249
+ """Store metadata of a computation."""
250
+ try:
251
+ item_path = os.path.join(self.location, *call_id)
252
+ self.create_location(item_path)
253
+ filename = os.path.join(item_path, 'metadata.json')
254
+
255
+ def write_func(to_write, dest_filename):
256
+ with self._open_item(dest_filename, "wb") as f:
257
+ f.write(json.dumps(to_write).encode('utf-8'))
258
+
259
+ self._concurrency_safe_write(metadata, filename, write_func)
260
+ except: # noqa: E722
261
+ pass
262
+
263
+ def contains_path(self, call_id):
264
+ """Check cached function is available in store."""
265
+ func_path = os.path.join(self.location, *call_id)
266
+ return self.object_exists(func_path)
267
+
268
+ def clear_path(self, call_id):
269
+ """Clear all items with a common path in the store."""
270
+ func_path = os.path.join(self.location, *call_id)
271
+ if self._item_exists(func_path):
272
+ self.clear_location(func_path)
273
+
274
+ def store_cached_func_code(self, call_id, func_code=None):
275
+ """Store the code of the cached function."""
276
+ func_path = os.path.join(self.location, *call_id)
277
+ if not self._item_exists(func_path):
278
+ self.create_location(func_path)
279
+
280
+ if func_code is not None:
281
+ filename = os.path.join(func_path, "func_code.py")
282
+ with self._open_item(filename, 'wb') as f:
283
+ f.write(func_code.encode('utf-8'))
284
+
285
+ def get_cached_func_code(self, call_id):
286
+ """Store the code of the cached function."""
287
+ filename = os.path.join(self.location, *call_id, 'func_code.py')
288
+ try:
289
+ with self._open_item(filename, 'rb') as f:
290
+ return f.read().decode('utf-8')
291
+ except: # noqa: E722
292
+ raise
293
+
294
+ def get_cached_func_info(self, call_id):
295
+ """Return information related to the cached function if it exists."""
296
+ return {'location': os.path.join(self.location, *call_id)}
297
+
298
+ def clear(self):
299
+ """Clear the whole store content."""
300
+ self.clear_location(self.location)
301
+
302
+ def enforce_store_limits(
303
+ self, bytes_limit, items_limit=None, age_limit=None
304
+ ):
305
+ """
306
+ Remove the store's oldest files to enforce item, byte, and age limits.
307
+ """
308
+ items_to_delete = self._get_items_to_delete(
309
+ bytes_limit, items_limit, age_limit
310
+ )
311
+
312
+ for item in items_to_delete:
313
+ if self.verbose > 10:
314
+ print('Deleting item {0}'.format(item))
315
+ try:
316
+ self.clear_location(item.path)
317
+ except OSError:
318
+ # Even with ignore_errors=True shutil.rmtree can raise OSError
319
+ # with:
320
+ # [Errno 116] Stale file handle if another process has deleted
321
+ # the folder already.
322
+ pass
323
+
324
+ def _get_items_to_delete(
325
+ self, bytes_limit, items_limit=None, age_limit=None
326
+ ):
327
+ """
328
+ Get items to delete to keep the store under size, file, & age limits.
329
+ """
330
+ if isinstance(bytes_limit, str):
331
+ bytes_limit = memstr_to_bytes(bytes_limit)
332
+
333
+ items = self.get_items()
334
+ if not items:
335
+ return []
336
+
337
+ size = sum(item.size for item in items)
338
+
339
+ if bytes_limit is not None:
340
+ to_delete_size = size - bytes_limit
341
+ else:
342
+ to_delete_size = 0
343
+
344
+ if items_limit is not None:
345
+ to_delete_items = len(items) - items_limit
346
+ else:
347
+ to_delete_items = 0
348
+
349
+ if age_limit is not None:
350
+ older_item = min(item.last_access for item in items)
351
+ deadline = datetime.datetime.now() - age_limit
352
+ else:
353
+ deadline = None
354
+
355
+ if (
356
+ to_delete_size <= 0 and to_delete_items <= 0
357
+ and (deadline is None or older_item > deadline)
358
+ ):
359
+ return []
360
+
361
+ # We want to delete first the cache items that were accessed a
362
+ # long time ago
363
+ items.sort(key=operator.attrgetter('last_access'))
364
+
365
+ items_to_delete = []
366
+ size_so_far = 0
367
+ items_so_far = 0
368
+
369
+ for item in items:
370
+ if (
371
+ (size_so_far >= to_delete_size)
372
+ and items_so_far >= to_delete_items
373
+ and (deadline is None or deadline < item.last_access)
374
+ ):
375
+ break
376
+
377
+ items_to_delete.append(item)
378
+ size_so_far += item.size
379
+ items_so_far += 1
380
+
381
+ return items_to_delete
382
+
383
+ def _concurrency_safe_write(self, to_write, filename, write_func):
384
+ """Writes an object into a file in a concurrency-safe way."""
385
+ temporary_filename = concurrency_safe_write(to_write,
386
+ filename, write_func)
387
+ self._move_item(temporary_filename, filename)
388
+
389
+ def __repr__(self):
390
+ """Printable representation of the store location."""
391
+ return '{class_name}(location="{location}")'.format(
392
+ class_name=self.__class__.__name__, location=self.location)
393
+
394
+
395
+ class FileSystemStoreBackend(StoreBackendBase, StoreBackendMixin):
396
+ """A StoreBackend used with local or network file systems."""
397
+
398
+ _open_item = staticmethod(open)
399
+ _item_exists = staticmethod(os.path.exists)
400
+ _move_item = staticmethod(concurrency_safe_rename)
401
+
402
+ def clear_location(self, location):
403
+ """Delete location on store."""
404
+ if (location == self.location):
405
+ rm_subdirs(location)
406
+ else:
407
+ shutil.rmtree(location, ignore_errors=True)
408
+
409
+ def create_location(self, location):
410
+ """Create object location on store"""
411
+ mkdirp(location)
412
+
413
+ def get_items(self):
414
+ """Returns the whole list of items available in the store."""
415
+ items = []
416
+
417
+ for dirpath, _, filenames in os.walk(self.location):
418
+ is_cache_hash_dir = re.match('[a-f0-9]{32}',
419
+ os.path.basename(dirpath))
420
+
421
+ if is_cache_hash_dir:
422
+ output_filename = os.path.join(dirpath, 'output.pkl')
423
+ try:
424
+ last_access = os.path.getatime(output_filename)
425
+ except OSError:
426
+ try:
427
+ last_access = os.path.getatime(dirpath)
428
+ except OSError:
429
+ # The directory has already been deleted
430
+ continue
431
+
432
+ last_access = datetime.datetime.fromtimestamp(last_access)
433
+ try:
434
+ full_filenames = [os.path.join(dirpath, fn)
435
+ for fn in filenames]
436
+ dirsize = sum(os.path.getsize(fn)
437
+ for fn in full_filenames)
438
+ except OSError:
439
+ # Either output_filename or one of the files in
440
+ # dirpath does not exist any more. We assume this
441
+ # directory is being cleaned by another process already
442
+ continue
443
+
444
+ items.append(CacheItemInfo(dirpath, dirsize,
445
+ last_access))
446
+
447
+ return items
448
+
449
+ def configure(self, location, verbose=1, backend_options=None):
450
+ """Configure the store backend.
451
+
452
+ For this backend, valid store options are 'compress' and 'mmap_mode'
453
+ """
454
+ if backend_options is None:
455
+ backend_options = {}
456
+
457
+ # setup location directory
458
+ self.location = location
459
+ if not os.path.exists(self.location):
460
+ mkdirp(self.location)
461
+
462
+ # item can be stored compressed for faster I/O
463
+ self.compress = backend_options.get('compress', False)
464
+
465
+ # FileSystemStoreBackend can be used with mmap_mode options under
466
+ # certain conditions.
467
+ mmap_mode = backend_options.get('mmap_mode')
468
+ if self.compress and mmap_mode is not None:
469
+ warnings.warn('Compressed items cannot be memmapped in a '
470
+ 'filesystem store. Option will be ignored.',
471
+ stacklevel=2)
472
+
473
+ self.mmap_mode = mmap_mode
474
+ self.verbose = verbose
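StoreBackendBase and StoreBackendMixin define the contract a custom cache store must satisfy, and __init__.py re-exports register_store_backend. A minimal sketch of plugging in a custom backend, assuming Memory accepts a registered backend name through its backend parameter (the class name and path below are hypothetical):

    from joblib import Memory, register_store_backend
    from joblib._store_backends import FileSystemStoreBackend

    class VerboseFileSystemBackend(FileSystemStoreBackend):
        # Reuse all the filesystem logic; only log each dump for illustration.
        def dump_item(self, call_id, item, verbose=1):
            print("storing cache item under", "/".join(call_id))
            super().dump_item(call_id, item, verbose=verbose)

    register_store_backend("verbose_local", VerboseFileSystemBackend)
    memory = Memory("/tmp/joblib_store_example", backend="verbose_local", verbose=0)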
llmeval-env/lib/python3.10/site-packages/joblib/compressor.py ADDED
@@ -0,0 +1,570 @@
1
+ """Classes and functions for managing compressors."""
2
+
3
+ import io
4
+ import zlib
5
+ from joblib.backports import LooseVersion
6
+
7
+ try:
8
+ from threading import RLock
9
+ except ImportError:
10
+ from dummy_threading import RLock
11
+
12
+ try:
13
+ import bz2
14
+ except ImportError:
15
+ bz2 = None
16
+
17
+ try:
18
+ import lz4
19
+ from lz4.frame import LZ4FrameFile
20
+ except ImportError:
21
+ lz4 = None
22
+
23
+ try:
24
+ import lzma
25
+ except ImportError:
26
+ lzma = None
27
+
28
+
29
+ LZ4_NOT_INSTALLED_ERROR = ('LZ4 is not installed. Install it with pip: '
30
+ 'https://python-lz4.readthedocs.io/')
31
+
32
+ # Registered compressors
33
+ _COMPRESSORS = {}
34
+
35
+ # Magic numbers of supported compression file formats.
36
+ _ZFILE_PREFIX = b'ZF' # used with pickle files created before 0.9.3.
37
+ _ZLIB_PREFIX = b'\x78'
38
+ _GZIP_PREFIX = b'\x1f\x8b'
39
+ _BZ2_PREFIX = b'BZ'
40
+ _XZ_PREFIX = b'\xfd\x37\x7a\x58\x5a'
41
+ _LZMA_PREFIX = b'\x5d\x00'
42
+ _LZ4_PREFIX = b'\x04\x22\x4D\x18'
43
+
44
+
45
+ def register_compressor(compressor_name, compressor,
46
+ force=False):
47
+ """Register a new compressor.
48
+
49
+ Parameters
50
+ ----------
51
+ compressor_name: str.
52
+ The name of the compressor.
53
+ compressor: CompressorWrapper
54
+ An instance of a 'CompressorWrapper'.
55
+ """
56
+ global _COMPRESSORS
57
+ if not isinstance(compressor_name, str):
58
+ raise ValueError("Compressor name should be a string, "
59
+ "'{}' given.".format(compressor_name))
60
+
61
+ if not isinstance(compressor, CompressorWrapper):
62
+ raise ValueError("Compressor should implement the CompressorWrapper "
63
+ "interface, '{}' given.".format(compressor))
64
+
65
+ if (compressor.fileobj_factory is not None and
66
+ (not hasattr(compressor.fileobj_factory, 'read') or
67
+ not hasattr(compressor.fileobj_factory, 'write') or
68
+ not hasattr(compressor.fileobj_factory, 'seek') or
69
+ not hasattr(compressor.fileobj_factory, 'tell'))):
70
+ raise ValueError("Compressor 'fileobj_factory' attribute should "
71
+ "implement the file object interface, '{}' given."
72
+ .format(compressor.fileobj_factory))
73
+
74
+ if compressor_name in _COMPRESSORS and not force:
75
+ raise ValueError("Compressor '{}' already registered."
76
+ .format(compressor_name))
77
+
78
+ _COMPRESSORS[compressor_name] = compressor
79
+
80
+
81
+ class CompressorWrapper():
82
+ """A wrapper around a compressor file object.
83
+
84
+ Attributes
85
+ ----------
86
+ obj: a file-like object
87
+ The object must implement the buffer interface and will be used
88
+ internally to compress/decompress the data.
89
+ prefix: bytestring
90
+ A bytestring corresponding to the magic number that identifies the
91
+ file format associated to the compressor.
92
+ extension: str
93
+ The file extension used to automatically select this compressor during
94
+ a dump to a file.
95
+ """
96
+
97
+ def __init__(self, obj, prefix=b'', extension=''):
98
+ self.fileobj_factory = obj
99
+ self.prefix = prefix
100
+ self.extension = extension
101
+
102
+ def compressor_file(self, fileobj, compresslevel=None):
103
+ """Returns an instance of a compressor file object."""
104
+ if compresslevel is None:
105
+ return self.fileobj_factory(fileobj, 'wb')
106
+ else:
107
+ return self.fileobj_factory(fileobj, 'wb',
108
+ compresslevel=compresslevel)
109
+
110
+ def decompressor_file(self, fileobj):
111
+ """Returns an instance of a decompressor file object."""
112
+ return self.fileobj_factory(fileobj, 'rb')
113
+
114
+
115
+ class BZ2CompressorWrapper(CompressorWrapper):
116
+
117
+ prefix = _BZ2_PREFIX
118
+ extension = '.bz2'
119
+
120
+ def __init__(self):
121
+ if bz2 is not None:
122
+ self.fileobj_factory = bz2.BZ2File
123
+ else:
124
+ self.fileobj_factory = None
125
+
126
+ def _check_versions(self):
127
+ if bz2 is None:
128
+ raise ValueError('bz2 module is not compiled on your python '
129
+ 'standard library.')
130
+
131
+ def compressor_file(self, fileobj, compresslevel=None):
132
+ """Returns an instance of a compressor file object."""
133
+ self._check_versions()
134
+ if compresslevel is None:
135
+ return self.fileobj_factory(fileobj, 'wb')
136
+ else:
137
+ return self.fileobj_factory(fileobj, 'wb',
138
+ compresslevel=compresslevel)
139
+
140
+ def decompressor_file(self, fileobj):
141
+ """Returns an instance of a decompressor file object."""
142
+ self._check_versions()
143
+ fileobj = self.fileobj_factory(fileobj, 'rb')
144
+ return fileobj
145
+
146
+
147
+ class LZMACompressorWrapper(CompressorWrapper):
148
+
149
+ prefix = _LZMA_PREFIX
150
+ extension = '.lzma'
151
+ _lzma_format_name = 'FORMAT_ALONE'
152
+
153
+ def __init__(self):
154
+ if lzma is not None:
155
+ self.fileobj_factory = lzma.LZMAFile
156
+ self._lzma_format = getattr(lzma, self._lzma_format_name)
157
+ else:
158
+ self.fileobj_factory = None
159
+
160
+ def _check_versions(self):
161
+ if lzma is None:
162
+ raise ValueError('lzma module is not compiled on your python '
163
+ 'standard library.')
164
+
165
+ def compressor_file(self, fileobj, compresslevel=None):
166
+ """Returns an instance of a compressor file object."""
167
+ if compresslevel is None:
168
+ return self.fileobj_factory(fileobj, 'wb',
169
+ format=self._lzma_format)
170
+ else:
171
+ return self.fileobj_factory(fileobj, 'wb',
172
+ format=self._lzma_format,
173
+ preset=compresslevel)
174
+
175
+ def decompressor_file(self, fileobj):
176
+ """Returns an instance of a decompressor file object."""
177
+ return lzma.LZMAFile(fileobj, 'rb')
178
+
179
+
180
+ class XZCompressorWrapper(LZMACompressorWrapper):
181
+
182
+ prefix = _XZ_PREFIX
183
+ extension = '.xz'
184
+ _lzma_format_name = 'FORMAT_XZ'
185
+
186
+
187
+ class LZ4CompressorWrapper(CompressorWrapper):
188
+
189
+ prefix = _LZ4_PREFIX
190
+ extension = '.lz4'
191
+
192
+ def __init__(self):
193
+ if lz4 is not None:
194
+ self.fileobj_factory = LZ4FrameFile
195
+ else:
196
+ self.fileobj_factory = None
197
+
198
+ def _check_versions(self):
199
+ if lz4 is None:
200
+ raise ValueError(LZ4_NOT_INSTALLED_ERROR)
201
+ lz4_version = lz4.__version__
202
+ if lz4_version.startswith("v"):
203
+ lz4_version = lz4_version[1:]
204
+ if LooseVersion(lz4_version) < LooseVersion('0.19'):
205
+ raise ValueError(LZ4_NOT_INSTALLED_ERROR)
206
+
207
+ def compressor_file(self, fileobj, compresslevel=None):
208
+ """Returns an instance of a compressor file object."""
209
+ self._check_versions()
210
+ if compresslevel is None:
211
+ return self.fileobj_factory(fileobj, 'wb')
212
+ else:
213
+ return self.fileobj_factory(fileobj, 'wb',
214
+ compression_level=compresslevel)
215
+
216
+ def decompressor_file(self, fileobj):
217
+ """Returns an instance of a decompressor file object."""
218
+ self._check_versions()
219
+ return self.fileobj_factory(fileobj, 'rb')
220
+
221
+
222
+ ###############################################################################
223
+ # base file compression/decompression object definition
224
+ _MODE_CLOSED = 0
225
+ _MODE_READ = 1
226
+ _MODE_READ_EOF = 2
227
+ _MODE_WRITE = 3
228
+ _BUFFER_SIZE = 8192
229
+
230
+
231
+ class BinaryZlibFile(io.BufferedIOBase):
232
+ """A file object providing transparent zlib (de)compression.
233
+
234
+ TODO python2_drop: is it still needed since we dropped Python 2 support A
235
+ BinaryZlibFile can act as a wrapper for an existing file object, or refer
236
+ directly to a named file on disk.
237
+
238
+ Note that BinaryZlibFile provides only a *binary* file interface: data read
239
+ is returned as bytes, and data to be written should be given as bytes.
240
+
241
+ This object is an adaptation of the BZ2File object and is compatible with
242
+ versions of python >= 2.7.
243
+
244
+ If filename is a str or bytes object, it gives the name
245
+ of the file to be opened. Otherwise, it should be a file object,
246
+ which will be used to read or write the compressed data.
247
+
248
+ mode can be 'rb' for reading (default) or 'wb' for (over)writing
249
+
250
+ If mode is 'wb', compresslevel can be a number between 1
251
+ and 9 specifying the level of compression: 1 produces the least
252
+ compression, and 9 produces the most compression. 3 is the default.
253
+ """
254
+
255
+ wbits = zlib.MAX_WBITS
256
+
257
+ def __init__(self, filename, mode="rb", compresslevel=3):
258
+ # This lock must be recursive, so that BufferedIOBase's
259
+ # readline(), readlines() and writelines() don't deadlock.
260
+ self._lock = RLock()
261
+ self._fp = None
262
+ self._closefp = False
263
+ self._mode = _MODE_CLOSED
264
+ self._pos = 0
265
+ self._size = -1
266
+ self.compresslevel = compresslevel
267
+
268
+ if not isinstance(compresslevel, int) or not (1 <= compresslevel <= 9):
269
+ raise ValueError("'compresslevel' must be an integer "
270
+ "between 1 and 9. You provided 'compresslevel={}'"
271
+ .format(compresslevel))
272
+
273
+ if mode == "rb":
274
+ self._mode = _MODE_READ
275
+ self._decompressor = zlib.decompressobj(self.wbits)
276
+ self._buffer = b""
277
+ self._buffer_offset = 0
278
+ elif mode == "wb":
279
+ self._mode = _MODE_WRITE
280
+ self._compressor = zlib.compressobj(self.compresslevel,
281
+ zlib.DEFLATED, self.wbits,
282
+ zlib.DEF_MEM_LEVEL, 0)
283
+ else:
284
+ raise ValueError("Invalid mode: %r" % (mode,))
285
+
286
+ if isinstance(filename, str):
287
+ self._fp = io.open(filename, mode)
288
+ self._closefp = True
289
+ elif hasattr(filename, "read") or hasattr(filename, "write"):
290
+ self._fp = filename
291
+ else:
292
+ raise TypeError("filename must be a str or bytes object, "
293
+ "or a file")
294
+
295
+ def close(self):
296
+ """Flush and close the file.
297
+
298
+ May be called more than once without error. Once the file is
299
+ closed, any other operation on it will raise a ValueError.
300
+ """
301
+ with self._lock:
302
+ if self._mode == _MODE_CLOSED:
303
+ return
304
+ try:
305
+ if self._mode in (_MODE_READ, _MODE_READ_EOF):
306
+ self._decompressor = None
307
+ elif self._mode == _MODE_WRITE:
308
+ self._fp.write(self._compressor.flush())
309
+ self._compressor = None
310
+ finally:
311
+ try:
312
+ if self._closefp:
313
+ self._fp.close()
314
+ finally:
315
+ self._fp = None
316
+ self._closefp = False
317
+ self._mode = _MODE_CLOSED
318
+ self._buffer = b""
319
+ self._buffer_offset = 0
320
+
321
+ @property
322
+ def closed(self):
323
+ """True if this file is closed."""
324
+ return self._mode == _MODE_CLOSED
325
+
326
+ def fileno(self):
327
+ """Return the file descriptor for the underlying file."""
328
+ self._check_not_closed()
329
+ return self._fp.fileno()
330
+
331
+ def seekable(self):
332
+ """Return whether the file supports seeking."""
333
+ return self.readable() and self._fp.seekable()
334
+
335
+ def readable(self):
336
+ """Return whether the file was opened for reading."""
337
+ self._check_not_closed()
338
+ return self._mode in (_MODE_READ, _MODE_READ_EOF)
339
+
340
+ def writable(self):
341
+ """Return whether the file was opened for writing."""
342
+ self._check_not_closed()
343
+ return self._mode == _MODE_WRITE
344
+
345
+ # Mode-checking helper functions.
346
+
347
+ def _check_not_closed(self):
348
+ if self.closed:
349
+ fname = getattr(self._fp, 'name', None)
350
+ msg = "I/O operation on closed file"
351
+ if fname is not None:
352
+ msg += " {}".format(fname)
353
+ msg += "."
354
+ raise ValueError(msg)
355
+
356
+ def _check_can_read(self):
357
+ if self._mode not in (_MODE_READ, _MODE_READ_EOF):
358
+ self._check_not_closed()
359
+ raise io.UnsupportedOperation("File not open for reading")
360
+
361
+ def _check_can_write(self):
362
+ if self._mode != _MODE_WRITE:
363
+ self._check_not_closed()
364
+ raise io.UnsupportedOperation("File not open for writing")
365
+
366
+ def _check_can_seek(self):
367
+ if self._mode not in (_MODE_READ, _MODE_READ_EOF):
368
+ self._check_not_closed()
369
+ raise io.UnsupportedOperation("Seeking is only supported "
370
+ "on files open for reading")
371
+ if not self._fp.seekable():
372
+ raise io.UnsupportedOperation("The underlying file object "
373
+ "does not support seeking")
374
+
375
+ # Fill the readahead buffer if it is empty. Returns False on EOF.
376
+ def _fill_buffer(self):
377
+ if self._mode == _MODE_READ_EOF:
378
+ return False
379
+ # Depending on the input data, our call to the decompressor may not
380
+ # return any data. In this case, try again after reading another block.
381
+ while self._buffer_offset == len(self._buffer):
382
+ try:
383
+ rawblock = (self._decompressor.unused_data or
384
+ self._fp.read(_BUFFER_SIZE))
385
+ if not rawblock:
386
+ raise EOFError
387
+ except EOFError:
388
+ # End-of-stream marker and end of file. We're good.
389
+ self._mode = _MODE_READ_EOF
390
+ self._size = self._pos
391
+ return False
392
+ else:
393
+ self._buffer = self._decompressor.decompress(rawblock)
394
+ self._buffer_offset = 0
395
+ return True
396
+
397
+ # Read data until EOF.
398
+ # If return_data is false, consume the data without returning it.
399
+ def _read_all(self, return_data=True):
400
+ # The loop assumes that _buffer_offset is 0. Ensure that this is true.
401
+ self._buffer = self._buffer[self._buffer_offset:]
402
+ self._buffer_offset = 0
403
+
404
+ blocks = []
405
+ while self._fill_buffer():
406
+ if return_data:
407
+ blocks.append(self._buffer)
408
+ self._pos += len(self._buffer)
409
+ self._buffer = b""
410
+ if return_data:
411
+ return b"".join(blocks)
412
+
413
+ # Read a block of up to n bytes.
414
+ # If return_data is false, consume the data without returning it.
415
+ def _read_block(self, n_bytes, return_data=True):
416
+ # If we have enough data buffered, return immediately.
417
+ end = self._buffer_offset + n_bytes
418
+ if end <= len(self._buffer):
419
+ data = self._buffer[self._buffer_offset: end]
420
+ self._buffer_offset = end
421
+ self._pos += len(data)
422
+ return data if return_data else None
423
+
424
+ # The loop assumes that _buffer_offset is 0. Ensure that this is true.
425
+ self._buffer = self._buffer[self._buffer_offset:]
426
+ self._buffer_offset = 0
427
+
428
+ blocks = []
429
+ while n_bytes > 0 and self._fill_buffer():
430
+ if n_bytes < len(self._buffer):
431
+ data = self._buffer[:n_bytes]
432
+ self._buffer_offset = n_bytes
433
+ else:
434
+ data = self._buffer
435
+ self._buffer = b""
436
+ if return_data:
437
+ blocks.append(data)
438
+ self._pos += len(data)
439
+ n_bytes -= len(data)
440
+ if return_data:
441
+ return b"".join(blocks)
442
+
443
+ def read(self, size=-1):
444
+ """Read up to size uncompressed bytes from the file.
445
+
446
+ If size is negative or omitted, read until EOF is reached.
447
+ Returns b'' if the file is already at EOF.
448
+ """
449
+ with self._lock:
450
+ self._check_can_read()
451
+ if size == 0:
452
+ return b""
453
+ elif size < 0:
454
+ return self._read_all()
455
+ else:
456
+ return self._read_block(size)
457
+
458
+ def readinto(self, b):
459
+ """Read up to len(b) bytes into b.
460
+
461
+ Returns the number of bytes read (0 for EOF).
462
+ """
463
+ with self._lock:
464
+ return io.BufferedIOBase.readinto(self, b)
465
+
466
+ def write(self, data):
467
+ """Write a byte string to the file.
468
+
469
+ Returns the number of uncompressed bytes written, which is
470
+ always len(data). Note that due to buffering, the file on disk
471
+ may not reflect the data written until close() is called.
472
+ """
473
+ with self._lock:
474
+ self._check_can_write()
475
+ # Convert data type if called by io.BufferedWriter.
476
+ if isinstance(data, memoryview):
477
+ data = data.tobytes()
478
+
479
+ compressed = self._compressor.compress(data)
480
+ self._fp.write(compressed)
481
+ self._pos += len(data)
482
+ return len(data)
483
+
484
+ # Rewind the file to the beginning of the data stream.
485
+ def _rewind(self):
486
+ self._fp.seek(0, 0)
487
+ self._mode = _MODE_READ
488
+ self._pos = 0
489
+ self._decompressor = zlib.decompressobj(self.wbits)
490
+ self._buffer = b""
491
+ self._buffer_offset = 0
492
+
493
+ def seek(self, offset, whence=0):
494
+ """Change the file position.
495
+
496
+ The new position is specified by offset, relative to the
497
+ position indicated by whence. Values for whence are:
498
+
499
+ 0: start of stream (default); offset must not be negative
500
+ 1: current stream position
501
+ 2: end of stream; offset must not be positive
502
+
503
+ Returns the new file position.
504
+
505
+ Note that seeking is emulated, so depending on the parameters,
506
+ this operation may be extremely slow.
507
+ """
508
+ with self._lock:
509
+ self._check_can_seek()
510
+
511
+ # Recalculate offset as an absolute file position.
512
+ if whence == 0:
513
+ pass
514
+ elif whence == 1:
515
+ offset = self._pos + offset
516
+ elif whence == 2:
517
+ # Seeking relative to EOF - we need to know the file's size.
518
+ if self._size < 0:
519
+ self._read_all(return_data=False)
520
+ offset = self._size + offset
521
+ else:
522
+ raise ValueError("Invalid value for whence: %s" % (whence,))
523
+
524
+ # Make it so that offset is the number of bytes to skip forward.
525
+ if offset < self._pos:
526
+ self._rewind()
527
+ else:
528
+ offset -= self._pos
529
+
530
+ # Read and discard data until we reach the desired position.
531
+ self._read_block(offset, return_data=False)
532
+
533
+ return self._pos
534
+
535
+ def tell(self):
536
+ """Return the current file position."""
537
+ with self._lock:
538
+ self._check_not_closed()
539
+ return self._pos
540
+
541
+
542
+ class ZlibCompressorWrapper(CompressorWrapper):
543
+
544
+ def __init__(self):
545
+ CompressorWrapper.__init__(self, obj=BinaryZlibFile,
546
+ prefix=_ZLIB_PREFIX, extension='.z')
547
+
548
+
549
+ class BinaryGzipFile(BinaryZlibFile):
550
+ """A file object providing transparent gzip (de)compression.
551
+
552
+ If filename is a str or bytes object, it gives the name
553
+ of the file to be opened. Otherwise, it should be a file object,
554
+ which will be used to read or write the compressed data.
555
+
556
+ mode can be 'rb' for reading (default) or 'wb' for (over)writing.
557
+
558
+ If mode is 'wb', compresslevel can be a number between 1
559
+ and 9 specifying the level of compression: 1 produces the least
560
+ compression, and 9 produces the most compression. 3 is the default.
561
+ """
562
+
563
+ wbits = 31 # zlib compressor/decompressor wbits value for gzip format.
564
+
565
+
566
+ class GzipCompressorWrapper(CompressorWrapper):
567
+
568
+ def __init__(self):
569
+ CompressorWrapper.__init__(self, obj=BinaryGzipFile,
570
+ prefix=_GZIP_PREFIX, extension='.gz')
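A quick usage sketch (not part of the uploaded diff) of the compression file objects defined above; the path 'example.gz' is made up for illustration and the call pattern simply follows the read/write/seek API shown in this file.

# Hedged sketch of the BinaryGzipFile API defined above.
from joblib.compressor import BinaryGzipFile

# Write compressed data; write() returns the number of uncompressed bytes.
with BinaryGzipFile('example.gz', mode='wb', compresslevel=3) as fout:
    fout.write(b'hello ' * 1000)

# Read it back; seek() is emulated by decompressing and discarding data.
with BinaryGzipFile('example.gz', mode='rb') as fin:
    fin.seek(6)
    print(fin.read(5))   # b'hello'
    print(fin.tell())    # 11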
llmeval-env/lib/python3.10/site-packages/joblib/disk.py ADDED
@@ -0,0 +1,136 @@
1
+ """
2
+ Disk management utilities.
3
+ """
4
+
5
+ # Authors: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
6
+ # Lars Buitinck
7
+ # Copyright (c) 2010 Gael Varoquaux
8
+ # License: BSD Style, 3 clauses.
9
+
10
+
11
+ import os
12
+ import sys
13
+ import time
14
+ import errno
15
+ import shutil
16
+
17
+ from multiprocessing import util
18
+
19
+
20
+ try:
21
+ WindowsError
22
+ except NameError:
23
+ WindowsError = OSError
24
+
25
+
26
+ def disk_used(path):
27
+ """ Return the disk usage in a directory."""
28
+ size = 0
29
+ for file in os.listdir(path) + ['.']:
30
+ stat = os.stat(os.path.join(path, file))
31
+ if hasattr(stat, 'st_blocks'):
32
+ size += stat.st_blocks * 512
33
+ else:
34
+ # on some platform st_blocks is not available (e.g., Windows)
35
+ # approximate by rounding to next multiple of 512
36
+ size += (stat.st_size // 512 + 1) * 512
37
+ # We need to convert to int to avoid having longs on some systems (we
38
+ # don't want longs to avoid problems with SQLite)
39
+ return int(size / 1024.)
40
+
41
+
42
+ def memstr_to_bytes(text):
43
+ """ Convert a memory text to its value in bytes.
44
+ """
45
+ kilo = 1024
46
+ units = dict(K=kilo, M=kilo ** 2, G=kilo ** 3)
47
+ try:
48
+ size = int(units[text[-1]] * float(text[:-1]))
49
+ except (KeyError, ValueError) as e:
50
+ raise ValueError(
51
+ "Invalid literal for size give: %s (type %s) should be "
52
+ "alike '10G', '500M', '50K'." % (text, type(text))) from e
53
+ return size
54
+
55
+
56
+ def mkdirp(d):
57
+ """Ensure directory d exists (like mkdir -p on Unix)
58
+ No guarantee that the directory is writable.
59
+ """
60
+ try:
61
+ os.makedirs(d)
62
+ except OSError as e:
63
+ if e.errno != errno.EEXIST:
64
+ raise
65
+
66
+
67
+ # if a rmtree operation fails in rm_subdirs, wait for this much time (in secs),
68
+ # then retry up to RM_SUBDIRS_N_RETRY times. If it still fails, raise the
69
+ # exception. This mechanism ensures that the sub-process gc has the time to
70
+ # collect and close the memmaps before we fail.
71
+ RM_SUBDIRS_RETRY_TIME = 0.1
72
+ RM_SUBDIRS_N_RETRY = 10
73
+
74
+
75
+ def rm_subdirs(path, onerror=None):
76
+ """Remove all subdirectories in this path.
77
+
78
+ The directory indicated by `path` is left in place, and its subdirectories
79
+ are erased.
80
+
81
+ If onerror is set, it is called to handle the error with arguments (func,
82
+ path, exc_info) where func is os.listdir, os.remove, or os.rmdir;
83
+ path is the argument to that function that caused it to fail; and
84
+ exc_info is a tuple returned by sys.exc_info(). If onerror is None,
85
+ an exception is raised.
86
+ """
87
+
88
+ # NOTE this code is adapted from the one in shutil.rmtree, and is
89
+ # just as fast
90
+
91
+ names = []
92
+ try:
93
+ names = os.listdir(path)
94
+ except os.error:
95
+ if onerror is not None:
96
+ onerror(os.listdir, path, sys.exc_info())
97
+ else:
98
+ raise
99
+
100
+ for name in names:
101
+ fullname = os.path.join(path, name)
102
+ delete_folder(fullname, onerror=onerror)
103
+
104
+
105
+ def delete_folder(folder_path, onerror=None, allow_non_empty=True):
106
+ """Utility function to cleanup a temporary folder if it still exists."""
107
+ if os.path.isdir(folder_path):
108
+ if onerror is not None:
109
+ shutil.rmtree(folder_path, False, onerror)
110
+ else:
111
+ # allow the rmtree to fail once, wait and re-try.
112
+ # if the error is raised again, fail
113
+ err_count = 0
114
+ while True:
115
+ files = os.listdir(folder_path)
116
+ try:
117
+ if len(files) == 0 or allow_non_empty:
118
+ shutil.rmtree(
119
+ folder_path, ignore_errors=False, onerror=None
120
+ )
121
+ util.debug(
122
+ "Successfully deleted {}".format(folder_path))
123
+ break
124
+ else:
125
+ raise OSError(
126
+ "Expected empty folder {} but got {} "
127
+ "files.".format(folder_path, len(files))
128
+ )
129
+ except (OSError, WindowsError):
130
+ err_count += 1
131
+ if err_count > RM_SUBDIRS_N_RETRY:
132
+ # the folder cannot be deleted right now. It maybe
133
+ # because some temporary files have not been deleted
134
+ # yet.
135
+ raise
136
+ time.sleep(RM_SUBDIRS_RETRY_TIME)
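As a rough usage sketch (not part of the uploaded diff), the disk helpers above compose as follows; the '/tmp/joblib_demo' path is hypothetical.

import os
from joblib.disk import memstr_to_bytes, mkdirp, disk_used, delete_folder

print(memstr_to_bytes('500K'))   # 512000
print(memstr_to_bytes('10M'))    # 10485760

folder = '/tmp/joblib_demo'
mkdirp(folder)                                   # like `mkdir -p`
with open(os.path.join(folder, 'payload.bin'), 'wb') as f:
    f.write(b'\x00' * 4096)
print(disk_used(folder))                         # usage of the folder, in kilobytes
delete_folder(folder)                            # rmtree with retries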
llmeval-env/lib/python3.10/site-packages/joblib/executor.py ADDED
@@ -0,0 +1,117 @@
1
+ """Utility function to construct a loky.ReusableExecutor with custom pickler.
2
+
3
+ This module provides efficient ways of working with data stored in
4
+ shared memory with numpy.memmap arrays without inducing any memory
5
+ copy between the parent and child processes.
6
+ """
7
+ # Author: Thomas Moreau <[email protected]>
8
+ # Copyright: 2017, Thomas Moreau
9
+ # License: BSD 3 clause
10
+
11
+ from ._memmapping_reducer import get_memmapping_reducers
12
+ from ._memmapping_reducer import TemporaryResourcesManager
13
+ from .externals.loky.reusable_executor import _ReusablePoolExecutor
14
+
15
+
16
+ _executor_args = None
17
+
18
+
19
+ def get_memmapping_executor(n_jobs, **kwargs):
20
+ return MemmappingExecutor.get_memmapping_executor(n_jobs, **kwargs)
21
+
22
+
23
+ class MemmappingExecutor(_ReusablePoolExecutor):
24
+
25
+ @classmethod
26
+ def get_memmapping_executor(cls, n_jobs, timeout=300, initializer=None,
27
+ initargs=(), env=None, temp_folder=None,
28
+ context_id=None, **backend_args):
29
+ """Factory for ReusableExecutor with automatic memmapping for large
30
+ numpy arrays.
31
+ """
32
+ global _executor_args
33
+ # Check if we can reuse the executor here instead of deferring the test
34
+ # to loky as the reducers are objects that change at each call.
35
+ executor_args = backend_args.copy()
36
+ executor_args.update(env if env else {})
37
+ executor_args.update(dict(
38
+ timeout=timeout, initializer=initializer, initargs=initargs))
39
+ reuse = _executor_args is None or _executor_args == executor_args
40
+ _executor_args = executor_args
41
+
42
+ manager = TemporaryResourcesManager(temp_folder)
43
+
44
+ # reducers access the temporary folder in which to store temporary
45
+ # pickles through a call to manager.resolve_temp_folder_name. Resolving
46
+ # the folder name dynamically is useful to use different folders across
47
+ # calls of the same reusable executor.
48
+ job_reducers, result_reducers = get_memmapping_reducers(
49
+ unlink_on_gc_collect=True,
50
+ temp_folder_resolver=manager.resolve_temp_folder_name,
51
+ **backend_args)
52
+ _executor, executor_is_reused = super().get_reusable_executor(
53
+ n_jobs, job_reducers=job_reducers, result_reducers=result_reducers,
54
+ reuse=reuse, timeout=timeout, initializer=initializer,
55
+ initargs=initargs, env=env
56
+ )
57
+
58
+ if not executor_is_reused:
59
+ # Only set a _temp_folder_manager for new executors. Reused
60
+ # executors already have a _temporary_folder_manager that must not
61
+ # be re-assigned like that because it is referenced in various
62
+ # places in the reducing machinery of the executor.
63
+ _executor._temp_folder_manager = manager
64
+
65
+ if context_id is not None:
66
+ # Only register the specified context once we know which manager
67
+ # the current executor is using, in order to not register an atexit
68
+ # finalizer twice for the same folder.
69
+ _executor._temp_folder_manager.register_new_context(context_id)
70
+
71
+ return _executor
72
+
73
+ def terminate(self, kill_workers=False):
74
+
75
+ self.shutdown(kill_workers=kill_workers)
76
+
77
+ # When workers are killed in a brutal manner, they cannot execute the
78
+ # finalizer of their shared memmaps. The refcount of those memmaps may
79
+ # be off by an unknown number, so instead of decref'ing them, we force
80
+ # delete the whole temporary folder, and unregister them. There is no
81
+ # risk of PermissionError at folder deletion because at this
82
+ # point, all child processes are dead, so all references to temporary
83
+ # memmaps are closed. Otherwise, just try to delete as much as possible
84
+ # with allow_non_empty=True but if we can't, it will be clean up later
85
+ # on by the resource_tracker.
86
+ with self._submit_resize_lock:
87
+ self._temp_folder_manager._clean_temporary_resources(
88
+ force=kill_workers, allow_non_empty=True
89
+ )
90
+
91
+ @property
92
+ def _temp_folder(self):
93
+ # Legacy property in tests. could be removed if we refactored the
94
+ # memmapping tests. SHOULD ONLY BE USED IN TESTS!
95
+ # We cache this property because it is called late in the tests - at
96
+ # this point, all context have been unregistered, and
97
+ # resolve_temp_folder_name raises an error.
98
+ if getattr(self, '_cached_temp_folder', None) is not None:
99
+ return self._cached_temp_folder
100
+ else:
101
+ self._cached_temp_folder = self._temp_folder_manager.resolve_temp_folder_name() # noqa
102
+ return self._cached_temp_folder
103
+
104
+
105
+ class _TestingMemmappingExecutor(MemmappingExecutor):
106
+ """Wrapper around ReusableExecutor to ease memmapping testing with Pool
107
+ and Executor. This is only for testing purposes.
108
+
109
+ """
110
+ def apply_async(self, func, args):
111
+ """Schedule a func to be run"""
112
+ future = self.submit(func, *args)
113
+ future.get = future.result
114
+ return future
115
+
116
+ def map(self, f, *args):
117
+ return list(super().map(f, *args))
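get_memmapping_executor is an internal factory used by joblib's loky backend; the sketch below (not part of the uploaded diff) only illustrates the call shape shown above and is not a recommended public entry point.

from joblib.executor import get_memmapping_executor

def square(x):
    return x * x

if __name__ == '__main__':
    # Reuses a previously created executor when the arguments match.
    executor = get_memmapping_executor(n_jobs=2, timeout=30)
    futures = [executor.submit(square, i) for i in range(4)]
    print([f.result() for f in futures])   # [0, 1, 4, 9]
    # terminate() also cleans up the temporary memmapping folder.
    executor.terminate()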
llmeval-env/lib/python3.10/site-packages/joblib/hashing.py ADDED
@@ -0,0 +1,265 @@
1
+ """
2
+ Fast cryptographic hash of Python objects, with a special case for fast
3
+ hashing of numpy arrays.
4
+ """
5
+
6
+ # Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
7
+ # Copyright (c) 2009 Gael Varoquaux
8
+ # License: BSD Style, 3 clauses.
9
+
10
+ import pickle
11
+ import hashlib
12
+ import sys
13
+ import types
14
+ import struct
15
+ import io
16
+ import decimal
17
+
18
+
19
+ Pickler = pickle._Pickler
20
+
21
+
22
+ class _ConsistentSet(object):
23
+ """ Class used to ensure the hash of Sets is preserved
24
+ whatever the order of its items.
25
+ """
26
+ def __init__(self, set_sequence):
27
+ # Forces order of elements in set to ensure consistent hash.
28
+ try:
29
+ # Trying first to order the set assuming the type of elements is
30
+ # consistent and orderable.
31
+ # This fails on python 3 when elements are unorderable
32
+ # but we keep it in a try as it's faster.
33
+ self._sequence = sorted(set_sequence)
34
+ except (TypeError, decimal.InvalidOperation):
35
+ # If elements are unorderable, sorting them using their hash.
36
+ # This is slower but works in any case.
37
+ self._sequence = sorted((hash(e) for e in set_sequence))
38
+
39
+
40
+ class _MyHash(object):
41
+ """ Class used to hash objects that won't normally pickle """
42
+
43
+ def __init__(self, *args):
44
+ self.args = args
45
+
46
+
47
+ class Hasher(Pickler):
48
+ """ A subclass of pickler, to do cryptographic hashing, rather than
49
+ pickling.
50
+ """
51
+
52
+ def __init__(self, hash_name='md5'):
53
+ self.stream = io.BytesIO()
54
+ # By default we want a pickle protocol that only changes with
55
+ # the major python version and not the minor one
56
+ protocol = 3
57
+ Pickler.__init__(self, self.stream, protocol=protocol)
58
+ # Initialise the hash obj
59
+ self._hash = hashlib.new(hash_name)
60
+
61
+ def hash(self, obj, return_digest=True):
62
+ try:
63
+ self.dump(obj)
64
+ except pickle.PicklingError as e:
65
+ e.args += ('PicklingError while hashing %r: %r' % (obj, e),)
66
+ raise
67
+ dumps = self.stream.getvalue()
68
+ self._hash.update(dumps)
69
+ if return_digest:
70
+ return self._hash.hexdigest()
71
+
72
+ def save(self, obj):
73
+ if isinstance(obj, (types.MethodType, type({}.pop))):
74
+ # the Pickler cannot pickle instance methods; here we decompose
75
+ # them into components that make them uniquely identifiable
76
+ if hasattr(obj, '__func__'):
77
+ func_name = obj.__func__.__name__
78
+ else:
79
+ func_name = obj.__name__
80
+ inst = obj.__self__
81
+ if type(inst) is type(pickle):
82
+ obj = _MyHash(func_name, inst.__name__)
83
+ elif inst is None:
84
+ # type(None) or type(module) do not pickle
85
+ obj = _MyHash(func_name, inst)
86
+ else:
87
+ cls = obj.__self__.__class__
88
+ obj = _MyHash(func_name, inst, cls)
89
+ Pickler.save(self, obj)
90
+
91
+ def memoize(self, obj):
92
+ # We want hashing to be sensitive to value instead of reference.
93
+ # For example we want ['aa', 'aa'] and ['aa', 'aaZ'[:2]]
94
+ # to hash to the same value and that's why we disable memoization
95
+ # for strings
96
+ if isinstance(obj, (bytes, str)):
97
+ return
98
+ Pickler.memoize(self, obj)
99
+
100
+ # The dispatch table of the pickler is not accessible in Python
101
+ # 3, as these lines are only bugware for IPython, we skip them.
102
+ def save_global(self, obj, name=None, pack=struct.pack):
103
+ # We have to override this method in order to deal with objects
104
+ # defined interactively in IPython that are not injected in
105
+ # __main__
106
+ kwargs = dict(name=name, pack=pack)
107
+ del kwargs['pack']
108
+ try:
109
+ Pickler.save_global(self, obj, **kwargs)
110
+ except pickle.PicklingError:
111
+ Pickler.save_global(self, obj, **kwargs)
112
+ module = getattr(obj, "__module__", None)
113
+ if module == '__main__':
114
+ my_name = name
115
+ if my_name is None:
116
+ my_name = obj.__name__
117
+ mod = sys.modules[module]
118
+ if not hasattr(mod, my_name):
119
+ # IPython doesn't inject the variables defined
120
+ # interactively in __main__
121
+ setattr(mod, my_name, obj)
122
+
123
+ dispatch = Pickler.dispatch.copy()
124
+ # builtin
125
+ dispatch[type(len)] = save_global
126
+ # type
127
+ dispatch[type(object)] = save_global
128
+ # classobj
129
+ dispatch[type(Pickler)] = save_global
130
+ # function
131
+ dispatch[type(pickle.dump)] = save_global
132
+
133
+ def _batch_setitems(self, items):
134
+ # forces order of keys in dict to ensure consistent hash.
135
+ try:
136
+ # Trying first to compare dict assuming the type of keys is
137
+ # consistent and orderable.
138
+ # This fails on python 3 when keys are unorderable
139
+ # but we keep it in a try as it's faster.
140
+ Pickler._batch_setitems(self, iter(sorted(items)))
141
+ except TypeError:
142
+ # If keys are unorderable, sorting them using their hash. This is
143
+ # slower but works in any case.
144
+ Pickler._batch_setitems(self, iter(sorted((hash(k), v)
145
+ for k, v in items)))
146
+
147
+ def save_set(self, set_items):
148
+ # forces order of items in Set to ensure consistent hash
149
+ Pickler.save(self, _ConsistentSet(set_items))
150
+
151
+ dispatch[type(set())] = save_set
152
+
153
+
154
+ class NumpyHasher(Hasher):
155
+ """ Special case the hasher for when numpy is loaded.
156
+ """
157
+
158
+ def __init__(self, hash_name='md5', coerce_mmap=False):
159
+ """
160
+ Parameters
161
+ ----------
162
+ hash_name: string
163
+ The hash algorithm to be used
164
+ coerce_mmap: boolean
165
+ Make no difference between np.memmap and np.ndarray
166
+ objects.
167
+ """
168
+ self.coerce_mmap = coerce_mmap
169
+ Hasher.__init__(self, hash_name=hash_name)
170
+ # delayed import of numpy, to avoid tight coupling
171
+ import numpy as np
172
+ self.np = np
173
+ if hasattr(np, 'getbuffer'):
174
+ self._getbuffer = np.getbuffer
175
+ else:
176
+ self._getbuffer = memoryview
177
+
178
+ def save(self, obj):
179
+ """ Subclass the save method, to hash ndarray subclass, rather
180
+ than pickling them. Off course, this is a total abuse of
181
+ the Pickler class.
182
+ """
183
+ if isinstance(obj, self.np.ndarray) and not obj.dtype.hasobject:
184
+ # Compute a hash of the object
185
+ # The update function of the hash requires a c_contiguous buffer.
186
+ if obj.shape == ():
187
+ # 0d arrays need to be flattened because viewing them as bytes
188
+ # raises a ValueError exception.
189
+ obj_c_contiguous = obj.flatten()
190
+ elif obj.flags.c_contiguous:
191
+ obj_c_contiguous = obj
192
+ elif obj.flags.f_contiguous:
193
+ obj_c_contiguous = obj.T
194
+ else:
195
+ # Cater for non-single-segment arrays: this creates a
196
+ # copy, and thus alleviates this issue.
197
+ # XXX: There might be a more efficient way of doing this
198
+ obj_c_contiguous = obj.flatten()
199
+
200
+ # memoryview is not supported for some dtypes, e.g. datetime64, see
201
+ # https://github.com/numpy/numpy/issues/4983. The
202
+ # workaround is to view the array as bytes before
203
+ # taking the memoryview.
204
+ self._hash.update(
205
+ self._getbuffer(obj_c_contiguous.view(self.np.uint8)))
206
+
207
+ # We store the class, to be able to distinguish between
208
+ # Objects with the same binary content, but different
209
+ # classes.
210
+ if self.coerce_mmap and isinstance(obj, self.np.memmap):
211
+ # We don't make the difference between memmap and
212
+ # normal ndarrays, to be able to reload previously
213
+ # computed results with memmap.
214
+ klass = self.np.ndarray
215
+ else:
216
+ klass = obj.__class__
217
+ # We also return the dtype and the shape, to distinguish
218
+ # different views on the same data with different dtypes.
219
+
220
+ # The object will be pickled by the pickler hashed at the end.
221
+ obj = (klass, ('HASHED', obj.dtype, obj.shape, obj.strides))
222
+ elif isinstance(obj, self.np.dtype):
223
+ # numpy.dtype consistent hashing is tricky to get right. This comes
224
+ # from the fact that atomic np.dtype objects are interned:
225
+ # ``np.dtype('f4') is np.dtype('f4')``. The situation is
226
+ # complicated by the fact that this interning does not resist a
227
+ # simple pickle.load/dump roundtrip:
228
+ # ``pickle.loads(pickle.dumps(np.dtype('f4'))) is not
229
+ # np.dtype('f4')``. Because pickle relies on memoization during
230
+ # pickling, it is easy to
231
+ # produce different hashes for seemingly identical objects, such as
232
+ # ``[np.dtype('f4'), np.dtype('f4')]``
233
+ # and ``[np.dtype('f4'), pickle.loads(pickle.dumps('f4'))]``.
234
+ # To prevent memoization from interfering with hashing, we isolate
235
+ # the serialization (and thus the pickle memoization) of each dtype
236
+ # using each time a different ``pickle.dumps`` call unrelated to
237
+ # the current Hasher instance.
238
+ self._hash.update("_HASHED_DTYPE".encode('utf-8'))
239
+ self._hash.update(pickle.dumps(obj))
240
+ return
241
+ Hasher.save(self, obj)
242
+
243
+
244
+ def hash(obj, hash_name='md5', coerce_mmap=False):
245
+ """ Quick calculation of a hash to identify uniquely Python objects
246
+ containing numpy arrays.
247
+
248
+ Parameters
249
+ ----------
250
+ hash_name: 'md5' or 'sha1'
251
+ Hashing algorithm used. sha1 is supposedly safer, but md5 is
252
+ faster.
253
+ coerce_mmap: boolean
254
+ Make no difference between np.memmap and np.ndarray
255
+ """
256
+ valid_hash_names = ('md5', 'sha1')
257
+ if hash_name not in valid_hash_names:
258
+ raise ValueError("Valid options for 'hash_name' are {}. "
259
+ "Got hash_name={!r} instead."
260
+ .format(valid_hash_names, hash_name))
261
+ if 'numpy' in sys.modules:
262
+ hasher = NumpyHasher(hash_name=hash_name, coerce_mmap=coerce_mmap)
263
+ else:
264
+ hasher = Hasher(hash_name=hash_name)
265
+ return hasher.hash(obj)
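A brief illustration (not part of the uploaded diff) of the hashing behaviour implemented above:

import numpy as np
from joblib.hashing import hash   # also exposed as joblib.hash

a = np.arange(10)
b = np.arange(10)
print(hash(a) == hash(b))                     # True: same content, same digest
print(hash(a) == hash(a.astype('float64')))   # False: dtype is part of the hash
print(hash({'x': a, 'y': {3, 1, 2}}))         # dict/set hashing is order-independent
print(hash(a, hash_name='sha1'))              # sha1 instead of the default md5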
llmeval-env/lib/python3.10/site-packages/joblib/numpy_pickle_compat.py ADDED
@@ -0,0 +1,244 @@
1
+ """Numpy pickle compatibility functions."""
2
+
3
+ import pickle
4
+ import os
5
+ import zlib
6
+ import inspect
7
+
8
+ from io import BytesIO
9
+
10
+ from .numpy_pickle_utils import _ZFILE_PREFIX
11
+ from .numpy_pickle_utils import Unpickler
12
+ from .numpy_pickle_utils import _ensure_native_byte_order
13
+
14
+
15
+ def hex_str(an_int):
16
+ """Convert an int to an hexadecimal string."""
17
+ return '{:#x}'.format(an_int)
18
+
19
+
20
+ def asbytes(s):
21
+ if isinstance(s, bytes):
22
+ return s
23
+ return s.encode('latin1')
24
+
25
+
26
+ _MAX_LEN = len(hex_str(2 ** 64))
27
+ _CHUNK_SIZE = 64 * 1024
28
+
29
+
30
+ def read_zfile(file_handle):
31
+ """Read the z-file and return the content as a string.
32
+
33
+ Z-files are raw data compressed with zlib used internally by joblib
34
+ for persistence. Backward compatibility is not guaranteed. Do not
35
+ use for external purposes.
36
+ """
37
+ file_handle.seek(0)
38
+ header_length = len(_ZFILE_PREFIX) + _MAX_LEN
39
+ length = file_handle.read(header_length)
40
+ length = length[len(_ZFILE_PREFIX):]
41
+ length = int(length, 16)
42
+
43
+ # With python2 and joblib version <= 0.8.4 compressed pickle header is one
44
+ # character wider so we need to ignore an additional space if present.
45
+ # Note: the first byte of the zlib data is guaranteed not to be a
46
+ # space according to
47
+ # https://tools.ietf.org/html/rfc6713#section-2.1
48
+ next_byte = file_handle.read(1)
49
+ if next_byte != b' ':
50
+ # The zlib compressed data has started and we need to go back
51
+ # one byte
52
+ file_handle.seek(header_length)
53
+
54
+ # We use the known length of the data to tell Zlib the size of the
55
+ # buffer to allocate.
56
+ data = zlib.decompress(file_handle.read(), 15, length)
57
+ assert len(data) == length, (
58
+ "Incorrect data length while decompressing %s."
59
+ "The file could be corrupted." % file_handle)
60
+ return data
61
+
62
+
63
+ def write_zfile(file_handle, data, compress=1):
64
+ """Write the data in the given file as a Z-file.
65
+
66
+ Z-files are raw data compressed with zlib used internally by joblib
67
+ for persistence. Backward compatibility is not guaranteed. Do not
68
+ use for external purposes.
69
+ """
70
+ file_handle.write(_ZFILE_PREFIX)
71
+ length = hex_str(len(data))
72
+ # Store the length of the data
73
+ file_handle.write(asbytes(length.ljust(_MAX_LEN)))
74
+ file_handle.write(zlib.compress(asbytes(data), compress))
75
+
76
+ ###############################################################################
77
+ # Utility objects for persistence.
78
+
79
+
80
+ class NDArrayWrapper(object):
81
+ """An object to be persisted instead of numpy arrays.
82
+
83
+ The only thing this object does, is to carry the filename in which
84
+ the array has been persisted, and the array subclass.
85
+ """
86
+
87
+ def __init__(self, filename, subclass, allow_mmap=True):
88
+ """Constructor. Store the useful information for later."""
89
+ self.filename = filename
90
+ self.subclass = subclass
91
+ self.allow_mmap = allow_mmap
92
+
93
+ def read(self, unpickler):
94
+ """Reconstruct the array."""
95
+ filename = os.path.join(unpickler._dirname, self.filename)
96
+ # Load the array from the disk
97
+ # use getattr instead of self.allow_mmap to ensure backward compat
98
+ # with NDArrayWrapper instances pickled with joblib < 0.9.0
99
+ allow_mmap = getattr(self, 'allow_mmap', True)
100
+ kwargs = {}
101
+ if allow_mmap:
102
+ kwargs['mmap_mode'] = unpickler.mmap_mode
103
+ if "allow_pickle" in inspect.signature(unpickler.np.load).parameters:
104
+ # Required in numpy 1.16.3 and later to acknowledge the security
105
+ # risk.
106
+ kwargs["allow_pickle"] = True
107
+ array = unpickler.np.load(filename, **kwargs)
108
+
109
+ # Detect byte order mismatch and swap as needed.
110
+ array = _ensure_native_byte_order(array)
111
+
112
+ # Reconstruct subclasses. This does not work with old
113
+ # versions of numpy
114
+ if (hasattr(array, '__array_prepare__') and
115
+ self.subclass not in (unpickler.np.ndarray,
116
+ unpickler.np.memmap)):
117
+ # We need to reconstruct another subclass
118
+ new_array = unpickler.np.core.multiarray._reconstruct(
119
+ self.subclass, (0,), 'b')
120
+ return new_array.__array_prepare__(array)
121
+ else:
122
+ return array
123
+
124
+
125
+ class ZNDArrayWrapper(NDArrayWrapper):
126
+ """An object to be persisted instead of numpy arrays.
127
+
128
+ This object stores the Zfile filename in which
129
+ the data array has been persisted, and the meta information to
130
+ retrieve it.
131
+ The reason that we store the raw buffer data of the array and
132
+ the meta information, rather than array representation routine
133
+ (tobytes) is that it enables us to use completely the strided
134
+ model to avoid memory copies (a and a.T store as fast). In
135
+ addition saving the heavy information separately can avoid
136
+ creating large temporary buffers when unpickling data with
137
+ large arrays.
138
+ """
139
+
140
+ def __init__(self, filename, init_args, state):
141
+ """Constructor. Store the useful information for later."""
142
+ self.filename = filename
143
+ self.state = state
144
+ self.init_args = init_args
145
+
146
+ def read(self, unpickler):
147
+ """Reconstruct the array from the meta-information and the z-file."""
148
+ # Here we are simply reproducing the unpickling mechanism for numpy
149
+ # arrays
150
+ filename = os.path.join(unpickler._dirname, self.filename)
151
+ array = unpickler.np.core.multiarray._reconstruct(*self.init_args)
152
+ with open(filename, 'rb') as f:
153
+ data = read_zfile(f)
154
+ state = self.state + (data,)
155
+ array.__setstate__(state)
156
+ return array
157
+
158
+
159
+ class ZipNumpyUnpickler(Unpickler):
160
+ """A subclass of the Unpickler to unpickle our numpy pickles."""
161
+
162
+ dispatch = Unpickler.dispatch.copy()
163
+
164
+ def __init__(self, filename, file_handle, mmap_mode=None):
165
+ """Constructor."""
166
+ self._filename = os.path.basename(filename)
167
+ self._dirname = os.path.dirname(filename)
168
+ self.mmap_mode = mmap_mode
169
+ self.file_handle = self._open_pickle(file_handle)
170
+ Unpickler.__init__(self, self.file_handle)
171
+ try:
172
+ import numpy as np
173
+ except ImportError:
174
+ np = None
175
+ self.np = np
176
+
177
+ def _open_pickle(self, file_handle):
178
+ return BytesIO(read_zfile(file_handle))
179
+
180
+ def load_build(self):
181
+ """Set the state of a newly created object.
182
+
183
+ We capture it to replace our place-holder objects,
184
+ NDArrayWrapper, by the array we are interested in. We
185
+ replace them directly in the stack of pickler.
186
+ """
187
+ Unpickler.load_build(self)
188
+ if isinstance(self.stack[-1], NDArrayWrapper):
189
+ if self.np is None:
190
+ raise ImportError("Trying to unpickle an ndarray, "
191
+ "but numpy didn't import correctly")
192
+ nd_array_wrapper = self.stack.pop()
193
+ array = nd_array_wrapper.read(self)
194
+ self.stack.append(array)
195
+
196
+ dispatch[pickle.BUILD[0]] = load_build
197
+
198
+
199
+ def load_compatibility(filename):
200
+ """Reconstruct a Python object from a file persisted with joblib.dump.
201
+
202
+ This function ensures the compatibility with joblib old persistence format
203
+ (<= 0.9.3).
204
+
205
+ Parameters
206
+ ----------
207
+ filename: string
208
+ The name of the file from which to load the object
209
+
210
+ Returns
211
+ -------
212
+ result: any Python object
213
+ The object stored in the file.
214
+
215
+ See Also
216
+ --------
217
+ joblib.dump : function to save an object
218
+
219
+ Notes
220
+ -----
221
+
222
+ This function can load numpy array files saved separately during the
223
+ dump.
224
+ """
225
+ with open(filename, 'rb') as file_handle:
226
+ # We are careful to open the file handle early and keep it open to
227
+ # avoid race-conditions on renames. That said, if data is stored in
228
+ # companion files, moving the directory will create a race when
229
+ # joblib tries to access the companion files.
230
+ unpickler = ZipNumpyUnpickler(filename, file_handle=file_handle)
231
+ try:
232
+ obj = unpickler.load()
233
+ except UnicodeDecodeError as exc:
234
+ # More user-friendly error message
235
+ new_exc = ValueError(
236
+ 'You may be trying to read with '
237
+ 'python 3 a joblib pickle generated with python 2. '
238
+ 'This feature is not supported by joblib.')
239
+ new_exc.__cause__ = exc
240
+ raise new_exc
241
+ finally:
242
+ if hasattr(unpickler, 'file_handle'):
243
+ unpickler.file_handle.close()
244
+ return obj
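A small round-trip sketch (not part of the uploaded diff) of the private z-file helpers defined above; they are internal to joblib and shown here only to illustrate the format.

from io import BytesIO
from joblib.numpy_pickle_compat import write_zfile, read_zfile

buffer = BytesIO()
write_zfile(buffer, b'some raw payload', compress=1)
print(read_zfile(buffer))   # b'some raw payload'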
llmeval-env/lib/python3.10/site-packages/joblib/test/__init__.py ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_cloudpickle_wrapper.cpython-310.pyc ADDED
Binary file (1.07 kB). View file
 
llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_config.cpython-310.pyc ADDED
Binary file (5.48 kB). View file
 
llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_func_inspect.cpython-310.pyc ADDED
Binary file (10.1 kB). View file
 
llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_func_inspect_special_encoding.cpython-310.pyc ADDED
Binary file (341 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_hashing.cpython-310.pyc ADDED
Binary file (13.1 kB). View file
 
llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_logger.cpython-310.pyc ADDED
Binary file (774 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_memmapping.cpython-310.pyc ADDED
Binary file (30.9 kB). View file
 
llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_memory_async.cpython-310.pyc ADDED
Binary file (4.55 kB). View file
 
llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_missing_multiprocessing.cpython-310.pyc ADDED
Binary file (1.28 kB). View file
 
llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_numpy_pickle.cpython-310.pyc ADDED
Binary file (31.8 kB). View file
 
llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_numpy_pickle_compat.cpython-310.pyc ADDED
Binary file (716 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_numpy_pickle_utils.cpython-310.pyc ADDED
Binary file (609 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_store_backends.cpython-310.pyc ADDED
Binary file (3.68 kB). View file
 
llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_testing.cpython-310.pyc ADDED
Binary file (2.38 kB). View file
 
llmeval-env/lib/python3.10/site-packages/joblib/test/__pycache__/test_utils.cpython-310.pyc ADDED
Binary file (890 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/joblib/test/data/create_numpy_pickle.py ADDED
@@ -0,0 +1,95 @@
1
+ """
2
+ This script is used to generate test data for joblib/test/test_numpy_pickle.py
3
+ """
4
+
5
+ import sys
6
+ import re
7
+
8
+ # pytest needs to be able to import this module even when numpy is
9
+ # not installed
10
+ try:
11
+ import numpy as np
12
+ except ImportError:
13
+ np = None
14
+
15
+ import joblib
16
+
17
+
18
+ def get_joblib_version(joblib_version=joblib.__version__):
19
+ """Normalize joblib version by removing suffix.
20
+
21
+ >>> get_joblib_version('0.8.4')
22
+ '0.8.4'
23
+ >>> get_joblib_version('0.8.4b1')
24
+ '0.8.4'
25
+ >>> get_joblib_version('0.9.dev0')
26
+ '0.9'
27
+ """
28
+ matches = [re.match(r'(\d+).*', each)
29
+ for each in joblib_version.split('.')]
30
+ return '.'.join([m.group(1) for m in matches if m is not None])
31
+
32
+
33
+ def write_test_pickle(to_pickle, args):
34
+ kwargs = {}
35
+ compress = args.compress
36
+ method = args.method
37
+ joblib_version = get_joblib_version()
38
+ py_version = '{0[0]}{0[1]}'.format(sys.version_info)
39
+ numpy_version = ''.join(np.__version__.split('.')[:2])
40
+
41
+ # The game here is to generate the right filename according to the options.
42
+ body = '_compressed' if (compress and method == 'zlib') else ''
43
+ if compress:
44
+ if method == 'zlib':
45
+ kwargs['compress'] = True
46
+ extension = '.gz'
47
+ else:
48
+ kwargs['compress'] = (method, 3)
49
+ extension = '.pkl.{}'.format(method)
50
+ if args.cache_size:
51
+ kwargs['cache_size'] = 0
52
+ body += '_cache_size'
53
+ else:
54
+ extension = '.pkl'
55
+
56
+ pickle_filename = 'joblib_{}{}_pickle_py{}_np{}{}'.format(
57
+ joblib_version, body, py_version, numpy_version, extension)
58
+
59
+ try:
60
+ joblib.dump(to_pickle, pickle_filename, **kwargs)
61
+ except Exception as e:
62
+ # With old Python versions (<= 3.3), we can arrive here when
63
+ # dumping compressed pickle with LzmaFile.
64
+ print("Error: cannot generate file '{}' with arguments '{}'. "
65
+ "Error was: {}".format(pickle_filename, kwargs, e))
66
+ else:
67
+ print("File '{}' generated successfully.".format(pickle_filename))
68
+
69
+
70
+ if __name__ == '__main__':
71
+ import argparse
72
+ parser = argparse.ArgumentParser(description="Joblib pickle data "
73
+ "generator.")
74
+ parser.add_argument('--cache_size', action="store_true",
75
+ help="Force creation of companion numpy "
76
+ "files for pickled arrays.")
77
+ parser.add_argument('--compress', action="store_true",
78
+ help="Generate compress pickles.")
79
+ parser.add_argument('--method', type=str, default='zlib',
80
+ choices=['zlib', 'gzip', 'bz2', 'xz', 'lzma', 'lz4'],
81
+ help="Set compression method.")
82
+ # We need to be specific about dtypes in particular endianness
83
+ # because the pickles can be generated on one architecture and
84
+ # the tests run on another one. See
85
+ # https://github.com/joblib/joblib/issues/279.
86
+ to_pickle = [np.arange(5, dtype=np.dtype('<i8')),
87
+ np.arange(5, dtype=np.dtype('<f8')),
88
+ np.array([1, 'abc', {'a': 1, 'b': 2}], dtype='O'),
89
+ # all possible bytes as a byte string
90
+ np.arange(256, dtype=np.uint8).tobytes(),
91
+ np.matrix([0, 1, 2], dtype=np.dtype('<i8')),
92
+ # unicode string with non-ascii chars
93
+ u"C'est l'\xe9t\xe9 !"]
94
+
95
+ write_test_pickle(to_pickle, parser.parse_args())
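Once this script has generated a fixture, the pickle can be loaded back with joblib.load. The file name below is only an example (not part of the uploaded diff), since the real name depends on the joblib, Python and numpy versions used.

import joblib

# Hypothetical fixture name; substitute the file actually produced by the script.
for item in joblib.load('joblib_1.2_pickle_py310_np123.pkl'):
    print(type(item))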
llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py27_np17.pkl.gzip ADDED
Binary file (798 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py27_np17.pkl.lzma ADDED
Binary file (660 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py33_np18.pkl.gzip ADDED
Binary file (831 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py33_np18.pkl.lzma ADDED
Binary file (694 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py34_np19.pkl.gzip ADDED
Binary file (831 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py34_np19.pkl.lzma ADDED
Binary file (697 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.11.0_pickle_py36_np111.pkl.lzma ADDED
Binary file (715 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.4.dev0_compressed_cache_size_pickle_py35_np19.gz_01.npy.z ADDED
Binary file (43 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/joblib/test/test_backports.py ADDED
@@ -0,0 +1,35 @@
1
+ import mmap
2
+
3
+ from joblib.backports import make_memmap, concurrency_safe_rename
4
+ from joblib.test.common import with_numpy
5
+ from joblib.testing import parametrize
6
+ from joblib import Parallel, delayed
7
+
8
+
9
+ @with_numpy
10
+ def test_memmap(tmpdir):
11
+ fname = tmpdir.join('test.mmap').strpath
12
+ size = 5 * mmap.ALLOCATIONGRANULARITY
13
+ offset = mmap.ALLOCATIONGRANULARITY + 1
14
+ memmap_obj = make_memmap(fname, shape=size, mode='w+', offset=offset)
15
+ assert memmap_obj.offset == offset
16
+
17
+
18
+ @parametrize('dst_content', [None, 'dst content'])
19
+ @parametrize('backend', [None, 'threading'])
20
+ def test_concurrency_safe_rename(tmpdir, dst_content, backend):
21
+ src_paths = [tmpdir.join('src_%d' % i) for i in range(4)]
22
+ for src_path in src_paths:
23
+ src_path.write('src content')
24
+ dst_path = tmpdir.join('dst')
25
+ if dst_content is not None:
26
+ dst_path.write(dst_content)
27
+
28
+ Parallel(n_jobs=4, backend=backend)(
29
+ delayed(concurrency_safe_rename)(src_path.strpath, dst_path.strpath)
30
+ for src_path in src_paths
31
+ )
32
+ assert dst_path.exists()
33
+ assert dst_path.read() == 'src content'
34
+ for src_path in src_paths:
35
+ assert not src_path.exists()
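What the test above exercises, as a stand-alone sketch (not part of the diff): concurrency_safe_rename overwrites an existing destination, like os.replace, and the temporary paths below are created on the fly.

import os
import tempfile
from joblib.backports import concurrency_safe_rename

tmp = tempfile.mkdtemp()
src, dst = os.path.join(tmp, 'src'), os.path.join(tmp, 'dst')
with open(src, 'w') as f:
    f.write('src content')
with open(dst, 'w') as f:
    f.write('old dst content')

concurrency_safe_rename(src, dst)
print(open(dst).read())       # 'src content'
print(os.path.exists(src))    # False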
llmeval-env/lib/python3.10/site-packages/joblib/test/test_cloudpickle_wrapper.py ADDED
@@ -0,0 +1,27 @@
1
+ """
2
+ Test that our implementation of wrap_non_picklable_objects mimics
3
+ properly the loky implementation.
4
+ """
5
+
6
+ from .._cloudpickle_wrapper import wrap_non_picklable_objects
7
+ from .._cloudpickle_wrapper import _my_wrap_non_picklable_objects
8
+
9
+
10
+ def a_function(x):
11
+ return x
12
+
13
+
14
+ class AClass(object):
15
+
16
+ def __call__(self, x):
17
+ return x
18
+
19
+
20
+ def test_wrap_non_picklable_objects():
21
+ # Mostly a smoke test: test that we can use callable in the same way
22
+ # with both our implementation of wrap_non_picklable_objects and the
23
+ # upstream one
24
+ for obj in (a_function, AClass()):
25
+ wrapped_obj = wrap_non_picklable_objects(obj)
26
+ my_wrapped_obj = _my_wrap_non_picklable_objects(obj)
27
+ assert wrapped_obj(1) == my_wrapped_obj(1)
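A minimal sketch (not part of the diff) of the wrapper the test above compares against the loky implementation: it makes non-picklable callables, such as lambdas, usable with process-based Parallel calls.

from joblib import Parallel, delayed, wrap_non_picklable_objects

double = wrap_non_picklable_objects(lambda x: 2 * x)

if __name__ == '__main__':
    print(Parallel(n_jobs=2)(delayed(double)(i) for i in range(5)))
    # [0, 2, 4, 6, 8]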
llmeval-env/lib/python3.10/site-packages/joblib/test/test_config.py ADDED
@@ -0,0 +1,151 @@
1
+ import os
2
+
3
+ from joblib.parallel import parallel_config
4
+ from joblib.parallel import parallel_backend
5
+ from joblib.parallel import Parallel, delayed
6
+
7
+ from joblib.parallel import BACKENDS
8
+ from joblib.parallel import DEFAULT_BACKEND
9
+ from joblib.parallel import EXTERNAL_BACKENDS
10
+
11
+ from joblib._parallel_backends import LokyBackend
12
+ from joblib._parallel_backends import ThreadingBackend
13
+ from joblib._parallel_backends import MultiprocessingBackend
14
+
15
+ from joblib.testing import parametrize, raises
16
+ from joblib.test.common import np, with_numpy
17
+ from joblib.test.common import with_multiprocessing
18
+ from joblib.test.test_parallel import check_memmap
19
+
20
+
21
+ @parametrize("context", [parallel_config, parallel_backend])
22
+ def test_global_parallel_backend(context):
23
+ default = Parallel()._backend
24
+
25
+ pb = context('threading')
26
+ try:
27
+ assert isinstance(Parallel()._backend, ThreadingBackend)
28
+ finally:
29
+ pb.unregister()
30
+ assert type(Parallel()._backend) is type(default)
31
+
32
+
33
+ @parametrize("context", [parallel_config, parallel_backend])
34
+ def test_external_backends(context):
35
+ def register_foo():
36
+ BACKENDS['foo'] = ThreadingBackend
37
+
38
+ EXTERNAL_BACKENDS['foo'] = register_foo
39
+ try:
40
+ with context('foo'):
41
+ assert isinstance(Parallel()._backend, ThreadingBackend)
42
+ finally:
43
+ del EXTERNAL_BACKENDS['foo']
44
+
45
+
46
+ @with_numpy
47
+ @with_multiprocessing
48
+ def test_parallel_config_no_backend(tmpdir):
49
+ # Check that parallel_config allows to change the config
50
+ # even if no backend is set.
51
+ with parallel_config(n_jobs=2, max_nbytes=1, temp_folder=tmpdir):
52
+ with Parallel(prefer="processes") as p:
53
+ assert isinstance(p._backend, LokyBackend)
54
+ assert p.n_jobs == 2
55
+
56
+ # Checks that memmapping is enabled
57
+ p(delayed(check_memmap)(a) for a in [np.random.random(10)] * 2)
58
+ assert len(os.listdir(tmpdir)) > 0
59
+
60
+
61
+ @with_numpy
62
+ @with_multiprocessing
63
+ def test_parallel_config_params_explicit_set(tmpdir):
64
+ with parallel_config(n_jobs=3, max_nbytes=1, temp_folder=tmpdir):
65
+ with Parallel(n_jobs=2, prefer="processes", max_nbytes='1M') as p:
66
+ assert isinstance(p._backend, LokyBackend)
67
+ assert p.n_jobs == 2
68
+
69
+ # Checks that memmapping is disabled
70
+ with raises(TypeError, match="Expected np.memmap instance"):
71
+ p(delayed(check_memmap)(a) for a in [np.random.random(10)] * 2)
72
+
73
+
74
+ @parametrize("param", ["prefer", "require"])
75
+ def test_parallel_config_bad_params(param):
76
+ # Check that an error is raised when setting a wrong backend
77
+ # hint or constraint
78
+ with raises(ValueError, match=f"{param}=wrong is not a valid"):
79
+ with parallel_config(**{param: "wrong"}):
80
+ Parallel()
81
+
82
+
83
+ def test_parallel_config_constructor_params():
84
+ # Check that an error is raised when backend is None
85
+ # but backend constructor params are given
86
+ with raises(ValueError, match="only supported when backend is not None"):
87
+ with parallel_config(inner_max_num_threads=1):
88
+ pass
89
+
90
+ with raises(ValueError, match="only supported when backend is not None"):
91
+ with parallel_config(backend_param=1):
92
+ pass
93
+
94
+
95
+ def test_parallel_config_nested():
96
+ # Check that nested configuration retrieves the info from the
97
+ # parent config and do not reset them.
98
+
99
+ with parallel_config(n_jobs=2):
100
+ p = Parallel()
101
+ assert isinstance(p._backend, BACKENDS[DEFAULT_BACKEND])
102
+ assert p.n_jobs == 2
103
+
104
+ with parallel_config(backend='threading'):
105
+ with parallel_config(n_jobs=2):
106
+ p = Parallel()
107
+ assert isinstance(p._backend, ThreadingBackend)
108
+ assert p.n_jobs == 2
109
+
110
+ with parallel_config(verbose=100):
111
+ with parallel_config(n_jobs=2):
112
+ p = Parallel()
113
+ assert p.verbose == 100
114
+ assert p.n_jobs == 2
115
+
116
+
117
+ @with_numpy
118
+ @with_multiprocessing
119
+ @parametrize('backend', ['multiprocessing', 'threading',
120
+ MultiprocessingBackend(), ThreadingBackend()])
121
+ @parametrize("context", [parallel_config, parallel_backend])
122
+ def test_threadpool_limitation_in_child_context_error(context, backend):
123
+
124
+ with raises(AssertionError, match=r"does not acc.*inner_max_num_threads"):
125
+ context(backend, inner_max_num_threads=1)
126
+
127
+
128
+ @parametrize("context", [parallel_config, parallel_backend])
129
+ def test_parallel_n_jobs_none(context):
130
+ # Check that n_jobs=None is interpreted as "unset" in Parallel
131
+ # non regression test for #1473
132
+ with context(backend="threading", n_jobs=2):
133
+ with Parallel(n_jobs=None) as p:
134
+ assert p.n_jobs == 2
135
+
136
+ with context(backend="threading"):
137
+ default_n_jobs = Parallel().n_jobs
138
+ with Parallel(n_jobs=None) as p:
139
+ assert p.n_jobs == default_n_jobs
140
+
141
+
142
+ @parametrize("context", [parallel_config, parallel_backend])
143
+ def test_parallel_config_n_jobs_none(context):
144
+ # Check that n_jobs=None is interpreted as "explicitly set" in
145
+ # parallel_(config/backend)
146
+ # non regression test for #1473
147
+ with context(backend="threading", n_jobs=2):
148
+ with context(backend="threading", n_jobs=None):
149
+ # n_jobs=None resets n_jobs to backend's default
150
+ with Parallel() as p:
151
+ assert p.n_jobs == 1
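An illustrative sketch (not part of the diff) of the parallel_config behaviour covered by these tests: the context manager provides defaults that Parallel picks up when its own arguments are left unset.

from joblib import Parallel, delayed, parallel_config

def square(x):
    return x * x

if __name__ == '__main__':
    with parallel_config(backend='threading', n_jobs=2):
        results = Parallel()(delayed(square)(i) for i in range(8))
    print(results)   # [0, 1, 4, 9, 16, 25, 36, 49]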
llmeval-env/lib/python3.10/site-packages/joblib/test/test_dask.py ADDED
@@ -0,0 +1,499 @@
1
+ from __future__ import print_function, division, absolute_import
2
+ import os
3
+ import warnings
4
+
5
+ import pytest
6
+ from random import random
7
+ from uuid import uuid4
8
+ from time import sleep
9
+
10
+ from .. import Parallel, delayed, parallel_config
11
+ from ..parallel import ThreadingBackend, AutoBatchingMixin
12
+ from .._dask import DaskDistributedBackend
13
+
14
+ distributed = pytest.importorskip('distributed')
15
+ dask = pytest.importorskip('dask')
16
+
17
+ # These imports need to be after the pytest.importorskip hence the noqa: E402
18
+ from distributed import Client, LocalCluster, get_client # noqa: E402
19
+ from distributed.metrics import time # noqa: E402
20
+ # Note: pytest requires to manually import all fixtures used in the test
21
+ # and their dependencies.
22
+ from distributed.utils_test import cluster, inc, cleanup # noqa: E402, F401
23
+
24
+
25
+ def noop(*args, **kwargs):
26
+ pass
27
+
28
+
29
+ def slow_raise_value_error(condition, duration=0.05):
30
+ sleep(duration)
31
+ if condition:
32
+ raise ValueError("condition evaluated to True")
33
+
34
+
35
+ def count_events(event_name, client):
36
+ worker_events = client.run(lambda dask_worker: dask_worker.log)
37
+ event_counts = {}
38
+ for w, events in worker_events.items():
39
+ event_counts[w] = len([event for event in list(events)
40
+ if event[1] == event_name])
41
+ return event_counts
42
+
43
+
44
+ def test_simple(loop):
45
+ with cluster() as (s, [a, b]):
46
+ with Client(s['address'], loop=loop) as client: # noqa: F841
47
+ with parallel_config(backend='dask'):
48
+ seq = Parallel()(delayed(inc)(i) for i in range(10))
49
+ assert seq == [inc(i) for i in range(10)]
50
+
51
+ with pytest.raises(ValueError):
52
+ Parallel()(delayed(slow_raise_value_error)(i == 3)
53
+ for i in range(10))
54
+
55
+ seq = Parallel()(delayed(inc)(i) for i in range(10))
56
+ assert seq == [inc(i) for i in range(10)]
57
+
58
+
59
+ def test_dask_backend_uses_autobatching(loop):
60
+ assert (DaskDistributedBackend.compute_batch_size
61
+ is AutoBatchingMixin.compute_batch_size)
62
+
63
+ with cluster() as (s, [a, b]):
64
+ with Client(s['address'], loop=loop) as client: # noqa: F841
65
+ with parallel_config(backend='dask'):
66
+ with Parallel() as parallel:
67
+ # The backend should be initialized with a default
68
+ # batch size of 1:
69
+ backend = parallel._backend
70
+ assert isinstance(backend, DaskDistributedBackend)
71
+ assert backend.parallel is parallel
72
+ assert backend._effective_batch_size == 1
73
+
74
+ # Launch many short tasks that should trigger
75
+ # auto-batching:
76
+ parallel(
77
+ delayed(lambda: None)()
78
+ for _ in range(int(1e4))
79
+ )
80
+ assert backend._effective_batch_size > 10
81
+
82
+
83
+ def random2():
84
+ return random()
85
+
86
+
87
+ def test_dont_assume_function_purity(loop):
88
+ with cluster() as (s, [a, b]):
89
+ with Client(s['address'], loop=loop) as client: # noqa: F841
90
+ with parallel_config(backend='dask'):
91
+ x, y = Parallel()(delayed(random2)() for i in range(2))
92
+ assert x != y
93
+
94
+
95
+ @pytest.mark.parametrize("mixed", [True, False])
96
+ def test_dask_funcname(loop, mixed):
97
+ from joblib._dask import Batch
98
+ if not mixed:
99
+ tasks = [delayed(inc)(i) for i in range(4)]
100
+ batch_repr = 'batch_of_inc_4_calls'
101
+ else:
102
+ tasks = [
103
+ delayed(abs)(i) if i % 2 else delayed(inc)(i) for i in range(4)
104
+ ]
105
+ batch_repr = 'mixed_batch_of_inc_4_calls'
106
+
107
+ assert repr(Batch(tasks)) == batch_repr
108
+
109
+ with cluster() as (s, [a, b]):
110
+ with Client(s['address'], loop=loop) as client:
111
+ with parallel_config(backend='dask'):
112
+ _ = Parallel(batch_size=2, pre_dispatch='all')(tasks)
113
+
114
+ def f(dask_scheduler):
115
+ return list(dask_scheduler.transition_log)
116
+ batch_repr = batch_repr.replace('4', '2')
117
+ log = client.run_on_scheduler(f)
118
+ assert all('batch_of_inc' in tup[0] for tup in log)
119
+
120
+
121
+ def test_no_undesired_distributed_cache_hit():
122
+ # Dask has a pickle cache for callables that are called many times. Because
123
+ # the dask backends used to wrap both the functions and the arguments
124
+ # under instances of the Batch callable class this caching mechanism could
125
+ # lead to bugs as described in: https://github.com/joblib/joblib/pull/1055
126
+ # The joblib-dask backend has been refactored to avoid bundling the
127
+ # arguments as an attribute of the Batch instance to avoid this problem.
128
+ # This test serves as a non-regression test.
129
+
130
+ # Use a large number of input arguments to give the AutoBatchingMixin
131
+ # enough tasks to kick in.
132
+ lists = [[] for _ in range(100)]
133
+ np = pytest.importorskip('numpy')
134
+ X = np.arange(int(1e6))
135
+
136
+ def isolated_operation(list_, data=None):
137
+ if data is not None:
138
+ np.testing.assert_array_equal(data, X)
139
+ list_.append(uuid4().hex)
140
+ return list_
141
+
142
+ cluster = LocalCluster(n_workers=1, threads_per_worker=2)
143
+ client = Client(cluster)
144
+ try:
145
+ with parallel_config(backend='dask'):
146
+ # dispatches joblib.parallel.BatchedCalls
147
+ res = Parallel()(
148
+ delayed(isolated_operation)(list_) for list_ in lists
149
+ )
150
+
151
+ # The original arguments should not have been mutated as the mutation
152
+ # happens in the dask worker process.
153
+ assert lists == [[] for _ in range(100)]
154
+
155
+ # Here we did not pass any large numpy array as argument to
156
+ # isolated_operation so no scattering event should happen under the
157
+ # hood.
158
+ counts = count_events('receive-from-scatter', client)
159
+ assert sum(counts.values()) == 0
160
+ assert all([len(r) == 1 for r in res])
161
+
162
+ with parallel_config(backend='dask'):
163
+ # Append a large array which will be scattered by dask, and
164
+ # dispatch joblib._dask.Batch
165
+ res = Parallel()(
166
+ delayed(isolated_operation)(list_, data=X) for list_ in lists
167
+ )
168
+
169
+ # This time, auto-scattering should have kicked in.
170
+ counts = count_events('receive-from-scatter', client)
171
+ assert sum(counts.values()) > 0
172
+ assert all([len(r) == 1 for r in res])
173
+ finally:
174
+ client.close(timeout=30)
175
+ cluster.close(timeout=30)
176
+
177
+
178
+ class CountSerialized(object):
179
+ def __init__(self, x):
180
+ self.x = x
181
+ self.count = 0
182
+
183
+ def __add__(self, other):
184
+ return self.x + getattr(other, 'x', other)
185
+
186
+ __radd__ = __add__
187
+
188
+ def __reduce__(self):
189
+ self.count += 1
190
+ return (CountSerialized, (self.x,))
191
+
192
+
193
+ def add5(a, b, c, d=0, e=0):
194
+ return a + b + c + d + e
195
+
196
+
197
+ def test_manual_scatter(loop):
198
+ x = CountSerialized(1)
199
+ y = CountSerialized(2)
200
+ z = CountSerialized(3)
201
+
202
+ with cluster() as (s, [a, b]):
203
+ with Client(s['address'], loop=loop) as client: # noqa: F841
204
+ with parallel_config(backend='dask', scatter=[x, y]):
205
+ f = delayed(add5)
206
+ tasks = [f(x, y, z, d=4, e=5),
207
+ f(x, z, y, d=5, e=4),
208
+ f(y, x, z, d=x, e=5),
209
+ f(z, z, x, d=z, e=y)]
210
+ expected = [func(*args, **kwargs)
211
+ for func, args, kwargs in tasks]
212
+ results = Parallel()(tasks)
213
+
214
+ # Scatter must take a list/tuple
215
+ with pytest.raises(TypeError):
216
+ with parallel_config(backend='dask', loop=loop, scatter=1):
217
+ pass
218
+
219
+ assert results == expected
220
+
221
+ # Scattered variables only serialized once
222
+ assert x.count == 1
223
+ assert y.count == 1
224
+ # Depending on the version of distributed, the unscattered z variable
225
+ # is either pickled 4 or 6 times, possibly because of the memoization
226
+ # of objects that appear several times in the arguments of a delayed
227
+ # task.
228
+ assert z.count in (4, 6)
229
+
230
+
231
+ # When the same IOLoop is used for multiple clients in a row, use
232
+ # loop_in_thread instead of loop to prevent the Client from closing it. See
233
+ # dask/distributed #4112
234
+ def test_auto_scatter(loop_in_thread):
235
+ np = pytest.importorskip('numpy')
236
+ data1 = np.ones(int(1e4), dtype=np.uint8)
237
+ data2 = np.ones(int(1e4), dtype=np.uint8)
238
+ data_to_process = ([data1] * 3) + ([data2] * 3)
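+ # Six tasks but only two distinct arrays: each distinct array should be
+ # scattered exactly once and then reused by the other tasks.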
239
+
240
+ with cluster() as (s, [a, b]):
241
+ with Client(s['address'], loop=loop_in_thread) as client:
242
+ with parallel_config(backend='dask'):
243
+ # Passing the same data as arg and kwarg triggers a single
244
+ # scatter operation whose result is reused.
245
+ Parallel()(delayed(noop)(data, data, i, opt=data)
246
+ for i, data in enumerate(data_to_process))
247
+ # By default, large arrays are automatically scattered with
248
+ # broadcast=1 which means that one worker must directly receive
249
+ # the data from the scatter operation once.
250
+ counts = count_events('receive-from-scatter', client)
251
+ assert counts[a['address']] + counts[b['address']] == 2
252
+
253
+ with cluster() as (s, [a, b]):
254
+ with Client(s['address'], loop=loop_in_thread) as client:
255
+ with parallel_config(backend='dask'):
256
+ Parallel()(delayed(noop)(data1[:3], i) for i in range(5))
257
+ # Small arrays are passed within the task definition without going
258
+ # through a scatter operation.
259
+ counts = count_events('receive-from-scatter', client)
260
+ assert counts[a['address']] == 0
261
+ assert counts[b['address']] == 0
262
+
263
+
264
+ @pytest.mark.parametrize("retry_no", list(range(2)))
265
+ def test_nested_scatter(loop, retry_no):
266
+
267
+ np = pytest.importorskip('numpy')
268
+
269
+ NUM_INNER_TASKS = 10
270
+ NUM_OUTER_TASKS = 10
271
+
272
+ def my_sum(x, i, j):
273
+ return np.sum(x)
274
+
275
+ def outer_function_joblib(array, i):
276
+ client = get_client() # noqa
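+ # get_client() retrieves the dask client from inside the worker, so the
+ # task can itself run a nested joblib Parallel call on the same cluster.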
277
+ with parallel_config(backend="dask"):
278
+ results = Parallel()(
279
+ delayed(my_sum)(array[j:], i, j) for j in range(
280
+ NUM_INNER_TASKS)
281
+ )
282
+ return sum(results)
283
+
284
+ with cluster() as (s, [a, b]):
285
+ with Client(s['address'], loop=loop) as _:
286
+ with parallel_config(backend="dask"):
287
+ my_array = np.ones(10000)
288
+ _ = Parallel()(
289
+ delayed(outer_function_joblib)(
290
+ my_array[i:], i) for i in range(NUM_OUTER_TASKS)
291
+ )
292
+
293
+
294
+ def test_nested_backend_context_manager(loop_in_thread):
295
+ def get_nested_pids():
296
+ pids = set(Parallel(n_jobs=2)(delayed(os.getpid)() for _ in range(2)))
297
+ pids |= set(Parallel(n_jobs=2)(delayed(os.getpid)() for _ in range(2)))
298
+ return pids
299
+
300
+ with cluster() as (s, [a, b]):
301
+ with Client(s['address'], loop=loop_in_thread) as client:
302
+ with parallel_config(backend='dask'):
303
+ pid_groups = Parallel(n_jobs=2)(
304
+ delayed(get_nested_pids)()
305
+ for _ in range(10)
306
+ )
307
+ for pid_group in pid_groups:
308
+ assert len(set(pid_group)) <= 2
309
+
310
+ # No deadlocks
311
+ with Client(s['address'], loop=loop_in_thread) as client: # noqa: F841
312
+ with parallel_config(backend='dask'):
313
+ pid_groups = Parallel(n_jobs=2)(
314
+ delayed(get_nested_pids)()
315
+ for _ in range(10)
316
+ )
317
+ for pid_group in pid_groups:
318
+ assert len(set(pid_group)) <= 2
319
+
320
+
321
+ def test_nested_backend_context_manager_implicit_n_jobs(loop):
322
+ # Check that Parallel with no explicit n_jobs value automatically selects
323
+ # all the dask workers, including in nested calls.
324
+
325
+ def _backend_type(p):
326
+ return p._backend.__class__.__name__
327
+
328
+ def get_nested_implicit_n_jobs():
329
+ with Parallel() as p:
330
+ return _backend_type(p), p.n_jobs
331
+
332
+ with cluster() as (s, [a, b]):
333
+ with Client(s['address'], loop=loop) as client: # noqa: F841
334
+ with parallel_config(backend='dask'):
335
+ with Parallel() as p:
336
+ assert _backend_type(p) == "DaskDistributedBackend"
337
+ assert p.n_jobs == -1
338
+ all_nested_n_jobs = p(
339
+ delayed(get_nested_implicit_n_jobs)()
340
+ for _ in range(2)
341
+ )
342
+ for backend_type, nested_n_jobs in all_nested_n_jobs:
343
+ assert backend_type == "DaskDistributedBackend"
344
+ assert nested_n_jobs == -1
345
+
346
+
347
+ def test_errors(loop):
348
+ with pytest.raises(ValueError) as info:
349
+ with parallel_config(backend='dask'):
350
+ pass
351
+
352
+ assert "create a dask client" in str(info.value).lower()
353
+
354
+
355
+ def test_correct_nested_backend(loop):
356
+ with cluster() as (s, [a, b]):
357
+ with Client(s['address'], loop=loop) as client: # noqa: F841
358
+ # No nested requirement: the nested backend should be the dask backend
359
+ with parallel_config(backend='dask'):
360
+ result = Parallel(n_jobs=2)(
361
+ delayed(outer)(nested_require=None) for _ in range(1))
362
+ assert isinstance(result[0][0][0], DaskDistributedBackend)
363
+
364
+ # Require threads, should be threading
365
+ with parallel_config(backend='dask'):
366
+ result = Parallel(n_jobs=2)(
367
+ delayed(outer)(nested_require='sharedmem')
368
+ for _ in range(1))
369
+ assert isinstance(result[0][0][0], ThreadingBackend)
370
+
371
+
372
+ def outer(nested_require):
373
+ return Parallel(n_jobs=2, prefer='threads')(
374
+ delayed(middle)(nested_require) for _ in range(1)
375
+ )
376
+
377
+
378
+ def middle(require):
379
+ return Parallel(n_jobs=2, require=require)(
380
+ delayed(inner)() for _ in range(1)
381
+ )
382
+
383
+
384
+ def inner():
385
+ return Parallel()._backend
386
+
387
+
388
+ def test_secede_with_no_processes(loop):
389
+ # https://github.com/dask/distributed/issues/1775
390
+ with Client(loop=loop, processes=False, set_as_default=True):
391
+ with parallel_config(backend='dask'):
392
+ Parallel(n_jobs=4)(delayed(id)(i) for i in range(2))
393
+
394
+
395
+ def _worker_address(_):
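+ # Report which dask worker executed the task; used below to check that
+ # the 'workers' keyword pins all tasks to the requested worker.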
396
+ from distributed import get_worker
397
+ return get_worker().address
398
+
399
+
400
+ def test_dask_backend_keywords(loop):
401
+ with cluster() as (s, [a, b]):
402
+ with Client(s['address'], loop=loop) as client: # noqa: F841
403
+ with parallel_config(backend='dask', workers=a['address']):
404
+ seq = Parallel()(
405
+ delayed(_worker_address)(i) for i in range(10))
406
+ assert seq == [a['address']] * 10
407
+
408
+ with parallel_config(backend='dask', workers=b['address']):
409
+ seq = Parallel()(
410
+ delayed(_worker_address)(i) for i in range(10))
411
+ assert seq == [b['address']] * 10
412
+
413
+
414
+ def test_scheduler_tasks_cleanup(loop):
415
+ with Client(processes=False, loop=loop) as client:
416
+ with parallel_config(backend='dask'):
417
+ Parallel()(delayed(inc)(i) for i in range(10))
418
+
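+ # Poll for up to 5 seconds: once the Parallel call has returned, the
+ # scheduler should drop all task state and the client should hold no
+ # futures.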
419
+ start = time()
420
+ while client.cluster.scheduler.tasks:
421
+ sleep(0.01)
422
+ assert time() < start + 5
423
+
424
+ assert not client.futures
425
+
426
+
427
+ @pytest.mark.parametrize("cluster_strategy", ["adaptive", "late_scaling"])
428
+ @pytest.mark.skipif(
429
+ distributed.__version__ <= '2.1.1' and distributed.__version__ >= '1.28.0',
430
+ reason="distributed bug - https://github.com/dask/distributed/pull/2841")
431
+ def test_wait_for_workers(cluster_strategy):
432
+ cluster = LocalCluster(n_workers=0, processes=False, threads_per_worker=2)
433
+ client = Client(cluster)
434
+ if cluster_strategy == "adaptive":
435
+ cluster.adapt(minimum=0, maximum=2)
436
+ elif cluster_strategy == "late_scaling":
437
+ # Tell the cluster to start workers but this is a non-blocking call
438
+ # and new workers might take time to connect. In this case the Parallel
439
+ # call should wait for at least one worker to come up before starting
440
+ # to schedule work.
441
+ cluster.scale(2)
442
+ try:
443
+ with parallel_config(backend='dask'):
444
+ # The following should wait a bit for at least one worker to
445
+ # become available.
446
+ Parallel()(delayed(inc)(i) for i in range(10))
447
+ finally:
448
+ client.close()
449
+ cluster.close()
450
+
451
+
452
+ def test_wait_for_workers_timeout():
453
+ # Start a cluster with 0 workers:
454
+ cluster = LocalCluster(n_workers=0, processes=False, threads_per_worker=2)
455
+ client = Client(cluster)
456
+ try:
457
+ with parallel_config(backend='dask', wait_for_workers_timeout=0.1):
458
+ # Short timeout: DaskDistributedBackend raises an explicit TimeoutError
459
+ msg = "DaskDistributedBackend has no worker after 0.1 seconds."
460
+ with pytest.raises(TimeoutError, match=msg):
461
+ Parallel()(delayed(inc)(i) for i in range(10))
462
+
463
+ with parallel_config(backend='dask', wait_for_workers_timeout=0):
464
+ # No timeout: fallback to generic joblib failure:
465
+ msg = "DaskDistributedBackend has no active worker"
466
+ with pytest.raises(RuntimeError, match=msg):
467
+ Parallel()(delayed(inc)(i) for i in range(10))
468
+ finally:
469
+ client.close()
470
+ cluster.close()
471
+
472
+
473
+ @pytest.mark.parametrize("backend", ["loky", "multiprocessing"])
474
+ def test_joblib_warning_inside_dask_daemonic_worker(backend):
475
+ cluster = LocalCluster(n_workers=2)
476
+ client = Client(cluster)
477
+ try:
478
+
479
+ def func_using_joblib_parallel():
480
+ # Somehow trying to check the warning type here (e.g. with
481
+ # pytest.warns(UserWarning)) makes the test hang. Work-around:
482
+ # return the warning record to the client and the warning check is
483
+ # done client-side.
484
+ with warnings.catch_warnings(record=True) as record:
485
+ Parallel(n_jobs=2, backend=backend)(
486
+ delayed(inc)(i) for i in range(10))
487
+
488
+ return record
489
+
490
+ fut = client.submit(func_using_joblib_parallel)
491
+ record = fut.result()
492
+
493
+ assert len(record) == 1
494
+ warning = record[0].message
495
+ assert isinstance(warning, UserWarning)
496
+ assert "distributed.worker.daemon" in str(warning)
497
+ finally:
498
+ client.close(timeout=30)
499
+ cluster.close(timeout=30)
llmeval-env/lib/python3.10/site-packages/joblib/test/test_disk.py ADDED
@@ -0,0 +1,71 @@
1
+ """
2
+ Unit tests for the disk utilities.
3
+ """
4
+
5
+ # Authors: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
6
+ # Lars Buitinck
7
+ # Copyright (c) 2010 Gael Varoquaux
8
+ # License: BSD Style, 3 clauses.
9
+
10
+ from __future__ import with_statement
11
+ import array
12
+ import os
13
+
14
+ from joblib.disk import disk_used, memstr_to_bytes, mkdirp, rm_subdirs
15
+ from joblib.testing import parametrize, raises
16
+
17
+ ###############################################################################
18
+
19
+
20
+ def test_disk_used(tmpdir):
21
+ cachedir = tmpdir.strpath
22
+ # Now write a file that is 1M big in this directory, and check the
23
+ # size. The reason we use such a big file is that it makes us robust
24
+ # to errors due to block allocation.
25
+ a = array.array('i')
26
+ sizeof_i = a.itemsize
27
+ target_size = 1024
28
+ n = int(target_size * 1024 / sizeof_i)
29
+ a = array.array('i', n * (1,))
30
+ with open(os.path.join(cachedir, 'test'), 'wb') as output:
31
+ a.tofile(output)
32
+ assert disk_used(cachedir) >= target_size
33
+ assert disk_used(cachedir) < target_size + 12
34
+
35
+
36
+ @parametrize('text,value',
37
+ [('80G', 80 * 1024 ** 3),
38
+ ('1.4M', int(1.4 * 1024 ** 2)),
39
+ ('120M', 120 * 1024 ** 2),
40
+ ('53K', 53 * 1024)])
41
+ def test_memstr_to_bytes(text, value):
42
+ assert memstr_to_bytes(text) == value
43
+
44
+
45
+ @parametrize('text,exception,regex',
46
+ [('fooG', ValueError, r'Invalid literal for size.*fooG.*'),
47
+ ('1.4N', ValueError, r'Invalid literal for size.*1.4N.*')])
48
+ def test_memstr_to_bytes_exception(text, exception, regex):
49
+ with raises(exception) as excinfo:
50
+ memstr_to_bytes(text)
51
+ assert excinfo.match(regex)
52
+
53
+
54
+ def test_mkdirp(tmpdir):
55
+ mkdirp(os.path.join(tmpdir.strpath, 'ham'))
56
+ mkdirp(os.path.join(tmpdir.strpath, 'ham'))
57
+ mkdirp(os.path.join(tmpdir.strpath, 'spam', 'spam'))
58
+
59
+ # Not all OSErrors are ignored
60
+ with raises(OSError):
61
+ mkdirp('')
62
+
63
+
64
+ def test_rm_subdirs(tmpdir):
65
+ sub_path = os.path.join(tmpdir.strpath, "am", "stram")
66
+ full_path = os.path.join(sub_path, "gram")
67
+ mkdirp(os.path.join(full_path))
68
+
69
+ rm_subdirs(sub_path)
70
+ assert os.path.exists(sub_path)
71
+ assert not os.path.exists(full_path)
llmeval-env/lib/python3.10/site-packages/joblib/test/test_func_inspect.py ADDED
@@ -0,0 +1,310 @@
1
+ """
2
+ Test the func_inspect module.
3
+ """
4
+
5
+ # Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
6
+ # Copyright (c) 2009 Gael Varoquaux
7
+ # License: BSD Style, 3 clauses.
8
+
9
+ import functools
10
+
11
+ from joblib.func_inspect import filter_args, get_func_name, get_func_code
12
+ from joblib.func_inspect import _clean_win_chars, format_signature
13
+ from joblib.memory import Memory
14
+ from joblib.test.common import with_numpy
15
+ from joblib.testing import fixture, parametrize, raises
16
+
17
+
18
+ ###############################################################################
19
+ # Module-level functions and fixture, for tests
20
+ def f(x, y=0):
21
+ pass
22
+
23
+
24
+ def g(x):
25
+ pass
26
+
27
+
28
+ def h(x, y=0, *args, **kwargs):
29
+ pass
30
+
31
+
32
+ def i(x=1):
33
+ pass
34
+
35
+
36
+ def j(x, y, **kwargs):
37
+ pass
38
+
39
+
40
+ def k(*args, **kwargs):
41
+ pass
42
+
43
+
44
+ def m1(x, *, y):
45
+ pass
46
+
47
+
48
+ def m2(x, *, y, z=3):
49
+ pass
50
+
51
+
52
+ @fixture(scope='module')
53
+ def cached_func(tmpdir_factory):
54
+ # Create a Memory object to test decorated functions.
55
+ # We should be careful not to call the decorated functions, so that
56
+ # cache directories are not created in the temp dir.
57
+ cachedir = tmpdir_factory.mktemp("joblib_test_func_inspect")
58
+ mem = Memory(cachedir.strpath)
59
+
60
+ @mem.cache
61
+ def cached_func_inner(x):
62
+ return x
63
+
64
+ return cached_func_inner
65
+
66
+
67
+ class Klass(object):
68
+
69
+ def f(self, x):
70
+ return x
71
+
72
+
73
+ ###############################################################################
74
+ # Tests
75
+
76
+ @parametrize('func,args,filtered_args',
77
+ [(f, [[], (1, )], {'x': 1, 'y': 0}),
78
+ (f, [['x'], (1, )], {'y': 0}),
79
+ (f, [['y'], (0, )], {'x': 0}),
80
+ (f, [['y'], (0, ), {'y': 1}], {'x': 0}),
81
+ (f, [['x', 'y'], (0, )], {}),
82
+ (f, [[], (0,), {'y': 1}], {'x': 0, 'y': 1}),
83
+ (f, [['y'], (), {'x': 2, 'y': 1}], {'x': 2}),
84
+ (g, [[], (), {'x': 1}], {'x': 1}),
85
+ (i, [[], (2, )], {'x': 2})])
86
+ def test_filter_args(func, args, filtered_args):
87
+ assert filter_args(func, *args) == filtered_args
88
+
89
+
90
+ def test_filter_args_method():
91
+ obj = Klass()
92
+ assert filter_args(obj.f, [], (1, )) == {'x': 1, 'self': obj}
93
+
94
+
95
+ @parametrize('func,args,filtered_args',
96
+ [(h, [[], (1, )],
97
+ {'x': 1, 'y': 0, '*': [], '**': {}}),
98
+ (h, [[], (1, 2, 3, 4)],
99
+ {'x': 1, 'y': 2, '*': [3, 4], '**': {}}),
100
+ (h, [[], (1, 25), {'ee': 2}],
101
+ {'x': 1, 'y': 25, '*': [], '**': {'ee': 2}}),
102
+ (h, [['*'], (1, 2, 25), {'ee': 2}],
103
+ {'x': 1, 'y': 2, '**': {'ee': 2}})])
104
+ def test_filter_varargs(func, args, filtered_args):
105
+ assert filter_args(func, *args) == filtered_args
106
+
107
+
108
+ test_filter_kwargs_extra_params = [
109
+ (m1, [[], (1,), {'y': 2}], {'x': 1, 'y': 2}),
110
+ (m2, [[], (1,), {'y': 2}], {'x': 1, 'y': 2, 'z': 3})
111
+ ]
112
+
113
+
114
+ @parametrize('func,args,filtered_args',
115
+ [(k, [[], (1, 2), {'ee': 2}],
116
+ {'*': [1, 2], '**': {'ee': 2}}),
117
+ (k, [[], (3, 4)],
118
+ {'*': [3, 4], '**': {}})] +
119
+ test_filter_kwargs_extra_params)
120
+ def test_filter_kwargs(func, args, filtered_args):
121
+ assert filter_args(func, *args) == filtered_args
122
+
123
+
124
+ def test_filter_args_2():
125
+ assert (filter_args(j, [], (1, 2), {'ee': 2}) ==
126
+ {'x': 1, 'y': 2, '**': {'ee': 2}})
127
+
128
+ ff = functools.partial(f, 1)
129
+ # filter_args has to special-case partial
130
+ assert filter_args(ff, [], (1, )) == {'*': [1], '**': {}}
131
+ assert filter_args(ff, ['y'], (1, )) == {'*': [1], '**': {}}
132
+
133
+
134
+ @parametrize('func,funcname', [(f, 'f'), (g, 'g'),
135
+ (cached_func, 'cached_func')])
136
+ def test_func_name(func, funcname):
137
+ # Check that we are not confused by decoration
138
+ # here testcase 'cached_func' is the function itself
139
+ assert get_func_name(func)[1] == funcname
140
+
141
+
142
+ def test_func_name_on_inner_func(cached_func):
143
+ # Check that we are not confused by decoration
144
+ # here testcase 'cached_func' is the 'cached_func_inner' function
145
+ # returned by 'cached_func' fixture
146
+ assert get_func_name(cached_func)[1] == 'cached_func_inner'
147
+
148
+
149
+ def test_func_name_collision_on_inner_func():
150
+ # Check that two functions defining and caching an inner function
151
+ # with the same name do not cause a (module, name) collision
152
+ def f():
153
+ def inner_func():
154
+ return # pragma: no cover
155
+ return get_func_name(inner_func)
156
+
157
+ def g():
158
+ def inner_func():
159
+ return # pragma: no cover
160
+ return get_func_name(inner_func)
161
+
162
+ module, name = f()
163
+ other_module, other_name = g()
164
+
165
+ assert name == other_name
166
+ assert module != other_module
167
+
168
+
169
+ def test_func_inspect_errors():
170
+ # Check that func_inspect is robust and will work on weird objects
171
+ assert get_func_name('a'.lower)[-1] == 'lower'
172
+ assert get_func_code('a'.lower)[1:] == (None, -1)
173
+ ff = lambda x: x # noqa: E731
174
+ assert get_func_name(ff, win_characters=False)[-1] == '<lambda>'
175
+ assert get_func_code(ff)[1] == __file__.replace('.pyc', '.py')
176
+ # Simulate a function defined in __main__
177
+ ff.__module__ = '__main__'
178
+ assert get_func_name(ff, win_characters=False)[-1] == '<lambda>'
179
+ assert get_func_code(ff)[1] == __file__.replace('.pyc', '.py')
180
+
181
+
182
+ def func_with_kwonly_args(a, b, *, kw1='kw1', kw2='kw2'):
183
+ pass
184
+
185
+
186
+ def func_with_signature(a: int, b: int) -> None:
187
+ pass
188
+
189
+
190
+ def test_filter_args_edge_cases():
191
+ assert (
192
+ filter_args(func_with_kwonly_args, [], (1, 2),
193
+ {'kw1': 3, 'kw2': 4}) ==
194
+ {'a': 1, 'b': 2, 'kw1': 3, 'kw2': 4})
195
+
196
+ # Keyword-only arguments cannot be smuggled in through *args:
197
+ # filter_args raises an explicit ValueError when 'kw1' is passed positionally
198
+ with raises(ValueError) as excinfo:
199
+ filter_args(func_with_kwonly_args, [], (1, 2, 3), {'kw2': 2})
200
+ excinfo.match("Keyword-only parameter 'kw1' was passed as positional "
201
+ "parameter")
202
+
203
+ assert (
204
+ filter_args(func_with_kwonly_args, ['b', 'kw2'], (1, 2),
205
+ {'kw1': 3, 'kw2': 4}) ==
206
+ {'a': 1, 'kw1': 3})
207
+
208
+ assert (filter_args(func_with_signature, ['b'], (1, 2)) == {'a': 1})
209
+
210
+
211
+ def test_bound_methods():
212
+ """ Make sure that calling the same method on two different instances
213
+ of the same class does resolve to different signatures.
214
+ """
215
+ a = Klass()
216
+ b = Klass()
217
+ assert filter_args(a.f, [], (1, )) != filter_args(b.f, [], (1, ))
218
+
219
+
220
+ @parametrize('exception,regex,func,args',
221
+ [(ValueError, 'ignore_lst must be a list of parameters to ignore',
222
+ f, ['bar', (None, )]),
223
+ (ValueError, r'Ignore list: argument \'(.*)\' is not defined',
224
+ g, [['bar'], (None, )]),
225
+ (ValueError, 'Wrong number of arguments',
226
+ h, [[]])])
227
+ def test_filter_args_error_msg(exception, regex, func, args):
228
+ """ Make sure that filter_args returns decent error messages, for the
229
+ sake of the user.
230
+ """
231
+ with raises(exception) as excinfo:
232
+ filter_args(func, *args)
233
+ excinfo.match(regex)
234
+
235
+
236
+ def test_filter_args_no_kwargs_mutation():
237
+ """None-regression test against 0.12.0 changes.
238
+
239
+ https://github.com/joblib/joblib/pull/75
240
+
241
+ Make sure filter args doesn't mutate the kwargs dict that gets passed in.
242
+ """
243
+ kwargs = {'x': 0}
244
+ filter_args(g, [], [], kwargs)
245
+ assert kwargs == {'x': 0}
246
+
247
+
248
+ def test_clean_win_chars():
249
+ string = r'C:\foo\bar\main.py'
250
+ mangled_string = _clean_win_chars(string)
251
+ for char in ('\\', ':', '<', '>', '!'):
252
+ assert char not in mangled_string
253
+
254
+
255
+ @parametrize('func,args,kwargs,sgn_expected',
256
+ [(g, [list(range(5))], {}, 'g([0, 1, 2, 3, 4])'),
257
+ (k, [1, 2, (3, 4)], {'y': True}, 'k(1, 2, (3, 4), y=True)')])
258
+ def test_format_signature(func, args, kwargs, sgn_expected):
259
+ # Test signature formatting.
260
+ path, sgn_result = format_signature(func, *args, **kwargs)
261
+ assert sgn_result == sgn_expected
262
+
263
+
264
+ def test_format_signature_long_arguments():
265
+ shortening_threshold = 1500
266
+ # shortening gets it down to 700 characters but there is the name
267
+ # of the function in the signature and a few additional things
268
+ # like dots for the ellipsis
269
+ shortening_target = 700 + 10
270
+
271
+ arg = 'a' * shortening_threshold
272
+ _, signature = format_signature(h, arg)
273
+ assert len(signature) < shortening_target
274
+
275
+ nb_args = 5
276
+ args = [arg for _ in range(nb_args)]
277
+ _, signature = format_signature(h, *args)
278
+ assert len(signature) < shortening_target * nb_args
279
+
280
+ kwargs = {str(i): arg for i, arg in enumerate(args)}
281
+ _, signature = format_signature(h, **kwargs)
282
+ assert len(signature) < shortening_target * nb_args
283
+
284
+ _, signature = format_signature(h, *args, **kwargs)
285
+ assert len(signature) < shortening_target * 2 * nb_args
286
+
287
+
288
+ @with_numpy
289
+ def test_format_signature_numpy():
290
+ """ Test the format signature formatting with numpy.
291
+ """
292
+
293
+
294
+ def test_special_source_encoding():
295
+ from joblib.test.test_func_inspect_special_encoding import big5_f
296
+ func_code, source_file, first_line = get_func_code(big5_f)
297
+ assert first_line == 5
298
+ assert "def big5_f():" in func_code
299
+ assert "test_func_inspect_special_encoding" in source_file
300
+
301
+
302
+ def _get_code():
303
+ from joblib.test.test_func_inspect_special_encoding import big5_f
304
+ return get_func_code(big5_f)[0]
305
+
306
+
307
+ def test_func_code_consistency():
308
+ from joblib.parallel import Parallel, delayed
309
+ codes = Parallel(n_jobs=2)(delayed(_get_code)() for _ in range(5))
310
+ assert len(set(codes)) == 1
llmeval-env/lib/python3.10/site-packages/joblib/test/test_hashing.py ADDED
@@ -0,0 +1,495 @@
1
+ """
2
+ Test the hashing module.
3
+ """
4
+
5
+ # Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
6
+ # Copyright (c) 2009 Gael Varoquaux
7
+ # License: BSD Style, 3 clauses.
8
+
9
+ import time
10
+ import hashlib
11
+ import sys
12
+ import gc
13
+ import io
14
+ import collections
15
+ import itertools
16
+ import pickle
17
+ import random
18
+ from concurrent.futures import ProcessPoolExecutor
19
+ from decimal import Decimal
20
+
21
+ from joblib.hashing import hash
22
+ from joblib.func_inspect import filter_args
23
+ from joblib.memory import Memory
24
+ from joblib.testing import raises, skipif, fixture, parametrize
25
+ from joblib.test.common import np, with_numpy
26
+
27
+
28
+ def unicode(s):
29
+ return s
30
+
31
+
32
+ ###############################################################################
33
+ # Helper functions for the tests
34
+ def time_func(func, *args):
35
+ """ Time function func on *args.
36
+ """
37
+ times = list()
38
+ for _ in range(3):
39
+ t1 = time.time()
40
+ func(*args)
41
+ times.append(time.time() - t1)
42
+ return min(times)
43
+
44
+
45
+ def relative_time(func1, func2, *args):
46
+ """ Return the relative time between func1 and func2 applied on
47
+ *args.
48
+ """
49
+ time_func1 = time_func(func1, *args)
50
+ time_func2 = time_func(func2, *args)
51
+ relative_diff = 0.5 * (abs(time_func1 - time_func2)
52
+ / (time_func1 + time_func2))
53
+ return relative_diff
54
+
55
+
56
+ class Klass(object):
57
+
58
+ def f(self, x):
59
+ return x
60
+
61
+
62
+ class KlassWithCachedMethod(object):
63
+
64
+ def __init__(self, cachedir):
65
+ mem = Memory(location=cachedir)
66
+ self.f = mem.cache(self.f)
67
+
68
+ def f(self, x):
69
+ return x
70
+
71
+
72
+ ###############################################################################
73
+ # Tests
74
+
75
+ input_list = [1, 2, 1., 2., 1 + 1j, 2. + 1j,
76
+ 'a', 'b',
77
+ (1,), (1, 1,), [1, ], [1, 1, ],
78
+ {1: 1}, {1: 2}, {2: 1},
79
+ None,
80
+ gc.collect,
81
+ [1, ].append,
82
+ # Next 2 sets have unorderable elements in python 3.
83
+ set(('a', 1)),
84
+ set(('a', 1, ('a', 1))),
85
+ # Next 2 dicts have unorderable type of keys in python 3.
86
+ {'a': 1, 1: 2},
87
+ {'a': 1, 1: 2, 'd': {'a': 1}}]
88
+
89
+
90
+ @parametrize('obj1', input_list)
91
+ @parametrize('obj2', input_list)
92
+ def test_trivial_hash(obj1, obj2):
93
+ """Smoke test hash on various types."""
94
+ # Check that 2 objects have the same hash only if they are the same.
95
+ are_hashes_equal = hash(obj1) == hash(obj2)
96
+ are_objs_identical = obj1 is obj2
97
+ assert are_hashes_equal == are_objs_identical
98
+
99
+
100
+ def test_hash_methods():
101
+ # Check that hashing instance methods works
102
+ a = io.StringIO(unicode('a'))
103
+ assert hash(a.flush) == hash(a.flush)
104
+ a1 = collections.deque(range(10))
105
+ a2 = collections.deque(range(9))
106
+ assert hash(a1.extend) != hash(a2.extend)
107
+
108
+
109
+ @fixture(scope='function')
110
+ @with_numpy
111
+ def three_np_arrays():
112
+ rnd = np.random.RandomState(0)
113
+ arr1 = rnd.random_sample((10, 10))
114
+ arr2 = arr1.copy()
115
+ arr3 = arr2.copy()
116
+ arr3[0] += 1
117
+ return arr1, arr2, arr3
118
+
119
+
120
+ def test_hash_numpy_arrays(three_np_arrays):
121
+ arr1, arr2, arr3 = three_np_arrays
122
+
123
+ for obj1, obj2 in itertools.product(three_np_arrays, repeat=2):
124
+ are_hashes_equal = hash(obj1) == hash(obj2)
125
+ are_arrays_equal = np.all(obj1 == obj2)
126
+ assert are_hashes_equal == are_arrays_equal
127
+
128
+ assert hash(arr1) != hash(arr1.T)
129
+
130
+
131
+ def test_hash_numpy_dict_of_arrays(three_np_arrays):
132
+ arr1, arr2, arr3 = three_np_arrays
133
+
134
+ d1 = {1: arr1, 2: arr2}
135
+ d2 = {1: arr2, 2: arr1}
136
+ d3 = {1: arr2, 2: arr3}
137
+
138
+ assert hash(d1) == hash(d2)
139
+ assert hash(d1) != hash(d3)
140
+
141
+
142
+ @with_numpy
143
+ @parametrize('dtype', ['datetime64[s]', 'timedelta64[D]'])
144
+ def test_numpy_datetime_array(dtype):
145
+ # memoryview is not supported for some dtypes e.g. datetime64
146
+ # see https://github.com/joblib/joblib/issues/188 for more details
147
+ a_hash = hash(np.arange(10))
148
+ array = np.arange(0, 10, dtype=dtype)
149
+ assert hash(array) != a_hash
150
+
151
+
152
+ @with_numpy
153
+ def test_hash_numpy_noncontiguous():
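+ # A non-contiguous view and its contiguous copy hold the same values but
+ # are expected to hash differently, since the memory layout contributes
+ # to the hash.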
154
+ a = np.asarray(np.arange(6000).reshape((1000, 2, 3)),
155
+ order='F')[:, :1, :]
156
+ b = np.ascontiguousarray(a)
157
+ assert hash(a) != hash(b)
158
+
159
+ c = np.asfortranarray(a)
160
+ assert hash(a) != hash(c)
161
+
162
+
163
+ @with_numpy
164
+ @parametrize('coerce_mmap', [True, False])
165
+ def test_hash_memmap(tmpdir, coerce_mmap):
166
+ """Check that memmap and arrays hash identically if coerce_mmap is True."""
167
+ filename = tmpdir.join('memmap_temp').strpath
168
+ try:
169
+ m = np.memmap(filename, shape=(10, 10), mode='w+')
170
+ a = np.asarray(m)
171
+ are_hashes_equal = (hash(a, coerce_mmap=coerce_mmap) ==
172
+ hash(m, coerce_mmap=coerce_mmap))
173
+ assert are_hashes_equal == coerce_mmap
174
+ finally:
175
+ if 'm' in locals():
176
+ del m
177
+ # Force a garbage-collection cycle, to be certain that the
178
+ # object is deleted, and we don't run into a problem under
179
+ # Windows with a file handle still open.
180
+ gc.collect()
181
+
182
+
183
+ @with_numpy
184
+ @skipif(sys.platform == 'win32', reason='This test is not stable under windows'
185
+ ' for some reason')
186
+ def test_hash_numpy_performance():
187
+ """ Check the performance of hashing numpy arrays:
188
+
189
+ In [22]: a = np.random.random(1000000)
190
+
191
+ In [23]: %timeit hashlib.md5(a).hexdigest()
192
+ 100 loops, best of 3: 20.7 ms per loop
193
+
194
+ In [24]: %timeit hashlib.md5(pickle.dumps(a, protocol=2)).hexdigest()
195
+ 1 loops, best of 3: 73.1 ms per loop
196
+
197
+ In [25]: %timeit hashlib.md5(cPickle.dumps(a, protocol=2)).hexdigest()
198
+ 10 loops, best of 3: 53.9 ms per loop
199
+
200
+ In [26]: %timeit hash(a)
201
+ 100 loops, best of 3: 20.8 ms per loop
202
+ """
203
+ rnd = np.random.RandomState(0)
204
+ a = rnd.random_sample(1000000)
205
+
206
+ def md5_hash(x):
207
+ return hashlib.md5(memoryview(x)).hexdigest()
208
+
209
+ relative_diff = relative_time(md5_hash, hash, a)
210
+ assert relative_diff < 0.3
211
+
212
+ # Check that hashing a tuple of 3 arrays takes approximately
213
+ # 3 times as much as hashing one array
214
+ time_hashlib = 3 * time_func(md5_hash, a)
215
+ time_hash = time_func(hash, (a, a, a))
216
+ relative_diff = 0.5 * (abs(time_hash - time_hashlib)
217
+ / (time_hash + time_hashlib))
218
+ assert relative_diff < 0.3
219
+
220
+
221
+ def test_bound_methods_hash():
222
+ """ Make sure that calling the same method on two different instances
223
+ of the same class does resolve to the same hashes.
224
+ """
225
+ a = Klass()
226
+ b = Klass()
227
+ assert (hash(filter_args(a.f, [], (1, ))) ==
228
+ hash(filter_args(b.f, [], (1, ))))
229
+
230
+
231
+ def test_bound_cached_methods_hash(tmpdir):
232
+ """ Make sure that calling the same _cached_ method on two different
233
+ instances of the same class does resolve to the same hashes.
234
+ """
235
+ a = KlassWithCachedMethod(tmpdir.strpath)
236
+ b = KlassWithCachedMethod(tmpdir.strpath)
237
+ assert (hash(filter_args(a.f.func, [], (1, ))) ==
238
+ hash(filter_args(b.f.func, [], (1, ))))
239
+
240
+
241
+ @with_numpy
242
+ def test_hash_object_dtype():
243
+ """ Make sure that ndarrays with dtype `object' hash correctly."""
244
+
245
+ a = np.array([np.arange(i) for i in range(6)], dtype=object)
246
+ b = np.array([np.arange(i) for i in range(6)], dtype=object)
247
+
248
+ assert hash(a) == hash(b)
249
+
250
+
251
+ @with_numpy
252
+ def test_numpy_scalar():
253
+ # Numpy scalars are built from compiled functions, and lead to
254
+ # strange pickling paths explored, that can give hash collisions
255
+ a = np.float64(2.0)
256
+ b = np.float64(3.0)
257
+ assert hash(a) != hash(b)
258
+
259
+
260
+ def test_dict_hash(tmpdir):
261
+ # Check that dictionaries hash consistently, even though the ordering
262
+ # of the keys is not guaranteed
263
+ k = KlassWithCachedMethod(tmpdir.strpath)
264
+
265
+ d = {'#s12069__c_maps.nii.gz': [33],
266
+ '#s12158__c_maps.nii.gz': [33],
267
+ '#s12258__c_maps.nii.gz': [33],
268
+ '#s12277__c_maps.nii.gz': [33],
269
+ '#s12300__c_maps.nii.gz': [33],
270
+ '#s12401__c_maps.nii.gz': [33],
271
+ '#s12430__c_maps.nii.gz': [33],
272
+ '#s13817__c_maps.nii.gz': [33],
273
+ '#s13903__c_maps.nii.gz': [33],
274
+ '#s13916__c_maps.nii.gz': [33],
275
+ '#s13981__c_maps.nii.gz': [33],
276
+ '#s13982__c_maps.nii.gz': [33],
277
+ '#s13983__c_maps.nii.gz': [33]}
278
+
279
+ a = k.f(d)
280
+ b = k.f(a)
281
+
282
+ assert hash(a) == hash(b)
283
+
284
+
285
+ def test_set_hash(tmpdir):
286
+ # Check that sets hash consistently, even though their ordering
287
+ # is not guaranteed
288
+ k = KlassWithCachedMethod(tmpdir.strpath)
289
+
290
+ s = set(['#s12069__c_maps.nii.gz',
291
+ '#s12158__c_maps.nii.gz',
292
+ '#s12258__c_maps.nii.gz',
293
+ '#s12277__c_maps.nii.gz',
294
+ '#s12300__c_maps.nii.gz',
295
+ '#s12401__c_maps.nii.gz',
296
+ '#s12430__c_maps.nii.gz',
297
+ '#s13817__c_maps.nii.gz',
298
+ '#s13903__c_maps.nii.gz',
299
+ '#s13916__c_maps.nii.gz',
300
+ '#s13981__c_maps.nii.gz',
301
+ '#s13982__c_maps.nii.gz',
302
+ '#s13983__c_maps.nii.gz'])
303
+
304
+ a = k.f(s)
305
+ b = k.f(a)
306
+
307
+ assert hash(a) == hash(b)
308
+
309
+
310
+ def test_set_decimal_hash():
311
+ # Check that sets containing decimals hash consistently, even though
312
+ # ordering is not guaranteed
313
+ assert (hash(set([Decimal(0), Decimal('NaN')])) ==
314
+ hash(set([Decimal('NaN'), Decimal(0)])))
315
+
316
+
317
+ def test_string():
318
+ # Test that we obtain the same hash for object owning several strings,
319
+ # whatever the past of these strings (which are immutable in Python)
320
+ string = 'foo'
321
+ a = {string: 'bar'}
322
+ b = {string: 'bar'}
323
+ c = pickle.loads(pickle.dumps(b))
324
+ assert hash([a, b]) == hash([a, c])
325
+
326
+
327
+ @with_numpy
328
+ def test_numpy_dtype_pickling():
329
+ # numpy dtype hashing is tricky to get right: see #231, #239, #251 #1080,
330
+ # #1082, and explanatory comments inside
331
+ # ``joblib.hashing.NumpyHasher.save``.
332
+
333
+ # In this test, we make sure that the pickling of numpy dtypes is robust to
334
+ # object identity and object copy.
335
+
336
+ dt1 = np.dtype('f4')
337
+ dt2 = np.dtype('f4')
338
+
339
+ # simple dtype objects are interned
340
+ assert dt1 is dt2
341
+ assert hash(dt1) == hash(dt2)
342
+
343
+ dt1_roundtripped = pickle.loads(pickle.dumps(dt1))
344
+ assert dt1 is not dt1_roundtripped
345
+ assert hash(dt1) == hash(dt1_roundtripped)
346
+
347
+ assert hash([dt1, dt1]) == hash([dt1_roundtripped, dt1_roundtripped])
348
+ assert hash([dt1, dt1]) == hash([dt1, dt1_roundtripped])
349
+
350
+ complex_dt1 = np.dtype(
351
+ [('name', np.str_, 16), ('grades', np.float64, (2,))]
352
+ )
353
+ complex_dt2 = np.dtype(
354
+ [('name', np.str_, 16), ('grades', np.float64, (2,))]
355
+ )
356
+
357
+ # complex dtype objects are not interned
358
+ assert hash(complex_dt1) == hash(complex_dt2)
359
+
360
+ complex_dt1_roundtripped = pickle.loads(pickle.dumps(complex_dt1))
361
+ assert complex_dt1_roundtripped is not complex_dt1
362
+ assert hash(complex_dt1) == hash(complex_dt1_roundtripped)
363
+
364
+ assert hash([complex_dt1, complex_dt1]) == hash(
365
+ [complex_dt1_roundtripped, complex_dt1_roundtripped]
366
+ )
367
+ assert hash([complex_dt1, complex_dt1]) == hash(
368
+ [complex_dt1_roundtripped, complex_dt1]
369
+ )
370
+
371
+
372
+ @parametrize('to_hash,expected',
373
+ [('This is a string to hash',
374
+ '71b3f47df22cb19431d85d92d0b230b2'),
375
+ (u"C'est l\xe9t\xe9",
376
+ '2d8d189e9b2b0b2e384d93c868c0e576'),
377
+ ((123456, 54321, -98765),
378
+ 'e205227dd82250871fa25aa0ec690aa3'),
379
+ ([random.Random(42).random() for _ in range(5)],
380
+ 'a11ffad81f9682a7d901e6edc3d16c84'),
381
+ ({'abcde': 123, 'sadfas': [-9999, 2, 3]},
382
+ 'aeda150553d4bb5c69f0e69d51b0e2ef')])
383
+ def test_hashes_stay_the_same(to_hash, expected):
384
+ # We want to make sure that hashes don't change with joblib
385
+ # version. For end users, that would mean that they have to
386
+ # regenerate their cache from scratch, which potentially means
387
+ # lengthy recomputations.
388
+ # Expected results have been generated with joblib 0.9.2
389
+ assert hash(to_hash) == expected
390
+
391
+
392
+ @with_numpy
393
+ def test_hashes_are_different_between_c_and_fortran_contiguous_arrays():
394
+ # We want to be sure that the c-contiguous and f-contiguous versions of the
395
+ # same array produce 2 different hashes.
396
+ rng = np.random.RandomState(0)
397
+ arr_c = rng.random_sample((10, 10))
398
+ arr_f = np.asfortranarray(arr_c)
399
+ assert hash(arr_c) != hash(arr_f)
400
+
401
+
402
+ @with_numpy
403
+ def test_0d_array():
404
+ hash(np.array(0))
405
+
406
+
407
+ @with_numpy
408
+ def test_0d_and_1d_array_hashing_is_different():
409
+ assert hash(np.array(0)) != hash(np.array([0]))
410
+
411
+
412
+ @with_numpy
413
+ def test_hashes_stay_the_same_with_numpy_objects():
414
+ # Note: joblib used to test numpy objects hashing by comparing the produced
415
+ # hash of an object with some hard-coded target value to guarantee that
416
+ # hashing remains the same across joblib versions. However, since numpy
417
+ # 1.20 and joblib 1.0, joblib relies on potentially unstable implementation
418
+ # details of numpy to hash np.dtype objects, which makes the stability of
419
+ # hash values across different environments hard to guarantee and to test.
420
+ # As a result, hashing stability across joblib versions becomes best-effort
421
+ # only, and we only test the consistency within a single environment by
422
+ # making sure:
423
+ # - the hash of two copies of the same objects is the same
424
+ # - hashing some object in two different python processes produces the same
425
+ # value. This should be viewed as a proxy for testing hash consistency
426
+ # through time between Python sessions (provided no change in the
427
+ # environment was done between sessions).
428
+
429
+ def create_objects_to_hash():
430
+ rng = np.random.RandomState(42)
431
+ # Being explicit about dtypes in order to avoid
432
+ # architecture-related differences. Also using 'f4' rather than
433
+ # 'f8' for float arrays because 'f8' arrays generated by
434
+ # rng.random.randn don't seem to be bit-identical on 32bit and
435
+ # 64bit machines.
436
+ to_hash_list = [
437
+ rng.randint(-1000, high=1000, size=50).astype('<i8'),
438
+ tuple(rng.randn(3).astype('<f4') for _ in range(5)),
439
+ [rng.randn(3).astype('<f4') for _ in range(5)],
440
+ {
441
+ -3333: rng.randn(3, 5).astype('<f4'),
442
+ 0: [
443
+ rng.randint(10, size=20).astype('<i8'),
444
+ rng.randn(10).astype('<f4')
445
+ ]
446
+ },
447
+ # Non regression cases for
448
+ # https://github.com/joblib/joblib/issues/308
449
+ np.arange(100, dtype='<i8').reshape((10, 10)),
450
+ # Fortran contiguous array
451
+ np.asfortranarray(np.arange(100, dtype='<i8').reshape((10, 10))),
452
+ # Non contiguous array
453
+ np.arange(100, dtype='<i8').reshape((10, 10))[:, :2],
454
+ ]
455
+ return to_hash_list
456
+
457
+ # Create two lists containing copies of the same objects. joblib.hash
458
+ # should return the same hash for to_hash_list_one[i] and
459
+ # to_hash_list_two[i]
460
+ to_hash_list_one = create_objects_to_hash()
461
+ to_hash_list_two = create_objects_to_hash()
462
+
463
+ e1 = ProcessPoolExecutor(max_workers=1)
464
+ e2 = ProcessPoolExecutor(max_workers=1)
465
+
466
+ try:
467
+ for obj_1, obj_2 in zip(to_hash_list_one, to_hash_list_two):
468
+ # testing consistency of hashes across python processes
469
+ hash_1 = e1.submit(hash, obj_1).result()
470
+ hash_2 = e2.submit(hash, obj_1).result()
471
+ assert hash_1 == hash_2
472
+
473
+ # testing consistency when hashing two copies of the same objects.
474
+ hash_3 = e1.submit(hash, obj_2).result()
475
+ assert hash_1 == hash_3
476
+
477
+ finally:
478
+ e1.shutdown()
479
+ e2.shutdown()
480
+
481
+
482
+ def test_hashing_pickling_error():
483
+ def non_picklable():
484
+ return 42
485
+
486
+ with raises(pickle.PicklingError) as excinfo:
487
+ hash(non_picklable)
488
+ excinfo.match('PicklingError while hashing')
489
+
490
+
491
+ def test_wrong_hash_name():
492
+ msg = "Valid options for 'hash_name' are"
493
+ with raises(ValueError, match=msg):
494
+ data = {'foo': 'bar'}
495
+ hash(data, hash_name='invalid')
llmeval-env/lib/python3.10/site-packages/joblib/test/test_logger.py ADDED
@@ -0,0 +1,31 @@
1
+ """
2
+ Test the logger module.
3
+ """
4
+
5
+ # Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
6
+ # Copyright (c) 2009 Gael Varoquaux
7
+ # License: BSD Style, 3 clauses.
8
+ import re
9
+
10
+ from joblib.logger import PrintTime
11
+
12
+
13
+ def test_print_time(tmpdir, capsys):
14
+ # A simple smoke test for PrintTime.
15
+ logfile = tmpdir.join('test.log').strpath
16
+ print_time = PrintTime(logfile=logfile)
17
+ print_time('Foo')
18
+ # Create a second time, to smoke test log rotation.
19
+ print_time = PrintTime(logfile=logfile)
20
+ print_time('Foo')
21
+ # And a third time
22
+ print_time = PrintTime(logfile=logfile)
23
+ print_time('Foo')
24
+
25
+ out_printed_text, err_printed_text = capsys.readouterr()
26
+ # Use regexps to be robust to time variations
27
+ match = r"Foo: 0\..s, 0\..min\nFoo: 0\..s, 0..min\nFoo: " + \
28
+ r".\..s, 0..min\n"
29
+ if not re.match(match, err_printed_text):
30
+ raise AssertionError('Expected %s, got %s' %
31
+ (match, err_printed_text))
llmeval-env/lib/python3.10/site-packages/joblib/test/test_memmapping.py ADDED
@@ -0,0 +1,1191 @@
1
+ import os
2
+ import mmap
3
+ import sys
4
+ import platform
5
+ import gc
6
+ import pickle
7
+ import itertools
8
+ from time import sleep
9
+ import subprocess
10
+ import threading
11
+ import faulthandler
12
+
13
+ import pytest
14
+
15
+ from joblib.test.common import with_numpy, np
16
+ from joblib.test.common import with_multiprocessing
17
+ from joblib.test.common import with_dev_shm
18
+ from joblib.testing import raises, parametrize, skipif
19
+ from joblib.backports import make_memmap
20
+ from joblib.parallel import Parallel, delayed
21
+
22
+ from joblib.pool import MemmappingPool
23
+ from joblib.executor import _TestingMemmappingExecutor as TestExecutor
24
+ from joblib._memmapping_reducer import has_shareable_memory
25
+ from joblib._memmapping_reducer import ArrayMemmapForwardReducer
26
+ from joblib._memmapping_reducer import _strided_from_memmap
27
+ from joblib._memmapping_reducer import _get_temp_dir
28
+ from joblib._memmapping_reducer import _WeakArrayKeyMap
29
+ from joblib._memmapping_reducer import _get_backing_memmap
30
+ import joblib._memmapping_reducer as jmr
31
+
32
+
33
+ def setup_module():
34
+ faulthandler.dump_traceback_later(timeout=300, exit=True)
35
+
36
+
37
+ def teardown_module():
38
+ faulthandler.cancel_dump_traceback_later()
39
+
40
+
41
+ def check_memmap_and_send_back(array):
42
+ assert _get_backing_memmap(array) is not None
43
+ return array
44
+
45
+
46
+ def check_array(args):
47
+ """Dummy helper function to be executed in subprocesses
48
+
49
+ Check that the provided array has the expected values in the provided
50
+ range.
51
+
52
+ """
53
+ data, position, expected = args
54
+ np.testing.assert_array_equal(data[position], expected)
55
+
56
+
57
+ def inplace_double(args):
58
+ """Dummy helper function to be executed in subprocesses
59
+
60
+
61
+ Check that the input array has the right values in the provided range
62
+ and perform an inplace modification to double the values in the range by
63
+ two.
64
+
65
+ """
66
+ data, position, expected = args
67
+ assert data[position] == expected
68
+ data[position] *= 2
69
+ np.testing.assert_array_equal(data[position], 2 * expected)
70
+
71
+
72
+ @with_numpy
73
+ @with_multiprocessing
74
+ def test_memmap_based_array_reducing(tmpdir):
75
+ """Check that it is possible to reduce a memmap backed array"""
76
+ assert_array_equal = np.testing.assert_array_equal
77
+ filename = tmpdir.join('test.mmap').strpath
78
+
79
+ # Create a file larger than what will be used by a
80
+ buffer = np.memmap(filename, dtype=np.float64, shape=500, mode='w+')
81
+
82
+ # Fill the original buffer with negative markers to detect over of
83
+ # underflow in case of test failures
84
+ buffer[:] = - 1.0 * np.arange(buffer.shape[0], dtype=buffer.dtype)
85
+ buffer.flush()
86
+
87
+ # Memmap a 3D Fortran-ordered array on an offset subsection of the previous
88
+ # buffer
89
+ a = np.memmap(filename, dtype=np.float64, shape=(3, 5, 4),
90
+ mode='r+', order='F', offset=4)
91
+ a[:] = np.arange(60).reshape(a.shape)
92
+
93
+ # Build various views that share the buffer with the original memmap
94
+
95
+ # b is a sliced memmap view on a memmap instance
96
+ b = a[1:-1, 2:-1, 2:4]
97
+
98
+ # c and d are array views
99
+ c = np.asarray(b)
100
+ d = c.T
101
+
102
+ # Array reducer with auto dumping disabled
103
+ reducer = ArrayMemmapForwardReducer(None, tmpdir.strpath, 'c', True)
104
+
105
+ def reconstruct_array_or_memmap(x):
106
+ cons, args = reducer(x)
107
+ return cons(*args)
108
+
109
+ # Reconstruct original memmap
110
+ a_reconstructed = reconstruct_array_or_memmap(a)
111
+ assert has_shareable_memory(a_reconstructed)
112
+ assert isinstance(a_reconstructed, np.memmap)
113
+ assert_array_equal(a_reconstructed, a)
114
+
115
+ # Reconstruct strided memmap view
116
+ b_reconstructed = reconstruct_array_or_memmap(b)
117
+ assert has_shareable_memory(b_reconstructed)
118
+ assert_array_equal(b_reconstructed, b)
119
+
120
+ # Reconstruct array views on the memmap base
121
+ c_reconstructed = reconstruct_array_or_memmap(c)
122
+ assert not isinstance(c_reconstructed, np.memmap)
123
+ assert has_shareable_memory(c_reconstructed)
124
+ assert_array_equal(c_reconstructed, c)
125
+
126
+ d_reconstructed = reconstruct_array_or_memmap(d)
127
+ assert not isinstance(d_reconstructed, np.memmap)
128
+ assert has_shareable_memory(d_reconstructed)
129
+ assert_array_equal(d_reconstructed, d)
130
+
131
+ # Test graceful degradation on fake memmap instances with in-memory
132
+ # buffers
133
+ a3 = a * 3
134
+ assert not has_shareable_memory(a3)
135
+ a3_reconstructed = reconstruct_array_or_memmap(a3)
136
+ assert not has_shareable_memory(a3_reconstructed)
137
+ assert not isinstance(a3_reconstructed, np.memmap)
138
+ assert_array_equal(a3_reconstructed, a * 3)
139
+
140
+ # Test graceful degradation on arrays derived from fake memmap instances
141
+ b3 = np.asarray(a3)
142
+ assert not has_shareable_memory(b3)
143
+
144
+ b3_reconstructed = reconstruct_array_or_memmap(b3)
145
+ assert isinstance(b3_reconstructed, np.ndarray)
146
+ assert not has_shareable_memory(b3_reconstructed)
147
+ assert_array_equal(b3_reconstructed, b3)
148
+
149
+
150
+ @with_multiprocessing
151
+ @skipif((sys.platform != "win32") or (),
152
+ reason="PermissionError only easily triggerable on Windows")
153
+ def test_resource_tracker_retries_when_permissionerror(tmpdir):
154
+ # Test resource_tracker retry mechanism when unlinking memmaps. See more
155
+ # thorough information in the ``unlink_file`` documentation of joblib.
156
+ filename = tmpdir.join('test.mmap').strpath
157
+ cmd = """if 1:
158
+ import os
159
+ import numpy as np
160
+ import time
161
+ from joblib.externals.loky.backend import resource_tracker
162
+ resource_tracker.VERBOSE = 1
163
+
164
+ # Start the resource tracker
165
+ resource_tracker.ensure_running()
166
+ time.sleep(1)
167
+
168
+ # Create a file containing numpy data
169
+ memmap = np.memmap(r"{filename}", dtype=np.float64, shape=10, mode='w+')
170
+ memmap[:] = np.arange(10).astype(np.int8).data
171
+ memmap.flush()
172
+ assert os.path.exists(r"{filename}")
173
+ del memmap
174
+
175
+ # Create a np.memmap backed by this file
176
+ memmap = np.memmap(r"{filename}", dtype=np.float64, shape=10, mode='w+')
177
+ resource_tracker.register(r"{filename}", "file")
178
+
179
+ # Ask the resource_tracker to delete the file backing the np.memmap; this
181
+ # should raise a PermissionError that the resource_tracker will log.
181
+ resource_tracker.maybe_unlink(r"{filename}", "file")
182
+
183
+ # Wait for the resource_tracker to process the maybe_unlink before cleaning
184
+ # up the memmap
185
+ time.sleep(2)
186
+ """.format(filename=filename)
187
+ p = subprocess.Popen([sys.executable, '-c', cmd], stderr=subprocess.PIPE,
188
+ stdout=subprocess.PIPE)
189
+ p.wait()
190
+ out, err = p.communicate()
191
+ assert p.returncode == 0
192
+ assert out == b''
193
+ msg = 'tried to unlink {}, got PermissionError'.format(filename)
194
+ assert msg in err.decode()
195
+
196
+
197
+ @with_numpy
198
+ @with_multiprocessing
199
+ def test_high_dimension_memmap_array_reducing(tmpdir):
200
+ assert_array_equal = np.testing.assert_array_equal
201
+
202
+ filename = tmpdir.join('test.mmap').strpath
203
+
204
+ # Create a high dimensional memmap
205
+ a = np.memmap(filename, dtype=np.float64, shape=(100, 15, 15, 3),
206
+ mode='w+')
207
+ a[:] = np.arange(100 * 15 * 15 * 3).reshape(a.shape)
208
+
209
+ # Create some slices/indices at various dimensions
210
+ b = a[0:10]
211
+ c = a[:, 5:10]
212
+ d = a[:, :, :, 0]
213
+ e = a[1:3:4]
214
+
215
+ # Array reducer with auto dumping disabled
216
+ reducer = ArrayMemmapForwardReducer(None, tmpdir.strpath, 'c', True)
217
+
218
+ def reconstruct_array_or_memmap(x):
219
+ cons, args = reducer(x)
220
+ return cons(*args)
221
+
222
+ a_reconstructed = reconstruct_array_or_memmap(a)
223
+ assert has_shareable_memory(a_reconstructed)
224
+ assert isinstance(a_reconstructed, np.memmap)
225
+ assert_array_equal(a_reconstructed, a)
226
+
227
+ b_reconstructed = reconstruct_array_or_memmap(b)
228
+ assert has_shareable_memory(b_reconstructed)
229
+ assert_array_equal(b_reconstructed, b)
230
+
231
+ c_reconstructed = reconstruct_array_or_memmap(c)
232
+ assert has_shareable_memory(c_reconstructed)
233
+ assert_array_equal(c_reconstructed, c)
234
+
235
+ d_reconstructed = reconstruct_array_or_memmap(d)
236
+ assert has_shareable_memory(d_reconstructed)
237
+ assert_array_equal(d_reconstructed, d)
238
+
239
+ e_reconstructed = reconstruct_array_or_memmap(e)
240
+ assert has_shareable_memory(e_reconstructed)
241
+ assert_array_equal(e_reconstructed, e)
242
+
243
+
244
+ @with_numpy
245
+ def test__strided_from_memmap(tmpdir):
246
+ fname = tmpdir.join('test.mmap').strpath
247
+ size = 5 * mmap.ALLOCATIONGRANULARITY
248
+ offset = mmap.ALLOCATIONGRANULARITY + 1
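+ # Use an offset that is deliberately not aligned on the allocation
+ # granularity, to exercise the offset bookkeeping of _strided_from_memmap.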
249
+ # This line creates the mmap file that is reused later
250
+ memmap_obj = np.memmap(fname, mode='w+', shape=size + offset)
251
+ # filename, dtype, mode, offset, order, shape, strides, total_buffer_len
252
+ memmap_obj = _strided_from_memmap(fname, dtype='uint8', mode='r',
253
+ offset=offset, order='C', shape=size,
254
+ strides=None, total_buffer_len=None,
255
+ unlink_on_gc_collect=False)
256
+ assert isinstance(memmap_obj, np.memmap)
257
+ assert memmap_obj.offset == offset
258
+ memmap_backed_obj = _strided_from_memmap(
259
+ fname, dtype='uint8', mode='r', offset=offset, order='C',
260
+ shape=(size // 2,), strides=(2,), total_buffer_len=size,
261
+ unlink_on_gc_collect=False
262
+ )
263
+ assert _get_backing_memmap(memmap_backed_obj).offset == offset
264
+
265
+
266
+ @with_numpy
267
+ @with_multiprocessing
268
+ @parametrize("factory", [MemmappingPool, TestExecutor.get_memmapping_executor],
269
+ ids=["multiprocessing", "loky"])
270
+ def test_pool_with_memmap(factory, tmpdir):
271
+ """Check that subprocess can access and update shared memory memmap"""
272
+ assert_array_equal = np.testing.assert_array_equal
273
+
274
+ # Fork the subprocess before allocating the objects to be passed
275
+ pool_temp_folder = tmpdir.mkdir('pool').strpath
276
+ p = factory(10, max_nbytes=2, temp_folder=pool_temp_folder)
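+ # max_nbytes=2 would force auto-memmapping of any array bigger than 2
+ # bytes; pre-existing memmaps should nevertheless be forwarded as-is
+ # without creating new files in temp_folder (checked below).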
277
+ try:
278
+ filename = tmpdir.join('test.mmap').strpath
279
+ a = np.memmap(filename, dtype=np.float32, shape=(3, 5), mode='w+')
280
+ a.fill(1.0)
281
+
282
+ p.map(inplace_double, [(a, (i, j), 1.0)
283
+ for i in range(a.shape[0])
284
+ for j in range(a.shape[1])])
285
+
286
+ assert_array_equal(a, 2 * np.ones(a.shape))
287
+
288
+ # Open a copy-on-write view on the previous data
289
+ b = np.memmap(filename, dtype=np.float32, shape=(5, 3), mode='c')
290
+
291
+ p.map(inplace_double, [(b, (i, j), 2.0)
292
+ for i in range(b.shape[0])
293
+ for j in range(b.shape[1])])
294
+
295
+ # Passing memmap instances to the pool should not trigger the creation
296
+ # of new files on the FS
297
+ assert os.listdir(pool_temp_folder) == []
298
+
299
+ # the original data is untouched
300
+ assert_array_equal(a, 2 * np.ones(a.shape))
301
+ assert_array_equal(b, 2 * np.ones(b.shape))
302
+
303
+ # readonly maps can be read but not updated
304
+ c = np.memmap(filename, dtype=np.float32, shape=(10,), mode='r',
305
+ offset=5 * 4)
306
+
307
+ with raises(AssertionError):
308
+ p.map(check_array, [(c, i, 3.0) for i in range(c.shape[0])])
309
+
310
+ # depending on the version of numpy one can either get a RuntimeError
311
+ # or a ValueError
312
+ with raises((RuntimeError, ValueError)):
313
+ p.map(inplace_double, [(c, i, 2.0) for i in range(c.shape[0])])
314
+ finally:
315
+ # Close all file handles held by the pool
316
+ p.terminate()
317
+ del p
318
+
319
+
320
+ @with_numpy
321
+ @with_multiprocessing
322
+ @parametrize("factory", [MemmappingPool, TestExecutor.get_memmapping_executor],
323
+ ids=["multiprocessing", "loky"])
324
+ def test_pool_with_memmap_array_view(factory, tmpdir):
325
+ """Check that subprocess can access and update shared memory array"""
326
+ assert_array_equal = np.testing.assert_array_equal
327
+
328
+ # Fork the subprocess before allocating the objects to be passed
329
+ pool_temp_folder = tmpdir.mkdir('pool').strpath
330
+ p = factory(10, max_nbytes=2, temp_folder=pool_temp_folder)
331
+ try:
332
+
333
+ filename = tmpdir.join('test.mmap').strpath
334
+ a = np.memmap(filename, dtype=np.float32, shape=(3, 5), mode='w+')
335
+ a.fill(1.0)
336
+
337
+ # Create an ndarray view on the memmap instance
338
+ a_view = np.asarray(a)
339
+ assert not isinstance(a_view, np.memmap)
340
+ assert has_shareable_memory(a_view)
341
+
342
+ p.map(inplace_double, [(a_view, (i, j), 1.0)
343
+ for i in range(a.shape[0])
344
+ for j in range(a.shape[1])])
345
+
346
+ # Both a and the a_view have been updated
347
+ assert_array_equal(a, 2 * np.ones(a.shape))
348
+ assert_array_equal(a_view, 2 * np.ones(a.shape))
349
+
350
+ # Passing memmap array view to the pool should not trigger the
351
+ # creation of new files on the FS
352
+ assert os.listdir(pool_temp_folder) == []
353
+
354
+ finally:
355
+ p.terminate()
356
+ del p
357
+
358
+
359
+ @with_numpy
360
+ @with_multiprocessing
361
+ @parametrize("backend", ["multiprocessing", "loky"])
362
+ def test_permission_error_windows_reference_cycle(backend):
363
+ # Non regression test for:
364
+ # https://github.com/joblib/joblib/issues/806
365
+ #
366
+ # The issue happens when trying to delete a memory mapped file that has
367
+ # not yet been closed by one of the worker processes.
368
+ cmd = """if 1:
369
+ import numpy as np
370
+ from joblib import Parallel, delayed
371
+
372
+
373
+ data = np.random.rand(int(2e6)).reshape((int(1e6), 2))
374
+
375
+ # Build a complex cyclic reference that is likely to delay garbage
376
+ # collection of the memmapped array in the worker processes.
377
+ first_list = current_list = [data]
378
+ for i in range(10):
379
+ current_list = [current_list]
380
+ first_list.append(current_list)
381
+
382
+ if __name__ == "__main__":
383
+ results = Parallel(n_jobs=2, backend="{b}")(
384
+ delayed(len)(current_list) for i in range(10))
385
+ assert results == [1] * 10
386
+ """.format(b=backend)
387
+ p = subprocess.Popen([sys.executable, '-c', cmd], stderr=subprocess.PIPE,
388
+ stdout=subprocess.PIPE)
389
+ p.wait()
390
+ out, err = p.communicate()
391
+ assert p.returncode == 0, out.decode() + "\n\n" + err.decode()
392
+
393
+
394
+ @with_numpy
395
+ @with_multiprocessing
396
+ @parametrize("backend", ["multiprocessing", "loky"])
397
+ def test_permission_error_windows_memmap_sent_to_parent(backend):
398
+ # Second non-regression test for:
399
+ # https://github.com/joblib/joblib/issues/806
400
+ # previously, child processes would not convert temporary memmaps to numpy
401
+ # arrays when sending the data back to the parent process. This would lead
402
+ # to permission errors on Windows when deleting joblib's temporary folder,
403
+ # as the memmapped file handles would still be open in the parent process.
404
+ cmd = '''if 1:
405
+ import os
406
+ import time
407
+
408
+ import numpy as np
409
+
410
+ from joblib import Parallel, delayed
411
+ from testutils import return_slice_of_data
412
+
413
+ data = np.ones(int(2e6))
414
+
415
+ if __name__ == '__main__':
416
+ # warm-up call to launch the workers and start the resource_tracker
417
+ _ = Parallel(n_jobs=2, verbose=5, backend='{b}')(
418
+ delayed(id)(i) for i in range(20))
419
+
420
+ time.sleep(0.5)
421
+
422
+ slice_of_data = Parallel(n_jobs=2, verbose=5, backend='{b}')(
423
+ delayed(return_slice_of_data)(data, 0, 20) for _ in range(10))
424
+ '''.format(b=backend)
425
+
426
+ for _ in range(3):
427
+ env = os.environ.copy()
428
+ env['PYTHONPATH'] = os.path.dirname(__file__)
429
+ p = subprocess.Popen([sys.executable, '-c', cmd],
430
+ stderr=subprocess.PIPE,
431
+ stdout=subprocess.PIPE, env=env)
432
+ p.wait()
433
+ out, err = p.communicate()
434
+ assert p.returncode == 0, err
435
+ assert out == b''
436
+ if sys.version_info[:3] not in [(3, 8, 0), (3, 8, 1)]:
437
+ # In early versions of Python 3.8, a reference leak
438
+ # https://github.com/cloudpipe/cloudpickle/issues/327, holds
439
+ # references to pickled objects, generating race condition during
440
+ # cleanup finalizers of joblib and noisy resource_tracker outputs.
441
+ assert b'resource_tracker' not in err
442
+
443
+
444
+ @with_numpy
445
+ @with_multiprocessing
446
+ @parametrize("backend", ["multiprocessing", "loky"])
447
+ def test_parallel_isolated_temp_folders(backend):
448
+ # Test that consecutive Parallel calls use isolated subfolders, even
449
+ # for the loky backend that reuses its executor instance across calls.
450
+ array = np.arange(int(1e2))
451
+ [filename_1] = Parallel(n_jobs=2, backend=backend, max_nbytes=10)(
452
+ delayed(getattr)(array, 'filename') for _ in range(1)
453
+ )
454
+ [filename_2] = Parallel(n_jobs=2, backend=backend, max_nbytes=10)(
455
+ delayed(getattr)(array, 'filename') for _ in range(1)
456
+ )
457
+ assert os.path.dirname(filename_2) != os.path.dirname(filename_1)
458
+
459
+
460
+ @with_numpy
461
+ @with_multiprocessing
462
+ @parametrize("backend", ["multiprocessing", "loky"])
463
+ def test_managed_backend_reuse_temp_folder(backend):
464
+ # Test that calls to a managed parallel object reuse the same memmaps.
465
+ array = np.arange(int(1e2))
466
+ with Parallel(n_jobs=2, backend=backend, max_nbytes=10) as p:
467
+ [filename_1] = p(
468
+ delayed(getattr)(array, 'filename') for _ in range(1)
469
+ )
470
+ [filename_2] = p(
471
+ delayed(getattr)(array, 'filename') for _ in range(1)
472
+ )
473
+ assert os.path.dirname(filename_2) == os.path.dirname(filename_1)
474
+
475
+
476
+ @with_numpy
477
+ @with_multiprocessing
478
+ def test_memmapping_temp_folder_thread_safety():
479
+ # Concurrent calls to Parallel with the loky backend will use the same
480
+ # executor, and thus the same reducers. Make sure that those reducers use
481
+ # different temporary folders depending on which Parallel objects called
482
+ # them, which is necessary to limit potential race conditions during the
483
+ # garbage collection of temporary memmaps.
484
+ array = np.arange(int(1e2))
485
+
486
+ temp_dirs_thread_1 = set()
487
+ temp_dirs_thread_2 = set()
488
+
489
+ def concurrent_get_filename(array, temp_dirs):
490
+ with Parallel(backend='loky', n_jobs=2, max_nbytes=10) as p:
491
+ for i in range(10):
492
+ [filename] = p(
493
+ delayed(getattr)(array, 'filename') for _ in range(1)
494
+ )
495
+ temp_dirs.add(os.path.dirname(filename))
496
+
497
+ t1 = threading.Thread(
498
+ target=concurrent_get_filename, args=(array, temp_dirs_thread_1)
499
+ )
500
+ t2 = threading.Thread(
501
+ target=concurrent_get_filename, args=(array, temp_dirs_thread_2)
502
+ )
503
+
504
+ t1.start()
505
+ t2.start()
506
+
507
+ t1.join()
508
+ t2.join()
509
+
510
+ assert len(temp_dirs_thread_1) == 1
511
+ assert len(temp_dirs_thread_2) == 1
512
+
513
+ assert temp_dirs_thread_1 != temp_dirs_thread_2
514
+
515
+
516
+ @with_numpy
517
+ @with_multiprocessing
518
+ def test_multithreaded_parallel_termination_resource_tracker_silent():
519
+ # test that concurrent termination attempts on the same executor do not
520
+ # emit any spurious error from the resource_tracker. We test various
521
+ # situations in which 0, 1 or both Parallel calls send a task that will
522
+ # make the worker (and thus the whole Parallel call) error out.
523
+ cmd = '''if 1:
524
+ import os
525
+ import numpy as np
526
+ from joblib import Parallel, delayed
527
+ from joblib.externals.loky.backend import resource_tracker
528
+ from concurrent.futures import ThreadPoolExecutor, wait
529
+
530
+ resource_tracker.VERBOSE = 0
531
+
532
+ array = np.arange(int(1e2))
533
+
534
+ temp_dirs_thread_1 = set()
535
+ temp_dirs_thread_2 = set()
536
+
537
+
538
+ def raise_error(array):
539
+ raise ValueError
540
+
541
+
542
+ def parallel_get_filename(array, temp_dirs):
543
+ with Parallel(backend="loky", n_jobs=2, max_nbytes=10) as p:
544
+ for i in range(10):
545
+ [filename] = p(
546
+ delayed(getattr)(array, "filename") for _ in range(1)
547
+ )
548
+ temp_dirs.add(os.path.dirname(filename))
549
+
550
+
551
+ def parallel_raise(array, temp_dirs):
552
+ with Parallel(backend="loky", n_jobs=2, max_nbytes=10) as p:
553
+ for i in range(10):
554
+ [filename] = p(
555
+ delayed(raise_error)(array) for _ in range(1)
556
+ )
557
+ temp_dirs.add(os.path.dirname(filename))
558
+
559
+
560
+ executor = ThreadPoolExecutor(max_workers=2)
561
+
562
+ # both function calls will use the same loky executor, but with a
563
+ # different Parallel object.
564
+ future_1 = executor.submit({f1}, array, temp_dirs_thread_1)
565
+ future_2 = executor.submit({f2}, array, temp_dirs_thread_2)
566
+
567
+ # Wait for both threads to terminate their backend
568
+ wait([future_1, future_2])
569
+
570
+ future_1.result()
571
+ future_2.result()
572
+ '''
573
+ functions_and_returncodes = [
574
+ ("parallel_get_filename", "parallel_get_filename", 0),
575
+ ("parallel_get_filename", "parallel_raise", 1),
576
+ ("parallel_raise", "parallel_raise", 1)
577
+ ]
578
+
579
+ for f1, f2, returncode in functions_and_returncodes:
580
+ p = subprocess.Popen([sys.executable, '-c', cmd.format(f1=f1, f2=f2)],
581
+ stderr=subprocess.PIPE, stdout=subprocess.PIPE)
582
+ p.wait()
583
+ out, err = p.communicate()
584
+ assert p.returncode == returncode, out.decode()
585
+ assert b"resource_tracker" not in err, err.decode()
586
+
587
+
588
+ @with_numpy
589
+ @with_multiprocessing
590
+ @parametrize("backend", ["multiprocessing", "loky"])
591
+ def test_many_parallel_calls_on_same_object(backend):
592
+ # After #966 got merged, consecutive Parallel objects were sharing the same
593
+ # temp folder, which would lead to race conditions during the management of
594
+ # temporary resources by the resource_tracker. This is a
595
+ # non-regression test that makes sure that consecutive Parallel operations
596
+ # on the same object do not error out.
597
+ cmd = '''if 1:
598
+ import os
599
+ import time
600
+
601
+ import numpy as np
602
+
603
+ from joblib import Parallel, delayed
604
+ from testutils import return_slice_of_data
605
+
606
+ data = np.ones(100)
607
+
608
+ if __name__ == '__main__':
609
+ for i in range(5):
610
+ slice_of_data = Parallel(
611
+ n_jobs=2, max_nbytes=1, backend='{b}')(
612
+ delayed(return_slice_of_data)(data, 0, 20)
613
+ for _ in range(10)
614
+ )
615
+ '''.format(b=backend)
616
+ env = os.environ.copy()
617
+ env['PYTHONPATH'] = os.path.dirname(__file__)
618
+ p = subprocess.Popen(
619
+ [sys.executable, '-c', cmd],
620
+ stderr=subprocess.PIPE,
621
+ stdout=subprocess.PIPE,
622
+ env=env,
623
+ )
624
+ p.wait()
625
+ out, err = p.communicate()
626
+ assert p.returncode == 0, err
627
+ assert out == b''
628
+ if sys.version_info[:3] not in [(3, 8, 0), (3, 8, 1)]:
629
+ # In early versions of Python 3.8, a reference leak
630
+ # https://github.com/cloudpipe/cloudpickle/issues/327, holds
631
+ # references to pickled objects, generating race condition during
632
+ # cleanup finalizers of joblib and noisy resource_tracker outputs.
633
+ assert b'resource_tracker' not in err
634
+
635
+
636
+ @with_numpy
637
+ @with_multiprocessing
638
+ @parametrize("backend", ["multiprocessing", "loky"])
639
+ def test_memmap_returned_as_regular_array(backend):
640
+ data = np.ones(int(1e3))
641
+ # Check that child processes send temporary memmaps back as numpy arrays.
642
+ [result] = Parallel(n_jobs=2, backend=backend, max_nbytes=100)(
643
+ delayed(check_memmap_and_send_back)(data) for _ in range(1))
644
+ assert _get_backing_memmap(result) is None
645
+
646
+
647
+ @with_numpy
648
+ @with_multiprocessing
649
+ @parametrize("backend", ["multiprocessing", "loky"])
650
+ def test_resource_tracker_silent_when_reference_cycles(backend):
651
+ # There is a variety of reasons that can make joblib with loky backend
652
+ # output noisy warnings when a reference cycle is preventing a memmap from
653
+ # being garbage collected. Especially, joblib's main process finalizer
654
+ # deletes the temporary folder if it was not done before, which can
655
+ # interact badly with the resource_tracker. We don't risk leaking any
656
+ # resources, but this will likely make joblib output a lot of low-level
657
+ # confusing messages.
658
+ #
659
+ # This test makes sure that the resource_tracker is silent when a reference
660
+ # has been collected concurrently on non-Windows platforms.
661
+ #
662
+ # Note that the script in ``cmd`` is the exact same script as in
663
+ # test_permission_error_windows_reference_cycle.
664
+ if backend == "loky" and sys.platform.startswith('win'):
665
+ # XXX: on Windows, reference cycles can delay timely garbage collection
666
+ # and make it impossible to properly delete the temporary folder in the
667
+ # main process because of permission errors.
668
+ pytest.xfail(
669
+ "The temporary folder cannot be deleted on Windows in the "
670
+ "presence of a reference cycle"
671
+ )
672
+
673
+ cmd = """if 1:
674
+ import numpy as np
675
+ from joblib import Parallel, delayed
676
+
677
+
678
+ data = np.random.rand(int(2e6)).reshape((int(1e6), 2))
679
+
680
+ # Build a complex cyclic reference that is likely to delay garbage
681
+ # collection of the memmapped array in the worker processes.
682
+ first_list = current_list = [data]
683
+ for i in range(10):
684
+ current_list = [current_list]
685
+ first_list.append(current_list)
686
+
687
+ if __name__ == "__main__":
688
+ results = Parallel(n_jobs=2, backend="{b}")(
689
+ delayed(len)(current_list) for i in range(10))
690
+ assert results == [1] * 10
691
+ """.format(b=backend)
692
+ p = subprocess.Popen([sys.executable, '-c', cmd], stderr=subprocess.PIPE,
693
+ stdout=subprocess.PIPE)
694
+ p.wait()
695
+ out, err = p.communicate()
696
+ out = out.decode()
697
+ err = err.decode()
698
+ assert p.returncode == 0, out + "\n\n" + err
699
+ assert "resource_tracker" not in err, err
700
+
701
+
702
+ @with_numpy
703
+ @with_multiprocessing
704
+ @parametrize("factory", [MemmappingPool, TestExecutor.get_memmapping_executor],
705
+ ids=["multiprocessing", "loky"])
706
+ def test_memmapping_pool_for_large_arrays(factory, tmpdir):
707
+ """Check that large arrays are not copied in memory"""
708
+
709
+ # Check that the tempfolder is empty
710
+ assert os.listdir(tmpdir.strpath) == []
711
+
712
+ # Build array reducers that automatically dump large array content
713
+ # to filesystem backed memmap instances to avoid memory explosion
714
+ p = factory(3, max_nbytes=40, temp_folder=tmpdir.strpath, verbose=2)
715
+ try:
716
+ # The temporary folder for the pool is not provisioned in advance
717
+ assert os.listdir(tmpdir.strpath) == []
718
+ assert not os.path.exists(p._temp_folder)
719
+
720
+ small = np.ones(5, dtype=np.float32)
721
+ assert small.nbytes == 20
722
+ p.map(check_array, [(small, i, 1.0) for i in range(small.shape[0])])
723
+
724
+ # Memory has been copied, the pool filesystem folder is unused
725
+ assert os.listdir(tmpdir.strpath) == []
726
+
727
+ # Try with a file larger than the memmap threshold of 40 bytes
728
+ large = np.ones(100, dtype=np.float64)
729
+ assert large.nbytes == 800
730
+ p.map(check_array, [(large, i, 1.0) for i in range(large.shape[0])])
731
+
732
+ # The data has been dumped in a temp folder for subprocesses to share it
733
+ # without per-child memory copies
734
+ assert os.path.isdir(p._temp_folder)
735
+ dumped_filenames = os.listdir(p._temp_folder)
736
+ assert len(dumped_filenames) == 1
737
+
738
+ # Check that memory mapping is not triggered for arrays with
739
+ # dtype='object'
740
+ objects = np.array(['abc'] * 100, dtype='object')
741
+ results = p.map(has_shareable_memory, [objects])
742
+ assert not results[0]
743
+
744
+ finally:
745
+ # check FS garbage upon pool termination
746
+ p.terminate()
747
+ for i in range(10):
748
+ sleep(.1)
749
+ if not os.path.exists(p._temp_folder):
750
+ break
751
+ else: # pragma: no cover
752
+ raise AssertionError(
753
+ 'temporary folder {} was not deleted'.format(p._temp_folder)
754
+ )
755
+ del p
756
+
757
+
758
+ @with_numpy
759
+ @with_multiprocessing
760
+ @parametrize(
761
+ "backend",
762
+ [
763
+ pytest.param(
764
+ "multiprocessing",
765
+ marks=pytest.mark.xfail(
766
+ reason='https://github.com/joblib/joblib/issues/1086'
767
+ ),
768
+ ),
769
+ "loky",
770
+ ]
771
+ )
772
+ def test_child_raises_parent_exits_cleanly(backend):
773
+ # When a task executed by a child process raises an error, the parent
774
+ # process's backend is notified, and calls abort_everything.
775
+ # In loky, abort_everything itself calls shutdown(kill_workers=True) which
776
+ # sends SIGKILL to the worker, preventing it from running the finalizers
777
+ # supposed to signal the resource_tracker when the worker is done using
778
+ # objects relying on a shared resource (e.g np.memmaps). Because this
779
+ # behavior is prone to :
780
+ # - cause a resource leak
781
+ # - make the resource tracker emit noisy resource warnings
782
+ # we explicitly test that, when the said situation occurs:
783
+ # - no resources are actually leaked
784
+ # - the temporary resources are deleted as soon as possible (typically, at
785
+ # the end of the failing Parallel call)
786
+ # - the resource_tracker does not emit any warnings.
787
+ cmd = """if 1:
788
+ import os
789
+ from pathlib import Path
790
+ from time import sleep
791
+
792
+ import numpy as np
793
+ from joblib import Parallel, delayed
794
+ from testutils import print_filename_and_raise
795
+
796
+ data = np.random.rand(1000)
797
+
798
+ def get_temp_folder(parallel_obj, backend):
799
+ if "{b}" == "loky":
800
+ return Path(parallel_obj._backend._workers._temp_folder)
801
+ else:
802
+ return Path(parallel_obj._backend._pool._temp_folder)
803
+
804
+
805
+ if __name__ == "__main__":
806
+ try:
807
+ with Parallel(n_jobs=2, backend="{b}", max_nbytes=100) as p:
808
+ temp_folder = get_temp_folder(p, "{b}")
809
+ p(delayed(print_filename_and_raise)(data)
810
+ for i in range(1))
811
+ except ValueError as e:
812
+ # the temporary folder should be deleted by the end of this
813
+ # call but apparently on some file systems, this takes
814
+ # some time to be visible.
815
+ #
816
+ # We attempt to write into the temporary folder to test for
817
+ # its existence and we wait for a maximum of 10 seconds.
818
+ for i in range(100):
819
+ try:
820
+ with open(temp_folder / "some_file.txt", "w") as f:
821
+ f.write("some content")
822
+ except FileNotFoundError:
823
+ # temp_folder has been deleted, all is fine
824
+ break
825
+
826
+ # ... else, wait a bit and try again
827
+ sleep(.1)
828
+ else:
829
+ raise AssertionError(
830
+ str(temp_folder) + " was not deleted"
831
+ ) from e
832
+ """.format(b=backend)
833
+ env = os.environ.copy()
834
+ env['PYTHONPATH'] = os.path.dirname(__file__)
835
+ p = subprocess.Popen([sys.executable, '-c', cmd], stderr=subprocess.PIPE,
836
+ stdout=subprocess.PIPE, env=env)
837
+ p.wait()
838
+ out, err = p.communicate()
839
+ out, err = out.decode(), err.decode()
840
+ filename = out.split('\n')[0]
841
+ assert p.returncode == 0, err or out
842
+ assert err == '' # no resource_tracker warnings.
843
+ assert not os.path.exists(filename)
844
+
845
+
846
+ @with_numpy
847
+ @with_multiprocessing
848
+ @parametrize("factory", [MemmappingPool, TestExecutor.get_memmapping_executor],
849
+ ids=["multiprocessing", "loky"])
850
+ def test_memmapping_pool_for_large_arrays_disabled(factory, tmpdir):
851
+ """Check that large arrays memmapping can be disabled"""
852
+ # Set max_nbytes to None to disable the auto memmapping feature
853
+ p = factory(3, max_nbytes=None, temp_folder=tmpdir.strpath)
854
+ try:
855
+
856
+ # Check that the tempfolder is empty
857
+ assert os.listdir(tmpdir.strpath) == []
858
+
859
+ # Try with a file larger than the memmap threshold of 40 bytes
860
+ large = np.ones(100, dtype=np.float64)
861
+ assert large.nbytes == 800
862
+ p.map(check_array, [(large, i, 1.0) for i in range(large.shape[0])])
863
+
864
+ # Check that the tempfolder is still empty
865
+ assert os.listdir(tmpdir.strpath) == []
866
+
867
+ finally:
868
+ # Cleanup open file descriptors
869
+ p.terminate()
870
+ del p
871
+
872
+
873
+ @with_numpy
874
+ @with_multiprocessing
875
+ @with_dev_shm
876
+ @parametrize("factory", [MemmappingPool, TestExecutor.get_memmapping_executor],
877
+ ids=["multiprocessing", "loky"])
878
+ def test_memmapping_on_large_enough_dev_shm(factory):
879
+ """Check that memmapping uses /dev/shm when possible"""
880
+ orig_size = jmr.SYSTEM_SHARED_MEM_FS_MIN_SIZE
881
+ try:
882
+ # Make joblib believe that it can use /dev/shm even when running on a
883
+ # CI container where /dev/shm is not very large (require at least
884
+ # 32 MB instead of the default 2 GB).
885
+ jmr.SYSTEM_SHARED_MEM_FS_MIN_SIZE = int(32e6)
886
+ p = factory(3, max_nbytes=10)
887
+ try:
888
+ # Check that the pool has correctly detected the presence of the
889
+ # shared memory filesystem.
890
+ pool_temp_folder = p._temp_folder
891
+ folder_prefix = '/dev/shm/joblib_memmapping_folder_'
892
+ assert pool_temp_folder.startswith(folder_prefix)
893
+ assert os.path.exists(pool_temp_folder)
894
+
895
+ # Try with a file larger than the memmap threshold of 10 bytes
896
+ a = np.ones(100, dtype=np.float64)
897
+ assert a.nbytes == 800
898
+ p.map(id, [a] * 10)
899
+ # a should have been memmapped to the pool temp folder: the joblib
900
+ # pickling procedure generates one .pkl file:
901
+ assert len(os.listdir(pool_temp_folder)) == 1
902
+
903
+ # create a new array with content that is different from 'a' so
904
+ # that it is mapped to a different file in the temporary folder of
905
+ # the pool.
906
+ b = np.ones(100, dtype=np.float64) * 2
907
+ assert b.nbytes == 800
908
+ p.map(id, [b] * 10)
909
+ # A copy of both a and b are now stored in the shared memory folder
910
+ assert len(os.listdir(pool_temp_folder)) == 2
911
+ finally:
912
+ # Cleanup open file descriptors
913
+ p.terminate()
914
+ del p
915
+
916
+ for i in range(100):
917
+ # The temp folder is cleaned up upon pool termination
918
+ if not os.path.exists(pool_temp_folder):
919
+ break
920
+ sleep(.1)
921
+ else: # pragma: no cover
922
+ raise AssertionError('temporary folder of pool was not deleted')
923
+ finally:
924
+ jmr.SYSTEM_SHARED_MEM_FS_MIN_SIZE = orig_size
925
+
926
+
927
+ @with_numpy
928
+ @with_multiprocessing
929
+ @with_dev_shm
930
+ @parametrize("factory", [MemmappingPool, TestExecutor.get_memmapping_executor],
931
+ ids=["multiprocessing", "loky"])
932
+ def test_memmapping_on_too_small_dev_shm(factory):
933
+ orig_size = jmr.SYSTEM_SHARED_MEM_FS_MIN_SIZE
934
+ try:
935
+ # Make joblib believe that it cannot use /dev/shm unless there is
936
+ # 42 exabytes of available shared memory in /dev/shm
937
+ jmr.SYSTEM_SHARED_MEM_FS_MIN_SIZE = int(42e18)
938
+
939
+ p = factory(3, max_nbytes=10)
940
+ try:
941
+ # Check that the pool does not use the shared memory filesystem when
942
+ # it is deemed too small.
943
+ pool_temp_folder = p._temp_folder
944
+ assert not pool_temp_folder.startswith('/dev/shm')
945
+ finally:
946
+ # Cleanup open file descriptors
947
+ p.terminate()
948
+ del p
949
+
950
+ # The temp folder is cleaned up upon pool termination
951
+ assert not os.path.exists(pool_temp_folder)
952
+ finally:
953
+ jmr.SYSTEM_SHARED_MEM_FS_MIN_SIZE = orig_size
954
+
955
+
956
+ @with_numpy
957
+ @with_multiprocessing
958
+ @parametrize("factory", [MemmappingPool, TestExecutor.get_memmapping_executor],
959
+ ids=["multiprocessing", "loky"])
960
+ def test_memmapping_pool_for_large_arrays_in_return(factory, tmpdir):
961
+ """Check that large arrays are not copied in memory in return"""
962
+ assert_array_equal = np.testing.assert_array_equal
963
+
964
+ # Build array reducers that automatically dump large array content
965
+ # but check that the returned data structures are regular arrays to avoid
966
+ # passing a memmap array pointing to a pool-controlled temp folder that
967
+ # might be confusing to the user
968
+
969
+ # The MemmappingPool user can always return numpy.memmap object explicitly
970
+ # to avoid memory copy
971
+ p = factory(3, max_nbytes=10, temp_folder=tmpdir.strpath)
972
+ try:
973
+ res = p.apply_async(np.ones, args=(1000,))
974
+ large = res.get()
975
+ assert not has_shareable_memory(large)
976
+ assert_array_equal(large, np.ones(1000))
977
+ finally:
978
+ p.terminate()
979
+ del p
980
+
981
+
982
+ def _worker_multiply(a, n_times):
983
+ """Multiplication function to be executed by subprocess"""
984
+ assert has_shareable_memory(a)
985
+ return a * n_times
986
+
987
+
988
+ @with_numpy
989
+ @with_multiprocessing
990
+ @parametrize("factory", [MemmappingPool, TestExecutor.get_memmapping_executor],
991
+ ids=["multiprocessing", "loky"])
992
+ def test_workaround_against_bad_memmap_with_copied_buffers(factory, tmpdir):
993
+ """Check that memmaps with a bad buffer are returned as regular arrays
994
+
995
+ Unary operations and ufuncs on memmap instances return a new memmap
996
+ instance with an in-memory buffer (probably a numpy bug).
997
+ """
998
+ assert_array_equal = np.testing.assert_array_equal
999
+
1000
+ p = factory(3, max_nbytes=10, temp_folder=tmpdir.strpath)
1001
+ try:
1002
+ # Send a complex, large-ish view on an array that will be converted to
1003
+ # a memmap in the worker process
1004
+ a = np.asarray(np.arange(6000).reshape((1000, 2, 3)),
1005
+ order='F')[:, :1, :]
1006
+
1007
+ # Call a non-inplace multiply operation on the memmap in the worker and
1008
+ # send the result back to the parent.
1009
+ b = p.apply_async(_worker_multiply, args=(a, 3)).get()
1010
+ assert not has_shareable_memory(b)
1011
+ assert_array_equal(b, 3 * a)
1012
+ finally:
1013
+ p.terminate()
1014
+ del p
1015
+
1016
+
1017
+ def identity(arg):
1018
+ return arg
1019
+
1020
+
1021
+ @with_numpy
1022
+ @with_multiprocessing
1023
+ @parametrize(
1024
+ "factory,retry_no",
1025
+ list(itertools.product(
1026
+ [MemmappingPool, TestExecutor.get_memmapping_executor], range(3))),
1027
+ ids=['{}, {}'.format(x, y) for x, y in itertools.product(
1028
+ ["multiprocessing", "loky"], map(str, range(3)))])
1029
+ def test_pool_memmap_with_big_offset(factory, retry_no, tmpdir):
1030
+ # Test that numpy memmap offset is set correctly if greater than
1031
+ # mmap.ALLOCATIONGRANULARITY, see
1032
+ # https://github.com/joblib/joblib/issues/451 and
1033
+ # https://github.com/numpy/numpy/pull/8443 for more details.
1034
+ fname = tmpdir.join('test.mmap').strpath
1035
+ size = 5 * mmap.ALLOCATIONGRANULARITY
1036
+ offset = mmap.ALLOCATIONGRANULARITY + 1
1037
+ obj = make_memmap(fname, mode='w+', shape=size, dtype='uint8',
1038
+ offset=offset)
1039
+
1040
+ p = factory(2, temp_folder=tmpdir.strpath)
1041
+ result = p.apply_async(identity, args=(obj,)).get()
1042
+ assert isinstance(result, np.memmap)
1043
+ assert result.offset == offset
1044
+ np.testing.assert_array_equal(obj, result)
1045
+ p.terminate()
1046
+
1047
+
1048
+ def test_pool_get_temp_dir(tmpdir):
1049
+ pool_folder_name = 'test.tmpdir'
1050
+ pool_folder, shared_mem = _get_temp_dir(pool_folder_name, tmpdir.strpath)
1051
+ assert shared_mem is False
1052
+ assert pool_folder == tmpdir.join('test.tmpdir').strpath
1053
+
1054
+ pool_folder, shared_mem = _get_temp_dir(pool_folder_name, temp_folder=None)
1055
+ if sys.platform.startswith('win'):
1056
+ assert shared_mem is False
1057
+ assert pool_folder.endswith(pool_folder_name)
1058
+
1059
+
1060
+ def test_pool_get_temp_dir_no_statvfs(tmpdir, monkeypatch):
1061
+ """Check that _get_temp_dir works when os.statvfs is not defined
1062
+
1063
+ Regression test for #902
1064
+ """
1065
+ pool_folder_name = 'test.tmpdir'
1066
+ import joblib._memmapping_reducer
1067
+ if hasattr(joblib._memmapping_reducer.os, 'statvfs'):
1068
+ # We are on Unix, since Windows doesn't have this function
1069
+ monkeypatch.delattr(joblib._memmapping_reducer.os, 'statvfs')
1070
+
1071
+ pool_folder, shared_mem = _get_temp_dir(pool_folder_name, temp_folder=None)
1072
+ if sys.platform.startswith('win'):
1073
+ assert shared_mem is False
1074
+ assert pool_folder.endswith(pool_folder_name)
1075
+
1076
+
1077
+ @with_numpy
1078
+ @skipif(sys.platform == 'win32', reason='This test fails with a '
1079
+ 'PermissionError on Windows')
1080
+ @parametrize("mmap_mode", ["r+", "w+"])
1081
+ def test_numpy_arrays_use_different_memory(mmap_mode):
1082
+ def func(arr, value):
1083
+ arr[:] = value
1084
+ return arr
1085
+
1086
+ arrays = [np.zeros((10, 10), dtype='float64') for i in range(10)]
1087
+
1088
+ results = Parallel(mmap_mode=mmap_mode, max_nbytes=0, n_jobs=2)(
1089
+ delayed(func)(arr, i) for i, arr in enumerate(arrays))
1090
+
1091
+ for i, arr in enumerate(results):
1092
+ np.testing.assert_array_equal(arr, i)
1093
+
1094
+
1095
+ @with_numpy
1096
+ def test_weak_array_key_map():
1097
+
1098
+ def assert_empty_after_gc_collect(container, retries=100):
1099
+ for i in range(retries):
1100
+ if len(container) == 0:
1101
+ return
1102
+ gc.collect()
1103
+ sleep(.1)
1104
+ assert len(container) == 0
1105
+
1106
+ a = np.ones(42)
1107
+ m = _WeakArrayKeyMap()
1108
+ m.set(a, 'a')
1109
+ assert m.get(a) == 'a'
1110
+
1111
+ b = a
1112
+ assert m.get(b) == 'a'
1113
+ m.set(b, 'b')
1114
+ assert m.get(a) == 'b'
1115
+
1116
+ del a
1117
+ gc.collect()
1118
+ assert len(m._data) == 1
1119
+ assert m.get(b) == 'b'
1120
+
1121
+ del b
1122
+ assert_empty_after_gc_collect(m._data)
1123
+
1124
+ c = np.ones(42)
1125
+ m.set(c, 'c')
1126
+ assert len(m._data) == 1
1127
+ assert m.get(c) == 'c'
1128
+
1129
+ with raises(KeyError):
1130
+ m.get(np.ones(42))
1131
+
1132
+ del c
1133
+ assert_empty_after_gc_collect(m._data)
1134
+
1135
+ # Check that creating and dropping numpy arrays with potentially the same
1136
+ # object id will not cause the map to get confused.
1137
+ def get_set_get_collect(m, i):
1138
+ a = np.ones(42)
1139
+ with raises(KeyError):
1140
+ m.get(a)
1141
+ m.set(a, i)
1142
+ assert m.get(a) == i
1143
+ return id(a)
1144
+
1145
+ unique_ids = set([get_set_get_collect(m, i) for i in range(1000)])
1146
+ if platform.python_implementation() == 'CPython':
1147
+ # On CPython (at least) the same id is often reused many times for the
1148
+ # temporary arrays created under the local scope of the
1149
+ # get_set_get_collect function without causing any spurious lookups /
1150
+ # insertions in the map. Apparently on Python nogil, the id is not
1151
+ # reused as often.
1152
+ max_len_unique_ids = 400 if getattr(sys.flags, 'nogil', False) else 100
1153
+ assert len(unique_ids) < max_len_unique_ids
1154
+
1155
+
1156
+ def test_weak_array_key_map_no_pickling():
1157
+ m = _WeakArrayKeyMap()
1158
+ with raises(pickle.PicklingError):
1159
+ pickle.dumps(m)
1160
+
1161
+
1162
+ @with_numpy
1163
+ @with_multiprocessing
1164
+ def test_direct_mmap(tmpdir):
1165
+ testfile = str(tmpdir.join('arr.dat'))
1166
+ a = np.arange(10, dtype='uint8')
1167
+ a.tofile(testfile)
1168
+
1169
+ def _read_array():
1170
+ with open(testfile) as fd:
1171
+ mm = mmap.mmap(fd.fileno(), 0, access=mmap.ACCESS_READ, offset=0)
1172
+ return np.ndarray((10,), dtype=np.uint8, buffer=mm, offset=0)
1173
+
1174
+ def func(x):
1175
+ return x**2
1176
+
1177
+ arr = _read_array()
1178
+
1179
+ # this is expected to work and gives the reference
1180
+ ref = Parallel(n_jobs=2)(delayed(func)(x) for x in [a])
1181
+
1182
+ # now test that it works with the mmap array
1183
+ results = Parallel(n_jobs=2)(delayed(func)(x) for x in [arr])
1184
+ np.testing.assert_array_equal(results, ref)
1185
+
1186
+ # also test with a mmap array read in the subprocess
1187
+ def worker():
1188
+ return _read_array()
1189
+
1190
+ results = Parallel(n_jobs=2)(delayed(worker)() for _ in range(1))
1191
+ np.testing.assert_array_equal(results[0], arr)
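The memmapping tests above exercise joblib's automatic dumping of large array arguments to shared memmaps. As a minimal usage sketch of that behaviour (illustrative only, not part of this diff; the array shape and the `max_nbytes` threshold are arbitrary):

# Sketch: arrays larger than max_nbytes are dumped once to a shared temp
# folder and exposed to the workers as read-only np.memmap instances,
# avoiding a per-process copy of the data.
import numpy as np
from joblib import Parallel, delayed


def row_mean(arr, i):
    # In the workers, `arr` is typically a read-only memmap here.
    return arr[i].mean()


data = np.ones((1000, 100))  # ~800 kB of float64, above the threshold below
means = Parallel(n_jobs=2, max_nbytes=1000, mmap_mode='r')(
    delayed(row_mean)(data, i) for i in range(10)
)
assert means == [1.0] * 10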
llmeval-env/lib/python3.10/site-packages/joblib/test/test_memory_async.py ADDED
@@ -0,0 +1,170 @@
1
+ import asyncio
2
+ import gc
3
+ import shutil
4
+
5
+ import pytest
6
+
7
+ from joblib.memory import (AsyncMemorizedFunc, AsyncNotMemorizedFunc,
8
+ MemorizedResult, Memory, NotMemorizedResult)
9
+ from joblib.test.common import np, with_numpy
10
+ from joblib.testing import raises
11
+
12
+ from .test_memory import (corrupt_single_cache_item,
13
+ monkeypatch_cached_func_warn)
14
+
15
+
16
+ async def check_identity_lazy_async(func, accumulator, location):
17
+ """ Similar to check_identity_lazy_async for coroutine functions"""
18
+ memory = Memory(location=location, verbose=0)
19
+ func = memory.cache(func)
20
+ for i in range(3):
21
+ for _ in range(2):
22
+ value = await func(i)
23
+ assert value == i
24
+ assert len(accumulator) == i + 1
25
+
26
+
27
+ @pytest.mark.asyncio
28
+ async def test_memory_integration_async(tmpdir):
29
+ accumulator = list()
30
+
31
+ async def f(n):
32
+ await asyncio.sleep(0.1)
33
+ accumulator.append(1)
34
+ return n
35
+
36
+ await check_identity_lazy_async(f, accumulator, tmpdir.strpath)
37
+
38
+ # Now test clearing
39
+ for compress in (False, True):
40
+ for mmap_mode in ('r', None):
41
+ memory = Memory(location=tmpdir.strpath, verbose=10,
42
+ mmap_mode=mmap_mode, compress=compress)
43
+ # First clear the cache directory, to check that our code can
44
+ # handle that
45
+ # NOTE: this line would raise an exception, as the database
46
+ # file is still open; we ignore the error since we want to
47
+ # test what happens if the directory disappears
48
+ shutil.rmtree(tmpdir.strpath, ignore_errors=True)
49
+ g = memory.cache(f)
50
+ await g(1)
51
+ g.clear(warn=False)
52
+ current_accumulator = len(accumulator)
53
+ out = await g(1)
54
+
55
+ assert len(accumulator) == current_accumulator + 1
56
+ # Also, check that Memory.eval works similarly
57
+ evaled = await memory.eval(f, 1)
58
+ assert evaled == out
59
+ assert len(accumulator) == current_accumulator + 1
60
+
61
+ # Now do a smoke test with a function defined in __main__, as the name
62
+ # mangling rules are more complex
63
+ f.__module__ = '__main__'
64
+ memory = Memory(location=tmpdir.strpath, verbose=0)
65
+ await memory.cache(f)(1)
66
+
67
+
68
+ @pytest.mark.asyncio
69
+ async def test_no_memory_async():
70
+ accumulator = list()
71
+
72
+ async def ff(x):
73
+ await asyncio.sleep(0.1)
74
+ accumulator.append(1)
75
+ return x
76
+
77
+ memory = Memory(location=None, verbose=0)
78
+ gg = memory.cache(ff)
79
+ for _ in range(4):
80
+ current_accumulator = len(accumulator)
81
+ await gg(1)
82
+ assert len(accumulator) == current_accumulator + 1
83
+
84
+
85
+ @with_numpy
86
+ @pytest.mark.asyncio
87
+ async def test_memory_numpy_check_mmap_mode_async(tmpdir, monkeypatch):
88
+ """Check that mmap_mode is respected even at the first call"""
89
+
90
+ memory = Memory(location=tmpdir.strpath, mmap_mode='r', verbose=0)
91
+
92
+ @memory.cache()
93
+ async def twice(a):
94
+ return a * 2
95
+
96
+ a = np.ones(3)
97
+ b = await twice(a)
98
+ c = await twice(a)
99
+
100
+ assert isinstance(c, np.memmap)
101
+ assert c.mode == 'r'
102
+
103
+ assert isinstance(b, np.memmap)
104
+ assert b.mode == 'r'
105
+
106
+ # Corrupts the file, Deleting b and c mmaps
107
+ # is necessary to be able edit the file
108
+ del b
109
+ del c
110
+ gc.collect()
111
+ corrupt_single_cache_item(memory)
112
+
113
+ # Make sure that corrupting the file causes recomputation and that
114
+ # a warning is issued.
115
+ recorded_warnings = monkeypatch_cached_func_warn(twice, monkeypatch)
116
+ d = await twice(a)
117
+ assert len(recorded_warnings) == 1
118
+ exception_msg = 'Exception while loading results'
119
+ assert exception_msg in recorded_warnings[0]
120
+ # Asserts that the recomputation returns a mmap
121
+ assert isinstance(d, np.memmap)
122
+ assert d.mode == 'r'
123
+
124
+
125
+ @pytest.mark.asyncio
126
+ async def test_call_and_shelve_async(tmpdir):
127
+ async def f(x, y=1):
128
+ await asyncio.sleep(0.1)
129
+ return x ** 2 + y
130
+
131
+ # Test MemorizedFunc outputting a reference to cache.
132
+ for func, Result in zip((AsyncMemorizedFunc(f, tmpdir.strpath),
133
+ AsyncNotMemorizedFunc(f),
134
+ Memory(location=tmpdir.strpath,
135
+ verbose=0).cache(f),
136
+ Memory(location=None).cache(f),
137
+ ),
138
+ (MemorizedResult, NotMemorizedResult,
139
+ MemorizedResult, NotMemorizedResult,
140
+ )):
141
+ for _ in range(2):
142
+ result = await func.call_and_shelve(2)
143
+ assert isinstance(result, Result)
144
+ assert result.get() == 5
145
+
146
+ result.clear()
147
+ with raises(KeyError):
148
+ result.get()
149
+ result.clear() # Do nothing if there is no cache.
150
+
151
+
152
+ @pytest.mark.asyncio
153
+ async def test_memorized_func_call_async(memory):
154
+
155
+ async def ff(x, counter):
156
+ await asyncio.sleep(0.1)
157
+ counter[x] = counter.get(x, 0) + 1
158
+ return counter[x]
159
+
160
+ gg = memory.cache(ff, ignore=['counter'])
161
+
162
+ counter = {}
163
+ assert await gg(2, counter) == 1
164
+ assert await gg(2, counter) == 1
165
+
166
+ x, meta = await gg.call(2, counter)
167
+ assert x == 2, "f has not been called properly"
168
+ assert isinstance(meta, dict), (
169
+ "Metadata are not returned by MemorizedFunc.call."
170
+ )
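The async tests above check that `Memory.cache` transparently wraps coroutine functions. A minimal sketch of that usage (illustrative only; the cache location is an arbitrary local path):

# Sketch: caching a coroutine with joblib.Memory. The second await is served
# from the on-disk cache and skips the sleep.
import asyncio
from joblib import Memory

memory = Memory(location='./joblib_cache_demo', verbose=0)


@memory.cache
async def slow_square(x):
    await asyncio.sleep(0.1)  # stands in for an expensive async computation
    return x * x


async def main():
    first = await slow_square(3)   # computed and written to the cache
    second = await slow_square(3)  # read back from the cache
    assert first == second == 9


asyncio.run(main())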
llmeval-env/lib/python3.10/site-packages/joblib/test/test_missing_multiprocessing.py ADDED
@@ -0,0 +1,32 @@
1
+ """
2
+ Pyodide and other single-threaded Python builds will be missing the
3
+ _multiprocessing module. Test that joblib still works in this environment.
4
+ """
5
+
6
+ import os
7
+ import subprocess
8
+ import sys
9
+
10
+
11
+ def test_missing_multiprocessing(tmp_path):
12
+ """
13
+ Test that import joblib works even if _multiprocessing is missing.
14
+
15
+ pytest has already imported everything from joblib. The most reasonable way
16
+ to test importing joblib with a modified environment is to invoke a separate
17
+ Python process. This also ensures that we don't break other tests by
18
+ importing a bad `_multiprocessing` module.
19
+ """
20
+ (tmp_path / "_multiprocessing.py").write_text(
21
+ 'raise ImportError("No _multiprocessing module!")'
22
+ )
23
+ env = dict(os.environ)
24
+ # For the subprocess, use the current sys.path with our custom version of
25
+ # _multiprocessing inserted.
26
+ env["PYTHONPATH"] = ":".join([str(tmp_path)] + sys.path)
27
+ subprocess.check_call(
28
+ [sys.executable, "-c",
29
+ "import joblib, math; "
30
+ "joblib.Parallel(n_jobs=1)("
31
+ "joblib.delayed(math.sqrt)(i**2) for i in range(10))"
32
+ ], env=env)
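The test above relies on the fact that with `n_jobs=1` joblib uses its sequential backend and never needs worker processes. A small sketch of that fallback (illustrative only, outside the test suite):

# Sketch: with n_jobs=1, tasks run sequentially in the calling process, so no
# subprocesses (and no _multiprocessing module) are involved.
import os
from joblib import Parallel, delayed

parent_pid = os.getpid()
pids = Parallel(n_jobs=1)(delayed(os.getpid)() for _ in range(3))
assert pids == [parent_pid] * 3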
llmeval-env/lib/python3.10/site-packages/joblib/test/test_module.py ADDED
@@ -0,0 +1,53 @@
1
+ import sys
2
+ import joblib
3
+ from joblib.testing import check_subprocess_call
4
+ from joblib.test.common import with_multiprocessing
5
+
6
+
7
+ def test_version():
8
+ assert hasattr(joblib, '__version__'), (
9
+ "There are no __version__ argument on the joblib module")
10
+
11
+
12
+ @with_multiprocessing
13
+ def test_no_start_method_side_effect_on_import():
14
+ # check that importing joblib does not implicitly set the global
15
+ # start_method for multiprocessing.
16
+ code = """if True:
17
+ import joblib
18
+ import multiprocessing as mp
19
+ # The following line would raise RuntimeError if the
20
+ # start_method is already set.
21
+ mp.set_start_method("loky")
22
+ """
23
+ check_subprocess_call([sys.executable, '-c', code])
24
+
25
+
26
+ @with_multiprocessing
27
+ def test_no_semaphore_tracker_on_import():
28
+ # check that importing joblib does not implicitly spawn a resource tracker
29
+ # or a semaphore tracker
30
+ code = """if True:
31
+ import joblib
32
+ from multiprocessing import semaphore_tracker
33
+ # The following assertion would fail if a semaphore tracker
34
+ # had already been spawned at import time.
35
+ msg = "multiprocessing.semaphore_tracker has been spawned on import"
36
+ assert semaphore_tracker._semaphore_tracker._fd is None, msg"""
37
+ if sys.version_info >= (3, 8):
38
+ # semaphore_tracker was renamed in Python 3.8:
39
+ code = code.replace("semaphore_tracker", "resource_tracker")
40
+ check_subprocess_call([sys.executable, '-c', code])
41
+
42
+
43
+ @with_multiprocessing
44
+ def test_no_resource_tracker_on_import():
45
+ code = """if True:
46
+ import joblib
47
+ from joblib.externals.loky.backend import resource_tracker
48
+ # The following assertion would fail if a resource tracker
49
+ # had already been spawned at import time.
50
+ msg = "loky.resource_tracker has been spawned on import"
51
+ assert resource_tracker._resource_tracker._fd is None, msg
52
+ """
53
+ check_subprocess_call([sys.executable, '-c', code])
llmeval-env/lib/python3.10/site-packages/joblib/test/test_numpy_pickle_utils.py ADDED
@@ -0,0 +1,9 @@
1
+ from joblib.compressor import BinaryZlibFile
2
+ from joblib.testing import parametrize
3
+
4
+
5
+ @parametrize('filename', ['test', u'test']) # testing str and unicode names
6
+ def test_binary_zlib_file(tmpdir, filename):
7
+ """Testing creation of files depending on the type of the filenames."""
8
+ binary_file = BinaryZlibFile(tmpdir.join(filename).strpath, mode='wb')
9
+ binary_file.close()
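The test above only checks file creation. As a hedged sketch of a full round trip with `BinaryZlibFile` (assuming it behaves like a regular binary file object with `write`, `read` and `close`, which is how the test above creates it; the filename is arbitrary):

# Sketch: write compressed bytes with BinaryZlibFile and read them back.
from joblib.compressor import BinaryZlibFile

payload = b"hello, compressed world"

f = BinaryZlibFile('demo.zlib', mode='wb')
f.write(payload)
f.close()

f = BinaryZlibFile('demo.zlib', mode='rb')
assert f.read() == payload
f.close()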
llmeval-env/lib/python3.10/site-packages/joblib/test/test_parallel.py ADDED
@@ -0,0 +1,2056 @@
1
+ """
2
+ Test the parallel module.
3
+ """
4
+
5
+ # Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
6
+ # Copyright (c) 2010-2011 Gael Varoquaux
7
+ # License: BSD Style, 3 clauses.
8
+
9
+ import os
10
+ import sys
11
+ import time
12
+ import mmap
13
+ import weakref
14
+ import warnings
15
+ import threading
16
+ from traceback import format_exception
17
+ from math import sqrt
18
+ from time import sleep
19
+ from pickle import PicklingError
20
+ from contextlib import nullcontext
21
+ from multiprocessing import TimeoutError
22
+ import pytest
23
+
24
+ import joblib
25
+ from joblib import parallel
26
+ from joblib import dump, load
27
+
28
+ from joblib._multiprocessing_helpers import mp
29
+
30
+ from joblib.test.common import np, with_numpy
31
+ from joblib.test.common import with_multiprocessing
32
+ from joblib.test.common import IS_PYPY, force_gc_pypy
33
+ from joblib.testing import (parametrize, raises, check_subprocess_call,
34
+ skipif, warns)
35
+
36
+ if mp is not None:
37
+ # Loky is not available if multiprocessing is not
38
+ from joblib.externals.loky import get_reusable_executor
39
+
40
+ from queue import Queue
41
+
42
+ try:
43
+ import posix
44
+ except ImportError:
45
+ posix = None
46
+
47
+ try:
48
+ from ._openmp_test_helper.parallel_sum import parallel_sum
49
+ except ImportError:
50
+ parallel_sum = None
51
+
52
+ try:
53
+ import distributed
54
+ except ImportError:
55
+ distributed = None
56
+
57
+ from joblib._parallel_backends import SequentialBackend
58
+ from joblib._parallel_backends import ThreadingBackend
59
+ from joblib._parallel_backends import MultiprocessingBackend
60
+ from joblib._parallel_backends import ParallelBackendBase
61
+ from joblib._parallel_backends import LokyBackend
62
+
63
+ from joblib.parallel import Parallel, delayed
64
+ from joblib.parallel import parallel_config
65
+ from joblib.parallel import parallel_backend
66
+ from joblib.parallel import register_parallel_backend
67
+ from joblib.parallel import effective_n_jobs, cpu_count
68
+
69
+ from joblib.parallel import mp, BACKENDS, DEFAULT_BACKEND
70
+
71
+
72
+ RETURN_GENERATOR_BACKENDS = BACKENDS.copy()
73
+ RETURN_GENERATOR_BACKENDS.pop("multiprocessing", None)
74
+
75
+ ALL_VALID_BACKENDS = [None] + sorted(BACKENDS.keys())
76
+ # Add instances of backend classes deriving from ParallelBackendBase
77
+ ALL_VALID_BACKENDS += [BACKENDS[backend_str]() for backend_str in BACKENDS]
78
+ if mp is None:
79
+ PROCESS_BACKENDS = []
80
+ else:
81
+ PROCESS_BACKENDS = ['multiprocessing', 'loky']
82
+ PARALLEL_BACKENDS = PROCESS_BACKENDS + ['threading']
83
+
84
+ if hasattr(mp, 'get_context'):
85
+ # Custom multiprocessing context in Python 3.4+
86
+ ALL_VALID_BACKENDS.append(mp.get_context('spawn'))
87
+
88
+ DefaultBackend = BACKENDS[DEFAULT_BACKEND]
89
+
90
+
91
+ def get_workers(backend):
92
+ return getattr(backend, '_pool', getattr(backend, '_workers', None))
93
+
94
+
95
+ def division(x, y):
96
+ return x / y
97
+
98
+
99
+ def square(x):
100
+ return x ** 2
101
+
102
+
103
+ class MyExceptionWithFinickyInit(Exception):
104
+ """An exception class with non trivial __init__
105
+ """
106
+ def __init__(self, a, b, c, d):
107
+ pass
108
+
109
+
110
+ def exception_raiser(x, custom_exception=False):
111
+ if x == 7:
112
+ raise (MyExceptionWithFinickyInit('a', 'b', 'c', 'd')
113
+ if custom_exception else ValueError)
114
+ return x
115
+
116
+
117
+ def interrupt_raiser(x):
118
+ time.sleep(.05)
119
+ raise KeyboardInterrupt
120
+
121
+
122
+ def f(x, y=0, z=0):
123
+ """ A module-level function so that it can be spawn with
124
+ multiprocessing.
125
+ """
126
+ return x ** 2 + y + z
127
+
128
+
129
+ def _active_backend_type():
130
+ return type(parallel.get_active_backend()[0])
131
+
132
+
133
+ def parallel_func(inner_n_jobs, backend):
134
+ return Parallel(n_jobs=inner_n_jobs, backend=backend)(
135
+ delayed(square)(i) for i in range(3))
136
+
137
+
138
+ ###############################################################################
139
+ def test_cpu_count():
140
+ assert cpu_count() > 0
141
+
142
+
143
+ def test_effective_n_jobs():
144
+ assert effective_n_jobs() > 0
145
+
146
+
147
+ @parametrize("context", [parallel_config, parallel_backend])
148
+ @pytest.mark.parametrize(
149
+ "backend_n_jobs, expected_n_jobs",
150
+ [(3, 3), (-1, effective_n_jobs(n_jobs=-1)), (None, 1)],
151
+ ids=["positive-int", "negative-int", "None"]
152
+ )
153
+ @with_multiprocessing
154
+ def test_effective_n_jobs_None(context, backend_n_jobs, expected_n_jobs):
155
+ # check the number of effective jobs when `n_jobs=None`
156
+ # non-regression test for https://github.com/joblib/joblib/issues/984
157
+ with context("threading", n_jobs=backend_n_jobs):
158
+ # when using a backend, the default of number jobs will be the one set
159
+ # in the backend
160
+ assert effective_n_jobs(n_jobs=None) == expected_n_jobs
161
+ # without any backend, None will default to a single job
162
+ assert effective_n_jobs(n_jobs=None) == 1
163
+
164
+
165
+ ###############################################################################
166
+ # Test parallel
167
+
168
+ @parametrize('backend', ALL_VALID_BACKENDS)
169
+ @parametrize('n_jobs', [1, 2, -1, -2])
170
+ @parametrize('verbose', [2, 11, 100])
171
+ def test_simple_parallel(backend, n_jobs, verbose):
172
+ assert ([square(x) for x in range(5)] ==
173
+ Parallel(n_jobs=n_jobs, backend=backend,
174
+ verbose=verbose)(
175
+ delayed(square)(x) for x in range(5)))
176
+
177
+
178
+ @parametrize('backend', ALL_VALID_BACKENDS)
179
+ def test_main_thread_renamed_no_warning(backend, monkeypatch):
180
+ # Check that no default backend relies on the name of the main thread:
181
+ # https://github.com/joblib/joblib/issues/180#issuecomment-253266247
182
+ # Some programs use a different name for the main thread. This is the case
183
+ # for uWSGI apps for instance.
184
+ monkeypatch.setattr(target=threading.current_thread(), name='name',
185
+ value='some_new_name_for_the_main_thread')
186
+
187
+ with warnings.catch_warnings(record=True) as warninfo:
188
+ results = Parallel(n_jobs=2, backend=backend)(
189
+ delayed(square)(x) for x in range(3))
190
+ assert results == [0, 1, 4]
191
+
192
+ # Due to the default parameters of LokyBackend, there is a chance that
193
+ # warninfo catches Warnings from worker timeouts. We remove it if it exists
194
+ warninfo = [w for w in warninfo if "worker timeout" not in str(w.message)]
195
+
196
+ # The multiprocessing backend will raise a warning when detecting that is
197
+ # started from the non-main thread. Let's check that there is no false
198
+ # positive because of the name change.
199
+ assert len(warninfo) == 0
200
+
201
+
202
+ def _assert_warning_nested(backend, inner_n_jobs, expected):
203
+ with warnings.catch_warnings(record=True) as warninfo:
204
+ warnings.simplefilter("always")
205
+ parallel_func(backend=backend, inner_n_jobs=inner_n_jobs)
206
+
207
+ warninfo = [w.message for w in warninfo]
208
+ if expected:
209
+ if warninfo:
210
+ warnings_are_correct = all(
211
+ 'backed parallel loops cannot' in each.args[0]
212
+ for each in warninfo
213
+ )
214
+ # With Python nogil, when the outer backend is threading, we might
215
+ # see more that one warning
216
+ warnings_have_the_right_length = (
217
+ len(warninfo) >= 1 if getattr(sys.flags, 'nogil', False)
218
+ else len(warninfo) == 1)
219
+ return warnings_are_correct and warnings_have_the_right_length
220
+
221
+ return False
222
+ else:
223
+ assert not warninfo
224
+ return True
225
+
226
+
227
+ @with_multiprocessing
228
+ @parametrize('parent_backend,child_backend,expected', [
229
+ ('loky', 'multiprocessing', True),
230
+ ('loky', 'loky', False),
231
+ ('multiprocessing', 'multiprocessing', True),
232
+ ('multiprocessing', 'loky', True),
233
+ ('threading', 'multiprocessing', True),
234
+ ('threading', 'loky', True),
235
+ ])
236
+ def test_nested_parallel_warnings(parent_backend, child_backend, expected):
237
+
238
+ # no warnings if inner_n_jobs=1
239
+ Parallel(n_jobs=2, backend=parent_backend)(
240
+ delayed(_assert_warning_nested)(
241
+ backend=child_backend, inner_n_jobs=1,
242
+ expected=False)
243
+ for _ in range(5))
244
+
245
+ # warnings if inner_n_jobs != 1 and expected
246
+ res = Parallel(n_jobs=2, backend=parent_backend)(
247
+ delayed(_assert_warning_nested)(
248
+ backend=child_backend, inner_n_jobs=2,
249
+ expected=expected)
250
+ for _ in range(5))
251
+
252
+ # warning handling is not thread safe. One thread might see multiple
253
+ # warning or no warning at all.
254
+ if parent_backend == "threading":
255
+ if IS_PYPY and not any(res):
256
+ # Related to joblib#1426, should be removed once it is solved.
257
+ pytest.xfail(reason="This test often fails in PyPy.")
258
+ assert any(res)
259
+ else:
260
+ assert all(res)
261
+
262
+
263
+ @with_multiprocessing
264
+ @parametrize('backend', ['loky', 'multiprocessing', 'threading'])
265
+ def test_background_thread_parallelism(backend):
266
+ is_run_parallel = [False]
267
+
268
+ def background_thread(is_run_parallel):
269
+ with warnings.catch_warnings(record=True) as warninfo:
270
+ Parallel(n_jobs=2)(
271
+ delayed(sleep)(.1) for _ in range(4))
272
+ print(len(warninfo))
273
+ is_run_parallel[0] = len(warninfo) == 0
274
+
275
+ t = threading.Thread(target=background_thread, args=(is_run_parallel,))
276
+ t.start()
277
+ t.join()
278
+ assert is_run_parallel[0]
279
+
280
+
281
+ def nested_loop(backend):
282
+ Parallel(n_jobs=2, backend=backend)(
283
+ delayed(square)(.01) for _ in range(2))
284
+
285
+
286
+ @parametrize('child_backend', BACKENDS)
287
+ @parametrize('parent_backend', BACKENDS)
288
+ def test_nested_loop(parent_backend, child_backend):
289
+ Parallel(n_jobs=2, backend=parent_backend)(
290
+ delayed(nested_loop)(child_backend) for _ in range(2))
291
+
292
+
293
+ def raise_exception(backend):
294
+ raise ValueError
295
+
296
+
297
+ @with_multiprocessing
298
+ def test_nested_loop_with_exception_with_loky():
299
+ with raises(ValueError):
300
+ with Parallel(n_jobs=2, backend="loky") as parallel:
301
+ parallel([delayed(nested_loop)("loky"),
302
+ delayed(raise_exception)("loky")])
303
+
304
+
305
+ def test_mutate_input_with_threads():
306
+ """Input is mutable when using the threading backend"""
307
+ q = Queue(maxsize=5)
308
+ Parallel(n_jobs=2, backend="threading")(
309
+ delayed(q.put)(1) for _ in range(5))
310
+ assert q.full()
311
+
312
+
313
+ @parametrize('n_jobs', [1, 2, 3])
314
+ def test_parallel_kwargs(n_jobs):
315
+ """Check the keyword argument processing of pmap."""
316
+ lst = range(10)
317
+ assert ([f(x, y=1) for x in lst] ==
318
+ Parallel(n_jobs=n_jobs)(delayed(f)(x, y=1) for x in lst))
319
+
320
+
321
+ @parametrize('backend', PARALLEL_BACKENDS)
322
+ def test_parallel_as_context_manager(backend):
323
+ lst = range(10)
324
+ expected = [f(x, y=1) for x in lst]
325
+
326
+ with Parallel(n_jobs=4, backend=backend) as p:
327
+ # Internally a pool instance has been eagerly created and is managed
328
+ # via the context manager protocol
329
+ managed_backend = p._backend
330
+
331
+ # We make call with the managed parallel object several times inside
332
+ # the managed block:
333
+ assert expected == p(delayed(f)(x, y=1) for x in lst)
334
+ assert expected == p(delayed(f)(x, y=1) for x in lst)
335
+
336
+ # Those calls have all used the same pool instance:
337
+ if mp is not None:
338
+ assert get_workers(managed_backend) is get_workers(p._backend)
339
+
340
+ # As soon as we exit the context manager block, the pool is terminated and
341
+ # no longer referenced from the parallel object:
342
+ if mp is not None:
343
+ assert get_workers(p._backend) is None
344
+
345
+ # It's still possible to use the parallel instance in non-managed mode:
346
+ assert expected == p(delayed(f)(x, y=1) for x in lst)
347
+ if mp is not None:
348
+ assert get_workers(p._backend) is None
349
+
350
+
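A minimal sketch of the context-manager reuse pattern exercised by the test above, using only the public joblib API; the trivial task passed to delayed is illustrative and the snippet is not part of the committed test file:

    from joblib import Parallel, delayed

    with Parallel(n_jobs=2) as p:
        first = p(delayed(abs)(-i) for i in range(3))   # worker pool created once
        second = p(delayed(abs)(-i) for i in range(3))  # same pool reused
    # On exiting the block the pool is terminated; the Parallel object can
    # still be called afterwards in non-managed mode.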
351
+ @with_multiprocessing
352
+ def test_parallel_pickling():
353
+ """ Check that pmap captures the errors when it is passed an object
354
+ that cannot be pickled.
355
+ """
356
+ class UnpicklableObject(object):
357
+ def __reduce__(self):
358
+ raise RuntimeError('123')
359
+
360
+ with raises(PicklingError, match=r"the task to send"):
361
+ Parallel(n_jobs=2, backend='loky')(delayed(id)(
362
+ UnpicklableObject()) for _ in range(10))
363
+
364
+
365
+ @with_numpy
366
+ @with_multiprocessing
367
+ @parametrize('byteorder', ['<', '>', '='])
368
+ def test_parallel_byteorder_corruption(byteorder):
369
+
370
+ def inspect_byteorder(x):
371
+ return x, x.dtype.byteorder
372
+
373
+ x = np.arange(6).reshape((2, 3)).view(f'{byteorder}i4')
374
+
375
+ initial_np_byteorder = x.dtype.byteorder
376
+
377
+ result = Parallel(n_jobs=2, backend='loky')(
378
+ delayed(inspect_byteorder)(x) for _ in range(3)
379
+ )
380
+
381
+ for x_returned, byteorder_in_worker in result:
382
+ assert byteorder_in_worker == initial_np_byteorder
383
+ assert byteorder_in_worker == x_returned.dtype.byteorder
384
+ np.testing.assert_array_equal(x, x_returned)
385
+
386
+
387
+ @parametrize('backend', PARALLEL_BACKENDS)
388
+ def test_parallel_timeout_success(backend):
389
+ # Check that timeout isn't thrown when function is fast enough
390
+ assert len(Parallel(n_jobs=2, backend=backend, timeout=30)(
391
+ delayed(sleep)(0.001) for x in range(10))) == 10
392
+
393
+
394
+ @with_multiprocessing
395
+ @parametrize('backend', PARALLEL_BACKENDS)
396
+ def test_parallel_timeout_fail(backend):
397
+ # Check that timeout properly fails when function is too slow
398
+ with raises(TimeoutError):
399
+ Parallel(n_jobs=2, backend=backend, timeout=0.01)(
400
+ delayed(sleep)(10) for x in range(10))
401
+
402
+
403
+ @with_multiprocessing
404
+ @parametrize('backend', PROCESS_BACKENDS)
405
+ def test_error_capture(backend):
406
+ # Check that errors are captured, and that the correct exceptions
407
+ # are raised.
408
+ if mp is not None:
409
+ with raises(ZeroDivisionError):
410
+ Parallel(n_jobs=2, backend=backend)(
411
+ [delayed(division)(x, y)
412
+ for x, y in zip((0, 1), (1, 0))])
413
+
414
+ with raises(KeyboardInterrupt):
415
+ Parallel(n_jobs=2, backend=backend)(
416
+ [delayed(interrupt_raiser)(x) for x in (1, 0)])
417
+
418
+ # Try again with the context manager API
419
+ with Parallel(n_jobs=2, backend=backend) as parallel:
420
+ assert get_workers(parallel._backend) is not None
421
+ original_workers = get_workers(parallel._backend)
422
+
423
+ with raises(ZeroDivisionError):
424
+ parallel([delayed(division)(x, y)
425
+ for x, y in zip((0, 1), (1, 0))])
426
+
427
+ # The managed pool should still be available and be in a working
428
+ # state despite the previously raised (and caught) exception
429
+ assert get_workers(parallel._backend) is not None
430
+
431
+ # The pool should have been interrupted and restarted:
432
+ assert get_workers(parallel._backend) is not original_workers
433
+
434
+ assert ([f(x, y=1) for x in range(10)] ==
435
+ parallel(delayed(f)(x, y=1) for x in range(10)))
436
+
437
+ original_workers = get_workers(parallel._backend)
438
+ with raises(KeyboardInterrupt):
439
+ parallel([delayed(interrupt_raiser)(x) for x in (1, 0)])
440
+
441
+ # The pool should still be available despite the exception
442
+ assert get_workers(parallel._backend) is not None
443
+
444
+ # The pool should have been interrupted and restarted:
445
+ assert get_workers(parallel._backend) is not original_workers
446
+
447
+ assert ([f(x, y=1) for x in range(10)] ==
448
+ parallel(delayed(f)(x, y=1) for x in range(10))), (
449
+ parallel._iterating, parallel.n_completed_tasks,
450
+ parallel.n_dispatched_tasks, parallel._aborting
451
+ )
452
+
453
+ # Check that the inner pool has been terminated when exiting the
454
+ # context manager
455
+ assert get_workers(parallel._backend) is None
456
+ else:
457
+ with raises(KeyboardInterrupt):
458
+ Parallel(n_jobs=2)(
459
+ [delayed(interrupt_raiser)(x) for x in (1, 0)])
460
+
461
+ # wrapped exceptions should inherit from the class of the original
462
+ # exception to make it easy to catch them
463
+ with raises(ZeroDivisionError):
464
+ Parallel(n_jobs=2)(
465
+ [delayed(division)(x, y) for x, y in zip((0, 1), (1, 0))])
466
+
467
+ with raises(MyExceptionWithFinickyInit):
468
+ Parallel(n_jobs=2, verbose=0)(
469
+ (delayed(exception_raiser)(i, custom_exception=True)
470
+ for i in range(30)))
471
+
472
+
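A minimal sketch of the error-propagation behaviour checked above, assuming only the public joblib API; div is an illustrative helper, not part of the test module:

    from joblib import Parallel, delayed

    def div(a, b):
        return a / b

    try:
        Parallel(n_jobs=2)(delayed(div)(1, d) for d in (1, 0))
    except ZeroDivisionError:
        # An exception raised in a worker is re-raised in the parent process
        # with its original type, so it can be caught as usual.
        pass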
473
+ @with_multiprocessing
474
+ @parametrize('backend', BACKENDS)
475
+ def test_error_in_task_iterator(backend):
476
+
477
+ def my_generator(raise_at=0):
478
+ for i in range(20):
479
+ if i == raise_at:
480
+ raise ValueError("Iterator Raising Error")
481
+ yield i
482
+
483
+ with Parallel(n_jobs=2, backend=backend) as p:
484
+ # The error is raised in the pre-dispatch phase
485
+ with raises(ValueError, match="Iterator Raising Error"):
486
+ p(delayed(square)(i) for i in my_generator(raise_at=0))
487
+
488
+ # The error is raised when dispatching a new task after the
489
+ # pre-dispatch (likely to happen in a different thread)
490
+ with raises(ValueError, match="Iterator Raising Error"):
491
+ p(delayed(square)(i) for i in my_generator(raise_at=5))
492
+
493
+ # Same, but raises long after the pre-dispatch phase
494
+ with raises(ValueError, match="Iterator Raising Error"):
495
+ p(delayed(square)(i) for i in my_generator(raise_at=19))
496
+
497
+
498
+ def consumer(queue, item):
499
+ queue.append('Consumed %s' % item)
500
+
501
+
502
+ @parametrize('backend', BACKENDS)
503
+ @parametrize('batch_size, expected_queue',
504
+ [(1, ['Produced 0', 'Consumed 0',
505
+ 'Produced 1', 'Consumed 1',
506
+ 'Produced 2', 'Consumed 2',
507
+ 'Produced 3', 'Consumed 3',
508
+ 'Produced 4', 'Consumed 4',
509
+ 'Produced 5', 'Consumed 5']),
510
+ (4, [ # First Batch
511
+ 'Produced 0', 'Produced 1', 'Produced 2', 'Produced 3',
512
+ 'Consumed 0', 'Consumed 1', 'Consumed 2', 'Consumed 3',
513
+ # Second batch
514
+ 'Produced 4', 'Produced 5', 'Consumed 4', 'Consumed 5'])])
515
+ def test_dispatch_one_job(backend, batch_size, expected_queue):
516
+ """ Test that with only one job, Parallel does act as a iterator.
517
+ """
518
+ queue = list()
519
+
520
+ def producer():
521
+ for i in range(6):
522
+ queue.append('Produced %i' % i)
523
+ yield i
524
+
525
+ Parallel(n_jobs=1, batch_size=batch_size, backend=backend)(
526
+ delayed(consumer)(queue, x) for x in producer())
527
+ assert queue == expected_queue
528
+ assert len(queue) == 12
529
+
530
+
531
+ @with_multiprocessing
532
+ @parametrize('backend', PARALLEL_BACKENDS)
533
+ def test_dispatch_multiprocessing(backend):
534
+ """ Check that using pre_dispatch Parallel does indeed dispatch items
535
+ lazily.
536
+ """
537
+ manager = mp.Manager()
538
+ queue = manager.list()
539
+
540
+ def producer():
541
+ for i in range(6):
542
+ queue.append('Produced %i' % i)
543
+ yield i
544
+
545
+ Parallel(n_jobs=2, batch_size=1, pre_dispatch=3, backend=backend)(
546
+ delayed(consumer)(queue, 'any') for _ in producer())
547
+
548
+ queue_contents = list(queue)
549
+ assert queue_contents[0] == 'Produced 0'
550
+
551
+ # Only 3 tasks are pre-dispatched out of 6. The 4th task is dispatched only
552
+ # after any of the first 3 jobs have completed.
553
+ first_consumption_index = queue_contents[:4].index('Consumed any')
554
+ assert first_consumption_index > -1
555
+
556
+ produced_3_index = queue_contents.index('Produced 3') # 4th task produced
557
+ assert produced_3_index > first_consumption_index
558
+
559
+ assert len(queue) == 12
560
+
561
+
562
+ def test_batching_auto_threading():
563
+ # batching='auto' with the threading backend leaves the effective batch
564
+ # size at 1 (no batching) as it has been found to never be beneficial with
565
+ # this low-overhead backend.
566
+
567
+ with Parallel(n_jobs=2, batch_size='auto', backend='threading') as p:
568
+ p(delayed(id)(i) for i in range(5000)) # many very fast tasks
569
+ assert p._backend.compute_batch_size() == 1
570
+
571
+
572
+ @with_multiprocessing
573
+ @parametrize('backend', PROCESS_BACKENDS)
574
+ def test_batching_auto_subprocesses(backend):
575
+ with Parallel(n_jobs=2, batch_size='auto', backend=backend) as p:
576
+ p(delayed(id)(i) for i in range(5000)) # many very fast tasks
577
+
578
+ # It should be strictly larger than 1 but as we don't want heisen
579
+ # failures on clogged CI worker environments, be safe and only check that
580
+ # it's a strictly positive number.
581
+ assert p._backend.compute_batch_size() > 0
582
+
583
+
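A minimal sketch of the batch_size='auto' usage exercised by the two tests above; illustrative only, with a trivial task standing in for real work:

    from joblib import Parallel, delayed

    # With process-based backends, 'auto' grows the batch size so that many
    # short tasks are sent to the workers in fewer, larger chunks; with the
    # threading backend the effective batch size stays at 1.
    results = Parallel(n_jobs=2, batch_size='auto')(
        delayed(pow)(i, 2) for i in range(1000))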
584
+ def test_exception_dispatch():
585
+ """Make sure that exception raised during dispatch are indeed captured"""
586
+ with raises(ValueError):
587
+ Parallel(n_jobs=2, pre_dispatch=16, verbose=0)(
588
+ delayed(exception_raiser)(i) for i in range(30))
589
+
590
+
591
+ def nested_function_inner(i):
592
+ Parallel(n_jobs=2)(
593
+ delayed(exception_raiser)(j) for j in range(30))
594
+
595
+
596
+ def nested_function_outer(i):
597
+ Parallel(n_jobs=2)(
598
+ delayed(nested_function_inner)(j) for j in range(30))
599
+
600
+
601
+ @with_multiprocessing
602
+ @parametrize('backend', PARALLEL_BACKENDS)
603
+ @pytest.mark.xfail(reason="https://github.com/joblib/loky/pull/255")
604
+ def test_nested_exception_dispatch(backend):
605
+ """Ensure errors for nested joblib cases gets propagated
606
+
607
+ We rely on the Python 3 built-in __cause__ system that already
608
+ reports this kind of information to the user.
609
+ """
610
+ with raises(ValueError) as excinfo:
611
+ Parallel(n_jobs=2, backend=backend)(
612
+ delayed(nested_function_outer)(i) for i in range(30))
613
+
614
+ # Check that important information such as function names are visible
615
+ # in the final error message reported to the user
616
+ report_lines = format_exception(excinfo.type, excinfo.value, excinfo.tb)
617
+ report = "".join(report_lines)
618
+ assert 'nested_function_outer' in report
619
+ assert 'nested_function_inner' in report
620
+ assert 'exception_raiser' in report
621
+
622
+ assert type(excinfo.value) is ValueError
623
+
624
+
625
+ class FakeParallelBackend(SequentialBackend):
626
+ """Pretends to run concurrently while running sequentially."""
627
+
628
+ def configure(self, n_jobs=1, parallel=None, **backend_args):
629
+ self.n_jobs = self.effective_n_jobs(n_jobs)
630
+ self.parallel = parallel
631
+ return n_jobs
632
+
633
+ def effective_n_jobs(self, n_jobs=1):
634
+ if n_jobs < 0:
635
+ n_jobs = max(mp.cpu_count() + 1 + n_jobs, 1)
636
+ return n_jobs
637
+
638
+
639
+ def test_invalid_backend():
640
+ with raises(ValueError, match="Invalid backend:"):
641
+ Parallel(backend='unit-testing')
642
+
643
+ with raises(ValueError, match="Invalid backend:"):
644
+ with parallel_config(backend='unit-testing'):
645
+ pass
646
+
647
+ with raises(ValueError, match="Invalid backend:"):
648
+ with parallel_backend(backend='unit-testing'):
649
+ pass
650
+
651
+
652
+ @parametrize('backend', ALL_VALID_BACKENDS)
653
+ def test_invalid_njobs(backend):
654
+ with raises(ValueError) as excinfo:
655
+ Parallel(n_jobs=0, backend=backend)._initialize_backend()
656
+ assert "n_jobs == 0 in Parallel has no meaning" in str(excinfo.value)
657
+
658
+ with raises(ValueError) as excinfo:
659
+ Parallel(n_jobs=0.5, backend=backend)._initialize_backend()
660
+ assert "n_jobs == 0 in Parallel has no meaning" in str(excinfo.value)
661
+
662
+ with raises(ValueError) as excinfo:
663
+ Parallel(n_jobs="2.3", backend=backend)._initialize_backend()
664
+ assert "n_jobs could not be converted to int" in str(excinfo.value)
665
+
666
+ with raises(ValueError) as excinfo:
667
+ Parallel(n_jobs="invalid_str", backend=backend)._initialize_backend()
668
+ assert "n_jobs could not be converted to int" in str(excinfo.value)
669
+
670
+
671
+ @with_multiprocessing
672
+ @parametrize('backend', PARALLEL_BACKENDS)
673
+ @parametrize('n_jobs', ['2', 2.3, 2])
674
+ def test_njobs_converted_to_int(backend, n_jobs):
675
+ p = Parallel(n_jobs=n_jobs, backend=backend)
676
+ assert p._effective_n_jobs() == 2
677
+
678
+ res = p(delayed(square)(i) for i in range(10))
679
+ assert all(r == square(i) for i, r in enumerate(res))
680
+
681
+
682
+ def test_register_parallel_backend():
683
+ try:
684
+ register_parallel_backend("test_backend", FakeParallelBackend)
685
+ assert "test_backend" in BACKENDS
686
+ assert BACKENDS["test_backend"] == FakeParallelBackend
687
+ finally:
688
+ del BACKENDS["test_backend"]
689
+
690
+
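A minimal sketch of registering and selecting a custom backend, assuming the public register_parallel_backend / parallel_backend API; MyThreadBackend is an illustrative name that simply reuses joblib's ThreadingBackend implementation:

    from joblib import Parallel, delayed, parallel_backend, register_parallel_backend
    from joblib._parallel_backends import ThreadingBackend

    class MyThreadBackend(ThreadingBackend):
        """Custom backend that behaves like the built-in threading backend."""

    register_parallel_backend('my_threads', MyThreadBackend)
    with parallel_backend('my_threads', n_jobs=2):
        out = Parallel()(delayed(len)('abc') for _ in range(3))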
691
+ def test_overwrite_default_backend():
692
+ assert _active_backend_type() == DefaultBackend
693
+ try:
694
+ register_parallel_backend("threading", BACKENDS["threading"],
695
+ make_default=True)
696
+ assert _active_backend_type() == ThreadingBackend
697
+ finally:
698
+ # Restore the global default manually
699
+ parallel.DEFAULT_BACKEND = DEFAULT_BACKEND
700
+ assert _active_backend_type() == DefaultBackend
701
+
702
+
703
+ @skipif(mp is not None, reason="Only without multiprocessing")
704
+ def test_backend_no_multiprocessing():
705
+ with warns(UserWarning,
706
+ match="joblib backend '.*' is not available on.*"):
707
+ Parallel(backend='loky')(delayed(square)(i) for i in range(3))
708
+
709
+ # The below should now work without problems
710
+ with parallel_config(backend='loky'):
711
+ Parallel()(delayed(square)(i) for i in range(3))
712
+
713
+
714
+ def check_backend_context_manager(context, backend_name):
715
+ with context(backend_name, n_jobs=3):
716
+ active_backend, active_n_jobs = parallel.get_active_backend()
717
+ assert active_n_jobs == 3
718
+ assert effective_n_jobs(3) == 3
719
+ p = Parallel()
720
+ assert p.n_jobs == 3
721
+ if backend_name == 'multiprocessing':
722
+ assert type(active_backend) is MultiprocessingBackend
723
+ assert type(p._backend) is MultiprocessingBackend
724
+ elif backend_name == 'loky':
725
+ assert type(active_backend) is LokyBackend
726
+ assert type(p._backend) is LokyBackend
727
+ elif backend_name == 'threading':
728
+ assert type(active_backend) is ThreadingBackend
729
+ assert type(p._backend) is ThreadingBackend
730
+ elif backend_name.startswith('test_'):
731
+ assert type(active_backend) is FakeParallelBackend
732
+ assert type(p._backend) is FakeParallelBackend
733
+
734
+
735
+ all_backends_for_context_manager = PARALLEL_BACKENDS[:]
736
+ all_backends_for_context_manager.extend(
737
+ ['test_backend_%d' % i for i in range(3)]
738
+ )
739
+
740
+
741
+ @with_multiprocessing
742
+ @parametrize('backend', all_backends_for_context_manager)
743
+ @parametrize('context', [parallel_backend, parallel_config])
744
+ def test_backend_context_manager(monkeypatch, backend, context):
745
+ if backend not in BACKENDS:
746
+ monkeypatch.setitem(BACKENDS, backend, FakeParallelBackend)
747
+
748
+ assert _active_backend_type() == DefaultBackend
749
+ # check that it is possible to switch parallel backends sequentially
750
+ check_backend_context_manager(context, backend)
751
+
752
+ # The default backend is restored
753
+ assert _active_backend_type() == DefaultBackend
754
+
755
+ # Check that context manager switching is thread safe:
756
+ Parallel(n_jobs=2, backend='threading')(
757
+ delayed(check_backend_context_manager)(context, b)
758
+ for b in all_backends_for_context_manager if not b)
759
+
760
+ # The default backend is again restored
761
+ assert _active_backend_type() == DefaultBackend
762
+
763
+
764
+ class ParameterizedParallelBackend(SequentialBackend):
765
+ """Pretends to run conncurrently while running sequentially."""
766
+
767
+ def __init__(self, param=None):
768
+ if param is None:
769
+ raise ValueError('param should not be None')
770
+ self.param = param
771
+
772
+
773
+ @parametrize("context", [parallel_config, parallel_backend])
774
+ def test_parameterized_backend_context_manager(monkeypatch, context):
775
+ monkeypatch.setitem(BACKENDS, 'param_backend',
776
+ ParameterizedParallelBackend)
777
+ assert _active_backend_type() == DefaultBackend
778
+
779
+ with context('param_backend', param=42, n_jobs=3):
780
+ active_backend, active_n_jobs = parallel.get_active_backend()
781
+ assert type(active_backend) is ParameterizedParallelBackend
782
+ assert active_backend.param == 42
783
+ assert active_n_jobs == 3
784
+ p = Parallel()
785
+ assert p.n_jobs == 3
786
+ assert p._backend is active_backend
787
+ results = p(delayed(sqrt)(i) for i in range(5))
788
+ assert results == [sqrt(i) for i in range(5)]
789
+
790
+ # The default backend is again restored
791
+ assert _active_backend_type() == DefaultBackend
792
+
793
+
794
+ @parametrize("context", [parallel_config, parallel_backend])
795
+ def test_directly_parameterized_backend_context_manager(context):
796
+ assert _active_backend_type() == DefaultBackend
797
+
798
+ # Check that it's possible to pass a backend instance directly,
799
+ # without registration
800
+ with context(ParameterizedParallelBackend(param=43), n_jobs=5):
801
+ active_backend, active_n_jobs = parallel.get_active_backend()
802
+ assert type(active_backend) is ParameterizedParallelBackend
803
+ assert active_backend.param == 43
804
+ assert active_n_jobs == 5
805
+ p = Parallel()
806
+ assert p.n_jobs == 5
807
+ assert p._backend is active_backend
808
+ results = p(delayed(sqrt)(i) for i in range(5))
809
+ assert results == [sqrt(i) for i in range(5)]
810
+
811
+ # The default backend is again restored
812
+ assert _active_backend_type() == DefaultBackend
813
+
814
+
815
+ def sleep_and_return_pid():
816
+ sleep(.1)
817
+ return os.getpid()
818
+
819
+
820
+ def get_nested_pids():
821
+ assert _active_backend_type() == ThreadingBackend
822
+ # Assert that the nested backend does not change the default number of
823
+ # jobs used in Parallel
824
+ assert Parallel()._effective_n_jobs() == 1
825
+
826
+ # Assert that the tasks are running in only one process
827
+ return Parallel(n_jobs=2)(delayed(sleep_and_return_pid)()
828
+ for _ in range(2))
829
+
830
+
831
+ class MyBackend(joblib._parallel_backends.LokyBackend):
832
+ """Backend to test backward compatibility with older backends"""
833
+ def get_nested_backend(self, ):
834
+ # Older backends only return a backend, without n_jobs indications.
835
+ return super(MyBackend, self).get_nested_backend()[0]
836
+
837
+
838
+ register_parallel_backend('back_compat_backend', MyBackend)
839
+
840
+
841
+ @with_multiprocessing
842
+ @parametrize('backend', ['threading', 'loky', 'multiprocessing',
843
+ 'back_compat_backend'])
844
+ @parametrize("context", [parallel_config, parallel_backend])
845
+ def test_nested_backend_context_manager(context, backend):
846
+ # Check that by default, nested parallel calls will always use the
847
+ # ThreadingBackend
848
+
849
+ with context(backend):
850
+ pid_groups = Parallel(n_jobs=2)(
851
+ delayed(get_nested_pids)()
852
+ for _ in range(10)
853
+ )
854
+ for pid_group in pid_groups:
855
+ assert len(set(pid_group)) == 1
856
+
857
+
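A minimal sketch of the nesting behaviour checked above: by default, a Parallel call made inside a worker falls back to a thread-based (or sequential) backend instead of spawning new processes. The helper name inner is illustrative:

    from joblib import Parallel, delayed

    def inner(i):
        # Nested call: runs with a thread/sequential backend inside the worker.
        return Parallel(n_jobs=2)(delayed(abs)(-j) for j in range(3))

    outer = Parallel(n_jobs=2)(delayed(inner)(i) for i in range(4))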
858
+ @with_multiprocessing
859
+ @parametrize('n_jobs', [2, -1, None])
860
+ @parametrize('backend', PARALLEL_BACKENDS)
861
+ @parametrize("context", [parallel_config, parallel_backend])
862
+ def test_nested_backend_in_sequential(backend, n_jobs, context):
863
+ # Check that by default, nested parallel calls will always use the
864
+ # ThreadingBackend
865
+
866
+ def check_nested_backend(expected_backend_type, expected_n_job):
867
+ # Assert that the sequential backend at the top level does not change the
868
+ # backend for nested calls.
869
+ assert _active_backend_type() == BACKENDS[expected_backend_type]
870
+
871
+ # Assert that the nested backend in SequentialBackend does not change
872
+ # the default number of jobs used in Parallel
873
+ expected_n_job = effective_n_jobs(expected_n_job)
874
+ assert Parallel()._effective_n_jobs() == expected_n_job
875
+
876
+ Parallel(n_jobs=1)(
877
+ delayed(check_nested_backend)(DEFAULT_BACKEND, 1)
878
+ for _ in range(10)
879
+ )
880
+
881
+ with context(backend, n_jobs=n_jobs):
882
+ Parallel(n_jobs=1)(
883
+ delayed(check_nested_backend)(backend, n_jobs)
884
+ for _ in range(10)
885
+ )
886
+
887
+
888
+ def check_nesting_level(context, inner_backend, expected_level):
889
+ with context(inner_backend) as ctx:
890
+ if context is parallel_config:
891
+ backend = ctx["backend"]
892
+ if context is parallel_backend:
893
+ backend = ctx[0]
894
+ assert backend.nesting_level == expected_level
895
+
896
+
897
+ @with_multiprocessing
898
+ @parametrize('outer_backend', PARALLEL_BACKENDS)
899
+ @parametrize('inner_backend', PARALLEL_BACKENDS)
900
+ @parametrize("context", [parallel_config, parallel_backend])
901
+ def test_backend_nesting_level(context, outer_backend, inner_backend):
902
+ # Check that the nesting level for the backend is correctly set
903
+ check_nesting_level(context, outer_backend, 0)
904
+
905
+ Parallel(n_jobs=2, backend=outer_backend)(
906
+ delayed(check_nesting_level)(context, inner_backend, 1)
907
+ for _ in range(10)
908
+ )
909
+
910
+ with context(inner_backend, n_jobs=2):
911
+ Parallel()(delayed(check_nesting_level)(context, inner_backend, 1)
912
+ for _ in range(10))
913
+
914
+
915
+ @with_multiprocessing
916
+ @parametrize("context", [parallel_config, parallel_backend])
917
+ @parametrize('with_retrieve_callback', [True, False])
918
+ def test_retrieval_context(context, with_retrieve_callback):
919
+ import contextlib
920
+
921
+ class MyBackend(ThreadingBackend):
922
+ i = 0
923
+ supports_retrieve_callback = with_retrieve_callback
924
+
925
+ @contextlib.contextmanager
926
+ def retrieval_context(self):
927
+ self.i += 1
928
+ yield
929
+
930
+ register_parallel_backend("retrieval", MyBackend)
931
+
932
+ def nested_call(n):
933
+ return Parallel(n_jobs=2)(delayed(id)(i) for i in range(n))
934
+
935
+ with context("retrieval") as ctx:
936
+ Parallel(n_jobs=2)(
937
+ delayed(nested_call)(i)
938
+ for i in range(5)
939
+ )
940
+ if context is parallel_config:
941
+ assert ctx["backend"].i == 1
942
+ if context is parallel_backend:
943
+ assert ctx[0].i == 1
944
+
945
+
946
+ ###############################################################################
947
+ # Test helpers
948
+
949
+ @parametrize('batch_size', [0, -1, 1.42])
950
+ def test_invalid_batch_size(batch_size):
951
+ with raises(ValueError):
952
+ Parallel(batch_size=batch_size)
953
+
954
+
955
+ @parametrize('n_tasks, n_jobs, pre_dispatch, batch_size',
956
+ [(2, 2, 'all', 'auto'),
957
+ (2, 2, 'n_jobs', 'auto'),
958
+ (10, 2, 'n_jobs', 'auto'),
959
+ (517, 2, 'n_jobs', 'auto'),
960
+ (10, 2, 'n_jobs', 'auto'),
961
+ (10, 4, 'n_jobs', 'auto'),
962
+ (200, 12, 'n_jobs', 'auto'),
963
+ (25, 12, '2 * n_jobs', 1),
964
+ (250, 12, 'all', 1),
965
+ (250, 12, '2 * n_jobs', 7),
966
+ (200, 12, '2 * n_jobs', 'auto')])
967
+ def test_dispatch_race_condition(n_tasks, n_jobs, pre_dispatch, batch_size):
968
+ # Check that using (async-)dispatch does not yield a race condition on the
969
+ # iterable generator that is not thread-safe natively.
970
+ # This is a non-regression test for the "Pool seems closed" class of error
971
+ params = {'n_jobs': n_jobs, 'pre_dispatch': pre_dispatch,
972
+ 'batch_size': batch_size}
973
+ expected = [square(i) for i in range(n_tasks)]
974
+ results = Parallel(**params)(delayed(square)(i) for i in range(n_tasks))
975
+ assert results == expected
976
+
977
+
978
+ @with_multiprocessing
979
+ def test_default_mp_context():
980
+ mp_start_method = mp.get_start_method()
981
+ p = Parallel(n_jobs=2, backend='multiprocessing')
982
+ context = p._backend_args.get('context')
983
+ start_method = context.get_start_method()
984
+ assert start_method == mp_start_method
985
+
986
+
987
+ @with_numpy
988
+ @with_multiprocessing
989
+ @parametrize('backend', PROCESS_BACKENDS)
990
+ def test_no_blas_crash_or_freeze_with_subprocesses(backend):
991
+ if backend == 'multiprocessing':
992
+ # Use the spawn backend that is both robust and available on all
993
+ # platforms
994
+ backend = mp.get_context('spawn')
995
+
996
+ # Check that on recent Python versions, the 'spawn' start method can make
997
+ # it possible to use multiprocessing in conjunction with any BLAS
998
+ # implementation that happens to be used by numpy without causing a freeze or
999
+ # a crash
1000
+ rng = np.random.RandomState(42)
1001
+
1002
+ # call BLAS DGEMM to force the initialization of the internal thread-pool
1003
+ # in the main process
1004
+ a = rng.randn(1000, 1000)
1005
+ np.dot(a, a.T)
1006
+
1007
+ # check that the internal BLAS thread-pool is not in an inconsistent state
1008
+ # in the worker processes managed by multiprocessing
1009
+ Parallel(n_jobs=2, backend=backend)(
1010
+ delayed(np.dot)(a, a.T) for i in range(2))
1011
+
1012
+
1013
+ UNPICKLABLE_CALLABLE_SCRIPT_TEMPLATE_NO_MAIN = """\
1014
+ from joblib import Parallel, delayed
1015
+
1016
+ def square(x):
1017
+ return x ** 2
1018
+
1019
+ backend = "{}"
1020
+ if backend == "spawn":
1021
+ from multiprocessing import get_context
1022
+ backend = get_context(backend)
1023
+
1024
+ print(Parallel(n_jobs=2, backend=backend)(
1025
+ delayed(square)(i) for i in range(5)))
1026
+ """
1027
+
1028
+
1029
+ @with_multiprocessing
1030
+ @parametrize('backend', PROCESS_BACKENDS)
1031
+ def test_parallel_with_interactively_defined_functions(backend):
1032
+ # When using the "-c" flag, interactive functions defined in __main__
1033
+ # should work with any backend.
1034
+ if backend == "multiprocessing" and mp.get_start_method() != "fork":
1035
+ pytest.skip("Require fork start method to use interactively defined "
1036
+ "functions with multiprocessing.")
1037
+ code = UNPICKLABLE_CALLABLE_SCRIPT_TEMPLATE_NO_MAIN.format(backend)
1038
+ check_subprocess_call(
1039
+ [sys.executable, '-c', code], timeout=10,
1040
+ stdout_regex=r'\[0, 1, 4, 9, 16\]')
1041
+
1042
+
1043
+ UNPICKLABLE_CALLABLE_SCRIPT_TEMPLATE_MAIN = """\
1044
+ import sys
1045
+ # Make sure that joblib is importable in the subprocess launching this
1046
+ # script. This is needed in case we run the tests from the joblib root
1047
+ # folder without having installed joblib
1048
+ sys.path.insert(0, {joblib_root_folder!r})
1049
+
1050
+ from joblib import Parallel, delayed
1051
+
1052
+ def run(f, x):
1053
+ return f(x)
1054
+
1055
+ {define_func}
1056
+
1057
+ if __name__ == "__main__":
1058
+ backend = "{backend}"
1059
+ if backend == "spawn":
1060
+ from multiprocessing import get_context
1061
+ backend = get_context(backend)
1062
+
1063
+ callable_position = "{callable_position}"
1064
+ if callable_position == "delayed":
1065
+ print(Parallel(n_jobs=2, backend=backend)(
1066
+ delayed(square)(i) for i in range(5)))
1067
+ elif callable_position == "args":
1068
+ print(Parallel(n_jobs=2, backend=backend)(
1069
+ delayed(run)(square, i) for i in range(5)))
1070
+ else:
1071
+ print(Parallel(n_jobs=2, backend=backend)(
1072
+ delayed(run)(f=square, x=i) for i in range(5)))
1073
+ """
1074
+
1075
+ SQUARE_MAIN = """\
1076
+ def square(x):
1077
+ return x ** 2
1078
+ """
1079
+ SQUARE_LOCAL = """\
1080
+ def gen_square():
1081
+ def square(x):
1082
+ return x ** 2
1083
+ return square
1084
+ square = gen_square()
1085
+ """
1086
+ SQUARE_LAMBDA = """\
1087
+ square = lambda x: x ** 2
1088
+ """
1089
+
1090
+
1091
+ @with_multiprocessing
1092
+ @parametrize('backend', PROCESS_BACKENDS + ([] if mp is None else ['spawn']))
1093
+ @parametrize('define_func', [SQUARE_MAIN, SQUARE_LOCAL, SQUARE_LAMBDA])
1094
+ @parametrize('callable_position', ['delayed', 'args', 'kwargs'])
1095
+ def test_parallel_with_unpicklable_functions_in_args(
1096
+ backend, define_func, callable_position, tmpdir):
1097
+ if backend in ['multiprocessing', 'spawn'] and (
1098
+ define_func != SQUARE_MAIN or sys.platform == "win32"):
1099
+ pytest.skip("Not picklable with pickle")
1100
+ code = UNPICKLABLE_CALLABLE_SCRIPT_TEMPLATE_MAIN.format(
1101
+ define_func=define_func, backend=backend,
1102
+ callable_position=callable_position,
1103
+ joblib_root_folder=os.path.dirname(os.path.dirname(joblib.__file__)))
1104
+ code_file = tmpdir.join("unpicklable_func_script.py")
1105
+ code_file.write(code)
1106
+ check_subprocess_call(
1107
+ [sys.executable, code_file.strpath], timeout=10,
1108
+ stdout_regex=r'\[0, 1, 4, 9, 16\]')
1109
+
1110
+
1111
+ INTERACTIVE_DEFINED_FUNCTION_AND_CLASS_SCRIPT_CONTENT = """\
1112
+ import sys
1113
+ import faulthandler
1114
+ # Make sure that joblib is importable in the subprocess launching this
1115
+ # script. This is needed in case we run the tests from the joblib root
1116
+ # folder without having installed joblib
1117
+ sys.path.insert(0, {joblib_root_folder!r})
1118
+
1119
+ from joblib import Parallel, delayed
1120
+ from functools import partial
1121
+
1122
+ class MyClass:
1123
+ '''Class defined in the __main__ namespace'''
1124
+ def __init__(self, value):
1125
+ self.value = value
1126
+
1127
+
1128
+ def square(x, ignored=None, ignored2=None):
1129
+ '''Function defined in the __main__ namespace'''
1130
+ return x.value ** 2
1131
+
1132
+
1133
+ square2 = partial(square, ignored2='something')
1134
+
1135
+ # Here, we do not need the `if __name__ == "__main__":` safeguard when
1136
+ # using the default `loky` backend (even on Windows).
1137
+
1138
+ # To make debugging easier
1139
+ faulthandler.dump_traceback_later(30, exit=True)
1140
+
1141
+ # The following baroque function call is meant to check that joblib
1142
+ # introspection rightfully uses cloudpickle instead of the (faster) pickle
1143
+ # module of the standard library when necessary. In particular cloudpickle is
1144
+ # necessary for functions and instances of classes interactively defined in the
1145
+ # __main__ module.
1146
+
1147
+ print(Parallel(backend="loky", n_jobs=2)(
1148
+ delayed(square2)(MyClass(i), ignored=[dict(a=MyClass(1))])
1149
+ for i in range(5)
1150
+ ))
1151
+ """.format(joblib_root_folder=os.path.dirname(
1152
+ os.path.dirname(joblib.__file__)))
1153
+
1154
+
1155
+ @with_multiprocessing
1156
+ def test_parallel_with_interactively_defined_functions_loky(tmpdir):
1157
+ # loky accepts interactive functions defined in __main__ and does not
1158
+ # require if __name__ == '__main__' even when the __main__ module is
1159
+ # defined by the result of the execution of a filesystem script.
1160
+ script = tmpdir.join('joblib_interactively_defined_function.py')
1161
+ script.write(INTERACTIVE_DEFINED_FUNCTION_AND_CLASS_SCRIPT_CONTENT)
1162
+ check_subprocess_call(
1163
+ [sys.executable, script.strpath],
1164
+ stdout_regex=r'\[0, 1, 4, 9, 16\]',
1165
+ timeout=None, # rely on faulthandler to kill the process
1166
+ )
1167
+
1168
+
1169
+ INTERACTIVELY_DEFINED_SUBCLASS_WITH_METHOD_SCRIPT_CONTENT = """\
1170
+ import sys
1171
+ # Make sure that joblib is importable in the subprocess launching this
1172
+ # script. This is needed in case we run the tests from the joblib root
1173
+ # folder without having installed joblib
1174
+ sys.path.insert(0, {joblib_root_folder!r})
1175
+
1176
+ from joblib import Parallel, delayed, hash
1177
+ import multiprocessing as mp
1178
+ mp.util.log_to_stderr(5)
1179
+
1180
+ class MyList(list):
1181
+ '''MyList is interactively defined but MyList.append is a built-in'''
1182
+ def __hash__(self):
1183
+ # XXX: workaround limitation in cloudpickle
1184
+ return hash(self).__hash__()
1185
+
1186
+ l = MyList()
1187
+
1188
+ print(Parallel(backend="loky", n_jobs=2)(
1189
+ delayed(l.append)(i) for i in range(3)
1190
+ ))
1191
+ """.format(joblib_root_folder=os.path.dirname(
1192
+ os.path.dirname(joblib.__file__)))
1193
+
1194
+
1195
+ @with_multiprocessing
1196
+ def test_parallel_with_interactively_defined_bound_method_loky(tmpdir):
1197
+ script = tmpdir.join('joblib_interactive_bound_method_script.py')
1198
+ script.write(INTERACTIVELY_DEFINED_SUBCLASS_WITH_METHOD_SCRIPT_CONTENT)
1199
+ check_subprocess_call([sys.executable, script.strpath],
1200
+ stdout_regex=r'\[None, None, None\]',
1201
+ stderr_regex=r'LokyProcess',
1202
+ timeout=15)
1203
+
1204
+
1205
+ def test_parallel_with_exhausted_iterator():
1206
+ exhausted_iterator = iter([])
1207
+ assert Parallel(n_jobs=2)(exhausted_iterator) == []
1208
+
1209
+
1210
+ def _cleanup_worker():
1211
+ """Helper function to force gc in each worker."""
1212
+ force_gc_pypy()
1213
+ time.sleep(.1)
1214
+
1215
+
1216
+ def check_memmap(a):
1217
+ if not isinstance(a, np.memmap):
1218
+ raise TypeError('Expected np.memmap instance, got %r',
1219
+ type(a))
1220
+ return a.copy() # return a regular array instead of a memmap
1221
+
1222
+
1223
+ @with_numpy
1224
+ @with_multiprocessing
1225
+ @parametrize('backend', PROCESS_BACKENDS)
1226
+ def test_auto_memmap_on_arrays_from_generator(backend):
1227
+ # Non-regression test for a problem with a bad interaction between the
1228
+ # GC collecting arrays recently created during iteration inside the
1229
+ # parallel dispatch loop and the auto-memmap feature of Parallel.
1230
+ # See: https://github.com/joblib/joblib/pull/294
1231
+ def generate_arrays(n):
1232
+ for i in range(n):
1233
+ yield np.ones(10, dtype=np.float32) * i
1234
+ # Use max_nbytes=1 to force the use of memory-mapping even for small
1235
+ # arrays
1236
+ results = Parallel(n_jobs=2, max_nbytes=1, backend=backend)(
1237
+ delayed(check_memmap)(a) for a in generate_arrays(100))
1238
+ for result, expected in zip(results, generate_arrays(len(results))):
1239
+ np.testing.assert_array_equal(expected, result)
1240
+
1241
+ # Second call to force loky to adapt the executor by growing the number
1242
+ # of worker processes. This is a non-regression test for:
1243
+ # https://github.com/joblib/joblib/issues/629.
1244
+ results = Parallel(n_jobs=4, max_nbytes=1, backend=backend)(
1245
+ delayed(check_memmap)(a) for a in generate_arrays(100))
1246
+ for result, expected in zip(results, generate_arrays(len(results))):
1247
+ np.testing.assert_array_equal(expected, result)
1248
+
1249
+
1250
+ def identity(arg):
1251
+ return arg
1252
+
1253
+
1254
+ @with_numpy
1255
+ @with_multiprocessing
1256
+ def test_memmap_with_big_offset(tmpdir):
1257
+ fname = tmpdir.join('test.mmap').strpath
1258
+ size = mmap.ALLOCATIONGRANULARITY
1259
+ obj = [np.zeros(size, dtype='uint8'), np.ones(size, dtype='uint8')]
1260
+ dump(obj, fname)
1261
+ memmap = load(fname, mmap_mode='r')
1262
+ result, = Parallel(n_jobs=2)(delayed(identity)(memmap) for _ in [0])
1263
+ assert isinstance(memmap[1], np.memmap)
1264
+ assert memmap[1].offset > size
1265
+ np.testing.assert_array_equal(obj, result)
1266
+
1267
+
1268
+ def test_warning_about_timeout_not_supported_by_backend():
1269
+ with warnings.catch_warnings(record=True) as warninfo:
1270
+ Parallel(n_jobs=1, timeout=1)(delayed(square)(i) for i in range(50))
1271
+ assert len(warninfo) == 1
1272
+ w = warninfo[0]
1273
+ assert isinstance(w.message, UserWarning)
1274
+ assert str(w.message) == (
1275
+ "The backend class 'SequentialBackend' does not support timeout. "
1276
+ "You have set 'timeout=1' in Parallel but the 'timeout' parameter "
1277
+ "will not be used.")
1278
+
1279
+
1280
+ def set_list_value(input_list, index, value):
1281
+ input_list[index] = value
1282
+ return value
1283
+
1284
+
1285
+ @pytest.mark.parametrize('n_jobs', [1, 2, 4])
1286
+ def test_parallel_return_order_with_return_as_generator_parameter(n_jobs):
1287
+ # This test inserts values in a list in some expected order
1288
+ # in sequential computing, and then checks that this order has been
1289
+ # respected by the Parallel output generator.
1290
+ input_list = [0] * 5
1291
+ result = Parallel(n_jobs=n_jobs, return_as="generator",
1292
+ backend='threading')(
1293
+ delayed(set_list_value)(input_list, i, i) for i in range(5))
1294
+
1295
+ # Ensure that all the tasks are completed before checking the result
1296
+ result = list(result)
1297
+
1298
+ assert all(v == r for v, r in zip(input_list, result))
1299
+
1300
+
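A minimal sketch of the return_as="generator" usage exercised above; results are yielded lazily in submission order, so consuming the generator gives the same ordering as the input iterable. Illustrative only:

    from joblib import Parallel, delayed

    gen = Parallel(n_jobs=2, return_as="generator")(
        delayed(abs)(-i) for i in range(5))
    ordered = list(gen)   # [0, 1, 2, 3, 4], yielded as results become available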
1301
+ def _sqrt_with_delay(e, delay):
1302
+ if delay:
1303
+ sleep(30)
1304
+ return sqrt(e)
1305
+
1306
+
1307
+ def _test_parallel_unordered_generator_returns_fastest_first(backend, n_jobs):
1308
+ # This test submits 10 tasks, but the second task is super slow. This test
1309
+ # checks that the 9 other tasks return before the slow task is done, when
1310
+ # the `return_as` parameter is set to `'generator_unordered'`
1311
+ result = Parallel(n_jobs=n_jobs, return_as="generator_unordered",
1312
+ backend=backend)(
1313
+ delayed(_sqrt_with_delay)(i**2, (i == 1)) for i in range(10))
1314
+
1315
+ quickly_returned = sorted(next(result) for _ in range(9))
1316
+
1317
+ expected_quickly_returned = [0] + list(range(2, 10))
1318
+
1319
+ assert all(
1320
+ v == r for v, r in zip(expected_quickly_returned, quickly_returned)
1321
+ )
1322
+
1323
+ del result
1324
+ force_gc_pypy()
1325
+
1326
+
1327
+ @pytest.mark.parametrize('n_jobs', [2, 4])
1328
+ # NB: for this test to work, the backend must be allowed to process tasks
1329
+ # concurrently, so at least two jobs with a non-sequential backend are
1330
+ # mandatory.
1331
+ @with_multiprocessing
1332
+ @parametrize('backend', set(RETURN_GENERATOR_BACKENDS) - {"sequential"})
1333
+ def test_parallel_unordered_generator_returns_fastest_first(backend, n_jobs):
1334
+ _test_parallel_unordered_generator_returns_fastest_first(backend, n_jobs)
1335
+
1336
+
1337
+ @pytest.mark.parametrize('n_jobs', [2, -1])
1338
+ @parametrize("context", [parallel_config, parallel_backend])
1339
+ @skipif(distributed is None, reason='This test requires dask')
1340
+ def test_parallel_unordered_generator_returns_fastest_first_with_dask(
1341
+ n_jobs, context
1342
+ ):
1343
+ with distributed.Client(
1344
+ n_workers=2, threads_per_worker=2
1345
+ ), context("dask"):
1346
+ _test_parallel_unordered_generator_returns_fastest_first(None, n_jobs)
1347
+
1348
+
1349
+ @parametrize('backend', ALL_VALID_BACKENDS)
1350
+ @parametrize('n_jobs', [1, 2, -2, -1])
1351
+ def test_abort_backend(n_jobs, backend):
1352
+ delays = ["a"] + [10] * 100
1353
+ with raises(TypeError):
1354
+ t_start = time.time()
1355
+ Parallel(n_jobs=n_jobs, backend=backend)(
1356
+ delayed(time.sleep)(i) for i in delays)
1357
+ dt = time.time() - t_start
1358
+ assert dt < 20
1359
+
1360
+
1361
+ def get_large_object(arg):
1362
+ result = np.ones(int(5 * 1e5), dtype=bool)
1363
+ result[0] = False
1364
+ return result
1365
+
1366
+
1367
+ def _test_deadlock_with_generator(backend, return_as, n_jobs):
1368
+ # Non-regression test for a race condition in the backends when the pickler
1369
+ # is delayed by a large object.
1370
+ with Parallel(n_jobs=n_jobs, backend=backend,
1371
+ return_as=return_as) as parallel:
1372
+ result = parallel(delayed(get_large_object)(i) for i in range(10))
1373
+ next(result)
1374
+ next(result)
1375
+ del result
1376
+ # The gc in pypy can be delayed. Force it to make sure this test does
1377
+ # not cause timeout on the CI.
1378
+ force_gc_pypy()
1379
+
1380
+
1381
+ @with_numpy
1382
+ @parametrize('backend', RETURN_GENERATOR_BACKENDS)
1383
+ @parametrize('return_as', ["generator", "generator_unordered"])
1384
+ @parametrize('n_jobs', [1, 2, -2, -1])
1385
+ def test_deadlock_with_generator(backend, return_as, n_jobs):
1386
+ _test_deadlock_with_generator(backend, return_as, n_jobs)
1387
+
1388
+
1389
+ @with_numpy
1390
+ @pytest.mark.parametrize('n_jobs', [2, -1])
1391
+ @parametrize('return_as', ["generator", "generator_unordered"])
1392
+ @parametrize("context", [parallel_config, parallel_backend])
1393
+ @skipif(distributed is None, reason='This test requires dask')
1394
+ def test_deadlock_with_generator_and_dask(context, return_as, n_jobs):
1395
+ with distributed.Client(
1396
+ n_workers=2, threads_per_worker=2
1397
+ ), context("dask"):
1398
+ _test_deadlock_with_generator(None, return_as, n_jobs)
1399
+
1400
+
1401
+ @parametrize('backend', RETURN_GENERATOR_BACKENDS)
1402
+ @parametrize('return_as', ["generator", "generator_unordered"])
1403
+ @parametrize('n_jobs', [1, 2, -2, -1])
1404
+ def test_multiple_generator_call(backend, return_as, n_jobs):
1405
+ # Non-regression test that ensures the dispatch of the tasks starts
1406
+ # immediately when Parallel.__call__ is called. This test relies on the
1407
+ # assumption that only one generator can be submitted at a time.
1408
+ with raises(RuntimeError,
1409
+ match="This Parallel instance is already running"):
1410
+ parallel = Parallel(n_jobs, backend=backend, return_as=return_as)
1411
+ g = parallel(delayed(sleep)(1) for _ in range(10)) # noqa: F841
1412
+ t_start = time.time()
1413
+ gen2 = parallel(delayed(id)(i) for i in range(100)) # noqa: F841
1414
+
1415
+ # Make sure that the error is raised quickly
1416
+ assert time.time() - t_start < 2, (
1417
+ "The error should be raised immediatly when submitting a new task "
1418
+ "but it took more than 2s."
1419
+ )
1420
+
1421
+ del g
1422
+ # The gc in pypy can be delayed. Force it to make sure this test does not
1423
+ # cause timeout on the CI.
1424
+ force_gc_pypy()
1425
+
1426
+
1427
+ @parametrize('backend', RETURN_GENERATOR_BACKENDS)
1428
+ @parametrize('return_as', ["generator", "generator_unordered"])
1429
+ @parametrize('n_jobs', [1, 2, -2, -1])
1430
+ def test_multiple_generator_call_managed(backend, return_as, n_jobs):
1431
+ # Non-regression test that ensures the dispatch of the tasks starts
1432
+ # immediately when Parallel.__call__ is called. This test relies on the
1433
+ # assumption that only one generator can be submitted at a time.
1434
+ with Parallel(n_jobs, backend=backend,
1435
+ return_as=return_as) as parallel:
1436
+ g = parallel(delayed(sleep)(10) for _ in range(10)) # noqa: F841
1437
+ t_start = time.time()
1438
+ with raises(RuntimeError,
1439
+ match="This Parallel instance is already running"):
1440
+ g2 = parallel(delayed(id)(i) for i in range(100)) # noqa: F841
1441
+
1442
+ # Make sure that the error is raised quickly
1443
+ assert time.time() - t_start < 2, (
1444
+ "The error should be raised immediatly when submitting a new task "
1445
+ "but it took more than 2s."
1446
+ )
1447
+
1448
+ # The gc in pypy can be delayed. Force it to make sure this test does not
1449
+ # cause timeout on the CI.
1450
+ del g
1451
+ force_gc_pypy()
1452
+
1453
+
1454
+ @parametrize('backend', RETURN_GENERATOR_BACKENDS)
1455
+ @parametrize('return_as_1', ["generator", "generator_unordered"])
1456
+ @parametrize('return_as_2', ["generator", "generator_unordered"])
1457
+ @parametrize('n_jobs', [1, 2, -2, -1])
1458
+ def test_multiple_generator_call_separated(
1459
+ backend, return_as_1, return_as_2, n_jobs
1460
+ ):
1461
+ # Check that for separate Parallel instances, both sets of tasks are correctly returned.
1462
+ g = Parallel(n_jobs, backend=backend, return_as=return_as_1)(
1463
+ delayed(sqrt)(i ** 2) for i in range(10)
1464
+ )
1465
+ g2 = Parallel(n_jobs, backend=backend, return_as=return_as_2)(
1466
+ delayed(sqrt)(i ** 2) for i in range(10, 20)
1467
+ )
1468
+
1469
+ if return_as_1 == "generator_unordered":
1470
+ g = sorted(g)
1471
+
1472
+ if return_as_2 == "generator_unordered":
1473
+ g2 = sorted(g2)
1474
+
1475
+ assert all(res == i for res, i in zip(g, range(10)))
1476
+ assert all(res == i for res, i in zip(g2, range(10, 20)))
1477
+
1478
+
1479
+ @parametrize('backend, error', [
1480
+ ('loky', True),
1481
+ ('threading', False),
1482
+ ('sequential', False),
1483
+ ])
1484
+ @parametrize('return_as_1', ["generator", "generator_unordered"])
1485
+ @parametrize('return_as_2', ["generator", "generator_unordered"])
1486
+ def test_multiple_generator_call_separated_gc(
1487
+ backend, return_as_1, return_as_2, error
1488
+ ):
1489
+
1490
+ if (backend == 'loky') and (mp is None):
1491
+ pytest.skip("Requires multiprocessing")
1492
+
1493
+ # Check that in loky, only one call can be run at a time with
1494
+ # a single executor.
1495
+ parallel = Parallel(2, backend=backend, return_as=return_as_1)
1496
+ g = parallel(delayed(sleep)(10) for i in range(10))
1497
+ g_wr = weakref.finalize(g, lambda: print("Generator collected"))
1498
+ ctx = (
1499
+ raises(RuntimeError, match="The executor underlying Parallel")
1500
+ if error else nullcontext()
1501
+ )
1502
+ with ctx:
1503
+ # For loky, this call will raise an error as the gc of the previous
1504
+ # generator will shut down the shared executor.
1505
+ # For the other backends, as the worker pools are not shared between
1506
+ # the two calls, this should proceed correctly.
1507
+ t_start = time.time()
1508
+ g = Parallel(2, backend=backend, return_as=return_as_2)(
1509
+ delayed(sqrt)(i ** 2) for i in range(10, 20)
1510
+ )
1511
+
1512
+ # The gc in pypy can be delayed. Force it to test the behavior when it
1513
+ # will eventually be collected.
1514
+ force_gc_pypy()
1515
+
1516
+ if return_as_2 == "generator_unordered":
1517
+ g = sorted(g)
1518
+
1519
+ assert all(res == i for res, i in zip(g, range(10, 20)))
1520
+
1521
+ assert time.time() - t_start < 5
1522
+
1523
+ # Make sure that the computations are stopped for the gc'ed generator
1524
+ retry = 0
1525
+ while g_wr.alive and retry < 3:
1526
+ retry += 1
1527
+ time.sleep(.5)
1528
+ assert time.time() - t_start < 5
1529
+
1530
+ if parallel._effective_n_jobs() != 1:
1531
+ # check that the first parallel object is aborting (the final _aborted
1532
+ # state might be delayed).
1533
+ assert parallel._aborting
1534
+
1535
+
1536
+ @with_numpy
1537
+ @with_multiprocessing
1538
+ @parametrize('backend', PROCESS_BACKENDS)
1539
+ def test_memmapping_leaks(backend, tmpdir):
1540
+ # Non-regression test for memmapping backends. Ensure that the data
1541
+ # does not stay too long in memory
1542
+ tmpdir = tmpdir.strpath
1543
+
1544
+ # Use max_nbytes=1 to force the use of memory-mapping even for small
1545
+ # arrays
1546
+ with Parallel(n_jobs=2, max_nbytes=1, backend=backend,
1547
+ temp_folder=tmpdir) as p:
1548
+ p(delayed(check_memmap)(a) for a in [np.random.random(10)] * 2)
1549
+
1550
+ # The memmap folder should not be cleaned up while inside the context scope
1551
+ assert len(os.listdir(tmpdir)) > 0
1552
+
1553
+ # Cleaning of the memmap folder is triggered by the garbage
1554
+ # collection. With pypy the garbage collection has been observed to be
1555
+ # delayed, sometimes up until the shutdown of the interpreter. This
1556
+ # cleanup job executed in the worker ensures that it's triggered
1557
+ # immediately.
1558
+ p(delayed(_cleanup_worker)() for _ in range(2))
1559
+
1560
+ # Make sure that the shared memory is cleaned at the end when we exit
1561
+ # the context
1562
+ for _ in range(100):
1563
+ if not os.listdir(tmpdir):
1564
+ break
1565
+ sleep(.1)
1566
+ else:
1567
+ raise AssertionError('temporary directory of Parallel was not removed')
1568
+
1569
+ # Make sure that the shared memory is cleaned at the end of a call
1570
+ p = Parallel(n_jobs=2, max_nbytes=1, backend=backend)
1571
+ p(delayed(check_memmap)(a) for a in [np.random.random(10)] * 2)
1572
+ p(delayed(_cleanup_worker)() for _ in range(2))
1573
+
1574
+ for _ in range(100):
1575
+ if not os.listdir(tmpdir):
1576
+ break
1577
+ sleep(.1)
1578
+ else:
1579
+ raise AssertionError('temporary directory of Parallel was not removed')
1580
+
1581
+
1582
+ @parametrize('backend',
1583
+ ([None, 'threading'] if mp is None
1584
+ else [None, 'loky', 'threading'])
1585
+ )
1586
+ def test_lambda_expression(backend):
1587
+ # cloudpickle is used to pickle delayed callables
1588
+ results = Parallel(n_jobs=2, backend=backend)(
1589
+ delayed(lambda x: x ** 2)(i) for i in range(10))
1590
+ assert results == [i ** 2 for i in range(10)]
1591
+
1592
+
1593
+ @with_multiprocessing
1594
+ @parametrize('backend', PROCESS_BACKENDS)
1595
+ def test_backend_batch_statistics_reset(backend):
1596
+ """Test that a parallel backend correctly resets its batch statistics."""
1597
+ n_jobs = 2
1598
+ n_inputs = 500
1599
+ task_time = 2. / n_inputs
1600
+
1601
+ p = Parallel(verbose=10, n_jobs=n_jobs, backend=backend)
1602
+ p(delayed(time.sleep)(task_time) for i in range(n_inputs))
1603
+ assert (p._backend._effective_batch_size ==
1604
+ p._backend._DEFAULT_EFFECTIVE_BATCH_SIZE)
1605
+ assert (p._backend._smoothed_batch_duration ==
1606
+ p._backend._DEFAULT_SMOOTHED_BATCH_DURATION)
1607
+
1608
+ p(delayed(time.sleep)(task_time) for i in range(n_inputs))
1609
+ assert (p._backend._effective_batch_size ==
1610
+ p._backend._DEFAULT_EFFECTIVE_BATCH_SIZE)
1611
+ assert (p._backend._smoothed_batch_duration ==
1612
+ p._backend._DEFAULT_SMOOTHED_BATCH_DURATION)
1613
+
1614
+
1615
+ @with_multiprocessing
1616
+ @parametrize("context", [parallel_config, parallel_backend])
1617
+ def test_backend_hinting_and_constraints(context):
1618
+ for n_jobs in [1, 2, -1]:
1619
+ assert type(Parallel(n_jobs=n_jobs)._backend) == DefaultBackend
1620
+
1621
+ p = Parallel(n_jobs=n_jobs, prefer='threads')
1622
+ assert type(p._backend) is ThreadingBackend
1623
+
1624
+ p = Parallel(n_jobs=n_jobs, prefer='processes')
1625
+ assert type(p._backend) is DefaultBackend
1626
+
1627
+ p = Parallel(n_jobs=n_jobs, require='sharedmem')
1628
+ assert type(p._backend) is ThreadingBackend
1629
+
1630
+ # Explicit backend selection can override backend hinting although it
1631
+ # is useless to pass a hint when selecting a backend.
1632
+ p = Parallel(n_jobs=2, backend='loky', prefer='threads')
1633
+ assert type(p._backend) is LokyBackend
1634
+
1635
+ with context('loky', n_jobs=2):
1636
+ # Explicit backend selection by the user with the context manager
1637
+ # should be respected when combined with backend hints only.
1638
+ p = Parallel(prefer='threads')
1639
+ assert type(p._backend) is LokyBackend
1640
+ assert p.n_jobs == 2
1641
+
1642
+ with context('loky', n_jobs=2):
1643
+ # Locally hard-coded n_jobs value is respected.
1644
+ p = Parallel(n_jobs=3, prefer='threads')
1645
+ assert type(p._backend) is LokyBackend
1646
+ assert p.n_jobs == 3
1647
+
1648
+ with context('loky', n_jobs=2):
1649
+ # Explicit backend selection by the user with the context manager
1650
+ # should be ignored when the Parallel call has hard constraints.
1651
+ # In this case, the default backend that supports shared mem is
1652
+ # used and the default number of processes is used.
1653
+ p = Parallel(require='sharedmem')
1654
+ assert type(p._backend) is ThreadingBackend
1655
+ assert p.n_jobs == 1
1656
+
1657
+ with context('loky', n_jobs=2):
1658
+ p = Parallel(n_jobs=3, require='sharedmem')
1659
+ assert type(p._backend) is ThreadingBackend
1660
+ assert p.n_jobs == 3
1661
+
1662
+
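A minimal sketch of the prefer/require hinting checked above, assuming only the public joblib API; acc is an illustrative shared list:

    from joblib import Parallel, delayed

    # Soft hint: threads are preferred, but an outer backend context may
    # override this choice.
    Parallel(n_jobs=2, prefer='threads')(delayed(abs)(-i) for i in range(3))

    # Hard constraint: the tasks mutate shared state, so a shared-memory
    # (thread-based) backend is required and hints cannot override it.
    acc = []
    Parallel(n_jobs=2, require='sharedmem')(delayed(acc.append)(i) for i in range(3))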
1663
+ @parametrize("context", [parallel_config, parallel_backend])
1664
+ def test_backend_hinting_and_constraints_with_custom_backends(
1665
+ capsys, context
1666
+ ):
1667
+ # Custom backends can declare that they use threads and have shared memory
1668
+ # semantics:
1669
+ class MyCustomThreadingBackend(ParallelBackendBase):
1670
+ supports_sharedmem = True
1671
+ use_threads = True
1672
+
1673
+ def apply_async(self):
1674
+ pass
1675
+
1676
+ def effective_n_jobs(self, n_jobs):
1677
+ return n_jobs
1678
+
1679
+ with context(MyCustomThreadingBackend()):
1680
+ p = Parallel(n_jobs=2, prefer='processes') # ignored
1681
+ assert type(p._backend) is MyCustomThreadingBackend
1682
+
1683
+ p = Parallel(n_jobs=2, require='sharedmem')
1684
+ assert type(p._backend) is MyCustomThreadingBackend
1685
+
1686
+ class MyCustomProcessingBackend(ParallelBackendBase):
1687
+ supports_sharedmem = False
1688
+ use_threads = False
1689
+
1690
+ def apply_async(self):
1691
+ pass
1692
+
1693
+ def effective_n_jobs(self, n_jobs):
1694
+ return n_jobs
1695
+
1696
+ with context(MyCustomProcessingBackend()):
1697
+ p = Parallel(n_jobs=2, prefer='processes')
1698
+ assert type(p._backend) is MyCustomProcessingBackend
1699
+
1700
+ out, err = capsys.readouterr()
1701
+ assert out == ""
1702
+ assert err == ""
1703
+
1704
+ p = Parallel(n_jobs=2, require='sharedmem', verbose=10)
1705
+ assert type(p._backend) is ThreadingBackend
1706
+
1707
+ out, err = capsys.readouterr()
1708
+ expected = ("Using ThreadingBackend as joblib backend "
1709
+ "instead of MyCustomProcessingBackend as the latter "
1710
+ "does not provide shared memory semantics.")
1711
+ assert out.strip() == expected
1712
+ assert err == ""
1713
+
1714
+ with raises(ValueError):
1715
+ Parallel(backend=MyCustomProcessingBackend(), require='sharedmem')
1716
+
1717
+
1718
+ def test_invalid_backend_hinting_and_constraints():
1719
+ with raises(ValueError):
1720
+ Parallel(prefer='invalid')
1721
+
1722
+ with raises(ValueError):
1723
+ Parallel(require='invalid')
1724
+
1725
+ with raises(ValueError):
1726
+ # It is inconsistent to prefer process-based parallelism while
1727
+ # requiring shared memory semantics.
1728
+ Parallel(prefer='processes', require='sharedmem')
1729
+
1730
+ if mp is not None:
1731
+ # It is inconsistent to ask explicitly for a process-based
1732
+ # parallelism while requiring shared memory semantics.
1733
+ with raises(ValueError):
1734
+ Parallel(backend='loky', require='sharedmem')
1735
+ with raises(ValueError):
1736
+ Parallel(backend='multiprocessing', require='sharedmem')
1737
+
1738
+
1739
+ def _recursive_backend_info(limit=3, **kwargs):
1740
+ """Perform nested parallel calls and introspect the backend on the way"""
1741
+
1742
+ with Parallel(n_jobs=2) as p:
1743
+ this_level = [(type(p._backend).__name__, p._backend.nesting_level)]
1744
+ if limit == 0:
1745
+ return this_level
1746
+ results = p(delayed(_recursive_backend_info)(limit=limit - 1, **kwargs)
1747
+ for i in range(1))
1748
+ return this_level + results[0]
1749
+
1750
+
1751
+ @with_multiprocessing
1752
+ @parametrize('backend', ['loky', 'threading'])
1753
+ @parametrize("context", [parallel_config, parallel_backend])
1754
+ def test_nested_parallelism_limit(context, backend):
1755
+ with context(backend, n_jobs=2):
1756
+ backend_types_and_levels = _recursive_backend_info()
1757
+
1758
+ if cpu_count() == 1:
1759
+ second_level_backend_type = 'SequentialBackend'
1760
+ max_level = 1
1761
+ else:
1762
+ second_level_backend_type = 'ThreadingBackend'
1763
+ max_level = 2
1764
+
1765
+ top_level_backend_type = backend.title() + 'Backend'
1766
+ expected_types_and_levels = [
1767
+ (top_level_backend_type, 0),
1768
+ (second_level_backend_type, 1),
1769
+ ('SequentialBackend', max_level),
1770
+ ('SequentialBackend', max_level)
1771
+ ]
1772
+ assert backend_types_and_levels == expected_types_and_levels
1773
+
1774
+
1775
+ @with_numpy
1776
+ @parametrize("context", [parallel_config, parallel_backend])
1777
+ @skipif(distributed is None, reason='This test requires dask')
1778
+ def test_nested_parallelism_with_dask(context):
1779
+ with distributed.Client(n_workers=2, threads_per_worker=2):
1780
+ # 10 MB of data as argument to trigger implicit scattering
1781
+ data = np.ones(int(1e7), dtype=np.uint8)
1782
+ for i in range(2):
1783
+ with context('dask'):
1784
+ backend_types_and_levels = _recursive_backend_info(data=data)
1785
+ assert len(backend_types_and_levels) == 4
1786
+ assert all(name == 'DaskDistributedBackend'
1787
+ for name, _ in backend_types_and_levels)
1788
+
1789
+ # No argument
1790
+ with context('dask'):
1791
+ backend_types_and_levels = _recursive_backend_info()
1792
+ assert len(backend_types_and_levels) == 4
1793
+ assert all(name == 'DaskDistributedBackend'
1794
+ for name, _ in backend_types_and_levels)
1795
+
1796
+
1797
+ def _recursive_parallel(nesting_limit=None):
1798
+ """A horrible function that does recursive parallel calls"""
1799
+ return Parallel()(delayed(_recursive_parallel)() for i in range(2))
1800
+
1801
+
1802
+ @pytest.mark.no_cover
1803
+ @parametrize("context", [parallel_config, parallel_backend])
1804
+ @parametrize(
1805
+ 'backend', (['threading'] if mp is None else ['loky', 'threading'])
1806
+ )
1807
+ def test_thread_bomb_mitigation(context, backend):
1808
+ # Test that recursive parallelism raises a RecursionError rather than
1809
+ # saturating the operating system resources by creating an unbounded number
1810
+ # of threads.
1811
+ with context(backend, n_jobs=2):
1812
+ with raises(BaseException) as excinfo:
1813
+ _recursive_parallel()
1814
+ exc = excinfo.value
1815
+ if backend == "loky":
1816
+ # Local import because loky may not be importable for lack of
1817
+ # multiprocessing
1818
+ from joblib.externals.loky.process_executor import TerminatedWorkerError # noqa
1819
+ if isinstance(exc, (TerminatedWorkerError, PicklingError)):
1820
+ # The recursion exception can itself cause an error when
1821
+ # pickling it to be sent back to the parent process. In this
1822
+ # case the worker crashes but the original traceback is still
1823
+ # printed on stderr. This could be improved but does not seem
1824
+ # simple to do and this is not critical for users (as long
1825
+ # as there is no process or thread bomb happening).
1826
+ pytest.xfail("Loky worker crash when serializing RecursionError")
1827
+
1828
+ assert isinstance(exc, RecursionError)
1829
+
1830
+
+ def _run_parallel_sum():
+     env_vars = {}
+     for var in ['OMP_NUM_THREADS', 'OPENBLAS_NUM_THREADS', 'MKL_NUM_THREADS',
+                 'VECLIB_MAXIMUM_THREADS', 'NUMEXPR_NUM_THREADS',
+                 'NUMBA_NUM_THREADS', 'ENABLE_IPC']:
+         env_vars[var] = os.environ.get(var)
+     return env_vars, parallel_sum(100)
+
+
+ @parametrize("backend", ([None, 'loky'] if mp is not None else [None]))
+ @skipif(parallel_sum is None, reason="Need OpenMP helper compiled")
+ def test_parallel_thread_limit(backend):
+     results = Parallel(n_jobs=2, backend=backend)(
+         delayed(_run_parallel_sum)() for _ in range(2)
+     )
+     expected_num_threads = max(cpu_count() // 2, 1)
+     for worker_env_vars, omp_num_threads in results:
+         assert omp_num_threads == expected_num_threads
+         for name, value in worker_env_vars.items():
+             if name.endswith("_THREADS"):
+                 assert value == str(expected_num_threads)
+             else:
+                 assert name == "ENABLE_IPC"
+                 assert value == "1"
+
+
+ @parametrize("context", [parallel_config, parallel_backend])
+ @skipif(distributed is not None,
+         reason='This test requires dask to not be installed')
+ def test_dask_backend_when_dask_not_installed(context):
+     with raises(ValueError, match='Please install dask'):
+         context('dask')
+
+
+ @parametrize("context", [parallel_config, parallel_backend])
+ def test_zero_worker_backend(context):
+     # joblib.Parallel should reject, with an explicit error message, parallel
+     # backends that have no worker.
+     class ZeroWorkerBackend(ThreadingBackend):
+         def configure(self, *args, **kwargs):
+             return 0
+
+         def apply_async(self, func, callback=None):  # pragma: no cover
+             raise TimeoutError("No worker available")
+
+         def effective_n_jobs(self, n_jobs):  # pragma: no cover
+             return 0
+
+     expected_msg = "ZeroWorkerBackend has no active worker"
+     with context(ZeroWorkerBackend()):
+         with pytest.raises(RuntimeError, match=expected_msg):
+             Parallel(n_jobs=2)(delayed(id)(i) for i in range(2))
+
+
+ def test_globals_update_at_each_parallel_call():
+     # This is a non-regression test related to joblib issues #836 and #833.
+     # Cloudpickle versions between 0.5.4 and 0.7 introduced a bug where
+     # changes to global variables made in a parent process between two calls
+     # to joblib.Parallel would not be propagated to the workers.
+     global MY_GLOBAL_VARIABLE
+     MY_GLOBAL_VARIABLE = "original value"
+
+     def check_globals():
+         global MY_GLOBAL_VARIABLE
+         return MY_GLOBAL_VARIABLE
+
+     assert check_globals() == "original value"
+
+     workers_global_variable = Parallel(n_jobs=2)(
+         delayed(check_globals)() for i in range(2))
+     assert set(workers_global_variable) == {"original value"}
+
+     # Change the value of MY_GLOBAL_VARIABLE, and make sure this change gets
+     # propagated to the workers' environment
+     MY_GLOBAL_VARIABLE = "changed value"
+     assert check_globals() == "changed value"
+
+     workers_global_variable = Parallel(n_jobs=2)(
+         delayed(check_globals)() for i in range(2))
+     assert set(workers_global_variable) == {"changed value"}
+
+
+ ##############################################################################
+ # Test environment variables in the child env, in particular for limiting
+ # the maximal number of threads in C-library threadpools.
+ #
+
+ def _check_numpy_threadpool_limits():
+     import numpy as np
+     # Call BLAS on a matrix-matrix multiplication with dimensions large
+     # enough to ensure that the threadpool managed by the underlying BLAS
+     # implementation is actually used, so as to force its initialization.
+     a = np.random.randn(100, 100)
+     np.dot(a, a)
+     from threadpoolctl import threadpool_info
+     return threadpool_info()
+
+
+ def _parent_max_num_threads_for(child_module, parent_info):
+     for parent_module in parent_info:
+         if parent_module['filepath'] == child_module['filepath']:
+             return parent_module['num_threads']
+     raise ValueError("An unexpected module was loaded in child:\n{}"
+                      .format(child_module))
+
+
+ def check_child_num_threads(workers_info, parent_info, num_threads):
+     # Check that the number of threads reported in workers_info is consistent
+     # with the expectation. We need to be careful to handle the cases where
+     # the requested number of threads is below the library's maximum.
+     for child_threadpool_info in workers_info:
+         for child_module in child_threadpool_info:
+             parent_max_num_threads = _parent_max_num_threads_for(
+                 child_module, parent_info)
+             expected = {min(num_threads, parent_max_num_threads), num_threads}
+             assert child_module['num_threads'] in expected
+
+
+ @with_numpy
+ @with_multiprocessing
+ @parametrize('n_jobs', [2, 4, -2, -1])
+ def test_threadpool_limitation_in_child_loky(n_jobs):
+     # Check that the protection against oversubscription in workers is
+     # working, using threadpoolctl functionalities.
+
+     # Skip this test if numpy is not linked to a BLAS library
+     parent_info = _check_numpy_threadpool_limits()
+     if len(parent_info) == 0:
+         pytest.skip(reason="Need a version of numpy linked to BLAS")
+
+     workers_threadpool_infos = Parallel(backend="loky", n_jobs=n_jobs)(
+         delayed(_check_numpy_threadpool_limits)() for i in range(2))
+
+     n_jobs = effective_n_jobs(n_jobs)
+     expected_child_num_threads = max(cpu_count() // n_jobs, 1)
+
+     check_child_num_threads(workers_threadpool_infos, parent_info,
+                             expected_child_num_threads)
+
+
+ @with_numpy
+ @with_multiprocessing
+ @parametrize('inner_max_num_threads', [1, 2, 4, None])
+ @parametrize('n_jobs', [2, -1])
+ @parametrize("context", [parallel_config, parallel_backend])
+ def test_threadpool_limitation_in_child_context(
+         context, n_jobs, inner_max_num_threads
+ ):
+     # Check that the protection against oversubscription in workers is
+     # working, using threadpoolctl functionalities.
+
+     # Skip this test if numpy is not linked to a BLAS library
+     parent_info = _check_numpy_threadpool_limits()
+     if len(parent_info) == 0:
+         pytest.skip(reason="Need a version of numpy linked to BLAS")
+
+     with context('loky', inner_max_num_threads=inner_max_num_threads):
+         workers_threadpool_infos = Parallel(n_jobs=n_jobs)(
+             delayed(_check_numpy_threadpool_limits)() for i in range(2))
+
+     n_jobs = effective_n_jobs(n_jobs)
+     if inner_max_num_threads is None:
+         expected_child_num_threads = max(cpu_count() // n_jobs, 1)
+     else:
+         expected_child_num_threads = inner_max_num_threads
+
+     check_child_num_threads(workers_threadpool_infos, parent_info,
+                             expected_child_num_threads)
+
+
+ @with_multiprocessing
+ @parametrize('n_jobs', [2, -1])
+ @parametrize('var_name', ["OPENBLAS_NUM_THREADS",
+                           "MKL_NUM_THREADS",
+                           "OMP_NUM_THREADS"])
+ @parametrize("context", [parallel_config, parallel_backend])
+ def test_threadpool_limitation_in_child_override(context, n_jobs, var_name):
+     # Check that environment variables set by the user on the main process
+     # always take priority.
+
+     # Clean up the existing executor because we change the environment of the
+     # parent at runtime, and loky intentionally does not detect such changes.
+     get_reusable_executor(reuse=True).shutdown()
+
+     def _get_env(var_name):
+         return os.environ.get(var_name)
+
+     original_var_value = os.environ.get(var_name)
+     try:
+         os.environ[var_name] = "4"
+         results = Parallel(n_jobs=n_jobs)(
+             delayed(_get_env)(var_name) for i in range(2))
+         assert results == ["4", "4"]
+
+         with context('loky', inner_max_num_threads=1):
+             results = Parallel(n_jobs=n_jobs)(
+                 delayed(_get_env)(var_name) for i in range(2))
+             assert results == ["1", "1"]
+
+     finally:
+         if original_var_value is None:
+             del os.environ[var_name]
+         else:
+             os.environ[var_name] = original_var_value
+
+
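For reference, a minimal standalone sketch of the user-facing knob these tests exercise, capping native threadpools in loky workers with inner_max_num_threads. It assumes joblib and numpy are installed and that numpy is linked to a parallel BLAS; the function blas_heavy is illustrative:

import numpy as np
from joblib import Parallel, delayed, parallel_config


def blas_heavy(seed):
    # A matmul large enough that the BLAS threadpool actually gets exercised.
    rng = np.random.RandomState(seed)
    a = rng.randn(500, 500)
    return float(np.trace(a @ a))


if __name__ == "__main__":
    # Each loky worker's OMP/MKL/OpenBLAS threadpool is capped at 2 threads,
    # which avoids oversubscription when several workers call into BLAS.
    with parallel_config("loky", n_jobs=2, inner_max_num_threads=2):
        print(Parallel()(delayed(blas_heavy)(s) for s in range(4)))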
+ @with_multiprocessing
+ @parametrize('n_jobs', [2, 4, -1])
+ def test_loky_reuse_workers(n_jobs):
+     # Non-regression test for issue #967 where the workers were not reused
+     # when calling multiple Parallel loops.
+
+     def parallel_call(n_jobs):
+         x = range(10)
+         Parallel(n_jobs=n_jobs)(delayed(sum)(x) for i in range(10))
+
+     # Run a parallel loop and get the workers used for the computations
+     parallel_call(n_jobs)
+     first_executor = get_reusable_executor(reuse=True)
+
+     # Ensure that the workers are reused for the next calls, as the executor
+     # is not restarted.
+     for _ in range(10):
+         parallel_call(n_jobs)
+         executor = get_reusable_executor(reuse=True)
+         assert executor == first_executor
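For reference, a small standalone sketch of the worker-reuse behaviour checked above, comparing the process ids serving two consecutive Parallel calls; it assumes joblib is installed and that the default reusable executor is not shut down in between:

import os
from joblib import Parallel, delayed

if __name__ == "__main__":
    first = set(Parallel(n_jobs=2, backend="loky")(
        delayed(os.getpid)() for _ in range(4)))
    second = set(Parallel(n_jobs=2, backend="loky")(
        delayed(os.getpid)() for _ in range(4)))
    # With the executor being reused, both calls are normally served by the
    # same worker processes, so the two PID sets match.
    print(first == second)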
llmeval-env/lib/python3.10/site-packages/joblib/test/test_store_backends.py ADDED
@@ -0,0 +1,94 @@
+
+ try:
+     # Python 2.7: use the C pickle to speed up
+     # test_concurrency_safe_write which pickles big python objects
+     import cPickle as cpickle
+ except ImportError:
+     import pickle as cpickle
+ import functools
+ from pickle import PicklingError
+ import time
+
+ import pytest
+
+ from joblib.testing import parametrize, timeout
+ from joblib.test.common import with_multiprocessing
+ from joblib.backports import concurrency_safe_rename
+ from joblib import Parallel, delayed
+ from joblib._store_backends import (
+     concurrency_safe_write,
+     FileSystemStoreBackend,
+     CacheWarning,
+ )
+
+
+ def write_func(output, filename):
+     with open(filename, 'wb') as f:
+         cpickle.dump(output, f)
+
+
+ def load_func(expected, filename):
+     for i in range(10):
+         try:
+             with open(filename, 'rb') as f:
+                 reloaded = cpickle.load(f)
+             break
+         except (OSError, IOError):
+             # On Windows you can have WindowsError ([Error 5] Access
+             # is denied or [Error 13] Permission denied) when reading the file,
+             # probably because a writer process has a lock on the file
+             time.sleep(0.1)
+     else:
+         raise
+     assert expected == reloaded
+
+
+ def concurrency_safe_write_rename(to_write, filename, write_func):
+     temporary_filename = concurrency_safe_write(to_write,
+                                                 filename, write_func)
+     concurrency_safe_rename(temporary_filename, filename)
+
+
+ @timeout(0)  # No timeout as this test can be long
+ @with_multiprocessing
+ @parametrize('backend', ['multiprocessing', 'loky', 'threading'])
+ def test_concurrency_safe_write(tmpdir, backend):
+     # Add one item to cache
+     filename = tmpdir.join('test.pkl').strpath
+
+     obj = {str(i): i for i in range(int(1e5))}
+     funcs = [functools.partial(concurrency_safe_write_rename,
+                                write_func=write_func)
+              if i % 3 != 2 else load_func for i in range(12)]
+     Parallel(n_jobs=2, backend=backend)(
+         delayed(func)(obj, filename) for func in funcs)
+
+
+ def test_warning_on_dump_failure(tmpdir):
+     # Check that a warning is raised when the dump fails for any reason but
+     # a PicklingError.
+     class UnpicklableObject(object):
+         def __reduce__(self):
+             raise RuntimeError("some exception")
+
+     backend = FileSystemStoreBackend()
+     backend.location = tmpdir.join('test_warning_on_pickling_error').strpath
+     backend.compress = None
+
+     with pytest.warns(CacheWarning, match="some exception"):
+         backend.dump_item("testpath", UnpicklableObject())
+
+
+ def test_warning_on_pickling_error(tmpdir):
+     # This is separate from test_warning_on_dump_failure because in the
+     # future we will turn this into an exception.
+     class UnpicklableObject(object):
+         def __reduce__(self):
+             raise PicklingError("not picklable")
+
+     backend = FileSystemStoreBackend()
+     backend.location = tmpdir.join('test_warning_on_pickling_error').strpath
+     backend.compress = None
+
+     with pytest.warns(FutureWarning, match="not picklable"):
+         backend.dump_item("testpath", UnpicklableObject())
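For context, the tests above exercise joblib's write-then-rename strategy for cache files. Below is a minimal standalone sketch of that pattern using only the standard library; the helper name atomic_pickle_dump is illustrative and not joblib's API:

import os
import pickle
import tempfile


def atomic_pickle_dump(obj, filename):
    # Write to a temporary file in the same directory, then rename it over
    # the target; os.replace is atomic on POSIX and Windows, so concurrent
    # readers see either the old or the new file, never a partial one.
    dirname = os.path.dirname(os.path.abspath(filename))
    fd, tmp_path = tempfile.mkstemp(dir=dirname)
    try:
        with os.fdopen(fd, "wb") as f:
            pickle.dump(obj, f)
        os.replace(tmp_path, filename)
    except BaseException:
        os.unlink(tmp_path)
        raise


if __name__ == "__main__":
    atomic_pickle_dump({"answer": 42}, "demo.pkl")
    with open("demo.pkl", "rb") as f:
        print(pickle.load(f))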
llmeval-env/lib/python3.10/site-packages/joblib/test/test_testing.py ADDED
@@ -0,0 +1,75 @@
+ import sys
+ import re
+
+ from joblib.testing import raises, check_subprocess_call
+
+
+ def test_check_subprocess_call():
+     code = '\n'.join(['result = 1 + 2 * 3',
+                       'print(result)',
+                       'my_list = [1, 2, 3]',
+                       'print(my_list)'])
+
+     check_subprocess_call([sys.executable, '-c', code])
+
+     # Now checking stdout with a regex
+     check_subprocess_call([sys.executable, '-c', code],
+                           # Regex needed for platform-specific line endings
+                           stdout_regex=r'7\s{1,2}\[1, 2, 3\]')
+
+
+ def test_check_subprocess_call_non_matching_regex():
+     code = '42'
+     non_matching_pattern = '_no_way_this_matches_anything_'
+
+     with raises(ValueError) as excinfo:
+         check_subprocess_call([sys.executable, '-c', code],
+                               stdout_regex=non_matching_pattern)
+     excinfo.match('Unexpected stdout.+{}'.format(non_matching_pattern))
+
+
+ def test_check_subprocess_call_wrong_command():
+     wrong_command = '_a_command_that_does_not_exist_'
+     with raises(OSError):
+         check_subprocess_call([wrong_command])
+
+
+ def test_check_subprocess_call_non_zero_return_code():
+     code_with_non_zero_exit = '\n'.join([
+         'import sys',
+         'print("writing on stdout")',
+         'sys.stderr.write("writing on stderr")',
+         'sys.exit(123)'])
+
+     pattern = re.compile('Non-zero return code: 123.+'
+                          'Stdout:\nwriting on stdout.+'
+                          'Stderr:\nwriting on stderr', re.DOTALL)
+
+     with raises(ValueError) as excinfo:
+         check_subprocess_call([sys.executable, '-c', code_with_non_zero_exit])
+     excinfo.match(pattern)
+
+
+ def test_check_subprocess_call_timeout():
+     code_timing_out = '\n'.join([
+         'import time',
+         'import sys',
+         'print("before sleep on stdout")',
+         'sys.stdout.flush()',
+         'sys.stderr.write("before sleep on stderr")',
+         'sys.stderr.flush()',
+         # We need to sleep for at least 2 * timeout seconds in case the
+         # SIGKILL is triggered.
+         'time.sleep(10)',
+         'print("process should have been killed before")',
+         'sys.stdout.flush()'])
+
+     pattern = re.compile('Non-zero return code:.+'
+                          'Stdout:\nbefore sleep on stdout\\s+'
+                          'Stderr:\nbefore sleep on stderr',
+                          re.DOTALL)
+
+     with raises(ValueError) as excinfo:
+         check_subprocess_call([sys.executable, '-c', code_timing_out],
+                               timeout=1)
+     excinfo.match(pattern)
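For context, check_subprocess_call wraps roughly this kind of logic: run a command, enforce a timeout, fail on a non-zero return code, and optionally match stdout against a regex. Below is a rough standalone approximation with subprocess.run; the helper's exact behaviour and error messages may differ:

import re
import subprocess
import sys

if __name__ == "__main__":
    # Run a small Python snippet and capture its output, as the tests above do.
    proc = subprocess.run(
        [sys.executable, "-c", "print(1 + 2 * 3)"],
        capture_output=True, text=True, timeout=5,
    )
    if proc.returncode != 0:
        raise ValueError(
            f"Non-zero return code: {proc.returncode}\n"
            f"Stdout:\n{proc.stdout}\nStderr:\n{proc.stderr}"
        )
    if re.search(r"^7\s*$", proc.stdout) is None:
        raise ValueError(f"Unexpected stdout: {proc.stdout!r}")
    print("subprocess output matched")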
llmeval-env/lib/python3.10/site-packages/joblib/test/test_utils.py ADDED
@@ -0,0 +1,27 @@
+ import pytest
+
+ from joblib._utils import eval_expr
+
+
+ @pytest.mark.parametrize(
+     "expr",
+     ["exec('import os')", "print(1)", "import os", "1+1; import os", "1^1"],
+ )
+ def test_eval_expr_invalid(expr):
+     with pytest.raises(
+         ValueError, match="is not a valid or supported arithmetic"
+     ):
+         eval_expr(expr)
+
+
+ @pytest.mark.parametrize(
+     "expr, result",
+     [
+         ("2*6", 12),
+         ("2**6", 64),
+         ("1 + 2*3**(4) / (6 + -7)", -161.0),
+         ("(20 // 3) % 5", 1),
+     ],
+ )
+ def test_eval_expr_valid(expr, result):
+     assert eval_expr(expr) == result
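For context, eval_expr accepts plain arithmetic and rejects everything else, as the cases above show. Below is a standalone sketch of one way such a whitelist-based evaluator can be written with the ast module; joblib's actual implementation may differ in its details:

import ast
import operator

# Whitelisted arithmetic operators; anything else is rejected.
_OPS = {
    ast.Add: operator.add, ast.Sub: operator.sub, ast.Mult: operator.mul,
    ast.Div: operator.truediv, ast.FloorDiv: operator.floordiv,
    ast.Mod: operator.mod, ast.Pow: operator.pow, ast.USub: operator.neg,
}


def safe_eval_expr(expr):
    def _eval(node):
        if isinstance(node, ast.Expression):
            return _eval(node.body)
        if isinstance(node, ast.Constant) and isinstance(node.value, (int, float)):
            return node.value
        if isinstance(node, ast.BinOp) and type(node.op) in _OPS:
            return _OPS[type(node.op)](_eval(node.left), _eval(node.right))
        if isinstance(node, ast.UnaryOp) and type(node.op) in _OPS:
            return _OPS[type(node.op)](_eval(node.operand))
        raise ValueError(
            f"{expr!r} is not a valid or supported arithmetic expression")

    try:
        return _eval(ast.parse(expr, mode="eval"))
    except SyntaxError:
        raise ValueError(
            f"{expr!r} is not a valid or supported arithmetic expression")


if __name__ == "__main__":
    print(safe_eval_expr("1 + 2*3**(4) / (6 + -7)"))   # -161.0
    print(safe_eval_expr("(20 // 3) % 5"))             # 1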