applied-ai-018 committed
Commit 31097a7 · verified · 1 Parent(s): 3c17cbe

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. env-llmeval/lib/python3.10/site-packages/joblib/__init__.py +148 -0
  2. env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/_cloudpickle_wrapper.cpython-310.pyc +0 -0
  3. env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/_multiprocessing_helpers.cpython-310.pyc +0 -0
  4. env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/_parallel_backends.cpython-310.pyc +0 -0
  5. env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/_store_backends.cpython-310.pyc +0 -0
  6. env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/compressor.cpython-310.pyc +0 -0
  7. env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/disk.cpython-310.pyc +0 -0
  8. env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/func_inspect.cpython-310.pyc +0 -0
  9. env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/memory.cpython-310.pyc +0 -0
  10. env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/numpy_pickle.cpython-310.pyc +0 -0
  11. env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/numpy_pickle_compat.cpython-310.pyc +0 -0
  12. env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/testing.cpython-310.pyc +0 -0
  13. env-llmeval/lib/python3.10/site-packages/joblib/_cloudpickle_wrapper.py +19 -0
  14. env-llmeval/lib/python3.10/site-packages/joblib/_dask.py +379 -0
  15. env-llmeval/lib/python3.10/site-packages/joblib/_memmapping_reducer.py +657 -0
  16. env-llmeval/lib/python3.10/site-packages/joblib/_multiprocessing_helpers.py +53 -0
  17. env-llmeval/lib/python3.10/site-packages/joblib/_parallel_backends.py +649 -0
  18. env-llmeval/lib/python3.10/site-packages/joblib/_store_backends.py +474 -0
  19. env-llmeval/lib/python3.10/site-packages/joblib/_utils.py +83 -0
  20. env-llmeval/lib/python3.10/site-packages/joblib/compressor.py +570 -0
  21. env-llmeval/lib/python3.10/site-packages/joblib/disk.py +136 -0
  22. env-llmeval/lib/python3.10/site-packages/joblib/executor.py +117 -0
  23. env-llmeval/lib/python3.10/site-packages/joblib/externals/__pycache__/__init__.cpython-310.pyc +0 -0
  24. env-llmeval/lib/python3.10/site-packages/joblib/externals/cloudpickle/__init__.py +18 -0
  25. env-llmeval/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/__init__.cpython-310.pyc +0 -0
  26. env-llmeval/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/cloudpickle.cpython-310.pyc +0 -0
  27. env-llmeval/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/cloudpickle_fast.cpython-310.pyc +0 -0
  28. env-llmeval/lib/python3.10/site-packages/joblib/externals/cloudpickle/cloudpickle.py +1487 -0
  29. env-llmeval/lib/python3.10/site-packages/joblib/externals/cloudpickle/cloudpickle_fast.py +13 -0
  30. env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/__init__.cpython-310.pyc +0 -0
  31. env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/_base.cpython-310.pyc +0 -0
  32. env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/initializers.cpython-310.pyc +0 -0
  33. env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/process_executor.cpython-310.pyc +0 -0
  34. env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/__init__.py +14 -0
  35. env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/__init__.cpython-310.pyc +0 -0
  36. env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/_posix_reduction.cpython-310.pyc +0 -0
  37. env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/popen_loky_posix.cpython-310.pyc +0 -0
  38. env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/process.cpython-310.pyc +0 -0
  39. env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/_posix_reduction.py +67 -0
  40. env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/_win_reduction.py +18 -0
  41. env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/context.py +378 -0
  42. env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/fork_exec.py +43 -0
  43. env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/popen_loky_posix.py +193 -0
  44. env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/popen_loky_win32.py +173 -0
  45. env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/process.py +85 -0
  46. env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/queues.py +236 -0
  47. env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/reduction.py +224 -0
  48. env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/resource_tracker.py +378 -0
  49. env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/spawn.py +250 -0
  50. env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/synchronize.py +409 -0
env-llmeval/lib/python3.10/site-packages/joblib/__init__.py ADDED
@@ -0,0 +1,148 @@
+ """Joblib is a set of tools to provide **lightweight pipelining in
+ Python**. In particular:
+
+ 1. transparent disk-caching of functions and lazy re-evaluation
+    (memoize pattern)
+
+ 2. easy simple parallel computing
+
+ Joblib is optimized to be **fast** and **robust** on large
+ data in particular and has specific optimizations for `numpy` arrays. It is
+ **BSD-licensed**.
+
+
+     ==================== ===============================================
+     **Documentation:**   https://joblib.readthedocs.io
+
+     **Download:**        https://pypi.python.org/pypi/joblib#downloads
+
+     **Source code:**     https://github.com/joblib/joblib
+
+     **Report issues:**   https://github.com/joblib/joblib/issues
+     ==================== ===============================================
+
+
+ Vision
+ --------
+
+ The vision is to provide tools to easily achieve better performance and
+ reproducibility when working with long running jobs.
+
+ * **Avoid computing the same thing twice**: code is often rerun again and
+   again, for instance when prototyping computational-heavy jobs (as in
+   scientific development), but hand-crafted solutions to alleviate this
+   issue are error-prone and often lead to unreproducible results.
+
+ * **Persist to disk transparently**: efficiently persisting
+   arbitrary objects containing large data is hard. Using
+   joblib's caching mechanism avoids hand-written persistence and
+   implicitly links the file on disk to the execution context of
+   the original Python object. As a result, joblib's persistence is
+   good for resuming an application status or computational job, eg
+   after a crash.
+
+ Joblib addresses these problems while **leaving your code and your flow
+ control as unmodified as possible** (no framework, no new paradigms).
+
+ Main features
+ ------------------
+
+ 1) **Transparent and fast disk-caching of output value:** a memoize or
+    make-like functionality for Python functions that works well for
+    arbitrary Python objects, including very large numpy arrays. Separate
+    persistence and flow-execution logic from domain logic or algorithmic
+    code by writing the operations as a set of steps with well-defined
+    inputs and outputs: Python functions. Joblib can save their
+    computation to disk and rerun it only if necessary::
+
+       >>> from joblib import Memory
+       >>> cachedir = 'your_cache_dir_goes_here'
+       >>> mem = Memory(cachedir)
+       >>> import numpy as np
+       >>> a = np.vander(np.arange(3)).astype(float)
+       >>> square = mem.cache(np.square)
+       >>> b = square(a)                                   # doctest: +ELLIPSIS
+       ______________________________________________________________________...
+       [Memory] Calling square...
+       square(array([[0., 0., 1.],
+              [1., 1., 1.],
+              [4., 2., 1.]]))
+       _________________________________________________...square - ...s, 0.0min
+
+       >>> c = square(a)
+       >>> # The above call did not trigger an evaluation
+
+ 2) **Embarrassingly parallel helper:** to make it easy to write readable
+    parallel code and debug it quickly::
+
+       >>> from joblib import Parallel, delayed
+       >>> from math import sqrt
+       >>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
+       [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
+
+
+ 3) **Fast compressed Persistence**: a replacement for pickle to work
+    efficiently on Python objects containing large data (
+    *joblib.dump* & *joblib.load* ).
+
+ ..
+     >>> import shutil ; shutil.rmtree(cachedir)
+
+ """
+
+ # PEP0440 compatible formatted version, see:
+ # https://www.python.org/dev/peps/pep-0440/
+ #
+ # Generic release markers:
+ #   X.Y
+ #   X.Y.Z   # For bugfix releases
+ #
+ # Admissible pre-release markers:
+ #   X.YaN   # Alpha release
+ #   X.YbN   # Beta release
+ #   X.YrcN  # Release Candidate
+ #   X.Y     # Final release
+ #
+ # Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
+ # 'X.Y.dev0' is the canonical version of 'X.Y.dev'
+ #
+ __version__ = '1.4.0'
+
+
+ import os
+
+ from .memory import Memory
+ from .memory import MemorizedResult
+ from .memory import register_store_backend
+ from .memory import expires_after
+
+ from .logger import PrintTime
+ from .logger import Logger
+
+ from .hashing import hash
+
+ from .numpy_pickle import dump
+ from .numpy_pickle import load
+
+ from .compressor import register_compressor
+
+ from .parallel import Parallel
+ from .parallel import delayed
+ from .parallel import cpu_count
+ from .parallel import register_parallel_backend
+ from .parallel import parallel_backend
+ from .parallel import parallel_config
+ from .parallel import effective_n_jobs
+ from ._cloudpickle_wrapper import wrap_non_picklable_objects
+
+
+ __all__ = ['Memory', 'MemorizedResult', 'PrintTime', 'Logger', 'hash', 'dump',
+            'load', 'Parallel', 'delayed', 'cpu_count', 'effective_n_jobs',
+            'register_parallel_backend', 'parallel_backend', 'expires_after',
+            'register_store_backend', 'register_compressor',
+            'wrap_non_picklable_objects', 'parallel_config']
+
+
+ # Workaround issue discovered in intel-openmp 2019.5:
+ # https://github.com/ContinuumIO/anaconda-issues/issues/11294
+ os.environ.setdefault("KMP_INIT_AT_FORK", "FALSE")
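
The docstring above demonstrates Memory and Parallel separately; the following is a minimal, illustrative sketch (not part of the uploaded file) of how the two exported helpers are typically combined. The cache directory name, worker count, and helper names are assumptions for the example, not values taken from this commit.

    from joblib import Memory, Parallel, delayed

    memory = Memory("./joblib_cache", verbose=0)  # hypothetical cache location


    def slow_square(x):
        # Stand-in for an expensive computation.
        return x * x


    # Cached variant: results are persisted under ./joblib_cache and read back
    # on later calls with the same argument instead of being recomputed.
    slow_square_cached = memory.cache(slow_square)


    def process(x):
        # Plain module-level wrapper so only a regular function is shipped to
        # the worker processes; the cached call happens inside the worker.
        return slow_square_cached(x)


    if __name__ == "__main__":
        results = Parallel(n_jobs=2)(delayed(process)(i) for i in range(8))
        print(results)  # [0, 1, 4, 9, 16, 25, 36, 49]

Re-running the script reuses the on-disk cache, so only the parallel dispatch overhead remains on the second run.
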
env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/_cloudpickle_wrapper.cpython-310.pyc ADDED
Binary file (601 Bytes).
 
env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/_multiprocessing_helpers.cpython-310.pyc ADDED
Binary file (1.27 kB).
 
env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/_parallel_backends.cpython-310.pyc ADDED
Binary file (20.2 kB).
 
env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/_store_backends.cpython-310.pyc ADDED
Binary file (15.7 kB).
 
env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/compressor.cpython-310.pyc ADDED
Binary file (16.7 kB).
 
env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/disk.cpython-310.pyc ADDED
Binary file (3.11 kB).
 
env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/func_inspect.cpython-310.pyc ADDED
Binary file (8.91 kB).
 
env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/memory.cpython-310.pyc ADDED
Binary file (33.7 kB).
 
env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/numpy_pickle.cpython-310.pyc ADDED
Binary file (17.2 kB).
 
env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/numpy_pickle_compat.cpython-310.pyc ADDED
Binary file (7.11 kB).
 
env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/testing.cpython-310.pyc ADDED
Binary file (2.9 kB).
 
env-llmeval/lib/python3.10/site-packages/joblib/_cloudpickle_wrapper.py ADDED
@@ -0,0 +1,19 @@
+ """
+ Small shim of loky's cloudpickle_wrapper to avoid failure when
+ multiprocessing is not available.
+ """
+
+
+ from ._multiprocessing_helpers import mp
+
+
+ def _my_wrap_non_picklable_objects(obj, keep_wrapper=True):
+     return obj
+
+
+ if mp is not None:
+     from .externals.loky import wrap_non_picklable_objects
+ else:
+     wrap_non_picklable_objects = _my_wrap_non_picklable_objects
+
+ __all__ = ["wrap_non_picklable_objects"]
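
For context, a minimal sketch of how the exported wrap_non_picklable_objects helper can be used. The make_adder closure below is a hypothetical example, not part of the uploaded code; when multiprocessing is unavailable, the no-op fallback above simply returns the function unchanged and everything runs serially.

    from joblib import Parallel, delayed, wrap_non_picklable_objects


    def make_adder(offset):
        # 'add' only exists in this local scope, so the standard pickler cannot
        # serialize it by reference; the wrapper makes it travel to the worker
        # processes via cloudpickle instead.
        @wrap_non_picklable_objects
        def add(x):
            return x + offset

        return add


    if __name__ == "__main__":
        add_ten = make_adder(10)
        out = Parallel(n_jobs=2)(delayed(add_ten)(i) for i in range(4))
        print(out)  # [10, 11, 12, 13]
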
env-llmeval/lib/python3.10/site-packages/joblib/_dask.py ADDED
@@ -0,0 +1,379 @@
1
+ from __future__ import print_function, division, absolute_import
2
+
3
+ import asyncio
4
+ import concurrent.futures
5
+ import contextlib
6
+
7
+ import time
8
+ from uuid import uuid4
9
+ import weakref
10
+
11
+ from .parallel import parallel_config
12
+ from .parallel import AutoBatchingMixin, ParallelBackendBase
13
+
14
+ from ._utils import (
15
+ _TracebackCapturingWrapper,
16
+ _retrieve_traceback_capturing_wrapped_call
17
+ )
18
+
19
+ try:
20
+ import dask
21
+ import distributed
22
+ except ImportError:
23
+ dask = None
24
+ distributed = None
25
+
26
+ if dask is not None and distributed is not None:
27
+ from dask.utils import funcname
28
+ from dask.sizeof import sizeof
29
+ from dask.distributed import (
30
+ Client,
31
+ as_completed,
32
+ get_client,
33
+ secede,
34
+ rejoin,
35
+ )
36
+ from distributed.utils import thread_state
37
+
38
+ try:
39
+ # asyncio.TimeoutError, Python3-only error thrown by recent versions of
40
+ # distributed
41
+ from distributed.utils import TimeoutError as _TimeoutError
42
+ except ImportError:
43
+ from tornado.gen import TimeoutError as _TimeoutError
44
+
45
+
46
+ def is_weakrefable(obj):
47
+ try:
48
+ weakref.ref(obj)
49
+ return True
50
+ except TypeError:
51
+ return False
52
+
53
+
54
+ class _WeakKeyDictionary:
55
+ """A variant of weakref.WeakKeyDictionary for unhashable objects.
56
+
57
+ This datastructure is used to store futures for broadcasted data objects
58
+ such as large numpy arrays or pandas dataframes that are not hashable and
59
+ therefore cannot be used as keys of traditional python dicts.
60
+
61
+ Furthermore using a dict with id(array) as key is not safe because the
62
+ Python is likely to reuse id of recently collected arrays.
63
+ """
64
+
65
+ def __init__(self):
66
+ self._data = {}
67
+
68
+ def __getitem__(self, obj):
69
+ ref, val = self._data[id(obj)]
70
+ if ref() is not obj:
71
+ # In case of a race condition with on_destroy.
72
+ raise KeyError(obj)
73
+ return val
74
+
75
+ def __setitem__(self, obj, value):
76
+ key = id(obj)
77
+ try:
78
+ ref, _ = self._data[key]
79
+ if ref() is not obj:
80
+ # In case of race condition with on_destroy.
81
+ raise KeyError(obj)
82
+ except KeyError:
83
+ # Insert the new entry in the mapping along with a weakref
84
+ # callback to automatically delete the entry from the mapping
85
+ # as soon as the object used as key is garbage collected.
86
+ def on_destroy(_):
87
+ del self._data[key]
88
+ ref = weakref.ref(obj, on_destroy)
89
+ self._data[key] = ref, value
90
+
91
+ def __len__(self):
92
+ return len(self._data)
93
+
94
+ def clear(self):
95
+ self._data.clear()
96
+
97
+
98
+ def _funcname(x):
99
+ try:
100
+ if isinstance(x, list):
101
+ x = x[0][0]
102
+ except Exception:
103
+ pass
104
+ return funcname(x)
105
+
106
+
107
+ def _make_tasks_summary(tasks):
108
+ """Summarize of list of (func, args, kwargs) function calls"""
109
+ unique_funcs = {func for func, args, kwargs in tasks}
110
+
111
+ if len(unique_funcs) == 1:
112
+ mixed = False
113
+ else:
114
+ mixed = True
115
+ return len(tasks), mixed, _funcname(tasks)
116
+
117
+
118
+ class Batch:
119
+ """dask-compatible wrapper that executes a batch of tasks"""
120
+ def __init__(self, tasks):
121
+ # collect some metadata from the tasks to ease Batch calls
122
+ # introspection when debugging
123
+ self._num_tasks, self._mixed, self._funcname = _make_tasks_summary(
124
+ tasks
125
+ )
126
+
127
+ def __call__(self, tasks=None):
128
+ results = []
129
+ with parallel_config(backend='dask'):
130
+ for func, args, kwargs in tasks:
131
+ results.append(func(*args, **kwargs))
132
+ return results
133
+
134
+ def __repr__(self):
135
+ descr = f"batch_of_{self._funcname}_{self._num_tasks}_calls"
136
+ if self._mixed:
137
+ descr = "mixed_" + descr
138
+ return descr
139
+
140
+
141
+ def _joblib_probe_task():
142
+ # Noop used by the joblib connector to probe when workers are ready.
143
+ pass
144
+
145
+
146
+ class DaskDistributedBackend(AutoBatchingMixin, ParallelBackendBase):
147
+ MIN_IDEAL_BATCH_DURATION = 0.2
148
+ MAX_IDEAL_BATCH_DURATION = 1.0
149
+ supports_retrieve_callback = True
150
+ default_n_jobs = -1
151
+
152
+ def __init__(self, scheduler_host=None, scatter=None,
153
+ client=None, loop=None, wait_for_workers_timeout=10,
154
+ **submit_kwargs):
155
+ super().__init__()
156
+
157
+ if distributed is None:
158
+ msg = ("You are trying to use 'dask' as a joblib parallel backend "
159
+ "but dask is not installed. Please install dask "
160
+ "to fix this error.")
161
+ raise ValueError(msg)
162
+
163
+ if client is None:
164
+ if scheduler_host:
165
+ client = Client(scheduler_host, loop=loop,
166
+ set_as_default=False)
167
+ else:
168
+ try:
169
+ client = get_client()
170
+ except ValueError as e:
171
+ msg = ("To use Joblib with Dask first create a Dask Client"
172
+ "\n\n"
173
+ " from dask.distributed import Client\n"
174
+ " client = Client()\n"
175
+ "or\n"
176
+ " client = Client('scheduler-address:8786')")
177
+ raise ValueError(msg) from e
178
+
179
+ self.client = client
180
+
181
+ if scatter is not None and not isinstance(scatter, (list, tuple)):
182
+ raise TypeError("scatter must be a list/tuple, got "
183
+ "`%s`" % type(scatter).__name__)
184
+
185
+ if scatter is not None and len(scatter) > 0:
186
+ # Keep a reference to the scattered data to keep the ids the same
187
+ self._scatter = list(scatter)
188
+ scattered = self.client.scatter(scatter, broadcast=True)
189
+ self.data_futures = {id(x): f for x, f in zip(scatter, scattered)}
190
+ else:
191
+ self._scatter = []
192
+ self.data_futures = {}
193
+ self.wait_for_workers_timeout = wait_for_workers_timeout
194
+ self.submit_kwargs = submit_kwargs
195
+ self.waiting_futures = as_completed(
196
+ [],
197
+ loop=client.loop,
198
+ with_results=True,
199
+ raise_errors=False
200
+ )
201
+ self._results = {}
202
+ self._callbacks = {}
203
+
204
+ async def _collect(self):
205
+ while self._continue:
206
+ async for future, result in self.waiting_futures:
207
+ cf_future = self._results.pop(future)
208
+ callback = self._callbacks.pop(future)
209
+ if future.status == "error":
210
+ typ, exc, tb = result
211
+ cf_future.set_exception(exc)
212
+ else:
213
+ cf_future.set_result(result)
214
+ callback(result)
215
+ await asyncio.sleep(0.01)
216
+
217
+ def __reduce__(self):
218
+ return (DaskDistributedBackend, ())
219
+
220
+ def get_nested_backend(self):
221
+ return DaskDistributedBackend(client=self.client), -1
222
+
223
+ def configure(self, n_jobs=1, parallel=None, **backend_args):
224
+ self.parallel = parallel
225
+ return self.effective_n_jobs(n_jobs)
226
+
227
+ def start_call(self):
228
+ self._continue = True
229
+ self.client.loop.add_callback(self._collect)
230
+ self.call_data_futures = _WeakKeyDictionary()
231
+
232
+ def stop_call(self):
233
+ # The explicit call to clear is required to break a cycling reference
234
+ # to the futures.
235
+ self._continue = False
236
+ # wait for the future collection routine (self._backend._collect) to
237
+ # finish in order to limit asyncio warnings due to aborting _collect
238
+ # during a following backend termination call
239
+ time.sleep(0.01)
240
+ self.call_data_futures.clear()
241
+
242
+ def effective_n_jobs(self, n_jobs):
243
+ effective_n_jobs = sum(self.client.ncores().values())
244
+ if effective_n_jobs != 0 or not self.wait_for_workers_timeout:
245
+ return effective_n_jobs
246
+
247
+ # If there is no worker, schedule a probe task to wait for the workers
248
+ # to come up and be available. If the dask cluster is in adaptive mode
249
+ # task might cause the cluster to provision some workers.
250
+ try:
251
+ self.client.submit(_joblib_probe_task).result(
252
+ timeout=self.wait_for_workers_timeout
253
+ )
254
+ except _TimeoutError as e:
255
+ error_msg = (
256
+ "DaskDistributedBackend has no worker after {} seconds. "
257
+ "Make sure that workers are started and can properly connect "
258
+ "to the scheduler and increase the joblib/dask connection "
259
+ "timeout with:\n\n"
260
+ "parallel_config(backend='dask', wait_for_workers_timeout={})"
261
+ ).format(self.wait_for_workers_timeout,
262
+ max(10, 2 * self.wait_for_workers_timeout))
263
+ raise TimeoutError(error_msg) from e
264
+ return sum(self.client.ncores().values())
265
+
266
+ async def _to_func_args(self, func):
267
+ itemgetters = dict()
268
+
269
+ # Futures that are dynamically generated during a single call to
270
+ # Parallel.__call__.
271
+ call_data_futures = getattr(self, 'call_data_futures', None)
272
+
273
+ async def maybe_to_futures(args):
274
+ out = []
275
+ for arg in args:
276
+ arg_id = id(arg)
277
+ if arg_id in itemgetters:
278
+ out.append(itemgetters[arg_id])
279
+ continue
280
+
281
+ f = self.data_futures.get(arg_id, None)
282
+ if f is None and call_data_futures is not None:
283
+ try:
284
+ f = await call_data_futures[arg]
285
+ except KeyError:
286
+ pass
287
+ if f is None:
288
+ if is_weakrefable(arg) and sizeof(arg) > 1e3:
289
+ # Automatically scatter large objects to some of
290
+ # the workers to avoid duplicated data transfers.
291
+ # Rely on automated inter-worker data stealing if
292
+ # more workers need to reuse this data
293
+ # concurrently.
294
+ # set hash=False - nested scatter calls (i.e
295
+ # calling client.scatter inside a dask worker)
296
+ # using hash=True often raise CancelledError,
297
+ # see dask/distributed#3703
298
+ _coro = self.client.scatter(
299
+ arg,
300
+ asynchronous=True,
301
+ hash=False
302
+ )
303
+ # Centralize the scattering of identical arguments
304
+ # between concurrent apply_async callbacks by
305
+ # exposing the running coroutine in
306
+ # call_data_futures before it completes.
307
+ t = asyncio.Task(_coro)
308
+ call_data_futures[arg] = t
309
+
310
+ f = await t
311
+
312
+ if f is not None:
313
+ out.append(f)
314
+ else:
315
+ out.append(arg)
316
+ return out
317
+
318
+ tasks = []
319
+ for f, args, kwargs in func.items:
320
+ args = list(await maybe_to_futures(args))
321
+ kwargs = dict(zip(kwargs.keys(),
322
+ await maybe_to_futures(kwargs.values())))
323
+ tasks.append((f, args, kwargs))
324
+
325
+ return (Batch(tasks), tasks)
326
+
327
+ def apply_async(self, func, callback=None):
328
+
329
+ cf_future = concurrent.futures.Future()
330
+ cf_future.get = cf_future.result # achieve AsyncResult API
331
+
332
+ async def f(func, callback):
333
+ batch, tasks = await self._to_func_args(func)
334
+ key = f'{repr(batch)}-{uuid4().hex}'
335
+
336
+ dask_future = self.client.submit(
337
+ _TracebackCapturingWrapper(batch),
338
+ tasks=tasks,
339
+ key=key,
340
+ **self.submit_kwargs
341
+ )
342
+ self.waiting_futures.add(dask_future)
343
+ self._callbacks[dask_future] = callback
344
+ self._results[dask_future] = cf_future
345
+
346
+ self.client.loop.add_callback(f, func, callback)
347
+
348
+ return cf_future
349
+
350
+ def retrieve_result_callback(self, out):
351
+ return _retrieve_traceback_capturing_wrapped_call(out)
352
+
353
+ def abort_everything(self, ensure_ready=True):
354
+ """ Tell the client to cancel any task submitted via this instance
355
+
356
+ joblib.Parallel will never access those results
357
+ """
358
+ with self.waiting_futures.lock:
359
+ self.waiting_futures.futures.clear()
360
+ while not self.waiting_futures.queue.empty():
361
+ self.waiting_futures.queue.get()
362
+
363
+ @contextlib.contextmanager
364
+ def retrieval_context(self):
365
+ """Override ParallelBackendBase.retrieval_context to avoid deadlocks.
366
+
367
+ This removes thread from the worker's thread pool (using 'secede').
368
+ Seceding avoids deadlock in nested parallelism settings.
369
+ """
370
+ # See 'joblib.Parallel.__call__' and 'joblib.Parallel.retrieve' for how
371
+ # this is used.
372
+ if hasattr(thread_state, 'execution_state'):
373
+ # we are in a worker. Secede to avoid deadlock.
374
+ secede()
375
+
376
+ yield
377
+
378
+ if hasattr(thread_state, 'execution_state'):
379
+ rejoin()
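
For context, a minimal sketch of routing joblib.Parallel work through the DaskDistributedBackend implemented above. It assumes the optional dask and distributed packages are installed; the in-process Client is only a demo choice, and a real deployment would point Client at a running scheduler address instead.

    from math import sqrt

    from dask.distributed import Client
    from joblib import Parallel, delayed, parallel_config

    if __name__ == "__main__":
        client = Client(processes=False)  # in-process scheduler for the demo

        # backend='dask' selects the backend defined above and reuses the
        # current client; calls are grouped into Batch tasks and submitted
        # to the Dask scheduler.
        with parallel_config(backend="dask"):
            out = Parallel()(delayed(sqrt)(i) for i in range(16))

        print(out[:4])
        client.close()
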
env-llmeval/lib/python3.10/site-packages/joblib/_memmapping_reducer.py ADDED
@@ -0,0 +1,657 @@
1
+ """
2
+ Reducer using memory mapping for numpy arrays
3
+ """
4
+ # Author: Thomas Moreau <[email protected]>
5
+ # Copyright: 2017, Thomas Moreau
6
+ # License: BSD 3 clause
7
+
8
+ from mmap import mmap
9
+ import errno
10
+ import os
11
+ import stat
12
+ import threading
13
+ import atexit
14
+ import tempfile
15
+ import time
16
+ import warnings
17
+ import weakref
18
+ from uuid import uuid4
19
+ from multiprocessing import util
20
+
21
+ from pickle import whichmodule, loads, dumps, HIGHEST_PROTOCOL, PicklingError
22
+
23
+ try:
24
+ WindowsError
25
+ except NameError:
26
+ WindowsError = type(None)
27
+
28
+ try:
29
+ import numpy as np
30
+ from numpy.lib.stride_tricks import as_strided
31
+ except ImportError:
32
+ np = None
33
+
34
+ from .numpy_pickle import dump, load, load_temporary_memmap
35
+ from .backports import make_memmap
36
+ from .disk import delete_folder
37
+ from .externals.loky.backend import resource_tracker
38
+
39
+ # Some system have a ramdisk mounted by default, we can use it instead of /tmp
40
+ # as the default folder to dump big arrays to share with subprocesses.
41
+ SYSTEM_SHARED_MEM_FS = '/dev/shm'
42
+
43
+ # Minimal number of bytes available on SYSTEM_SHARED_MEM_FS to consider using
44
+ # it as the default folder to dump big arrays to share with subprocesses.
45
+ SYSTEM_SHARED_MEM_FS_MIN_SIZE = int(2e9)
46
+
47
+ # Folder and file permissions to chmod temporary files generated by the
48
+ # memmapping pool. Only the owner of the Python process can access the
49
+ # temporary files and folder.
50
+ FOLDER_PERMISSIONS = stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
51
+ FILE_PERMISSIONS = stat.S_IRUSR | stat.S_IWUSR
52
+
53
+ # Set used in joblib workers, referencing the filenames of temporary memmaps
54
+ # created by joblib to speed up data communication. In child processes, we add
55
+ # a finalizer to these memmaps that sends a maybe_unlink call to the
56
+ # resource_tracker, in order to free main memory as fast as possible.
57
+ JOBLIB_MMAPS = set()
58
+
59
+
60
+ def _log_and_unlink(filename):
61
+ from .externals.loky.backend.resource_tracker import _resource_tracker
62
+ util.debug(
63
+ "[FINALIZER CALL] object mapping to {} about to be deleted,"
64
+ " decrementing the refcount of the file (pid: {})".format(
65
+ os.path.basename(filename), os.getpid()))
66
+ _resource_tracker.maybe_unlink(filename, "file")
67
+
68
+
69
+ def add_maybe_unlink_finalizer(memmap):
70
+ util.debug(
71
+ "[FINALIZER ADD] adding finalizer to {} (id {}, filename {}, pid {})"
72
+ "".format(type(memmap), id(memmap), os.path.basename(memmap.filename),
73
+ os.getpid()))
74
+ weakref.finalize(memmap, _log_and_unlink, memmap.filename)
75
+
76
+
77
+ def unlink_file(filename):
78
+ """Wrapper around os.unlink with a retry mechanism.
79
+
80
+ The retry mechanism has been implemented primarily to overcome a race
81
+ condition happening during the finalizer of a np.memmap: when a process
82
+ holding the last reference to a mmap-backed np.memmap/np.array is about to
83
+ delete this array (and close the reference), it sends a maybe_unlink
84
+ request to the resource_tracker. This request can be processed faster than
85
+ it takes for the last reference of the memmap to be closed, yielding (on
86
+ Windows) a PermissionError in the resource_tracker loop.
87
+ """
88
+ NUM_RETRIES = 10
89
+ for retry_no in range(1, NUM_RETRIES + 1):
90
+ try:
91
+ os.unlink(filename)
92
+ break
93
+ except PermissionError:
94
+ util.debug(
95
+ '[ResourceTracker] tried to unlink {}, got '
96
+ 'PermissionError'.format(filename)
97
+ )
98
+ if retry_no == NUM_RETRIES:
99
+ raise
100
+ else:
101
+ time.sleep(.2)
102
+ except FileNotFoundError:
103
+ # In case of a race condition when deleting the temporary folder,
104
+ # avoid noisy FileNotFoundError exception in the resource tracker.
105
+ pass
106
+
107
+
108
+ resource_tracker._CLEANUP_FUNCS['file'] = unlink_file
109
+
110
+
111
+ class _WeakArrayKeyMap:
112
+ """A variant of weakref.WeakKeyDictionary for unhashable numpy arrays.
113
+
114
+ This datastructure will be used with numpy arrays as obj keys, therefore we
115
+ do not use the __get__ / __set__ methods to avoid any conflict with the
116
+ numpy fancy indexing syntax.
117
+ """
118
+
119
+ def __init__(self):
120
+ self._data = {}
121
+
122
+ def get(self, obj):
123
+ ref, val = self._data[id(obj)]
124
+ if ref() is not obj:
125
+ # In case of race condition with on_destroy: could never be
126
+ # triggered by the joblib tests with CPython.
127
+ raise KeyError(obj)
128
+ return val
129
+
130
+ def set(self, obj, value):
131
+ key = id(obj)
132
+ try:
133
+ ref, _ = self._data[key]
134
+ if ref() is not obj:
135
+ # In case of race condition with on_destroy: could never be
136
+ # triggered by the joblib tests with CPython.
137
+ raise KeyError(obj)
138
+ except KeyError:
139
+ # Insert the new entry in the mapping along with a weakref
140
+ # callback to automatically delete the entry from the mapping
141
+ # as soon as the object used as key is garbage collected.
142
+ def on_destroy(_):
143
+ del self._data[key]
144
+ ref = weakref.ref(obj, on_destroy)
145
+ self._data[key] = ref, value
146
+
147
+ def __getstate__(self):
148
+ raise PicklingError("_WeakArrayKeyMap is not pickleable")
149
+
150
+
151
+ ###############################################################################
152
+ # Support for efficient transient pickling of numpy data structures
153
+
154
+
155
+ def _get_backing_memmap(a):
156
+ """Recursively look up the original np.memmap instance base if any."""
157
+ b = getattr(a, 'base', None)
158
+ if b is None:
159
+ # TODO: check scipy sparse datastructure if scipy is installed
160
+ # a nor its descendants do not have a memmap base
161
+ return None
162
+
163
+ elif isinstance(b, mmap):
164
+ # a is already a real memmap instance.
165
+ return a
166
+
167
+ else:
168
+ # Recursive exploration of the base ancestry
169
+ return _get_backing_memmap(b)
170
+
171
+
172
+ def _get_temp_dir(pool_folder_name, temp_folder=None):
173
+ """Get the full path to a subfolder inside the temporary folder.
174
+
175
+ Parameters
176
+ ----------
177
+ pool_folder_name : str
178
+ Sub-folder name used for the serialization of a pool instance.
179
+
180
+ temp_folder: str, optional
181
+ Folder to be used by the pool for memmapping large arrays
182
+ for sharing memory with worker processes. If None, this will try in
183
+ order:
184
+
185
+ - a folder pointed by the JOBLIB_TEMP_FOLDER environment
186
+ variable,
187
+ - /dev/shm if the folder exists and is writable: this is a
188
+ RAMdisk filesystem available by default on modern Linux
189
+ distributions,
190
+ - the default system temporary folder that can be
191
+ overridden with TMP, TMPDIR or TEMP environment
192
+ variables, typically /tmp under Unix operating systems.
193
+
194
+ Returns
195
+ -------
196
+ pool_folder : str
197
+ full path to the temporary folder
198
+ use_shared_mem : bool
199
+ whether the temporary folder is written to the system shared memory
200
+ folder or some other temporary folder.
201
+ """
202
+ use_shared_mem = False
203
+ if temp_folder is None:
204
+ temp_folder = os.environ.get('JOBLIB_TEMP_FOLDER', None)
205
+ if temp_folder is None:
206
+ if os.path.exists(SYSTEM_SHARED_MEM_FS) and hasattr(os, 'statvfs'):
207
+ try:
208
+ shm_stats = os.statvfs(SYSTEM_SHARED_MEM_FS)
209
+ available_nbytes = shm_stats.f_bsize * shm_stats.f_bavail
210
+ if available_nbytes > SYSTEM_SHARED_MEM_FS_MIN_SIZE:
211
+ # Try to see if we have write access to the shared mem
212
+ # folder only if it is reasonably large (that is 2GB or
213
+ # more).
214
+ temp_folder = SYSTEM_SHARED_MEM_FS
215
+ pool_folder = os.path.join(temp_folder, pool_folder_name)
216
+ if not os.path.exists(pool_folder):
217
+ os.makedirs(pool_folder)
218
+ use_shared_mem = True
219
+ except (IOError, OSError):
220
+ # Missing rights in the /dev/shm partition, fallback to regular
221
+ # temp folder.
222
+ temp_folder = None
223
+ if temp_folder is None:
224
+ # Fallback to the default tmp folder, typically /tmp
225
+ temp_folder = tempfile.gettempdir()
226
+ temp_folder = os.path.abspath(os.path.expanduser(temp_folder))
227
+ pool_folder = os.path.join(temp_folder, pool_folder_name)
228
+ return pool_folder, use_shared_mem
229
+
230
+
231
+ def has_shareable_memory(a):
232
+ """Return True if a is backed by some mmap buffer directly or not."""
233
+ return _get_backing_memmap(a) is not None
234
+
235
+
236
+ def _strided_from_memmap(filename, dtype, mode, offset, order, shape, strides,
237
+ total_buffer_len, unlink_on_gc_collect):
238
+ """Reconstruct an array view on a memory mapped file."""
239
+ if mode == 'w+':
240
+ # Do not zero the original data when unpickling
241
+ mode = 'r+'
242
+
243
+ if strides is None:
244
+ # Simple, contiguous memmap
245
+ return make_memmap(
246
+ filename, dtype=dtype, shape=shape, mode=mode, offset=offset,
247
+ order=order, unlink_on_gc_collect=unlink_on_gc_collect
248
+ )
249
+ else:
250
+ # For non-contiguous data, memmap the total enclosing buffer and then
251
+ # extract the non-contiguous view with the stride-tricks API
252
+ base = make_memmap(
253
+ filename, dtype=dtype, shape=total_buffer_len, offset=offset,
254
+ mode=mode, order=order, unlink_on_gc_collect=unlink_on_gc_collect
255
+ )
256
+ return as_strided(base, shape=shape, strides=strides)
257
+
258
+
259
+ def _reduce_memmap_backed(a, m):
260
+ """Pickling reduction for memmap backed arrays.
261
+
262
+ a is expected to be an instance of np.ndarray (or np.memmap)
263
+ m is expected to be an instance of np.memmap on the top of the ``base``
264
+ attribute ancestry of a. ``m.base`` should be the real python mmap object.
265
+ """
266
+ # offset that comes from the striding differences between a and m
267
+ util.debug('[MEMMAP REDUCE] reducing a memmap-backed array '
268
+ '(shape, {}, pid: {})'.format(a.shape, os.getpid()))
269
+ try:
270
+ from numpy.lib.array_utils import byte_bounds
271
+ except (ModuleNotFoundError, ImportError):
272
+ # Backward-compat for numpy < 2.0
273
+ from numpy import byte_bounds
274
+ a_start, a_end = byte_bounds(a)
275
+ m_start = byte_bounds(m)[0]
276
+ offset = a_start - m_start
277
+
278
+ # offset from the backing memmap
279
+ offset += m.offset
280
+
281
+ if m.flags['F_CONTIGUOUS']:
282
+ order = 'F'
283
+ else:
284
+ # The backing memmap buffer is necessarily contiguous hence C if not
285
+ # Fortran
286
+ order = 'C'
287
+
288
+ if a.flags['F_CONTIGUOUS'] or a.flags['C_CONTIGUOUS']:
289
+ # If the array is a contiguous view, no need to pass the strides
290
+ strides = None
291
+ total_buffer_len = None
292
+ else:
293
+ # Compute the total number of items to map from which the strided
294
+ # view will be extracted.
295
+ strides = a.strides
296
+ total_buffer_len = (a_end - a_start) // a.itemsize
297
+
298
+ return (_strided_from_memmap,
299
+ (m.filename, a.dtype, m.mode, offset, order, a.shape, strides,
300
+ total_buffer_len, False))
301
+
302
+
303
+ def reduce_array_memmap_backward(a):
304
+ """reduce a np.array or a np.memmap from a child process"""
305
+ m = _get_backing_memmap(a)
306
+ if isinstance(m, np.memmap) and m.filename not in JOBLIB_MMAPS:
307
+ # if a is backed by a memmaped file, reconstruct a using the
308
+ # memmaped file.
309
+ return _reduce_memmap_backed(a, m)
310
+ else:
311
+ # a is either a regular (not memmap-backed) numpy array, or an array
312
+ # backed by a shared temporary file created by joblib. In the latter
313
+ # case, in order to limit the lifespan of these temporary files, we
314
+ # serialize the memmap as a regular numpy array, and decref the
315
+ # file backing the memmap (done implicitly in a previously registered
316
+ # finalizer, see ``unlink_on_gc_collect`` for more details)
317
+ return (
318
+ loads, (dumps(np.asarray(a), protocol=HIGHEST_PROTOCOL), )
319
+ )
320
+
321
+
322
+ class ArrayMemmapForwardReducer(object):
323
+ """Reducer callable to dump large arrays to memmap files.
324
+
325
+ Parameters
326
+ ----------
327
+ max_nbytes: int
328
+ Threshold to trigger memmapping of large arrays to files created
329
+ a folder.
330
+ temp_folder_resolver: callable
331
+ An callable in charge of resolving a temporary folder name where files
332
+ for backing memmapped arrays are created.
333
+ mmap_mode: 'r', 'r+' or 'c'
334
+ Mode for the created memmap datastructure. See the documentation of
335
+ numpy.memmap for more details. Note: 'w+' is coerced to 'r+'
336
+ automatically to avoid zeroing the data on unpickling.
337
+ verbose: int, optional, 0 by default
338
+ If verbose > 0, memmap creations are logged.
339
+ If verbose > 1, both memmap creations, reuse and array pickling are
340
+ logged.
341
+ prewarm: bool, optional, False by default.
342
+ Force a read on newly memmapped array to make sure that OS pre-cache it
343
+ memory. This can be useful to avoid concurrent disk access when the
344
+ same data array is passed to different worker processes.
345
+ """
346
+
347
+ def __init__(self, max_nbytes, temp_folder_resolver, mmap_mode,
348
+ unlink_on_gc_collect, verbose=0, prewarm=True):
349
+ self._max_nbytes = max_nbytes
350
+ self._temp_folder_resolver = temp_folder_resolver
351
+ self._mmap_mode = mmap_mode
352
+ self.verbose = int(verbose)
353
+ if prewarm == "auto":
354
+ self._prewarm = not self._temp_folder.startswith(
355
+ SYSTEM_SHARED_MEM_FS
356
+ )
357
+ else:
358
+ self._prewarm = prewarm
359
+ self._prewarm = prewarm
360
+ self._memmaped_arrays = _WeakArrayKeyMap()
361
+ self._temporary_memmaped_filenames = set()
362
+ self._unlink_on_gc_collect = unlink_on_gc_collect
363
+
364
+ @property
365
+ def _temp_folder(self):
366
+ return self._temp_folder_resolver()
367
+
368
+ def __reduce__(self):
369
+ # The ArrayMemmapForwardReducer is passed to the children processes: it
370
+ # needs to be pickled but the _WeakArrayKeyMap need to be skipped as
371
+ # it's only guaranteed to be consistent with the parent process memory
372
+ # garbage collection.
373
+ # Although this reducer is pickled, it is not needed in its destination
374
+ # process (child processes), as we only use this reducer to send
375
+ # memmaps from the parent process to the children processes. For this
376
+ # reason, we can afford skipping the resolver, (which would otherwise
377
+ # be unpicklable), and pass it as None instead.
378
+ args = (self._max_nbytes, None, self._mmap_mode,
379
+ self._unlink_on_gc_collect)
380
+ kwargs = {
381
+ 'verbose': self.verbose,
382
+ 'prewarm': self._prewarm,
383
+ }
384
+ return ArrayMemmapForwardReducer, args, kwargs
385
+
386
+ def __call__(self, a):
387
+ m = _get_backing_memmap(a)
388
+ if m is not None and isinstance(m, np.memmap):
389
+ # a is already backed by a memmap file, let's reuse it directly
390
+ return _reduce_memmap_backed(a, m)
391
+
392
+ if (not a.dtype.hasobject and self._max_nbytes is not None and
393
+ a.nbytes > self._max_nbytes):
394
+ # check that the folder exists (lazily create the pool temp folder
395
+ # if required)
396
+ try:
397
+ os.makedirs(self._temp_folder)
398
+ os.chmod(self._temp_folder, FOLDER_PERMISSIONS)
399
+ except OSError as e:
400
+ if e.errno != errno.EEXIST:
401
+ raise e
402
+
403
+ try:
404
+ basename = self._memmaped_arrays.get(a)
405
+ except KeyError:
406
+ # Generate a new unique random filename. The process and thread
407
+ # ids are only useful for debugging purpose and to make it
408
+ # easier to cleanup orphaned files in case of hard process
409
+ # kill (e.g. by "kill -9" or segfault).
410
+ basename = "{}-{}-{}.pkl".format(
411
+ os.getpid(), id(threading.current_thread()), uuid4().hex)
412
+ self._memmaped_arrays.set(a, basename)
413
+ filename = os.path.join(self._temp_folder, basename)
414
+
415
+ # In case the same array with the same content is passed several
416
+ # times to the pool subprocess children, serialize it only once
417
+
418
+ is_new_memmap = filename not in self._temporary_memmaped_filenames
419
+
420
+ # add the memmap to the list of temporary memmaps created by joblib
421
+ self._temporary_memmaped_filenames.add(filename)
422
+
423
+ if self._unlink_on_gc_collect:
424
+ # Bump reference count of the memmap by 1 to account for
425
+ # shared usage of the memmap by a child process. The
426
+ # corresponding decref call will be executed upon calling
427
+ # resource_tracker.maybe_unlink, registered as a finalizer in
428
+ # the child.
429
+ # the incref/decref calls here are only possible when the child
430
+ # and the parent share the same resource_tracker. It is not the
431
+ # case for the multiprocessing backend, but it does not matter
432
+ # because unlinking a memmap from a child process is only
433
+ # useful to control the memory usage of long-lasting child
434
+ # processes, while the multiprocessing-based pools terminate
435
+ # their workers at the end of a map() call.
436
+ resource_tracker.register(filename, "file")
437
+
438
+ if is_new_memmap:
439
+ # Incref each temporary memmap created by joblib one extra
440
+ # time. This means that these memmaps will only be deleted
441
+ # once an extra maybe_unlink() is called, which is done once
442
+ # all the jobs have completed (or been canceled) in the
443
+ # Parallel._terminate_backend() method.
444
+ resource_tracker.register(filename, "file")
445
+
446
+ if not os.path.exists(filename):
447
+ util.debug(
448
+ "[ARRAY DUMP] Pickling new array (shape={}, dtype={}) "
449
+ "creating a new memmap at {}".format(
450
+ a.shape, a.dtype, filename))
451
+ for dumped_filename in dump(a, filename):
452
+ os.chmod(dumped_filename, FILE_PERMISSIONS)
453
+
454
+ if self._prewarm:
455
+ # Warm up the data by accessing it. This operation ensures
456
+ # that the disk access required to create the memmapping
457
+ # file are performed in the reducing process and avoids
458
+ # concurrent memmap creation in multiple children
459
+ # processes.
460
+ load(filename, mmap_mode=self._mmap_mode).max()
461
+
462
+ else:
463
+ util.debug(
464
+ "[ARRAY DUMP] Pickling known array (shape={}, dtype={}) "
465
+ "reusing memmap file: {}".format(
466
+ a.shape, a.dtype, os.path.basename(filename)))
467
+
468
+ # The worker process will use joblib.load to memmap the data
469
+ return (
470
+ (load_temporary_memmap, (filename, self._mmap_mode,
471
+ self._unlink_on_gc_collect))
472
+ )
473
+ else:
474
+ # do not convert a into memmap, let pickler do its usual copy with
475
+ # the default system pickler
476
+ util.debug(
477
+ '[ARRAY DUMP] Pickling array (NO MEMMAPPING) (shape={}, '
478
+ ' dtype={}).'.format(a.shape, a.dtype))
479
+ return (loads, (dumps(a, protocol=HIGHEST_PROTOCOL),))
480
+
481
+
482
+ def get_memmapping_reducers(
483
+ forward_reducers=None, backward_reducers=None,
484
+ temp_folder_resolver=None, max_nbytes=1e6, mmap_mode='r', verbose=0,
485
+ prewarm=False, unlink_on_gc_collect=True, **kwargs):
486
+ """Construct a pair of memmapping reducer linked to a tmpdir.
487
+
488
+ This function manage the creation and the clean up of the temporary folders
489
+ underlying the memory maps and should be use to get the reducers necessary
490
+ to construct joblib pool or executor.
491
+ """
492
+ if forward_reducers is None:
493
+ forward_reducers = dict()
494
+ if backward_reducers is None:
495
+ backward_reducers = dict()
496
+
497
+ if np is not None:
498
+ # Register smart numpy.ndarray reducers that detects memmap backed
499
+ # arrays and that is also able to dump to memmap large in-memory
500
+ # arrays over the max_nbytes threshold
501
+ forward_reduce_ndarray = ArrayMemmapForwardReducer(
502
+ max_nbytes, temp_folder_resolver, mmap_mode, unlink_on_gc_collect,
503
+ verbose, prewarm=prewarm)
504
+ forward_reducers[np.ndarray] = forward_reduce_ndarray
505
+ forward_reducers[np.memmap] = forward_reduce_ndarray
506
+
507
+ # Communication from child process to the parent process always
508
+ # pickles in-memory numpy.ndarray without dumping them as memmap
509
+ # to avoid confusing the caller and make it tricky to collect the
510
+ # temporary folder
511
+ backward_reducers[np.ndarray] = reduce_array_memmap_backward
512
+ backward_reducers[np.memmap] = reduce_array_memmap_backward
513
+
514
+ return forward_reducers, backward_reducers
515
+
516
+
517
+ class TemporaryResourcesManager(object):
518
+ """Stateful object able to manage temporary folder and pickles
519
+
520
+ It exposes:
521
+ - a per-context folder name resolving API that memmap-based reducers will
522
+ rely on to know where to pickle the temporary memmaps
523
+ - a temporary file/folder management API that internally uses the
524
+ resource_tracker.
525
+ """
526
+
527
+ def __init__(self, temp_folder_root=None, context_id=None):
528
+ self._current_temp_folder = None
529
+ self._temp_folder_root = temp_folder_root
530
+ self._use_shared_mem = None
531
+ self._cached_temp_folders = dict()
532
+ self._id = uuid4().hex
533
+ self._finalizers = {}
534
+ if context_id is None:
535
+ # It would be safer to not assign a default context id (less silent
536
+ # bugs), but doing this while maintaining backward compatibility
537
+ # with the previous, context-unaware version get_memmaping_executor
538
+ # exposes too many low-level details.
539
+ context_id = uuid4().hex
540
+ self.set_current_context(context_id)
541
+
542
+ def set_current_context(self, context_id):
543
+ self._current_context_id = context_id
544
+ self.register_new_context(context_id)
545
+
546
+ def register_new_context(self, context_id):
547
+ # Prepare a sub-folder name specific to a context (usually a unique id
548
+ # generated by each instance of the Parallel class). Do not create in
549
+ # advance to spare FS write access if no array is to be dumped).
550
+ if context_id in self._cached_temp_folders:
551
+ return
552
+ else:
553
+ # During its lifecycle, one Parallel object can have several
554
+ # executors associated to it (for instance, if a loky worker raises
555
+ # an exception, joblib shutdowns the executor and instantly
556
+ # recreates a new one before raising the error - see
557
+ # ``ensure_ready``. Because we don't want two executors tied to
558
+ # the same Parallel object (and thus the same context id) to
559
+ # register/use/delete the same folder, we also add an id specific
560
+ # to the current Manager (and thus specific to its associated
561
+ # executor) to the folder name.
562
+ new_folder_name = (
563
+ "joblib_memmapping_folder_{}_{}_{}".format(
564
+ os.getpid(), self._id, context_id)
565
+ )
566
+ new_folder_path, _ = _get_temp_dir(
567
+ new_folder_name, self._temp_folder_root
568
+ )
569
+ self.register_folder_finalizer(new_folder_path, context_id)
570
+ self._cached_temp_folders[context_id] = new_folder_path
571
+
572
+ def resolve_temp_folder_name(self):
573
+ """Return a folder name specific to the currently activated context"""
574
+ return self._cached_temp_folders[self._current_context_id]
575
+
576
+ # resource management API
577
+
578
+ def register_folder_finalizer(self, pool_subfolder, context_id):
579
+ # Register the garbage collector at program exit in case caller forgets
580
+ # to call terminate explicitly: note we do not pass any reference to
581
+ # ensure that this callback won't prevent garbage collection of
582
+ # parallel instance and related file handler resources such as POSIX
583
+ # semaphores and pipes
584
+ pool_module_name = whichmodule(delete_folder, 'delete_folder')
585
+ resource_tracker.register(pool_subfolder, "folder")
586
+
587
+ def _cleanup():
588
+ # In some cases the Python runtime seems to set delete_folder to
589
+ # None just before exiting when accessing the delete_folder
590
+ # function from the closure namespace. So instead we reimport
591
+ # the delete_folder function explicitly.
592
+ # https://github.com/joblib/joblib/issues/328
593
+ # We cannot just use from 'joblib.pool import delete_folder'
594
+ # because joblib should only use relative imports to allow
595
+ # easy vendoring.
596
+ delete_folder = __import__(
597
+ pool_module_name, fromlist=['delete_folder']
598
+ ).delete_folder
599
+ try:
600
+ delete_folder(pool_subfolder, allow_non_empty=True)
601
+ resource_tracker.unregister(pool_subfolder, "folder")
602
+ except OSError:
603
+ warnings.warn("Failed to delete temporary folder: {}"
604
+ .format(pool_subfolder))
605
+
606
+ self._finalizers[context_id] = atexit.register(_cleanup)
607
+
608
+ def _clean_temporary_resources(self, context_id=None, force=False,
609
+ allow_non_empty=False):
610
+ """Clean temporary resources created by a process-based pool"""
611
+ if context_id is None:
612
+ # Iterates over a copy of the cache keys to avoid Error due to
613
+ # iterating over a changing size dictionary.
614
+ for context_id in list(self._cached_temp_folders):
615
+ self._clean_temporary_resources(
616
+ context_id, force=force, allow_non_empty=allow_non_empty
617
+ )
618
+ else:
619
+ temp_folder = self._cached_temp_folders.get(context_id)
620
+ if temp_folder and os.path.exists(temp_folder):
621
+ for filename in os.listdir(temp_folder):
622
+ if force:
623
+ # Some workers have failed and the ref counted might
624
+ # be off. The workers should have shut down by this
625
+ # time so forcefully clean up the files.
626
+ resource_tracker.unregister(
627
+ os.path.join(temp_folder, filename), "file"
628
+ )
629
+ else:
630
+ resource_tracker.maybe_unlink(
631
+ os.path.join(temp_folder, filename), "file"
632
+ )
633
+
634
+ # When forcing clean-up, try to delete the folder even if some
635
+ # files are still in it. Otherwise, try to delete the folder
636
+ allow_non_empty |= force
637
+
638
+ # Clean up the folder if possible, either if it is empty or
639
+ # if none of the files in it are in used and allow_non_empty.
640
+ try:
641
+ delete_folder(
642
+ temp_folder, allow_non_empty=allow_non_empty
643
+ )
644
+ # Forget the folder once it has been deleted
645
+ self._cached_temp_folders.pop(context_id, None)
646
+ resource_tracker.unregister(temp_folder, "folder")
647
+
648
+ # Also cancel the finalizers that gets triggered at gc.
649
+ finalizer = self._finalizers.pop(context_id, None)
650
+ if finalizer is not None:
651
+ atexit.unregister(finalizer)
652
+
653
+ except OSError:
654
+ # Temporary folder cannot be deleted right now.
655
+ # This folder will be cleaned up by an atexit
656
+ # finalizer registered by the memmapping_reducer.
657
+ pass
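
For context, a minimal sketch of the user-facing knobs that trigger the memmapping reducers defined above when a process-based backend is used. The array size, max_nbytes value, and temp_folder path are illustrative assumptions.

    import numpy as np

    from joblib import Parallel, delayed


    def row_mean(data, i):
        # With the reducers active, 'data' arrives here as a read-only
        # np.memmap backed by a temporary file rather than as a pickled copy.
        return data[i].mean()


    if __name__ == "__main__":
        big = np.random.rand(2000, 1000)  # ~16 MB, above the 1 MB threshold below

        means = Parallel(n_jobs=2, max_nbytes="1M", temp_folder="/tmp")(
            delayed(row_mean)(big, i) for i in range(10)
        )
        print(len(means))  # 10
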
env-llmeval/lib/python3.10/site-packages/joblib/_multiprocessing_helpers.py ADDED
@@ -0,0 +1,53 @@
+ """Helper module to factorize the conditional multiprocessing import logic
+
+ We use a distinct module to simplify import statements and avoid introducing
+ circular dependencies (for instance for the assert_spawning name).
+ """
+ import os
+ import warnings
+
+
+ # Obtain possible configuration from the environment, assuming 1 (on)
+ # by default, upon 0 set to None. Should instructively fail if some non
+ # 0/1 value is set.
+ mp = int(os.environ.get('JOBLIB_MULTIPROCESSING', 1)) or None
+ if mp:
+     try:
+         import multiprocessing as mp
+         import _multiprocessing  # noqa
+     except ImportError:
+         mp = None
+
+ # 2nd stage: validate that locking is available on the system and
+ # issue a warning if not
+ if mp is not None:
+     try:
+         # try to create a named semaphore using SemLock to make sure they are
+         # available on this platform. We use the low level object
+         # _multiprocessing.SemLock to avoid spawning a resource tracker on
+         # Unix system or changing the default backend.
+         import tempfile
+         from _multiprocessing import SemLock
+
+         _rand = tempfile._RandomNameSequence()
+         for i in range(100):
+             try:
+                 name = '/joblib-{}-{}' .format(
+                     os.getpid(), next(_rand))
+                 _sem = SemLock(0, 0, 1, name=name, unlink=True)
+                 del _sem  # cleanup
+                 break
+             except FileExistsError as e:  # pragma: no cover
+                 if i >= 99:
+                     raise FileExistsError(
+                         'cannot find name for semaphore') from e
+     except (FileExistsError, AttributeError, ImportError, OSError) as e:
+         mp = None
+         warnings.warn('%s. joblib will operate in serial mode' % (e,))
+
+
+ # 3rd stage: backward compat for the assert_spawning helper
+ if mp is not None:
+     from multiprocessing.context import assert_spawning
+ else:
+     assert_spawning = None
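
For context, a minimal sketch of the serial fallback controlled by this helper module. Forcing JOBLIB_MULTIPROCESSING=0 before importing joblib is an illustrative use, not something done in this commit.

    import os

    # Setting the variable before joblib is imported forces the serial
    # fallback: the module above then leaves 'mp' as None instead of
    # importing multiprocessing.
    os.environ["JOBLIB_MULTIPROCESSING"] = "0"

    from joblib._multiprocessing_helpers import mp

    if mp is None:
        print("joblib will operate in serial mode")
    else:
        print("multiprocessing available on", mp.cpu_count(), "CPUs")
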
env-llmeval/lib/python3.10/site-packages/joblib/_parallel_backends.py ADDED
@@ -0,0 +1,649 @@
1
+ """
2
+ Backends for embarrassingly parallel code.
3
+ """
4
+
5
+ import gc
6
+ import os
7
+ import warnings
8
+ import threading
9
+ import contextlib
10
+ from abc import ABCMeta, abstractmethod
11
+
12
+ from ._utils import (
13
+ _TracebackCapturingWrapper,
14
+ _retrieve_traceback_capturing_wrapped_call
15
+ )
16
+
17
+ from ._multiprocessing_helpers import mp
18
+
19
+ if mp is not None:
20
+ from .pool import MemmappingPool
21
+ from multiprocessing.pool import ThreadPool
22
+ from .executor import get_memmapping_executor
23
+
24
+ # Import loky only if multiprocessing is present
25
+ from .externals.loky import process_executor, cpu_count
26
+ from .externals.loky.process_executor import ShutdownExecutorError
27
+
28
+
29
+ class ParallelBackendBase(metaclass=ABCMeta):
30
+ """Helper abc which defines all methods a ParallelBackend must implement"""
31
+
32
+ supports_inner_max_num_threads = False
33
+ supports_retrieve_callback = False
34
+ default_n_jobs = 1
35
+
36
+ @property
37
+ def supports_return_generator(self):
38
+ return self.supports_retrieve_callback
39
+
40
+ @property
41
+ def supports_timeout(self):
42
+ return self.supports_retrieve_callback
43
+
44
+ nesting_level = None
45
+
46
+ def __init__(self, nesting_level=None, inner_max_num_threads=None,
47
+ **kwargs):
48
+ super().__init__(**kwargs)
49
+ self.nesting_level = nesting_level
50
+ self.inner_max_num_threads = inner_max_num_threads
51
+
52
+ MAX_NUM_THREADS_VARS = [
53
+ 'OMP_NUM_THREADS', 'OPENBLAS_NUM_THREADS', 'MKL_NUM_THREADS',
54
+ 'BLIS_NUM_THREADS', 'VECLIB_MAXIMUM_THREADS', 'NUMBA_NUM_THREADS',
55
+ 'NUMEXPR_NUM_THREADS',
56
+ ]
57
+
58
+ TBB_ENABLE_IPC_VAR = "ENABLE_IPC"
59
+
60
+ @abstractmethod
61
+ def effective_n_jobs(self, n_jobs):
62
+ """Determine the number of jobs that can actually run in parallel
63
+
64
+ n_jobs is the number of workers requested by the callers. Passing
65
+ n_jobs=-1 means requesting all available workers for instance matching
66
+ the number of CPU cores on the worker host(s).
67
+
68
+ This method should return a guesstimate of the number of workers that
69
+ can actually perform work concurrently. The primary use case is to make
70
+ it possible for the caller to know in how many chunks to slice the
71
+ work.
72
+
73
+ In general working on larger data chunks is more efficient (less
74
+ scheduling overhead and better use of CPU cache prefetching heuristics)
75
+ as long as all the workers have enough work to do.
76
+ """
77
+
78
+ @abstractmethod
79
+ def apply_async(self, func, callback=None):
80
+ """Schedule a func to be run"""
81
+
82
+ def retrieve_result_callback(self, out):
83
+ """Called within the callback function passed in apply_async.
84
+
85
+ The argument of this function is the argument given to a callback in
86
+ the considered backend. It is supposed to return the outcome of a task
87
+ if it succeeded or raise the exception if it failed.
88
+ """
89
+
90
+ def configure(self, n_jobs=1, parallel=None, prefer=None, require=None,
91
+ **backend_args):
92
+ """Reconfigure the backend and return the number of workers.
93
+
94
+ This makes it possible to reuse an existing backend instance for
95
+ successive independent calls to Parallel with different parameters.
96
+ """
97
+ self.parallel = parallel
98
+ return self.effective_n_jobs(n_jobs)
99
+
100
+ def start_call(self):
101
+ """Call-back method called at the beginning of a Parallel call"""
102
+
103
+ def stop_call(self):
104
+ """Call-back method called at the end of a Parallel call"""
105
+
106
+ def terminate(self):
107
+ """Shutdown the workers and free the shared memory."""
108
+
109
+ def compute_batch_size(self):
110
+ """Determine the optimal batch size"""
111
+ return 1
112
+
113
+ def batch_completed(self, batch_size, duration):
114
+ """Callback indicate how long it took to run a batch"""
115
+
116
+ def get_exceptions(self):
117
+ """List of exception types to be captured."""
118
+ return []
119
+
120
+ def abort_everything(self, ensure_ready=True):
121
+ """Abort any running tasks
122
+
123
+ This is called when an exception has been raised when executing a task
124
+ and all the remaining tasks will be ignored and can therefore be
125
+ aborted to spare computation resources.
126
+
127
+ If ensure_ready is True, the backend should be left in an operating
128
+ state as future tasks might be re-submitted via that same backend
129
+ instance.
130
+
131
+ If ensure_ready is False, the implementer of this method can decide
132
+ to leave the backend in a closed / terminated state as no new tasks
+ are expected to be submitted to this backend.
134
+
135
+ Setting ensure_ready to False is an optimization that can be leveraged
136
+ when aborting tasks via killing processes from a local process pool
137
+ managed by the backend itself: if we expect no new tasks, there is no
138
+ point in re-creating new workers.
139
+ """
140
+ # Does nothing by default: to be overridden in subclasses when
141
+ # canceling tasks is possible.
142
+ pass
143
+
144
+ def get_nested_backend(self):
145
+ """Backend instance to be used by nested Parallel calls.
146
+
147
+ By default a thread-based backend is used for the first level of
+ nesting. Beyond that, switch to the sequential backend to avoid
+ spawning too many threads on the host.
150
+ """
151
+ nesting_level = getattr(self, 'nesting_level', 0) + 1
152
+ if nesting_level > 1:
153
+ return SequentialBackend(nesting_level=nesting_level), None
154
+ else:
155
+ return ThreadingBackend(nesting_level=nesting_level), None
156
+
157
+ @contextlib.contextmanager
158
+ def retrieval_context(self):
159
+ """Context manager to manage an execution context.
160
+
161
+ Calls to Parallel.retrieve will be made inside this context.
162
+
163
+ By default, this does nothing. It may be useful for subclasses to
164
+ handle nested parallelism. In particular, it may be required to avoid
165
+ deadlocks if a backend manages a fixed number of workers, when those
166
+ workers may be asked to do nested Parallel calls. Without
167
+ 'retrieval_context' this could lead to deadlock, as all the workers
168
+ managed by the backend may be "busy" waiting for the nested parallel
169
+ calls to finish, but the backend has no free workers to execute those
170
+ tasks.
171
+ """
172
+ yield
173
+
174
+ def _prepare_worker_env(self, n_jobs):
175
+ """Return environment variables limiting threadpools in external libs.
176
+
177
+ This function returns a dict containing environment variables to pass
+ when creating a pool of processes. These environment variables limit the
+ number of threads to `n_threads` for OpenMP, MKL, Accelerate and
180
+ OpenBLAS libraries in the child processes.
181
+ """
182
+ explicit_n_threads = self.inner_max_num_threads
183
+ default_n_threads = max(cpu_count() // n_jobs, 1)
184
+
185
+ # Set the inner environment variables to self.inner_max_num_threads if
186
+ # it is given. Else, default to cpu_count // n_jobs unless the variable
187
+ # is already present in the parent process environment.
188
+ env = {}
189
+ for var in self.MAX_NUM_THREADS_VARS:
190
+ if explicit_n_threads is None:
191
+ var_value = os.environ.get(var, default_n_threads)
192
+ else:
193
+ var_value = explicit_n_threads
194
+
195
+ env[var] = str(var_value)
196
+
197
+ if self.TBB_ENABLE_IPC_VAR not in os.environ:
198
+ # To avoid over-subscription when using TBB, let the TBB schedulers
199
+ # use Inter Process Communication to coordinate:
200
+ env[self.TBB_ENABLE_IPC_VAR] = "1"
201
+ return env
202
+
203
+ @staticmethod
204
+ def in_main_thread():
205
+ return isinstance(threading.current_thread(), threading._MainThread)
206
+
207
+
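ParallelBackendBase is the extension point for custom backends. A minimal sketch of wiring a subclass into joblib through its public registration API; the `'verbose_threading'` name and the logging subclass are purely illustrative:

from joblib import (Parallel, delayed, parallel_backend,
                    register_parallel_backend)
from joblib._parallel_backends import ThreadingBackend

class VerboseThreadingBackend(ThreadingBackend):
    # Illustrative subclass: report each configuration call, otherwise
    # behave exactly like the built-in threading backend.
    def configure(self, n_jobs=1, parallel=None, **backend_args):
        print(f"configuring with n_jobs={n_jobs}")
        return super().configure(n_jobs=n_jobs, parallel=parallel,
                                 **backend_args)

register_parallel_backend('verbose_threading', VerboseThreadingBackend)

with parallel_backend('verbose_threading', n_jobs=2):
    squares = Parallel()(delayed(pow)(i, 2) for i in range(8))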
208
+ class SequentialBackend(ParallelBackendBase):
209
+ """A ParallelBackend which will execute all batches sequentially.
210
+
211
+ Does not use/create any threading objects, and hence has minimal
212
+ overhead. Used when n_jobs == 1.
213
+ """
214
+
215
+ uses_threads = True
216
+ supports_timeout = False
217
+ supports_retrieve_callback = False
218
+ supports_sharedmem = True
219
+
220
+ def effective_n_jobs(self, n_jobs):
221
+ """Determine the number of jobs which are going to run in parallel"""
222
+ if n_jobs == 0:
223
+ raise ValueError('n_jobs == 0 in Parallel has no meaning')
224
+ return 1
225
+
226
+ def apply_async(self, func, callback=None):
227
+ """Schedule a func to be run"""
228
+ raise RuntimeError("Should never be called for SequentialBackend.")
229
+
230
+ def retrieve_result_callback(self, out):
231
+ raise RuntimeError("Should never be called for SequentialBackend.")
232
+
233
+ def get_nested_backend(self):
234
+ # import is not top level to avoid cyclic import errors.
235
+ from .parallel import get_active_backend
236
+
237
+ # SequentialBackend should not change the nesting level, the default
+ # backend, or the number of jobs. Just return the current one.
239
+ return get_active_backend()
240
+
241
+
242
+ class PoolManagerMixin(object):
243
+ """A helper class for managing pool of workers."""
244
+
245
+ _pool = None
246
+
247
+ def effective_n_jobs(self, n_jobs):
248
+ """Determine the number of jobs which are going to run in parallel"""
249
+ if n_jobs == 0:
250
+ raise ValueError('n_jobs == 0 in Parallel has no meaning')
251
+ elif mp is None or n_jobs is None:
252
+ # multiprocessing is not available or disabled, fallback
253
+ # to sequential mode
254
+ return 1
255
+ elif n_jobs < 0:
256
+ n_jobs = max(cpu_count() + 1 + n_jobs, 1)
257
+ return n_jobs
258
+
259
+ def terminate(self):
260
+ """Shutdown the process or thread pool"""
261
+ if self._pool is not None:
262
+ self._pool.close()
263
+ self._pool.terminate() # terminate does a join()
264
+ self._pool = None
265
+
266
+ def _get_pool(self):
267
+ """Used by apply_async to make it possible to implement lazy init"""
268
+ return self._pool
269
+
270
+ def apply_async(self, func, callback=None):
271
+ """Schedule a func to be run"""
272
+ # Here, we need a wrapper to avoid crashes on KeyboardInterrupt errors.
273
+ # We also call the callback on error, to make sure the pool does not
274
+ # wait on crashed jobs.
275
+ return self._get_pool().apply_async(
276
+ _TracebackCapturingWrapper(func), (),
277
+ callback=callback, error_callback=callback
278
+ )
279
+
280
+ def retrieve_result_callback(self, out):
281
+ """Mimic concurrent.futures results, raising an error if needed."""
282
+ return _retrieve_traceback_capturing_wrapped_call(out)
283
+
284
+ def abort_everything(self, ensure_ready=True):
285
+ """Shutdown the pool and restart a new one with the same parameters"""
286
+ self.terminate()
287
+ if ensure_ready:
288
+ self.configure(n_jobs=self.parallel.n_jobs, parallel=self.parallel,
289
+ **self.parallel._backend_args)
290
+
291
+
292
+ class AutoBatchingMixin(object):
293
+ """A helper class for automagically batching jobs."""
294
+
295
+ # In seconds, should be big enough to hide multiprocessing dispatching
296
+ # overhead.
297
+ # This setting was found by running benchmarks/bench_auto_batching.py
298
+ # with various parameters on various platforms.
299
+ MIN_IDEAL_BATCH_DURATION = .2
300
+
301
+ # Should not be too high to avoid stragglers: long jobs running alone
302
+ # on a single worker while other workers have no work to process any more.
303
+ MAX_IDEAL_BATCH_DURATION = 2
304
+
305
+ # Batching counters default values
306
+ _DEFAULT_EFFECTIVE_BATCH_SIZE = 1
307
+ _DEFAULT_SMOOTHED_BATCH_DURATION = 0.0
308
+
309
+ def __init__(self, **kwargs):
310
+ super().__init__(**kwargs)
311
+ self._effective_batch_size = self._DEFAULT_EFFECTIVE_BATCH_SIZE
312
+ self._smoothed_batch_duration = self._DEFAULT_SMOOTHED_BATCH_DURATION
313
+
314
+ def compute_batch_size(self):
315
+ """Determine the optimal batch size"""
316
+ old_batch_size = self._effective_batch_size
317
+ batch_duration = self._smoothed_batch_duration
318
+ if (batch_duration > 0 and
319
+ batch_duration < self.MIN_IDEAL_BATCH_DURATION):
320
+ # The current batch size is too small: the duration of the
321
+ # processing of a batch of task is not large enough to hide
322
+ # the scheduling overhead.
323
+ ideal_batch_size = int(old_batch_size *
324
+ self.MIN_IDEAL_BATCH_DURATION /
325
+ batch_duration)
326
+ # Multiply by two to limit oscillations between min and max.
327
+ ideal_batch_size *= 2
328
+
329
+ # don't increase the batch size too fast, to limit huge batch sizes
+ # potentially leading to starving workers
331
+ batch_size = min(2 * old_batch_size, ideal_batch_size)
332
+
333
+ batch_size = max(batch_size, 1)
334
+
335
+ self._effective_batch_size = batch_size
336
+ if self.parallel.verbose >= 10:
337
+ self.parallel._print(
338
+ f"Batch computation too fast ({batch_duration}s.) "
339
+ f"Setting batch_size={batch_size}."
340
+ )
341
+ elif (batch_duration > self.MAX_IDEAL_BATCH_DURATION and
342
+ old_batch_size >= 2):
343
+ # The current batch size is too big. If we schedule overly long
344
+ # running batches, some CPUs might wait with nothing left to do
+ # while a couple of CPUs are left processing a few long running
346
+ # batches. Better reduce the batch size a bit to limit the
347
+ # likelihood of scheduling such stragglers.
348
+
349
+ # decrease the batch size quickly to limit potential starving
350
+ ideal_batch_size = int(
351
+ old_batch_size * self.MIN_IDEAL_BATCH_DURATION / batch_duration
352
+ )
353
+ # Multiply by two to limit oscillations between min and max.
354
+ batch_size = max(2 * ideal_batch_size, 1)
355
+ self._effective_batch_size = batch_size
356
+ if self.parallel.verbose >= 10:
357
+ self.parallel._print(
358
+ f"Batch computation too slow ({batch_duration}s.) "
359
+ f"Setting batch_size={batch_size}."
360
+ )
361
+ else:
362
+ # No batch size adjustment
363
+ batch_size = old_batch_size
364
+
365
+ if batch_size != old_batch_size:
366
+ # Reset estimation of the smoothed mean batch duration: this
367
+ # estimate is updated in the multiprocessing apply_async
368
+ # CallBack as long as the batch_size is constant. Therefore
369
+ # we need to reset the estimate whenever we re-tune the batch
370
+ # size.
371
+ self._smoothed_batch_duration = \
372
+ self._DEFAULT_SMOOTHED_BATCH_DURATION
373
+
374
+ return batch_size
375
+
376
+ def batch_completed(self, batch_size, duration):
377
+ """Callback indicate how long it took to run a batch"""
378
+ if batch_size == self._effective_batch_size:
379
+ # Update the smoothed streaming estimate of the duration of a batch
380
+ # from dispatch to completion
381
+ old_duration = self._smoothed_batch_duration
382
+ if old_duration == self._DEFAULT_SMOOTHED_BATCH_DURATION:
383
+ # First record of duration for this batch size after the last
384
+ # reset.
385
+ new_duration = duration
386
+ else:
387
+ # Update the exponentially weighted average of the duration of
388
+ # batch for the current effective size.
389
+ new_duration = 0.8 * old_duration + 0.2 * duration
390
+ self._smoothed_batch_duration = new_duration
391
+
392
+ def reset_batch_stats(self):
393
+ """Reset batch statistics to default values.
394
+
395
+ This avoids interference with future jobs.
396
+ """
397
+ self._effective_batch_size = self._DEFAULT_EFFECTIVE_BATCH_SIZE
398
+ self._smoothed_batch_duration = self._DEFAULT_SMOOTHED_BATCH_DURATION
399
+
400
+
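To make the batch-size heuristic in AutoBatchingMixin concrete, here is the arithmetic of one "too fast" adjustment step; the measured numbers are illustrative:

MIN_IDEAL_BATCH_DURATION = 0.2
old_batch_size, batch_duration = 1, 0.01      # illustrative measurements

# The batch finished well under 0.2 s, so grow towards the ideal size ...
ideal_batch_size = int(old_batch_size * MIN_IDEAL_BATCH_DURATION
                       / batch_duration)      # 20
ideal_batch_size *= 2                         # 40, damps min/max oscillations
# ... but never more than double per step, to avoid starving workers.
batch_size = max(min(2 * old_batch_size, ideal_batch_size), 1)   # 2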
401
+ class ThreadingBackend(PoolManagerMixin, ParallelBackendBase):
402
+ """A ParallelBackend which will use a thread pool to execute batches in.
403
+
404
+ This is a low-overhead backend but it suffers from the Python Global
405
+ Interpreter Lock if the called function relies a lot on Python objects.
406
+ Mostly useful when the execution bottleneck is a compiled extension that
407
+ explicitly releases the GIL (for instance a Cython loop wrapped in a "with
408
+ nogil" block or an expensive call to a library such as NumPy).
409
+
410
+ The thread pool is lazily initialized: its construction is delayed
+ until the first call to apply_async.
412
+
413
+ ThreadingBackend is used as the default backend for nested calls.
414
+ """
415
+
416
+ supports_retrieve_callback = True
417
+ uses_threads = True
418
+ supports_sharedmem = True
419
+
420
+ def configure(self, n_jobs=1, parallel=None, **backend_args):
421
+ """Build a process or thread pool and return the number of workers"""
422
+ n_jobs = self.effective_n_jobs(n_jobs)
423
+ if n_jobs == 1:
424
+ # Avoid unnecessary overhead and use sequential backend instead.
425
+ raise FallbackToBackend(
426
+ SequentialBackend(nesting_level=self.nesting_level))
427
+ self.parallel = parallel
428
+ self._n_jobs = n_jobs
429
+ return n_jobs
430
+
431
+ def _get_pool(self):
432
+ """Lazily initialize the thread pool
433
+
434
+ The actual pool of worker threads is only initialized at the first
435
+ call to apply_async.
436
+ """
437
+ if self._pool is None:
438
+ self._pool = ThreadPool(self._n_jobs)
439
+ return self._pool
440
+
441
+
442
+ class MultiprocessingBackend(PoolManagerMixin, AutoBatchingMixin,
443
+ ParallelBackendBase):
444
+ """A ParallelBackend which will use a multiprocessing.Pool.
445
+
446
+ Will introduce some communication and memory overhead when exchanging
447
+ input and output data with the worker Python processes.
448
+ However, does not suffer from the Python Global Interpreter Lock.
449
+ """
450
+
451
+ supports_retrieve_callback = True
452
+ supports_return_generator = False
453
+
454
+ def effective_n_jobs(self, n_jobs):
455
+ """Determine the number of jobs which are going to run in parallel.
456
+
457
+ This also checks if we are attempting to create a nested parallel
458
+ loop.
459
+ """
460
+ if mp is None:
461
+ return 1
462
+
463
+ if mp.current_process().daemon:
464
+ # Daemonic processes cannot have children
465
+ if n_jobs != 1:
466
+ if inside_dask_worker():
467
+ msg = (
468
+ "Inside a Dask worker with daemon=True, "
469
+ "setting n_jobs=1.\nPossible work-arounds:\n"
470
+ "- dask.config.set("
471
+ "{'distributed.worker.daemon': False})"
472
+ "- set the environment variable "
473
+ "DASK_DISTRIBUTED__WORKER__DAEMON=False\n"
474
+ "before creating your Dask cluster."
475
+ )
476
+ else:
477
+ msg = (
478
+ 'Multiprocessing-backed parallel loops '
479
+ 'cannot be nested, setting n_jobs=1'
480
+ )
481
+ warnings.warn(msg, stacklevel=3)
482
+ return 1
483
+
484
+ if process_executor._CURRENT_DEPTH > 0:
485
+ # Mixing loky and multiprocessing in nested loop is not supported
486
+ if n_jobs != 1:
487
+ warnings.warn(
488
+ 'Multiprocessing-backed parallel loops cannot be nested,'
489
+ ' below loky, setting n_jobs=1',
490
+ stacklevel=3)
491
+ return 1
492
+
493
+ elif not (self.in_main_thread() or self.nesting_level == 0):
494
+ # Prevent posix fork inside non-main posix threads
495
+ if n_jobs != 1:
496
+ warnings.warn(
497
+ 'Multiprocessing-backed parallel loops cannot be nested'
498
+ ' below threads, setting n_jobs=1',
499
+ stacklevel=3)
500
+ return 1
501
+
502
+ return super(MultiprocessingBackend, self).effective_n_jobs(n_jobs)
503
+
504
+ def configure(self, n_jobs=1, parallel=None, prefer=None, require=None,
505
+ **memmappingpool_args):
506
+ """Build a process or thread pool and return the number of workers"""
507
+ n_jobs = self.effective_n_jobs(n_jobs)
508
+ if n_jobs == 1:
509
+ raise FallbackToBackend(
510
+ SequentialBackend(nesting_level=self.nesting_level))
511
+
512
+ # Make sure to free as much memory as possible before forking
513
+ gc.collect()
514
+ self._pool = MemmappingPool(n_jobs, **memmappingpool_args)
515
+ self.parallel = parallel
516
+ return n_jobs
517
+
518
+ def terminate(self):
519
+ """Shutdown the process or thread pool"""
520
+ super(MultiprocessingBackend, self).terminate()
521
+ self.reset_batch_stats()
522
+
523
+
524
+ class LokyBackend(AutoBatchingMixin, ParallelBackendBase):
525
+ """Managing pool of workers with loky instead of multiprocessing."""
526
+
527
+ supports_retrieve_callback = True
528
+ supports_inner_max_num_threads = True
529
+
530
+ def configure(self, n_jobs=1, parallel=None, prefer=None, require=None,
531
+ idle_worker_timeout=300, **memmappingexecutor_args):
532
+ """Build a process executor and return the number of workers"""
533
+ n_jobs = self.effective_n_jobs(n_jobs)
534
+ if n_jobs == 1:
535
+ raise FallbackToBackend(
536
+ SequentialBackend(nesting_level=self.nesting_level))
537
+
538
+ self._workers = get_memmapping_executor(
539
+ n_jobs, timeout=idle_worker_timeout,
540
+ env=self._prepare_worker_env(n_jobs=n_jobs),
541
+ context_id=parallel._id, **memmappingexecutor_args)
542
+ self.parallel = parallel
543
+ return n_jobs
544
+
545
+ def effective_n_jobs(self, n_jobs):
546
+ """Determine the number of jobs which are going to run in parallel"""
547
+ if n_jobs == 0:
548
+ raise ValueError('n_jobs == 0 in Parallel has no meaning')
549
+ elif mp is None or n_jobs is None:
550
+ # multiprocessing is not available or disabled, fallback
551
+ # to sequential mode
552
+ return 1
553
+ elif mp.current_process().daemon:
554
+ # Daemonic processes cannot have children
555
+ if n_jobs != 1:
556
+ if inside_dask_worker():
557
+ msg = (
558
+ "Inside a Dask worker with daemon=True, "
559
+ "setting n_jobs=1.\nPossible work-arounds:\n"
560
+ "- dask.config.set("
561
+ "{'distributed.worker.daemon': False})\n"
562
+ "- set the environment variable "
563
+ "DASK_DISTRIBUTED__WORKER__DAEMON=False\n"
564
+ "before creating your Dask cluster."
565
+ )
566
+ else:
567
+ msg = (
568
+ 'Loky-backed parallel loops cannot be called in a'
+ ' multiprocessing context, setting n_jobs=1'
570
+ )
571
+ warnings.warn(msg, stacklevel=3)
572
+
573
+ return 1
574
+ elif not (self.in_main_thread() or self.nesting_level == 0):
575
+ # Prevent posix fork inside non-main posix threads
576
+ if n_jobs != 1:
577
+ warnings.warn(
578
+ 'Loky-backed parallel loops cannot be nested below '
579
+ 'threads, setting n_jobs=1',
580
+ stacklevel=3)
581
+ return 1
582
+ elif n_jobs < 0:
583
+ n_jobs = max(cpu_count() + 1 + n_jobs, 1)
584
+ return n_jobs
585
+
586
+ def apply_async(self, func, callback=None):
587
+ """Schedule a func to be run"""
588
+ future = self._workers.submit(func)
589
+ if callback is not None:
590
+ future.add_done_callback(callback)
591
+ return future
592
+
593
+ def retrieve_result_callback(self, out):
594
+ try:
595
+ return out.result()
596
+ except ShutdownExecutorError:
597
+ raise RuntimeError(
598
+ "The executor underlying Parallel has been shutdown. "
599
+ "This is likely due to the garbage collection of a previous "
600
+ "generator from a call to Parallel with return_as='generator'."
601
+ " Make sure the generator is not garbage collected when "
602
+ "submitting a new job or that it is first properly exhausted."
603
+ )
604
+
605
+ def terminate(self):
606
+ if self._workers is not None:
607
+ # Don't terminate the workers as we want to reuse them in later
608
+ # calls, but clean up the temporary resources that the Parallel call
609
+ # created. This 'hack' requires a private, low-level operation.
610
+ self._workers._temp_folder_manager._clean_temporary_resources(
611
+ context_id=self.parallel._id, force=False
612
+ )
613
+ self._workers = None
614
+
615
+ self.reset_batch_stats()
616
+
617
+ def abort_everything(self, ensure_ready=True):
618
+ """Shutdown the workers and restart a new one with the same parameters
619
+ """
620
+ self._workers.terminate(kill_workers=True)
621
+ self._workers = None
622
+
623
+ if ensure_ready:
624
+ self.configure(n_jobs=self.parallel.n_jobs, parallel=self.parallel)
625
+
626
+
627
+ class FallbackToBackend(Exception):
628
+ """Raised when configuration should fallback to another backend"""
629
+
630
+ def __init__(self, backend):
631
+ self.backend = backend
632
+
633
+
634
+ def inside_dask_worker():
635
+ """Check whether the current function is executed inside a Dask worker.
636
+ """
637
+ # This function can not be in joblib._dask because there would be a
638
+ # circular import:
639
+ # _dask imports _parallel_backend that imports _dask ...
640
+ try:
641
+ from distributed import get_worker
642
+ except ImportError:
643
+ return False
644
+
645
+ try:
646
+ get_worker()
647
+ return True
648
+ except ValueError:
649
+ return False
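At call sites, the choice between the backends defined in this module is usually made through the `backend`, `prefer` and `require` arguments of `Parallel` rather than by instantiating them directly. A short usage sketch with a toy task:

from joblib import Parallel, delayed

def slow_sum(n):
    # Toy CPU-bound task used only for illustration.
    return sum(range(n))

# Default backend (Loky processes): suited to CPU-bound Python work.
results = Parallel(n_jobs=2)(delayed(slow_sum)(10 ** k) for k in range(4))

# prefer="threads" steers joblib towards ThreadingBackend, which is cheaper
# when the work releases the GIL or is I/O bound.
results = Parallel(n_jobs=2, prefer="threads")(
    delayed(slow_sum)(10 ** k) for k in range(4)
)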
env-llmeval/lib/python3.10/site-packages/joblib/_store_backends.py ADDED
@@ -0,0 +1,474 @@
1
+ """Storage providers backends for Memory caching."""
2
+
3
+ from pickle import PicklingError
4
+ import re
5
+ import os
6
+ import os.path
7
+ import datetime
8
+ import json
9
+ import shutil
10
+ import time
11
+ import warnings
12
+ import collections
13
+ import operator
14
+ import threading
15
+ from abc import ABCMeta, abstractmethod
16
+
17
+ from .backports import concurrency_safe_rename
18
+ from .disk import mkdirp, memstr_to_bytes, rm_subdirs
19
+ from .logger import format_time
20
+ from . import numpy_pickle
21
+
22
+ CacheItemInfo = collections.namedtuple('CacheItemInfo',
23
+ 'path size last_access')
24
+
25
+
26
+ class CacheWarning(Warning):
27
+ """Warning to capture dump failures except for PicklingError."""
28
+ pass
29
+
30
+
31
+ def concurrency_safe_write(object_to_write, filename, write_func):
32
+ """Writes an object into a unique file in a concurrency-safe way."""
33
+ thread_id = id(threading.current_thread())
34
+ temporary_filename = '{}.thread-{}-pid-{}'.format(
35
+ filename, thread_id, os.getpid())
36
+ write_func(object_to_write, temporary_filename)
37
+
38
+ return temporary_filename
39
+
40
+
41
+ class StoreBackendBase(metaclass=ABCMeta):
42
+ """Helper Abstract Base Class which defines all methods that
43
+ a StorageBackend must implement."""
44
+
45
+ location = None
46
+
47
+ @abstractmethod
48
+ def _open_item(self, f, mode):
49
+ """Opens an item on the store and return a file-like object.
50
+
51
+ This method is private and only used by the StoreBackendMixin object.
52
+
53
+ Parameters
54
+ ----------
55
+ f: a file-like object
56
+ The file-like object where an item is stored and retrieved
57
+ mode: string, optional
58
+ the mode in which the file-like object is opened allowed valued are
59
+ 'rb', 'wb'
60
+
61
+ Returns
62
+ -------
63
+ a file-like object
64
+ """
65
+
66
+ @abstractmethod
67
+ def _item_exists(self, location):
68
+ """Checks if an item location exists in the store.
69
+
70
+ This method is private and only used by the StoreBackendMixin object.
71
+
72
+ Parameters
73
+ ----------
74
+ location: string
75
+ The location of an item. On a filesystem, this corresponds to the
76
+ absolute path, including the filename, of a file.
77
+
78
+ Returns
79
+ -------
80
+ True if the item exists, False otherwise
81
+ """
82
+
83
+ @abstractmethod
84
+ def _move_item(self, src, dst):
85
+ """Moves an item from src to dst in the store.
86
+
87
+ This method is private and only used by the StoreBackendMixin object.
88
+
89
+ Parameters
90
+ ----------
91
+ src: string
92
+ The source location of an item
93
+ dst: string
94
+ The destination location of an item
95
+ """
96
+
97
+ @abstractmethod
98
+ def create_location(self, location):
99
+ """Creates a location on the store.
100
+
101
+ Parameters
102
+ ----------
103
+ location: string
104
+ The location in the store. On a filesystem, this corresponds to a
105
+ directory.
106
+ """
107
+
108
+ @abstractmethod
109
+ def clear_location(self, location):
110
+ """Clears a location on the store.
111
+
112
+ Parameters
113
+ ----------
114
+ location: string
115
+ The location in the store. On a filesystem, this corresponds to a
116
+ directory or a filename absolute path
117
+ """
118
+
119
+ @abstractmethod
120
+ def get_items(self):
121
+ """Returns the whole list of items available in the store.
122
+
123
+ Returns
124
+ -------
125
+ The list of items identified by their ids (e.g filename in a
126
+ filesystem).
127
+ """
128
+
129
+ @abstractmethod
130
+ def configure(self, location, verbose=0, backend_options=dict()):
131
+ """Configures the store.
132
+
133
+ Parameters
134
+ ----------
135
+ location: string
136
+ The base location used by the store. On a filesystem, this
137
+ corresponds to a directory.
138
+ verbose: int
139
+ The level of verbosity of the store
140
+ backend_options: dict
141
+ Contains a dictionary of named parameters used to configure the
142
+ store backend.
143
+ """
144
+
145
+
146
+ class StoreBackendMixin(object):
147
+ """Class providing all logic for managing the store in a generic way.
148
+
149
+ The StoreBackend subclass has to implement 3 methods: create_location,
150
+ clear_location and configure. The StoreBackend also has to provide
151
+ a private _open_item, _item_exists and _move_item methods. The _open_item
152
+ method has to have the same signature as the builtin open and return a
153
+ file-like object.
154
+ """
155
+
156
+ def load_item(self, call_id, verbose=1, timestamp=None, metadata=None):
157
+ """Load an item from the store given its id as a list of str."""
158
+ full_path = os.path.join(self.location, *call_id)
159
+
160
+ if verbose > 1:
161
+ ts_string = ('{: <16}'.format(format_time(time.time() - timestamp))
162
+ if timestamp is not None else '')
163
+ signature = os.path.basename(call_id[0])
164
+ if metadata is not None and 'input_args' in metadata:
165
+ kwargs = ', '.join('{}={}'.format(*item)
166
+ for item in metadata['input_args'].items())
167
+ signature += '({})'.format(kwargs)
168
+ msg = '[Memory]{}: Loading {}'.format(ts_string, signature)
169
+ if verbose < 10:
170
+ print('{0}...'.format(msg))
171
+ else:
172
+ print('{0} from {1}'.format(msg, full_path))
173
+
174
+ mmap_mode = (None if not hasattr(self, 'mmap_mode')
175
+ else self.mmap_mode)
176
+
177
+ filename = os.path.join(full_path, 'output.pkl')
178
+ if not self._item_exists(filename):
179
+ raise KeyError("Non-existing item (may have been "
180
+ "cleared).\nFile %s does not exist" % filename)
181
+
182
+ # file-like object cannot be used when mmap_mode is set
183
+ if mmap_mode is None:
184
+ with self._open_item(filename, "rb") as f:
185
+ item = numpy_pickle.load(f)
186
+ else:
187
+ item = numpy_pickle.load(filename, mmap_mode=mmap_mode)
188
+ return item
189
+
190
+ def dump_item(self, call_id, item, verbose=1):
191
+ """Dump an item in the store at the id given as a list of str."""
192
+ try:
193
+ item_path = os.path.join(self.location, *call_id)
194
+ if not self._item_exists(item_path):
195
+ self.create_location(item_path)
196
+ filename = os.path.join(item_path, 'output.pkl')
197
+ if verbose > 10:
198
+ print('Persisting in %s' % item_path)
199
+
200
+ def write_func(to_write, dest_filename):
201
+ with self._open_item(dest_filename, "wb") as f:
202
+ try:
203
+ numpy_pickle.dump(to_write, f, compress=self.compress)
204
+ except PicklingError as e:
205
+ # TODO(1.5) turn into error
206
+ warnings.warn(
207
+ "Unable to cache to disk: failed to pickle "
208
+ "output. In version 1.5 this will raise an "
209
+ f"exception. Exception: {e}.",
210
+ FutureWarning
211
+ )
212
+
213
+ self._concurrency_safe_write(item, filename, write_func)
214
+ except Exception as e: # noqa: E722
215
+ warnings.warn(
216
+ "Unable to cache to disk. Possibly a race condition in the "
217
+ f"creation of the directory. Exception: {e}.",
218
+ CacheWarning
219
+ )
220
+
221
+ def clear_item(self, call_id):
222
+ """Clear the item at the id, given as a list of str."""
223
+ item_path = os.path.join(self.location, *call_id)
224
+ if self._item_exists(item_path):
225
+ self.clear_location(item_path)
226
+
227
+ def contains_item(self, call_id):
228
+ """Check if there is an item at the id, given as a list of str."""
229
+ item_path = os.path.join(self.location, *call_id)
230
+ filename = os.path.join(item_path, 'output.pkl')
231
+
232
+ return self._item_exists(filename)
233
+
234
+ def get_item_info(self, call_id):
235
+ """Return information about item."""
236
+ return {'location': os.path.join(self.location, *call_id)}
237
+
238
+ def get_metadata(self, call_id):
239
+ """Return actual metadata of an item."""
240
+ try:
241
+ item_path = os.path.join(self.location, *call_id)
242
+ filename = os.path.join(item_path, 'metadata.json')
243
+ with self._open_item(filename, 'rb') as f:
244
+ return json.loads(f.read().decode('utf-8'))
245
+ except: # noqa: E722
246
+ return {}
247
+
248
+ def store_metadata(self, call_id, metadata):
249
+ """Store metadata of a computation."""
250
+ try:
251
+ item_path = os.path.join(self.location, *call_id)
252
+ self.create_location(item_path)
253
+ filename = os.path.join(item_path, 'metadata.json')
254
+
255
+ def write_func(to_write, dest_filename):
256
+ with self._open_item(dest_filename, "wb") as f:
257
+ f.write(json.dumps(to_write).encode('utf-8'))
258
+
259
+ self._concurrency_safe_write(metadata, filename, write_func)
260
+ except: # noqa: E722
261
+ pass
262
+
263
+ def contains_path(self, call_id):
264
+ """Check cached function is available in store."""
265
+ func_path = os.path.join(self.location, *call_id)
266
+ return self.object_exists(func_path)
267
+
268
+ def clear_path(self, call_id):
269
+ """Clear all items with a common path in the store."""
270
+ func_path = os.path.join(self.location, *call_id)
271
+ if self._item_exists(func_path):
272
+ self.clear_location(func_path)
273
+
274
+ def store_cached_func_code(self, call_id, func_code=None):
275
+ """Store the code of the cached function."""
276
+ func_path = os.path.join(self.location, *call_id)
277
+ if not self._item_exists(func_path):
278
+ self.create_location(func_path)
279
+
280
+ if func_code is not None:
281
+ filename = os.path.join(func_path, "func_code.py")
282
+ with self._open_item(filename, 'wb') as f:
283
+ f.write(func_code.encode('utf-8'))
284
+
285
+ def get_cached_func_code(self, call_id):
286
+ """Store the code of the cached function."""
287
+ filename = os.path.join(self.location, *call_id, 'func_code.py')
288
+ try:
289
+ with self._open_item(filename, 'rb') as f:
290
+ return f.read().decode('utf-8')
291
+ except: # noqa: E722
292
+ raise
293
+
294
+ def get_cached_func_info(self, call_id):
295
+ """Return information related to the cached function if it exists."""
296
+ return {'location': os.path.join(self.location, *call_id)}
297
+
298
+ def clear(self):
299
+ """Clear the whole store content."""
300
+ self.clear_location(self.location)
301
+
302
+ def enforce_store_limits(
303
+ self, bytes_limit, items_limit=None, age_limit=None
304
+ ):
305
+ """
306
+ Remove the store's oldest files to enforce item, byte, and age limits.
307
+ """
308
+ items_to_delete = self._get_items_to_delete(
309
+ bytes_limit, items_limit, age_limit
310
+ )
311
+
312
+ for item in items_to_delete:
313
+ if self.verbose > 10:
314
+ print('Deleting item {0}'.format(item))
315
+ try:
316
+ self.clear_location(item.path)
317
+ except OSError:
318
+ # Even with ignore_errors=True shutil.rmtree can raise OSError
319
+ # with:
320
+ # [Errno 116] Stale file handle if another process has deleted
321
+ # the folder already.
322
+ pass
323
+
324
+ def _get_items_to_delete(
325
+ self, bytes_limit, items_limit=None, age_limit=None
326
+ ):
327
+ """
328
+ Get items to delete to keep the store under size, file, & age limits.
329
+ """
330
+ if isinstance(bytes_limit, str):
331
+ bytes_limit = memstr_to_bytes(bytes_limit)
332
+
333
+ items = self.get_items()
334
+ if not items:
335
+ return []
336
+
337
+ size = sum(item.size for item in items)
338
+
339
+ if bytes_limit is not None:
340
+ to_delete_size = size - bytes_limit
341
+ else:
342
+ to_delete_size = 0
343
+
344
+ if items_limit is not None:
345
+ to_delete_items = len(items) - items_limit
346
+ else:
347
+ to_delete_items = 0
348
+
349
+ if age_limit is not None:
350
+ older_item = min(item.last_access for item in items)
351
+ deadline = datetime.datetime.now() - age_limit
352
+ else:
353
+ deadline = None
354
+
355
+ if (
356
+ to_delete_size <= 0 and to_delete_items <= 0
357
+ and (deadline is None or older_item > deadline)
358
+ ):
359
+ return []
360
+
361
+ # We want to delete first the cache items that were accessed a
362
+ # long time ago
363
+ items.sort(key=operator.attrgetter('last_access'))
364
+
365
+ items_to_delete = []
366
+ size_so_far = 0
367
+ items_so_far = 0
368
+
369
+ for item in items:
370
+ if (
371
+ (size_so_far >= to_delete_size)
372
+ and items_so_far >= to_delete_items
373
+ and (deadline is None or deadline < item.last_access)
374
+ ):
375
+ break
376
+
377
+ items_to_delete.append(item)
378
+ size_so_far += item.size
379
+ items_so_far += 1
380
+
381
+ return items_to_delete
382
+
383
+ def _concurrency_safe_write(self, to_write, filename, write_func):
384
+ """Writes an object into a file in a concurrency-safe way."""
385
+ temporary_filename = concurrency_safe_write(to_write,
386
+ filename, write_func)
387
+ self._move_item(temporary_filename, filename)
388
+
389
+ def __repr__(self):
390
+ """Printable representation of the store location."""
391
+ return '{class_name}(location="{location}")'.format(
392
+ class_name=self.__class__.__name__, location=self.location)
393
+
394
+
395
+ class FileSystemStoreBackend(StoreBackendBase, StoreBackendMixin):
396
+ """A StoreBackend used with local or network file systems."""
397
+
398
+ _open_item = staticmethod(open)
399
+ _item_exists = staticmethod(os.path.exists)
400
+ _move_item = staticmethod(concurrency_safe_rename)
401
+
402
+ def clear_location(self, location):
403
+ """Delete location on store."""
404
+ if (location == self.location):
405
+ rm_subdirs(location)
406
+ else:
407
+ shutil.rmtree(location, ignore_errors=True)
408
+
409
+ def create_location(self, location):
410
+ """Create object location on store"""
411
+ mkdirp(location)
412
+
413
+ def get_items(self):
414
+ """Returns the whole list of items available in the store."""
415
+ items = []
416
+
417
+ for dirpath, _, filenames in os.walk(self.location):
418
+ is_cache_hash_dir = re.match('[a-f0-9]{32}',
419
+ os.path.basename(dirpath))
420
+
421
+ if is_cache_hash_dir:
422
+ output_filename = os.path.join(dirpath, 'output.pkl')
423
+ try:
424
+ last_access = os.path.getatime(output_filename)
425
+ except OSError:
426
+ try:
427
+ last_access = os.path.getatime(dirpath)
428
+ except OSError:
429
+ # The directory has already been deleted
430
+ continue
431
+
432
+ last_access = datetime.datetime.fromtimestamp(last_access)
433
+ try:
434
+ full_filenames = [os.path.join(dirpath, fn)
435
+ for fn in filenames]
436
+ dirsize = sum(os.path.getsize(fn)
437
+ for fn in full_filenames)
438
+ except OSError:
439
+ # Either output_filename or one of the files in
440
+ # dirpath does not exist any more. We assume this
441
+ # directory is being cleaned by another process already
442
+ continue
443
+
444
+ items.append(CacheItemInfo(dirpath, dirsize,
445
+ last_access))
446
+
447
+ return items
448
+
449
+ def configure(self, location, verbose=1, backend_options=None):
450
+ """Configure the store backend.
451
+
452
+ For this backend, valid store options are 'compress' and 'mmap_mode'
453
+ """
454
+ if backend_options is None:
455
+ backend_options = {}
456
+
457
+ # setup location directory
458
+ self.location = location
459
+ if not os.path.exists(self.location):
460
+ mkdirp(self.location)
461
+
462
+ # item can be stored compressed for faster I/O
463
+ self.compress = backend_options.get('compress', False)
464
+
465
+ # FileSystemStoreBackend can be used with mmap_mode options under
466
+ # certain conditions.
467
+ mmap_mode = backend_options.get('mmap_mode')
468
+ if self.compress and mmap_mode is not None:
469
+ warnings.warn('Compressed items cannot be memmapped in a '
470
+ 'filesystem store. Option will be ignored.',
471
+ stacklevel=2)
472
+
473
+ self.mmap_mode = mmap_mode
474
+ self.verbose = verbose
env-llmeval/lib/python3.10/site-packages/joblib/_utils.py ADDED
@@ -0,0 +1,83 @@
1
+ # Adapted from https://stackoverflow.com/a/9558001/2536294
2
+
3
+ import ast
4
+ from dataclasses import dataclass
5
+ import operator as op
6
+
7
+
8
+ from ._multiprocessing_helpers import mp
9
+
10
+ if mp is not None:
11
+ from .externals.loky.process_executor import _ExceptionWithTraceback
12
+
13
+
14
+ # supported operators
15
+ operators = {
16
+ ast.Add: op.add,
17
+ ast.Sub: op.sub,
18
+ ast.Mult: op.mul,
19
+ ast.Div: op.truediv,
20
+ ast.FloorDiv: op.floordiv,
21
+ ast.Mod: op.mod,
22
+ ast.Pow: op.pow,
23
+ ast.USub: op.neg,
24
+ }
25
+
26
+
27
+ def eval_expr(expr):
28
+ """
29
+ >>> eval_expr('2*6')
30
+ 12
31
+ >>> eval_expr('2**6')
32
+ 64
33
+ >>> eval_expr('1 + 2*3**(4) / (6 + -7)')
34
+ -161.0
35
+ """
36
+ try:
37
+ return eval_(ast.parse(expr, mode="eval").body)
38
+ except (TypeError, SyntaxError, KeyError) as e:
39
+ raise ValueError(
40
+ f"{expr!r} is not a valid or supported arithmetic expression."
41
+ ) from e
42
+
43
+
44
+ def eval_(node):
45
+ if isinstance(node, ast.Constant): # <constant>
46
+ return node.value
47
+ elif isinstance(node, ast.BinOp): # <left> <operator> <right>
48
+ return operators[type(node.op)](eval_(node.left), eval_(node.right))
49
+ elif isinstance(node, ast.UnaryOp): # <operator> <operand> e.g., -1
50
+ return operators[type(node.op)](eval_(node.operand))
51
+ else:
52
+ raise TypeError(node)
53
+
54
+
55
+ @dataclass(frozen=True)
56
+ class _Sentinel:
57
+ """A sentinel to mark a parameter as not explicitly set"""
58
+ default_value: object
59
+
60
+ def __repr__(self):
61
+ return f"default({self.default_value!r})"
62
+
63
+
64
+ class _TracebackCapturingWrapper:
65
+ """Protect function call and return error with traceback."""
66
+
67
+ def __init__(self, func):
68
+ self.func = func
69
+
70
+ def __call__(self, **kwargs):
71
+ try:
72
+ return self.func(**kwargs)
73
+ except BaseException as e:
74
+ return _ExceptionWithTraceback(e)
75
+
76
+
77
+ def _retrieve_traceback_capturing_wrapped_call(out):
78
+ if isinstance(out, _ExceptionWithTraceback):
79
+ rebuild, args = out.__reduce__()
80
+ out = rebuild(*args)
81
+ if isinstance(out, BaseException):
82
+ raise out
83
+ return out
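The `eval_expr` helper only evaluates literal arithmetic; anything else is rejected. A quick check of its behaviour (note that `_utils` is a private module, so this is for illustration only):

from joblib._utils import eval_expr

print(eval_expr("2 ** 10 // 3"))     # 341
print(eval_expr("(1 + 4) * 6"))      # 30
try:
    eval_expr("__import__('os')")    # names and calls are rejected outright
except ValueError as exc:
    print(exc)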
env-llmeval/lib/python3.10/site-packages/joblib/compressor.py ADDED
@@ -0,0 +1,570 @@
1
+ """Classes and functions for managing compressors."""
2
+
3
+ import io
4
+ import zlib
5
+ from joblib.backports import LooseVersion
6
+
7
+ try:
8
+ from threading import RLock
9
+ except ImportError:
10
+ from dummy_threading import RLock
11
+
12
+ try:
13
+ import bz2
14
+ except ImportError:
15
+ bz2 = None
16
+
17
+ try:
18
+ import lz4
19
+ from lz4.frame import LZ4FrameFile
20
+ except ImportError:
21
+ lz4 = None
22
+
23
+ try:
24
+ import lzma
25
+ except ImportError:
26
+ lzma = None
27
+
28
+
29
+ LZ4_NOT_INSTALLED_ERROR = ('LZ4 is not installed. Install it with pip: '
30
+ 'https://python-lz4.readthedocs.io/')
31
+
32
+ # Registered compressors
33
+ _COMPRESSORS = {}
34
+
35
+ # Magic numbers of supported compression file formats.
36
+ _ZFILE_PREFIX = b'ZF' # used with pickle files created before 0.9.3.
37
+ _ZLIB_PREFIX = b'\x78'
38
+ _GZIP_PREFIX = b'\x1f\x8b'
39
+ _BZ2_PREFIX = b'BZ'
40
+ _XZ_PREFIX = b'\xfd\x37\x7a\x58\x5a'
41
+ _LZMA_PREFIX = b'\x5d\x00'
42
+ _LZ4_PREFIX = b'\x04\x22\x4D\x18'
43
+
44
+
45
+ def register_compressor(compressor_name, compressor,
46
+ force=False):
47
+ """Register a new compressor.
48
+
49
+ Parameters
50
+ ----------
51
+ compressor_name: str.
52
+ The name of the compressor.
53
+ compressor: CompressorWrapper
54
+ An instance of a 'CompressorWrapper'.
55
+ """
56
+ global _COMPRESSORS
57
+ if not isinstance(compressor_name, str):
58
+ raise ValueError("Compressor name should be a string, "
59
+ "'{}' given.".format(compressor_name))
60
+
61
+ if not isinstance(compressor, CompressorWrapper):
62
+ raise ValueError("Compressor should implement the CompressorWrapper "
63
+ "interface, '{}' given.".format(compressor))
64
+
65
+ if (compressor.fileobj_factory is not None and
66
+ (not hasattr(compressor.fileobj_factory, 'read') or
67
+ not hasattr(compressor.fileobj_factory, 'write') or
68
+ not hasattr(compressor.fileobj_factory, 'seek') or
69
+ not hasattr(compressor.fileobj_factory, 'tell'))):
70
+ raise ValueError("Compressor 'fileobj_factory' attribute should "
71
+ "implement the file object interface, '{}' given."
72
+ .format(compressor.fileobj_factory))
73
+
74
+ if compressor_name in _COMPRESSORS and not force:
75
+ raise ValueError("Compressor '{}' already registered."
76
+ .format(compressor_name))
77
+
78
+ _COMPRESSORS[compressor_name] = compressor
79
+
80
+
81
+ class CompressorWrapper():
82
+ """A wrapper around a compressor file object.
83
+
84
+ Attributes
85
+ ----------
86
+ obj: a file-like object
87
+ The object must implement the buffer interface and will be used
88
+ internally to compress/decompress the data.
89
+ prefix: bytestring
90
+ A bytestring corresponding to the magic number that identifies the
91
+ file format associated to the compressor.
92
+ extension: str
93
+ The file extension used to automatically select this compressor during
94
+ a dump to a file.
95
+ """
96
+
97
+ def __init__(self, obj, prefix=b'', extension=''):
98
+ self.fileobj_factory = obj
99
+ self.prefix = prefix
100
+ self.extension = extension
101
+
102
+ def compressor_file(self, fileobj, compresslevel=None):
103
+ """Returns an instance of a compressor file object."""
104
+ if compresslevel is None:
105
+ return self.fileobj_factory(fileobj, 'wb')
106
+ else:
107
+ return self.fileobj_factory(fileobj, 'wb',
108
+ compresslevel=compresslevel)
109
+
110
+ def decompressor_file(self, fileobj):
111
+ """Returns an instance of a decompressor file object."""
112
+ return self.fileobj_factory(fileobj, 'rb')
113
+
114
+
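CompressorWrapper together with `register_compressor` is the public hook for adding compression formats; the built-in wrappers below follow the same pattern. A minimal sketch that registers `bz2.BZ2File` under an arbitrary extra name (the `'my-bz2'` name and the output path are illustrative only):

import bz2
import os
import tempfile

from joblib import dump, load, register_compressor
from joblib.compressor import CompressorWrapper

# bz2.BZ2File accepts an already-open file object and exposes the
# read/write/seek/tell interface that register_compressor checks for.
register_compressor(
    'my-bz2',
    CompressorWrapper(obj=bz2.BZ2File, prefix=b'BZ', extension='.bz2'),
)

path = os.path.join(tempfile.mkdtemp(), 'demo.pkl.bz2')
dump({'answer': 42}, path, compress=('my-bz2', 9))
print(load(path))                    # {'answer': 42}

Because the prefix duplicates the built-in bz2 registration, loading simply matches the magic bytes against whichever registered wrapper comes first; both decompress identically here.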
115
+ class BZ2CompressorWrapper(CompressorWrapper):
116
+
117
+ prefix = _BZ2_PREFIX
118
+ extension = '.bz2'
119
+
120
+ def __init__(self):
121
+ if bz2 is not None:
122
+ self.fileobj_factory = bz2.BZ2File
123
+ else:
124
+ self.fileobj_factory = None
125
+
126
+ def _check_versions(self):
127
+ if bz2 is None:
128
+ raise ValueError('bz2 module is not compiled on your python '
129
+ 'standard library.')
130
+
131
+ def compressor_file(self, fileobj, compresslevel=None):
132
+ """Returns an instance of a compressor file object."""
133
+ self._check_versions()
134
+ if compresslevel is None:
135
+ return self.fileobj_factory(fileobj, 'wb')
136
+ else:
137
+ return self.fileobj_factory(fileobj, 'wb',
138
+ compresslevel=compresslevel)
139
+
140
+ def decompressor_file(self, fileobj):
141
+ """Returns an instance of a decompressor file object."""
142
+ self._check_versions()
143
+ fileobj = self.fileobj_factory(fileobj, 'rb')
144
+ return fileobj
145
+
146
+
147
+ class LZMACompressorWrapper(CompressorWrapper):
148
+
149
+ prefix = _LZMA_PREFIX
150
+ extension = '.lzma'
151
+ _lzma_format_name = 'FORMAT_ALONE'
152
+
153
+ def __init__(self):
154
+ if lzma is not None:
155
+ self.fileobj_factory = lzma.LZMAFile
156
+ self._lzma_format = getattr(lzma, self._lzma_format_name)
157
+ else:
158
+ self.fileobj_factory = None
159
+
160
+ def _check_versions(self):
161
+ if lzma is None:
162
+ raise ValueError('lzma module is not available in your Python '
+ 'standard library.')
164
+
165
+ def compressor_file(self, fileobj, compresslevel=None):
166
+ """Returns an instance of a compressor file object."""
167
+ if compresslevel is None:
168
+ return self.fileobj_factory(fileobj, 'wb',
169
+ format=self._lzma_format)
170
+ else:
171
+ return self.fileobj_factory(fileobj, 'wb',
172
+ format=self._lzma_format,
173
+ preset=compresslevel)
174
+
175
+ def decompressor_file(self, fileobj):
176
+ """Returns an instance of a decompressor file object."""
177
+ return lzma.LZMAFile(fileobj, 'rb')
178
+
179
+
180
+ class XZCompressorWrapper(LZMACompressorWrapper):
181
+
182
+ prefix = _XZ_PREFIX
183
+ extension = '.xz'
184
+ _lzma_format_name = 'FORMAT_XZ'
185
+
186
+
187
+ class LZ4CompressorWrapper(CompressorWrapper):
188
+
189
+ prefix = _LZ4_PREFIX
190
+ extension = '.lz4'
191
+
192
+ def __init__(self):
193
+ if lz4 is not None:
194
+ self.fileobj_factory = LZ4FrameFile
195
+ else:
196
+ self.fileobj_factory = None
197
+
198
+ def _check_versions(self):
199
+ if lz4 is None:
200
+ raise ValueError(LZ4_NOT_INSTALLED_ERROR)
201
+ lz4_version = lz4.__version__
202
+ if lz4_version.startswith("v"):
203
+ lz4_version = lz4_version[1:]
204
+ if LooseVersion(lz4_version) < LooseVersion('0.19'):
205
+ raise ValueError(LZ4_NOT_INSTALLED_ERROR)
206
+
207
+ def compressor_file(self, fileobj, compresslevel=None):
208
+ """Returns an instance of a compressor file object."""
209
+ self._check_versions()
210
+ if compresslevel is None:
211
+ return self.fileobj_factory(fileobj, 'wb')
212
+ else:
213
+ return self.fileobj_factory(fileobj, 'wb',
214
+ compression_level=compresslevel)
215
+
216
+ def decompressor_file(self, fileobj):
217
+ """Returns an instance of a decompressor file object."""
218
+ self._check_versions()
219
+ return self.fileobj_factory(fileobj, 'rb')
220
+
221
+
222
+ ###############################################################################
223
+ # base file compression/decompression object definition
224
+ _MODE_CLOSED = 0
225
+ _MODE_READ = 1
226
+ _MODE_READ_EOF = 2
227
+ _MODE_WRITE = 3
228
+ _BUFFER_SIZE = 8192
229
+
230
+
231
+ class BinaryZlibFile(io.BufferedIOBase):
232
+ """A file object providing transparent zlib (de)compression.
233
+
234
+ TODO python2_drop: is it still needed since we dropped Python 2 support?
+ A BinaryZlibFile can act as a wrapper for an existing file object, or refer
236
+ directly to a named file on disk.
237
+
238
+ Note that BinaryZlibFile provides only a *binary* file interface: data read
239
+ is returned as bytes, and data to be written should be given as bytes.
240
+
241
+ This object is an adaptation of the BZ2File object and is compatible with
242
+ versions of python >= 2.7.
243
+
244
+ If filename is a str or bytes object, it gives the name
245
+ of the file to be opened. Otherwise, it should be a file object,
246
+ which will be used to read or write the compressed data.
247
+
248
+ mode can be 'rb' for reading (default) or 'wb' for (over)writing
249
+
250
+ If mode is 'wb', compresslevel can be a number between 1
251
+ and 9 specifying the level of compression: 1 produces the least
252
+ compression, and 9 produces the most compression. 3 is the default.
253
+ """
254
+
255
+ wbits = zlib.MAX_WBITS
256
+
257
+ def __init__(self, filename, mode="rb", compresslevel=3):
258
+ # This lock must be recursive, so that BufferedIOBase's
259
+ # readline(), readlines() and writelines() don't deadlock.
260
+ self._lock = RLock()
261
+ self._fp = None
262
+ self._closefp = False
263
+ self._mode = _MODE_CLOSED
264
+ self._pos = 0
265
+ self._size = -1
266
+ self.compresslevel = compresslevel
267
+
268
+ if not isinstance(compresslevel, int) or not (1 <= compresslevel <= 9):
269
+ raise ValueError("'compresslevel' must be an integer "
270
+ "between 1 and 9. You provided 'compresslevel={}'"
271
+ .format(compresslevel))
272
+
273
+ if mode == "rb":
274
+ self._mode = _MODE_READ
275
+ self._decompressor = zlib.decompressobj(self.wbits)
276
+ self._buffer = b""
277
+ self._buffer_offset = 0
278
+ elif mode == "wb":
279
+ self._mode = _MODE_WRITE
280
+ self._compressor = zlib.compressobj(self.compresslevel,
281
+ zlib.DEFLATED, self.wbits,
282
+ zlib.DEF_MEM_LEVEL, 0)
283
+ else:
284
+ raise ValueError("Invalid mode: %r" % (mode,))
285
+
286
+ if isinstance(filename, str):
287
+ self._fp = io.open(filename, mode)
288
+ self._closefp = True
289
+ elif hasattr(filename, "read") or hasattr(filename, "write"):
290
+ self._fp = filename
291
+ else:
292
+ raise TypeError("filename must be a str or bytes object, "
293
+ "or a file")
294
+
295
+ def close(self):
296
+ """Flush and close the file.
297
+
298
+ May be called more than once without error. Once the file is
299
+ closed, any other operation on it will raise a ValueError.
300
+ """
301
+ with self._lock:
302
+ if self._mode == _MODE_CLOSED:
303
+ return
304
+ try:
305
+ if self._mode in (_MODE_READ, _MODE_READ_EOF):
306
+ self._decompressor = None
307
+ elif self._mode == _MODE_WRITE:
308
+ self._fp.write(self._compressor.flush())
309
+ self._compressor = None
310
+ finally:
311
+ try:
312
+ if self._closefp:
313
+ self._fp.close()
314
+ finally:
315
+ self._fp = None
316
+ self._closefp = False
317
+ self._mode = _MODE_CLOSED
318
+ self._buffer = b""
319
+ self._buffer_offset = 0
320
+
321
+ @property
322
+ def closed(self):
323
+ """True if this file is closed."""
324
+ return self._mode == _MODE_CLOSED
325
+
326
+ def fileno(self):
327
+ """Return the file descriptor for the underlying file."""
328
+ self._check_not_closed()
329
+ return self._fp.fileno()
330
+
331
+ def seekable(self):
332
+ """Return whether the file supports seeking."""
333
+ return self.readable() and self._fp.seekable()
334
+
335
+ def readable(self):
336
+ """Return whether the file was opened for reading."""
337
+ self._check_not_closed()
338
+ return self._mode in (_MODE_READ, _MODE_READ_EOF)
339
+
340
+ def writable(self):
341
+ """Return whether the file was opened for writing."""
342
+ self._check_not_closed()
343
+ return self._mode == _MODE_WRITE
344
+
345
+ # Mode-checking helper functions.
346
+
347
+ def _check_not_closed(self):
348
+ if self.closed:
349
+ fname = getattr(self._fp, 'name', None)
350
+ msg = "I/O operation on closed file"
351
+ if fname is not None:
352
+ msg += " {}".format(fname)
353
+ msg += "."
354
+ raise ValueError(msg)
355
+
356
+ def _check_can_read(self):
357
+ if self._mode not in (_MODE_READ, _MODE_READ_EOF):
358
+ self._check_not_closed()
359
+ raise io.UnsupportedOperation("File not open for reading")
360
+
361
+ def _check_can_write(self):
362
+ if self._mode != _MODE_WRITE:
363
+ self._check_not_closed()
364
+ raise io.UnsupportedOperation("File not open for writing")
365
+
366
+ def _check_can_seek(self):
367
+ if self._mode not in (_MODE_READ, _MODE_READ_EOF):
368
+ self._check_not_closed()
369
+ raise io.UnsupportedOperation("Seeking is only supported "
370
+ "on files open for reading")
371
+ if not self._fp.seekable():
372
+ raise io.UnsupportedOperation("The underlying file object "
373
+ "does not support seeking")
374
+
375
+ # Fill the readahead buffer if it is empty. Returns False on EOF.
376
+ def _fill_buffer(self):
377
+ if self._mode == _MODE_READ_EOF:
378
+ return False
379
+ # Depending on the input data, our call to the decompressor may not
380
+ # return any data. In this case, try again after reading another block.
381
+ while self._buffer_offset == len(self._buffer):
382
+ try:
383
+ rawblock = (self._decompressor.unused_data or
384
+ self._fp.read(_BUFFER_SIZE))
385
+ if not rawblock:
386
+ raise EOFError
387
+ except EOFError:
388
+ # End-of-stream marker and end of file. We're good.
389
+ self._mode = _MODE_READ_EOF
390
+ self._size = self._pos
391
+ return False
392
+ else:
393
+ self._buffer = self._decompressor.decompress(rawblock)
394
+ self._buffer_offset = 0
395
+ return True
396
+
397
+ # Read data until EOF.
398
+ # If return_data is false, consume the data without returning it.
399
+ def _read_all(self, return_data=True):
400
+ # The loop assumes that _buffer_offset is 0. Ensure that this is true.
401
+ self._buffer = self._buffer[self._buffer_offset:]
402
+ self._buffer_offset = 0
403
+
404
+ blocks = []
405
+ while self._fill_buffer():
406
+ if return_data:
407
+ blocks.append(self._buffer)
408
+ self._pos += len(self._buffer)
409
+ self._buffer = b""
410
+ if return_data:
411
+ return b"".join(blocks)
412
+
413
+ # Read a block of up to n bytes.
414
+ # If return_data is false, consume the data without returning it.
415
+ def _read_block(self, n_bytes, return_data=True):
416
+ # If we have enough data buffered, return immediately.
417
+ end = self._buffer_offset + n_bytes
418
+ if end <= len(self._buffer):
419
+ data = self._buffer[self._buffer_offset: end]
420
+ self._buffer_offset = end
421
+ self._pos += len(data)
422
+ return data if return_data else None
423
+
424
+ # The loop assumes that _buffer_offset is 0. Ensure that this is true.
425
+ self._buffer = self._buffer[self._buffer_offset:]
426
+ self._buffer_offset = 0
427
+
428
+ blocks = []
429
+ while n_bytes > 0 and self._fill_buffer():
430
+ if n_bytes < len(self._buffer):
431
+ data = self._buffer[:n_bytes]
432
+ self._buffer_offset = n_bytes
433
+ else:
434
+ data = self._buffer
435
+ self._buffer = b""
436
+ if return_data:
437
+ blocks.append(data)
438
+ self._pos += len(data)
439
+ n_bytes -= len(data)
440
+ if return_data:
441
+ return b"".join(blocks)
442
+
443
+ def read(self, size=-1):
444
+ """Read up to size uncompressed bytes from the file.
445
+
446
+ If size is negative or omitted, read until EOF is reached.
447
+ Returns b'' if the file is already at EOF.
448
+ """
449
+ with self._lock:
450
+ self._check_can_read()
451
+ if size == 0:
452
+ return b""
453
+ elif size < 0:
454
+ return self._read_all()
455
+ else:
456
+ return self._read_block(size)
457
+
458
+ def readinto(self, b):
459
+ """Read up to len(b) bytes into b.
460
+
461
+ Returns the number of bytes read (0 for EOF).
462
+ """
463
+ with self._lock:
464
+ return io.BufferedIOBase.readinto(self, b)
465
+
466
+ def write(self, data):
467
+ """Write a byte string to the file.
468
+
469
+ Returns the number of uncompressed bytes written, which is
470
+ always len(data). Note that due to buffering, the file on disk
471
+ may not reflect the data written until close() is called.
472
+ """
473
+ with self._lock:
474
+ self._check_can_write()
475
+ # Convert data type if called by io.BufferedWriter.
476
+ if isinstance(data, memoryview):
477
+ data = data.tobytes()
478
+
479
+ compressed = self._compressor.compress(data)
480
+ self._fp.write(compressed)
481
+ self._pos += len(data)
482
+ return len(data)
483
+
484
+ # Rewind the file to the beginning of the data stream.
485
+ def _rewind(self):
486
+ self._fp.seek(0, 0)
487
+ self._mode = _MODE_READ
488
+ self._pos = 0
489
+ self._decompressor = zlib.decompressobj(self.wbits)
490
+ self._buffer = b""
491
+ self._buffer_offset = 0
492
+
493
+ def seek(self, offset, whence=0):
494
+ """Change the file position.
495
+
496
+ The new position is specified by offset, relative to the
497
+ position indicated by whence. Values for whence are:
498
+
499
+ 0: start of stream (default); offset must not be negative
500
+ 1: current stream position
501
+ 2: end of stream; offset must not be positive
502
+
503
+ Returns the new file position.
504
+
505
+ Note that seeking is emulated, so depending on the parameters,
506
+ this operation may be extremely slow.
507
+ """
508
+ with self._lock:
509
+ self._check_can_seek()
510
+
511
+ # Recalculate offset as an absolute file position.
512
+ if whence == 0:
513
+ pass
514
+ elif whence == 1:
515
+ offset = self._pos + offset
516
+ elif whence == 2:
517
+ # Seeking relative to EOF - we need to know the file's size.
518
+ if self._size < 0:
519
+ self._read_all(return_data=False)
520
+ offset = self._size + offset
521
+ else:
522
+ raise ValueError("Invalid value for whence: %s" % (whence,))
523
+
524
+ # Make it so that offset is the number of bytes to skip forward.
525
+ if offset < self._pos:
526
+ self._rewind()
527
+ else:
528
+ offset -= self._pos
529
+
530
+ # Read and discard data until we reach the desired position.
531
+ self._read_block(offset, return_data=False)
532
+
533
+ return self._pos
534
+
535
+ def tell(self):
536
+ """Return the current file position."""
537
+ with self._lock:
538
+ self._check_not_closed()
539
+ return self._pos
540
+
541
+
542
+ class ZlibCompressorWrapper(CompressorWrapper):
543
+
544
+ def __init__(self):
545
+ CompressorWrapper.__init__(self, obj=BinaryZlibFile,
546
+ prefix=_ZLIB_PREFIX, extension='.z')
547
+
548
+
549
+ class BinaryGzipFile(BinaryZlibFile):
550
+ """A file object providing transparent gzip (de)compression.
551
+
552
+ If filename is a str or bytes object, it gives the name
553
+ of the file to be opened. Otherwise, it should be a file object,
554
+ which will be used to read or write the compressed data.
555
+
556
+ mode can be 'rb' for reading (default) or 'wb' for (over)writing
557
+
558
+ If mode is 'wb', compresslevel can be a number between 1
559
+ and 9 specifying the level of compression: 1 produces the least
560
+ compression, and 9 produces the most compression. 3 is the default.
561
+ """
562
+
563
+ wbits = 31 # zlib compressor/decompressor wbits value for gzip format.
564
+
565
+
566
+ class GzipCompressorWrapper(CompressorWrapper):
567
+
568
+ def __init__(self):
569
+ CompressorWrapper.__init__(self, obj=BinaryGzipFile,
570
+ prefix=_GZIP_PREFIX, extension='.gz')
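A minimal usage sketch of the wrappers above (assuming joblib's layout where these classes live in joblib.compressor; the file path and payload are illustrative only):

    from joblib.compressor import BinaryZlibFile

    payload = b"example payload" * 1000
    # Write zlib-framed data, then read it back through the same wrapper.
    with BinaryZlibFile("/tmp/example.z", mode="wb", compresslevel=3) as fobj:
        fobj.write(payload)
    with BinaryZlibFile("/tmp/example.z", mode="rb") as fobj:
        assert fobj.read() == payload
    # BinaryGzipFile works the same way but produces gzip-framed streams (.gz).
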
env-llmeval/lib/python3.10/site-packages/joblib/disk.py ADDED
@@ -0,0 +1,136 @@
1
+ """
2
+ Disk management utilities.
3
+ """
4
+
5
+ # Authors: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
6
+ # Lars Buitinck
7
+ # Copyright (c) 2010 Gael Varoquaux
8
+ # License: BSD Style, 3 clauses.
9
+
10
+
11
+ import os
12
+ import sys
13
+ import time
14
+ import errno
15
+ import shutil
16
+
17
+ from multiprocessing import util
18
+
19
+
20
+ try:
21
+ WindowsError
22
+ except NameError:
23
+ WindowsError = OSError
24
+
25
+
26
+ def disk_used(path):
27
+ """ Return the disk usage in a directory."""
28
+ size = 0
29
+ for file in os.listdir(path) + ['.']:
30
+ stat = os.stat(os.path.join(path, file))
31
+ if hasattr(stat, 'st_blocks'):
32
+ size += stat.st_blocks * 512
33
+ else:
34
+ # on some platforms st_blocks is not available (e.g., Windows)
35
+ # approximate by rounding to next multiple of 512
36
+ size += (stat.st_size // 512 + 1) * 512
37
+ # We need to convert to int to avoid having longs on some systems (we
38
+ # don't want longs to avoid problems with SQLite)
39
+ return int(size / 1024.)
40
+
41
+
42
+ def memstr_to_bytes(text):
43
+ """ Convert a memory text to its value in bytes.
44
+ """
45
+ kilo = 1024
46
+ units = dict(K=kilo, M=kilo ** 2, G=kilo ** 3)
47
+ try:
48
+ size = int(units[text[-1]] * float(text[:-1]))
49
+ except (KeyError, ValueError) as e:
50
+ raise ValueError(
51
+ "Invalid literal for size given: %s (type %s); should be "
52
+ "like '10G', '500M', '50K'." % (text, type(text))) from e
53
+ return size
54
+
55
+
56
+ def mkdirp(d):
57
+ """Ensure directory d exists (like mkdir -p on Unix)
58
+ No guarantee that the directory is writable.
59
+ """
60
+ try:
61
+ os.makedirs(d)
62
+ except OSError as e:
63
+ if e.errno != errno.EEXIST:
64
+ raise
65
+
66
+
67
+ # If an rmtree operation fails in rm_subdirs, wait for this much time (in secs),
68
+ # then retry up to RM_SUBDIRS_N_RETRY times. If it still fails, raise the
69
+ # exception. This mechanism ensures that the sub-process GC has time to
70
+ # collect and close the memmaps before we fail.
71
+ RM_SUBDIRS_RETRY_TIME = 0.1
72
+ RM_SUBDIRS_N_RETRY = 10
73
+
74
+
75
+ def rm_subdirs(path, onerror=None):
76
+ """Remove all subdirectories in this path.
77
+
78
+ The directory indicated by `path` is left in place, and its subdirectories
79
+ are erased.
80
+
81
+ If onerror is set, it is called to handle the error with arguments (func,
82
+ path, exc_info) where func is os.listdir, os.remove, or os.rmdir;
83
+ path is the argument to that function that caused it to fail; and
84
+ exc_info is a tuple returned by sys.exc_info(). If onerror is None,
85
+ an exception is raised.
86
+ """
87
+
88
+ # NOTE this code is adapted from the one in shutil.rmtree, and is
89
+ # just as fast
90
+
91
+ names = []
92
+ try:
93
+ names = os.listdir(path)
94
+ except os.error:
95
+ if onerror is not None:
96
+ onerror(os.listdir, path, sys.exc_info())
97
+ else:
98
+ raise
99
+
100
+ for name in names:
101
+ fullname = os.path.join(path, name)
102
+ delete_folder(fullname, onerror=onerror)
103
+
104
+
105
+ def delete_folder(folder_path, onerror=None, allow_non_empty=True):
106
+ """Utility function to cleanup a temporary folder if it still exists."""
107
+ if os.path.isdir(folder_path):
108
+ if onerror is not None:
109
+ shutil.rmtree(folder_path, False, onerror)
110
+ else:
111
+ # allow the rmtree to fail once, wait and re-try.
112
+ # if the error is raised again, fail
113
+ err_count = 0
114
+ while True:
115
+ files = os.listdir(folder_path)
116
+ try:
117
+ if len(files) == 0 or allow_non_empty:
118
+ shutil.rmtree(
119
+ folder_path, ignore_errors=False, onerror=None
120
+ )
121
+ util.debug(
122
+ "Successfully deleted {}".format(folder_path))
123
+ break
124
+ else:
125
+ raise OSError(
126
+ "Expected empty folder {} but got {} "
127
+ "files.".format(folder_path, len(files))
128
+ )
129
+ except (OSError, WindowsError):
130
+ err_count += 1
131
+ if err_count > RM_SUBDIRS_N_RETRY:
132
+ # the folder cannot be deleted right now. It maybe
133
+ # because some temporary files have not been deleted
134
+ # yet.
135
+ raise
136
+ time.sleep(RM_SUBDIRS_RETRY_TIME)
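A short sketch of how the helpers above are typically exercised (values are illustrative; run in a throwaway temporary directory):

    import os, tempfile
    from joblib.disk import memstr_to_bytes, mkdirp, disk_used

    assert memstr_to_bytes("10K") == 10 * 1024      # only 'K', 'M', 'G' suffixes
    target = os.path.join(tempfile.mkdtemp(), "a", "b")
    mkdirp(target)                                  # like `mkdir -p`
    mkdirp(target)                                  # second call is a no-op
    print(disk_used(target))                        # approximate usage in kilobytes
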
env-llmeval/lib/python3.10/site-packages/joblib/executor.py ADDED
@@ -0,0 +1,117 @@
1
+ """Utility function to construct a loky.ReusableExecutor with custom pickler.
2
+
3
+ This module provides efficient ways of working with data stored in
4
+ shared memory with numpy.memmap arrays without inducing any memory
5
+ copy between the parent and child processes.
6
+ """
7
+ # Author: Thomas Moreau <[email protected]>
8
+ # Copyright: 2017, Thomas Moreau
9
+ # License: BSD 3 clause
10
+
11
+ from ._memmapping_reducer import get_memmapping_reducers
12
+ from ._memmapping_reducer import TemporaryResourcesManager
13
+ from .externals.loky.reusable_executor import _ReusablePoolExecutor
14
+
15
+
16
+ _executor_args = None
17
+
18
+
19
+ def get_memmapping_executor(n_jobs, **kwargs):
20
+ return MemmappingExecutor.get_memmapping_executor(n_jobs, **kwargs)
21
+
22
+
23
+ class MemmappingExecutor(_ReusablePoolExecutor):
24
+
25
+ @classmethod
26
+ def get_memmapping_executor(cls, n_jobs, timeout=300, initializer=None,
27
+ initargs=(), env=None, temp_folder=None,
28
+ context_id=None, **backend_args):
29
+ """Factory for ReusableExecutor with automatic memmapping for large
30
+ numpy arrays.
31
+ """
32
+ global _executor_args
33
+ # Check if we can reuse the executor here instead of deferring the test
34
+ # to loky, as the reducers are objects that change at each call.
35
+ executor_args = backend_args.copy()
36
+ executor_args.update(env if env else {})
37
+ executor_args.update(dict(
38
+ timeout=timeout, initializer=initializer, initargs=initargs))
39
+ reuse = _executor_args is None or _executor_args == executor_args
40
+ _executor_args = executor_args
41
+
42
+ manager = TemporaryResourcesManager(temp_folder)
43
+
44
+ # Reducers access the temporary folder in which to store temporary
45
+ # pickles through a call to manager.resolve_temp_folder_name. Resolving
46
+ # the folder name dynamically is useful to use different folders across
47
+ # calls of the same reusable executor.
48
+ job_reducers, result_reducers = get_memmapping_reducers(
49
+ unlink_on_gc_collect=True,
50
+ temp_folder_resolver=manager.resolve_temp_folder_name,
51
+ **backend_args)
52
+ _executor, executor_is_reused = super().get_reusable_executor(
53
+ n_jobs, job_reducers=job_reducers, result_reducers=result_reducers,
54
+ reuse=reuse, timeout=timeout, initializer=initializer,
55
+ initargs=initargs, env=env
56
+ )
57
+
58
+ if not executor_is_reused:
59
+ # Only set a _temp_folder_manager for new executors. Reused
60
+ # executors already have a _temp_folder_manager that must not
61
+ # be re-assigned like that because it is referenced in various
62
+ # places in the reducing machinery of the executor.
63
+ _executor._temp_folder_manager = manager
64
+
65
+ if context_id is not None:
66
+ # Only register the specified context once we know which manager
67
+ # the current executor is using, in order to not register an atexit
68
+ # finalizer twice for the same folder.
69
+ _executor._temp_folder_manager.register_new_context(context_id)
70
+
71
+ return _executor
72
+
73
+ def terminate(self, kill_workers=False):
74
+
75
+ self.shutdown(kill_workers=kill_workers)
76
+
77
+ # When workers are killed in a brutal manner, they cannot execute the
78
+ # finalizer of their shared memmaps. The refcount of those memmaps may
79
+ # be off by an unknown number, so instead of decref'ing them, we force
80
+ # delete the whole temporary folder, and unregister them. There is no
81
+ # risk of PermissionError at folder deletion because at this
82
+ # point, all child processes are dead, so all references to temporary
83
+ # memmaps are closed. Otherwise, just try to delete as much as possible
84
+ # with allow_non_empty=True, but if we can't, it will be cleaned up later
85
+ # on by the resource_tracker.
86
+ with self._submit_resize_lock:
87
+ self._temp_folder_manager._clean_temporary_resources(
88
+ force=kill_workers, allow_non_empty=True
89
+ )
90
+
91
+ @property
92
+ def _temp_folder(self):
93
+ # Legacy property used in tests. Could be removed if we refactored the
94
+ # memmapping tests. SHOULD ONLY BE USED IN TESTS!
95
+ # We cache this property because it is called late in the tests - at
96
+ # this point, all contexts have been unregistered, and
97
+ # resolve_temp_folder_name raises an error.
98
+ if getattr(self, '_cached_temp_folder', None) is not None:
99
+ return self._cached_temp_folder
100
+ else:
101
+ self._cached_temp_folder = self._temp_folder_manager.resolve_temp_folder_name() # noqa
102
+ return self._cached_temp_folder
103
+
104
+
105
+ class _TestingMemmappingExecutor(MemmappingExecutor):
106
+ """Wrapper around ReusableExecutor to ease memmapping testing with Pool
107
+ and Executor. This is only for testing purposes.
108
+
109
+ """
110
+ def apply_async(self, func, args):
111
+ """Schedule a func to be run"""
112
+ future = self.submit(func, *args)
113
+ future.get = future.result
114
+ return future
115
+
116
+ def map(self, f, *args):
117
+ return list(super().map(f, *args))
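As a hedged illustration of the factory defined above: joblib normally drives this executor through its loky parallel backend, so calling it directly as below is only for exploration.

    from joblib.executor import get_memmapping_executor

    executor = get_memmapping_executor(n_jobs=2, timeout=30)
    future = executor.submit(sum, [1, 2, 3])        # plain Executor.submit API
    assert future.result() == 6
    executor.terminate(kill_workers=False)          # also cleans up temp folders
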
env-llmeval/lib/python3.10/site-packages/joblib/externals/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (181 Bytes).
 
env-llmeval/lib/python3.10/site-packages/joblib/externals/cloudpickle/__init__.py ADDED
@@ -0,0 +1,18 @@
1
+ from . import cloudpickle
2
+ from .cloudpickle import * # noqa
3
+
4
+ __doc__ = cloudpickle.__doc__
5
+
6
+ __version__ = "3.0.0"
7
+
8
+ __all__ = [ # noqa
9
+ "__version__",
10
+ "Pickler",
11
+ "CloudPickler",
12
+ "dumps",
13
+ "loads",
14
+ "dump",
15
+ "load",
16
+ "register_pickle_by_value",
17
+ "unregister_pickle_by_value",
18
+ ]
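For orientation, a minimal round trip through the public entry points re-exported here (the vendored copy lives under joblib.externals.cloudpickle):

    from joblib.externals.cloudpickle import dumps, loads

    square = lambda x: x * x          # interactively defined, not importable
    payload = dumps(square)           # serialized by value, not by reference
    assert loads(payload)(4) == 16
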
env-llmeval/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (416 Bytes).
 
env-llmeval/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/cloudpickle.cpython-310.pyc ADDED
Binary file (36.9 kB).
 
env-llmeval/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/cloudpickle_fast.cpython-310.pyc ADDED
Binary file (606 Bytes).
 
env-llmeval/lib/python3.10/site-packages/joblib/externals/cloudpickle/cloudpickle.py ADDED
@@ -0,0 +1,1487 @@
1
+ """Pickler class to extend the standard pickle.Pickler functionality
2
+
3
+ The main objective is to make it natural to perform distributed computing on
4
+ clusters (such as PySpark, Dask, Ray...) with interactively defined code
5
+ (functions, classes, ...) written in notebooks or console.
6
+
7
+ In particular this pickler adds the following features:
8
+ - serialize interactively-defined or locally-defined functions, classes,
9
+ enums, typevars, lambdas and nested functions to compiled byte code;
10
+ - deal with some other non-serializable objects in an ad-hoc manner where
11
+ applicable.
12
+
13
+ This pickler is therefore meant to be used for the communication between short
14
+ lived Python processes running the same version of Python and libraries. In
15
+ particular, it is not meant to be used for long term storage of Python objects.
16
+
17
+ It does not include an unpickler, as standard Python unpickling suffices.
18
+
19
+ This module was extracted from the `cloud` package, developed by `PiCloud, Inc.
20
+ <https://web.archive.org/web/20140626004012/http://www.picloud.com/>`_.
21
+
22
+ Copyright (c) 2012-now, CloudPickle developers and contributors.
23
+ Copyright (c) 2012, Regents of the University of California.
24
+ Copyright (c) 2009 `PiCloud, Inc. <https://web.archive.org/web/20140626004012/http://www.picloud.com/>`_.
25
+ All rights reserved.
26
+
27
+ Redistribution and use in source and binary forms, with or without
28
+ modification, are permitted provided that the following conditions
29
+ are met:
30
+ * Redistributions of source code must retain the above copyright
31
+ notice, this list of conditions and the following disclaimer.
32
+ * Redistributions in binary form must reproduce the above copyright
33
+ notice, this list of conditions and the following disclaimer in the
34
+ documentation and/or other materials provided with the distribution.
35
+ * Neither the name of the University of California, Berkeley nor the
36
+ names of its contributors may be used to endorse or promote
37
+ products derived from this software without specific prior written
38
+ permission.
39
+
40
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
41
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
42
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
43
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
44
+ HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
45
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
46
+ TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
47
+ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
48
+ LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
49
+ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
50
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
51
+ """
52
+
53
+ import _collections_abc
54
+ from collections import ChainMap, OrderedDict
55
+ import abc
56
+ import builtins
57
+ import copyreg
58
+ import dataclasses
59
+ import dis
60
+ from enum import Enum
61
+ import io
62
+ import itertools
63
+ import logging
64
+ import opcode
65
+ import pickle
66
+ from pickle import _getattribute
67
+ import platform
68
+ import struct
69
+ import sys
70
+ import threading
71
+ import types
72
+ import typing
73
+ import uuid
74
+ import warnings
75
+ import weakref
76
+
77
+ # The following import is required to be imported in the cloudpickle
78
+ # namespace to be able to load pickle files generated with older versions of
79
+ # cloudpickle. See: tests/test_backward_compat.py
80
+ from types import CellType # noqa: F401
81
+
82
+
83
+ # cloudpickle is meant for inter process communication: we expect all
84
+ # communicating processes to run the same Python version hence we favor
85
+ # communication speed over compatibility:
86
+ DEFAULT_PROTOCOL = pickle.HIGHEST_PROTOCOL
87
+
88
+ # Names of modules whose resources should be treated as dynamic.
89
+ _PICKLE_BY_VALUE_MODULES = set()
90
+
91
+ # Track the provenance of reconstructed dynamic classes to make it possible to
92
+ # reconstruct instances from the matching singleton class definition when
93
+ # appropriate and preserve the usual "isinstance" semantics of Python objects.
94
+ _DYNAMIC_CLASS_TRACKER_BY_CLASS = weakref.WeakKeyDictionary()
95
+ _DYNAMIC_CLASS_TRACKER_BY_ID = weakref.WeakValueDictionary()
96
+ _DYNAMIC_CLASS_TRACKER_LOCK = threading.Lock()
97
+
98
+ PYPY = platform.python_implementation() == "PyPy"
99
+
100
+ builtin_code_type = None
101
+ if PYPY:
102
+ # builtin-code objects only exist in pypy
103
+ builtin_code_type = type(float.__new__.__code__)
104
+
105
+ _extract_code_globals_cache = weakref.WeakKeyDictionary()
106
+
107
+
108
+ def _get_or_create_tracker_id(class_def):
109
+ with _DYNAMIC_CLASS_TRACKER_LOCK:
110
+ class_tracker_id = _DYNAMIC_CLASS_TRACKER_BY_CLASS.get(class_def)
111
+ if class_tracker_id is None:
112
+ class_tracker_id = uuid.uuid4().hex
113
+ _DYNAMIC_CLASS_TRACKER_BY_CLASS[class_def] = class_tracker_id
114
+ _DYNAMIC_CLASS_TRACKER_BY_ID[class_tracker_id] = class_def
115
+ return class_tracker_id
116
+
117
+
118
+ def _lookup_class_or_track(class_tracker_id, class_def):
119
+ if class_tracker_id is not None:
120
+ with _DYNAMIC_CLASS_TRACKER_LOCK:
121
+ class_def = _DYNAMIC_CLASS_TRACKER_BY_ID.setdefault(
122
+ class_tracker_id, class_def
123
+ )
124
+ _DYNAMIC_CLASS_TRACKER_BY_CLASS[class_def] = class_tracker_id
125
+ return class_def
126
+
127
+
128
+ def register_pickle_by_value(module):
129
+ """Register a module to make its functions and classes picklable by value.
130
+
131
+ By default, functions and classes that are attributes of an importable
132
+ module are to be pickled by reference, that is relying on re-importing
133
+ the attribute from the module at load time.
134
+
135
+ If `register_pickle_by_value(module)` is called, all its functions and
136
+ classes are subsequently to be pickled by value, meaning that they can
137
+ be loaded in Python processes where the module is not importable.
138
+
139
+ This is especially useful when developing a module in a distributed
140
+ execution environment: restarting the client Python process with the new
141
+ source code is enough: there is no need to re-install the new version
142
+ of the module on all the worker nodes nor to restart the workers.
143
+
144
+ Note: this feature is considered experimental. See the cloudpickle
145
+ README.md file for more details and limitations.
146
+ """
147
+ if not isinstance(module, types.ModuleType):
148
+ raise ValueError(f"Input should be a module object, got {str(module)} instead")
149
+ # In the future, cloudpickle may need a way to access any module registered
150
+ # for pickling by value in order to introspect relative imports inside
151
+ # functions pickled by value. (see
152
+ # https://github.com/cloudpipe/cloudpickle/pull/417#issuecomment-873684633).
153
+ # This access can be ensured by checking that module is present in
154
+ # sys.modules at registering time and assuming that it will still be in
155
+ # there when accessed during pickling. Another alternative would be to
156
+ # store a weakref to the module. Even though cloudpickle does not implement
157
+ # this introspection yet, in order to avoid a possible breaking change
158
+ # later, we still enforce the presence of module inside sys.modules.
159
+ if module.__name__ not in sys.modules:
160
+ raise ValueError(
161
+ f"{module} was not imported correctly, have you used an "
162
+ "`import` statement to access it?"
163
+ )
164
+ _PICKLE_BY_VALUE_MODULES.add(module.__name__)
165
+
166
+
167
+ def unregister_pickle_by_value(module):
168
+ """Unregister that the input module should be pickled by value."""
169
+ if not isinstance(module, types.ModuleType):
170
+ raise ValueError(f"Input should be a module object, got {str(module)} instead")
171
+ if module.__name__ not in _PICKLE_BY_VALUE_MODULES:
172
+ raise ValueError(f"{module} is not registered for pickle by value")
173
+ else:
174
+ _PICKLE_BY_VALUE_MODULES.remove(module.__name__)
175
+
176
+
177
+ def list_registry_pickle_by_value():
178
+ return _PICKLE_BY_VALUE_MODULES.copy()
179
+
180
+
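To make the registration API above concrete, a hedged sketch (``mymodule`` is a hypothetical package available only on the client side, not part of this diff):

    import mymodule  # hypothetical local package
    from joblib.externals.cloudpickle import (
        dumps, register_pickle_by_value, unregister_pickle_by_value)

    register_pickle_by_value(mymodule)        # ship mymodule's code by value
    payload = dumps(mymodule.some_function)   # loadable where mymodule is absent
    unregister_pickle_by_value(mymodule)      # revert to pickling by reference
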
181
+ def _is_registered_pickle_by_value(module):
182
+ module_name = module.__name__
183
+ if module_name in _PICKLE_BY_VALUE_MODULES:
184
+ return True
185
+ while True:
186
+ parent_name = module_name.rsplit(".", 1)[0]
187
+ if parent_name == module_name:
188
+ break
189
+ if parent_name in _PICKLE_BY_VALUE_MODULES:
190
+ return True
191
+ module_name = parent_name
192
+ return False
193
+
194
+
195
+ def _whichmodule(obj, name):
196
+ """Find the module an object belongs to.
197
+
198
+ This function differs from ``pickle.whichmodule`` in two ways:
199
+ - it does not mangle the cases where obj's module is __main__ and obj was
200
+ not found in any module.
201
+ - Errors arising during module introspection are ignored, as those errors
202
+ are considered unwanted side effects.
203
+ """
204
+ module_name = getattr(obj, "__module__", None)
205
+
206
+ if module_name is not None:
207
+ return module_name
208
+ # Protect the iteration by using a copy of sys.modules against dynamic
209
+ # modules that trigger imports of other modules upon calls to getattr or
210
+ # other threads importing at the same time.
211
+ for module_name, module in sys.modules.copy().items():
212
+ # Some modules such as coverage can inject non-module objects inside
213
+ # sys.modules
214
+ if (
215
+ module_name == "__main__"
216
+ or module is None
217
+ or not isinstance(module, types.ModuleType)
218
+ ):
219
+ continue
220
+ try:
221
+ if _getattribute(module, name)[0] is obj:
222
+ return module_name
223
+ except Exception:
224
+ pass
225
+ return None
226
+
227
+
228
+ def _should_pickle_by_reference(obj, name=None):
229
+ """Test whether a function or a class should be pickled by reference
230
+
231
+ Pickling by reference means that the object (typically a function or a
232
+ class) is an attribute of a module that is assumed to be importable in the
233
+ target Python environment. Loading will therefore rely on importing the
234
+ module and then calling `getattr` on it to access the function or class.
235
+
236
+ Pickling by reference is the only option to pickle functions and classes
237
+ in the standard library. In cloudpickle the alternative option is to
238
+ pickle by value (for instance for interactively or locally defined
239
+ functions and classes or for attributes of modules that have been
240
+ explicitly registered to be pickled by value).
241
+ """
242
+ if isinstance(obj, types.FunctionType) or issubclass(type(obj), type):
243
+ module_and_name = _lookup_module_and_qualname(obj, name=name)
244
+ if module_and_name is None:
245
+ return False
246
+ module, name = module_and_name
247
+ return not _is_registered_pickle_by_value(module)
248
+
249
+ elif isinstance(obj, types.ModuleType):
250
+ # We assume that sys.modules is primarily used as a cache mechanism for
251
+ # the Python import machinery. Checking if a module has been added to
253
+ # sys.modules is therefore a cheap and simple heuristic to tell us
253
+ # whether we can assume that a given module could be imported by name
254
+ # in another Python process.
255
+ if _is_registered_pickle_by_value(obj):
256
+ return False
257
+ return obj.__name__ in sys.modules
258
+ else:
259
+ raise TypeError(
260
+ "cannot check importability of {} instances".format(type(obj).__name__)
261
+ )
262
+
263
+
264
+ def _lookup_module_and_qualname(obj, name=None):
265
+ if name is None:
266
+ name = getattr(obj, "__qualname__", None)
267
+ if name is None: # pragma: no cover
268
+ # This used to be needed for Python 2.7 support but is probably not
269
+ # needed anymore. However we keep the __name__ introspection in case
270
+ # users of cloudpickle rely on this old behavior for unknown reasons.
271
+ name = getattr(obj, "__name__", None)
272
+
273
+ module_name = _whichmodule(obj, name)
274
+
275
+ if module_name is None:
276
+ # In this case, obj.__module__ is None AND obj was not found in any
277
+ # imported module. obj is thus treated as dynamic.
278
+ return None
279
+
280
+ if module_name == "__main__":
281
+ return None
282
+
283
+ # Note: if module_name is in sys.modules, the corresponding module is
284
+ # assumed importable at unpickling time. See #357
285
+ module = sys.modules.get(module_name, None)
286
+ if module is None:
287
+ # The main reason why obj's module would not be imported is that this
288
+ # module has been dynamically created, using for example
289
+ # types.ModuleType. The other possibility is that module was removed
290
+ # from sys.modules after obj was created/imported. But this case is not
291
+ # supported, as the standard pickle does not support it either.
292
+ return None
293
+
294
+ try:
295
+ obj2, parent = _getattribute(module, name)
296
+ except AttributeError:
297
+ # obj was not found inside the module it points to
298
+ return None
299
+ if obj2 is not obj:
300
+ return None
301
+ return module, name
302
+
303
+
304
+ def _extract_code_globals(co):
305
+ """Find all globals names read or written to by codeblock co."""
306
+ out_names = _extract_code_globals_cache.get(co)
307
+ if out_names is None:
308
+ # We use a dict with None values instead of a set to get a
309
+ # deterministic order and avoid introducing non-deterministic pickle
310
+ # bytes as a result.
311
+ out_names = {name: None for name in _walk_global_ops(co)}
312
+
313
+ # Declaring a function inside another one using the "def ..." syntax
314
+ # generates a constant code object corresponding to that of the
315
+ # nested function. As the nested function may itself need global
316
+ # variables, we need to introspect its code, extract its globals (look
317
+ # for code objects in its co_consts attribute) and add the result to
318
+ # code_globals
319
+ if co.co_consts:
320
+ for const in co.co_consts:
321
+ if isinstance(const, types.CodeType):
322
+ out_names.update(_extract_code_globals(const))
323
+
324
+ _extract_code_globals_cache[co] = out_names
325
+
326
+ return out_names
327
+
328
+
329
+ def _find_imported_submodules(code, top_level_dependencies):
330
+ """Find currently imported submodules used by a function.
331
+
332
+ Submodules used by a function need to be detected and referenced for the
333
+ function to work correctly at depickling time. Because submodules can be
334
+ referenced as attribute of their parent package (``package.submodule``), we
335
+ need a special introspection technique that does not rely on GLOBAL-related
336
+ opcodes to find references of them in a code object.
337
+
338
+ Example:
339
+ ```
340
+ import concurrent.futures
341
+ import cloudpickle
342
+ def func():
343
+ x = concurrent.futures.ThreadPoolExecutor
344
+ if __name__ == '__main__':
345
+ cloudpickle.dumps(func)
346
+ ```
347
+ The globals extracted by cloudpickle in the function's state include the
348
+ concurrent package, but not its submodule (here, concurrent.futures), which
349
+ is the module used by func. _find_imported_submodules will detect the usage
350
+ of concurrent.futures. Saving this module alongside func will ensure
351
+ that calling func once depickled does not fail due to concurrent.futures
352
+ not being imported
353
+ """
354
+
355
+ subimports = []
356
+ # check if any known dependency is an imported package
357
+ for x in top_level_dependencies:
358
+ if (
359
+ isinstance(x, types.ModuleType)
360
+ and hasattr(x, "__package__")
361
+ and x.__package__
362
+ ):
363
+ # check if the package has any currently loaded sub-imports
364
+ prefix = x.__name__ + "."
365
+ # A concurrent thread could mutate sys.modules,
366
+ # make sure we iterate over a copy to avoid exceptions
367
+ for name in list(sys.modules):
368
+ # Older versions of pytest will add a "None" module to
369
+ # sys.modules.
370
+ if name is not None and name.startswith(prefix):
371
+ # check whether the function can address the sub-module
372
+ tokens = set(name[len(prefix) :].split("."))
373
+ if not tokens - set(code.co_names):
374
+ subimports.append(sys.modules[name])
375
+ return subimports
376
+
377
+
378
+ # relevant opcodes
379
+ STORE_GLOBAL = opcode.opmap["STORE_GLOBAL"]
380
+ DELETE_GLOBAL = opcode.opmap["DELETE_GLOBAL"]
381
+ LOAD_GLOBAL = opcode.opmap["LOAD_GLOBAL"]
382
+ GLOBAL_OPS = (STORE_GLOBAL, DELETE_GLOBAL, LOAD_GLOBAL)
383
+ HAVE_ARGUMENT = dis.HAVE_ARGUMENT
384
+ EXTENDED_ARG = dis.EXTENDED_ARG
385
+
386
+
387
+ _BUILTIN_TYPE_NAMES = {}
388
+ for k, v in types.__dict__.items():
389
+ if type(v) is type:
390
+ _BUILTIN_TYPE_NAMES[v] = k
391
+
392
+
393
+ def _builtin_type(name):
394
+ if name == "ClassType": # pragma: no cover
395
+ # Backward compat to load pickle files generated with cloudpickle
396
+ # < 1.3 even if loading pickle files from older versions is not
397
+ # officially supported.
398
+ return type
399
+ return getattr(types, name)
400
+
401
+
402
+ def _walk_global_ops(code):
403
+ """Yield referenced name for global-referencing instructions in code."""
404
+ for instr in dis.get_instructions(code):
405
+ op = instr.opcode
406
+ if op in GLOBAL_OPS:
407
+ yield instr.argval
408
+
409
+
410
+ def _extract_class_dict(cls):
411
+ """Retrieve a copy of the dict of a class without the inherited methods."""
412
+ clsdict = dict(cls.__dict__) # copy dict proxy to a dict
413
+ if len(cls.__bases__) == 1:
414
+ inherited_dict = cls.__bases__[0].__dict__
415
+ else:
416
+ inherited_dict = {}
417
+ for base in reversed(cls.__bases__):
418
+ inherited_dict.update(base.__dict__)
419
+ to_remove = []
420
+ for name, value in clsdict.items():
421
+ try:
422
+ base_value = inherited_dict[name]
423
+ if value is base_value:
424
+ to_remove.append(name)
425
+ except KeyError:
426
+ pass
427
+ for name in to_remove:
428
+ clsdict.pop(name)
429
+ return clsdict
430
+
431
+
432
+ def is_tornado_coroutine(func):
433
+ """Return whether `func` is a Tornado coroutine function.
434
+
435
+ Running coroutines are not supported.
436
+ """
437
+ warnings.warn(
438
+ "is_tornado_coroutine is deprecated in cloudpickle 3.0 and will be "
439
+ "removed in cloudpickle 4.0. Use tornado.gen.is_coroutine_function "
440
+ "directly instead.",
441
+ category=DeprecationWarning,
442
+ )
443
+ if "tornado.gen" not in sys.modules:
444
+ return False
445
+ gen = sys.modules["tornado.gen"]
446
+ if not hasattr(gen, "is_coroutine_function"):
447
+ # Tornado version is too old
448
+ return False
449
+ return gen.is_coroutine_function(func)
450
+
451
+
452
+ def subimport(name):
453
+ # We cannot simply do `return __import__(name)`: indeed, if ``name`` is
454
+ # the name of a submodule, __import__ will return the top-level root module
455
+ # of this submodule. For instance, __import__('os.path') returns the `os`
456
+ # module.
457
+ __import__(name)
458
+ return sys.modules[name]
459
+
460
+
461
+ def dynamic_subimport(name, vars):
462
+ mod = types.ModuleType(name)
463
+ mod.__dict__.update(vars)
464
+ mod.__dict__["__builtins__"] = builtins.__dict__
465
+ return mod
466
+
467
+
468
+ def _get_cell_contents(cell):
469
+ try:
470
+ return cell.cell_contents
471
+ except ValueError:
472
+ # Handle empty cells explicitly with a sentinel value.
473
+ return _empty_cell_value
474
+
475
+
476
+ def instance(cls):
477
+ """Create a new instance of a class.
478
+
479
+ Parameters
480
+ ----------
481
+ cls : type
482
+ The class to create an instance of.
483
+
484
+ Returns
485
+ -------
486
+ instance : cls
487
+ A new instance of ``cls``.
488
+ """
489
+ return cls()
490
+
491
+
492
+ @instance
493
+ class _empty_cell_value:
494
+ """Sentinel for empty closures."""
495
+
496
+ @classmethod
497
+ def __reduce__(cls):
498
+ return cls.__name__
499
+
500
+
501
+ def _make_function(code, globals, name, argdefs, closure):
502
+ # Setting __builtins__ in globals is needed for nogil CPython.
503
+ globals["__builtins__"] = __builtins__
504
+ return types.FunctionType(code, globals, name, argdefs, closure)
505
+
506
+
507
+ def _make_empty_cell():
508
+ if False:
509
+ # trick the compiler into creating an empty cell in our lambda
510
+ cell = None
511
+ raise AssertionError("this route should not be executed")
512
+
513
+ return (lambda: cell).__closure__[0]
514
+
515
+
516
+ def _make_cell(value=_empty_cell_value):
517
+ cell = _make_empty_cell()
518
+ if value is not _empty_cell_value:
519
+ cell.cell_contents = value
520
+ return cell
521
+
522
+
523
+ def _make_skeleton_class(
524
+ type_constructor, name, bases, type_kwargs, class_tracker_id, extra
525
+ ):
526
+ """Build dynamic class with an empty __dict__ to be filled once memoized
527
+
528
+ If class_tracker_id is not None, try to lookup an existing class definition
529
+ matching that id. If none is found, track a newly reconstructed class
530
+ definition under that id so that other instances stemming from the same
531
+ class id will also reuse this class definition.
532
+
533
+ The "extra" variable is meant to be a dict (or None) that can be used for
534
+ forward compatibility shall the need arise.
535
+ """
536
+ skeleton_class = types.new_class(
537
+ name, bases, {"metaclass": type_constructor}, lambda ns: ns.update(type_kwargs)
538
+ )
539
+ return _lookup_class_or_track(class_tracker_id, skeleton_class)
540
+
541
+
542
+ def _make_skeleton_enum(
543
+ bases, name, qualname, members, module, class_tracker_id, extra
544
+ ):
545
+ """Build dynamic enum with an empty __dict__ to be filled once memoized
546
+
547
+ The creation of the enum class is inspired by the code of
548
+ EnumMeta._create_.
549
+
550
+ If class_tracker_id is not None, try to lookup an existing enum definition
551
+ matching that id. If none is found, track a newly reconstructed enum
552
+ definition under that id so that other instances stemming from the same
553
+ class id will also reuse this enum definition.
554
+
555
+ The "extra" variable is meant to be a dict (or None) that can be used for
556
+ forward compatibility shall the need arise.
557
+ """
558
+ # enums always inherit from their base Enum class at the last position in
559
+ # the list of base classes:
560
+ enum_base = bases[-1]
561
+ metacls = enum_base.__class__
562
+ classdict = metacls.__prepare__(name, bases)
563
+
564
+ for member_name, member_value in members.items():
565
+ classdict[member_name] = member_value
566
+ enum_class = metacls.__new__(metacls, name, bases, classdict)
567
+ enum_class.__module__ = module
568
+ enum_class.__qualname__ = qualname
569
+
570
+ return _lookup_class_or_track(class_tracker_id, enum_class)
571
+
572
+
573
+ def _make_typevar(name, bound, constraints, covariant, contravariant, class_tracker_id):
574
+ tv = typing.TypeVar(
575
+ name,
576
+ *constraints,
577
+ bound=bound,
578
+ covariant=covariant,
579
+ contravariant=contravariant,
580
+ )
581
+ return _lookup_class_or_track(class_tracker_id, tv)
582
+
583
+
584
+ def _decompose_typevar(obj):
585
+ return (
586
+ obj.__name__,
587
+ obj.__bound__,
588
+ obj.__constraints__,
589
+ obj.__covariant__,
590
+ obj.__contravariant__,
591
+ _get_or_create_tracker_id(obj),
592
+ )
593
+
594
+
595
+ def _typevar_reduce(obj):
596
+ # TypeVar instances require the module information hence why we
597
+ # are not using the _should_pickle_by_reference directly
598
+ module_and_name = _lookup_module_and_qualname(obj, name=obj.__name__)
599
+
600
+ if module_and_name is None:
601
+ return (_make_typevar, _decompose_typevar(obj))
602
+ elif _is_registered_pickle_by_value(module_and_name[0]):
603
+ return (_make_typevar, _decompose_typevar(obj))
604
+
605
+ return (getattr, module_and_name)
606
+
607
+
608
+ def _get_bases(typ):
609
+ if "__orig_bases__" in getattr(typ, "__dict__", {}):
610
+ # For generic types (see PEP 560)
611
+ # Note that simply checking `hasattr(typ, '__orig_bases__')` is not
612
+ # correct. Subclasses of a fully-parameterized generic class do not
613
+ # have `__orig_bases__` defined, but `hasattr(typ, '__orig_bases__')`
614
+ # will return True because it's defined in the base class.
615
+ bases_attr = "__orig_bases__"
616
+ else:
617
+ # For regular class objects
618
+ bases_attr = "__bases__"
619
+ return getattr(typ, bases_attr)
620
+
621
+
622
+ def _make_dict_keys(obj, is_ordered=False):
623
+ if is_ordered:
624
+ return OrderedDict.fromkeys(obj).keys()
625
+ else:
626
+ return dict.fromkeys(obj).keys()
627
+
628
+
629
+ def _make_dict_values(obj, is_ordered=False):
630
+ if is_ordered:
631
+ return OrderedDict((i, _) for i, _ in enumerate(obj)).values()
632
+ else:
633
+ return {i: _ for i, _ in enumerate(obj)}.values()
634
+
635
+
636
+ def _make_dict_items(obj, is_ordered=False):
637
+ if is_ordered:
638
+ return OrderedDict(obj).items()
639
+ else:
640
+ return obj.items()
641
+
642
+
643
+ # COLLECTION OF OBJECTS __getnewargs__-LIKE METHODS
644
+ # -------------------------------------------------
645
+
646
+
647
+ def _class_getnewargs(obj):
648
+ type_kwargs = {}
649
+ if "__module__" in obj.__dict__:
650
+ type_kwargs["__module__"] = obj.__module__
651
+
652
+ __dict__ = obj.__dict__.get("__dict__", None)
653
+ if isinstance(__dict__, property):
654
+ type_kwargs["__dict__"] = __dict__
655
+
656
+ return (
657
+ type(obj),
658
+ obj.__name__,
659
+ _get_bases(obj),
660
+ type_kwargs,
661
+ _get_or_create_tracker_id(obj),
662
+ None,
663
+ )
664
+
665
+
666
+ def _enum_getnewargs(obj):
667
+ members = {e.name: e.value for e in obj}
668
+ return (
669
+ obj.__bases__,
670
+ obj.__name__,
671
+ obj.__qualname__,
672
+ members,
673
+ obj.__module__,
674
+ _get_or_create_tracker_id(obj),
675
+ None,
676
+ )
677
+
678
+
679
+ # COLLECTION OF OBJECTS RECONSTRUCTORS
680
+ # ------------------------------------
681
+ def _file_reconstructor(retval):
682
+ return retval
683
+
684
+
685
+ # COLLECTION OF OBJECTS STATE GETTERS
686
+ # -----------------------------------
687
+
688
+
689
+ def _function_getstate(func):
690
+ # - Put func's dynamic attributes (stored in func.__dict__) in state. These
691
+ # attributes will be restored at unpickling time using
692
+ # f.__dict__.update(state)
693
+ # - Put func's members into slotstate. Such attributes will be restored at
694
+ # unpickling time by iterating over slotstate and calling setattr(func,
695
+ # slotname, slotvalue)
696
+ slotstate = {
697
+ "__name__": func.__name__,
698
+ "__qualname__": func.__qualname__,
699
+ "__annotations__": func.__annotations__,
700
+ "__kwdefaults__": func.__kwdefaults__,
701
+ "__defaults__": func.__defaults__,
702
+ "__module__": func.__module__,
703
+ "__doc__": func.__doc__,
704
+ "__closure__": func.__closure__,
705
+ }
706
+
707
+ f_globals_ref = _extract_code_globals(func.__code__)
708
+ f_globals = {k: func.__globals__[k] for k in f_globals_ref if k in func.__globals__}
709
+
710
+ if func.__closure__ is not None:
711
+ closure_values = list(map(_get_cell_contents, func.__closure__))
712
+ else:
713
+ closure_values = ()
714
+
715
+ # Extract currently-imported submodules used by func. Storing these modules
716
+ # in a smoke _cloudpickle_subimports attribute of the object's state will
717
+ # trigger the side effect of importing these modules at unpickling time
718
+ # (which is necessary for func to work correctly once depickled)
719
+ slotstate["_cloudpickle_submodules"] = _find_imported_submodules(
720
+ func.__code__, itertools.chain(f_globals.values(), closure_values)
721
+ )
722
+ slotstate["__globals__"] = f_globals
723
+
724
+ state = func.__dict__
725
+ return state, slotstate
726
+
727
+
728
+ def _class_getstate(obj):
729
+ clsdict = _extract_class_dict(obj)
730
+ clsdict.pop("__weakref__", None)
731
+
732
+ if issubclass(type(obj), abc.ABCMeta):
733
+ # If obj is an instance of an ABCMeta subclass, don't pickle the
734
+ # cache/negative caches populated during isinstance/issubclass
735
+ # checks, but pickle the list of registered subclasses of obj.
736
+ clsdict.pop("_abc_cache", None)
737
+ clsdict.pop("_abc_negative_cache", None)
738
+ clsdict.pop("_abc_negative_cache_version", None)
739
+ registry = clsdict.pop("_abc_registry", None)
740
+ if registry is None:
741
+ # The abc caches and registered subclasses of a
742
+ # class are bundled into the single _abc_impl attribute
743
+ clsdict.pop("_abc_impl", None)
744
+ (registry, _, _, _) = abc._get_dump(obj)
745
+
746
+ clsdict["_abc_impl"] = [subclass_weakref() for subclass_weakref in registry]
747
+ else:
748
+ # In the above if clause, registry is a set of weakrefs -- in
749
+ # this case, registry is a WeakSet
750
+ clsdict["_abc_impl"] = [type_ for type_ in registry]
751
+
752
+ if "__slots__" in clsdict:
753
+ # pickle string length optimization: member descriptors of obj are
754
+ # created automatically from obj's __slots__ attribute, no need to
755
+ # save them in obj's state
756
+ if isinstance(obj.__slots__, str):
757
+ clsdict.pop(obj.__slots__)
758
+ else:
759
+ for k in obj.__slots__:
760
+ clsdict.pop(k, None)
761
+
762
+ clsdict.pop("__dict__", None) # unpicklable property object
763
+
764
+ return (clsdict, {})
765
+
766
+
767
+ def _enum_getstate(obj):
768
+ clsdict, slotstate = _class_getstate(obj)
769
+
770
+ members = {e.name: e.value for e in obj}
771
+ # Cleanup the clsdict that will be passed to _make_skeleton_enum:
772
+ # Those attributes are already handled by the metaclass.
773
+ for attrname in [
774
+ "_generate_next_value_",
775
+ "_member_names_",
776
+ "_member_map_",
777
+ "_member_type_",
778
+ "_value2member_map_",
779
+ ]:
780
+ clsdict.pop(attrname, None)
781
+ for member in members:
782
+ clsdict.pop(member)
783
+ # Special handling of Enum subclasses
784
+ return clsdict, slotstate
785
+
786
+
787
+ # COLLECTIONS OF OBJECTS REDUCERS
788
+ # -------------------------------
789
+ # A reducer is a function taking a single argument (obj), and that returns a
790
+ # tuple with all the necessary data to re-construct obj. Apart from a few
791
+ # exceptions (list, dict, bytes, int, etc.), a reducer is necessary to
792
+ # correctly pickle an object.
793
+ # While many built-in objects (Exception objects, instances of the "object"
794
+ # class, etc.) are shipped with their own built-in reducer (invoked using
795
+ # obj.__reduce__), some are not.
796
+ # these holes".
797
+
798
+
799
+ def _code_reduce(obj):
800
+ """code object reducer."""
801
+ # If you are not sure about the order of arguments, take a look at help
802
+ # of the specific type from types, for example:
803
+ # >>> from types import CodeType
804
+ # >>> help(CodeType)
805
+ if hasattr(obj, "co_exceptiontable"):
806
+ # Python 3.11 and later: there are some new attributes
807
+ # related to the enhanced exceptions.
808
+ args = (
809
+ obj.co_argcount,
810
+ obj.co_posonlyargcount,
811
+ obj.co_kwonlyargcount,
812
+ obj.co_nlocals,
813
+ obj.co_stacksize,
814
+ obj.co_flags,
815
+ obj.co_code,
816
+ obj.co_consts,
817
+ obj.co_names,
818
+ obj.co_varnames,
819
+ obj.co_filename,
820
+ obj.co_name,
821
+ obj.co_qualname,
822
+ obj.co_firstlineno,
823
+ obj.co_linetable,
824
+ obj.co_exceptiontable,
825
+ obj.co_freevars,
826
+ obj.co_cellvars,
827
+ )
828
+ elif hasattr(obj, "co_linetable"):
829
+ # Python 3.10 and later: obj.co_lnotab is deprecated and constructor
830
+ # expects obj.co_linetable instead.
831
+ args = (
832
+ obj.co_argcount,
833
+ obj.co_posonlyargcount,
834
+ obj.co_kwonlyargcount,
835
+ obj.co_nlocals,
836
+ obj.co_stacksize,
837
+ obj.co_flags,
838
+ obj.co_code,
839
+ obj.co_consts,
840
+ obj.co_names,
841
+ obj.co_varnames,
842
+ obj.co_filename,
843
+ obj.co_name,
844
+ obj.co_firstlineno,
845
+ obj.co_linetable,
846
+ obj.co_freevars,
847
+ obj.co_cellvars,
848
+ )
849
+ elif hasattr(obj, "co_nmeta"): # pragma: no cover
850
+ # "nogil" Python: modified attributes from 3.9
851
+ args = (
852
+ obj.co_argcount,
853
+ obj.co_posonlyargcount,
854
+ obj.co_kwonlyargcount,
855
+ obj.co_nlocals,
856
+ obj.co_framesize,
857
+ obj.co_ndefaultargs,
858
+ obj.co_nmeta,
859
+ obj.co_flags,
860
+ obj.co_code,
861
+ obj.co_consts,
862
+ obj.co_varnames,
863
+ obj.co_filename,
864
+ obj.co_name,
865
+ obj.co_firstlineno,
866
+ obj.co_lnotab,
867
+ obj.co_exc_handlers,
868
+ obj.co_jump_table,
869
+ obj.co_freevars,
870
+ obj.co_cellvars,
871
+ obj.co_free2reg,
872
+ obj.co_cell2reg,
873
+ )
874
+ else:
875
+ # Backward compat for 3.8 and 3.9
876
+ args = (
877
+ obj.co_argcount,
878
+ obj.co_posonlyargcount,
879
+ obj.co_kwonlyargcount,
880
+ obj.co_nlocals,
881
+ obj.co_stacksize,
882
+ obj.co_flags,
883
+ obj.co_code,
884
+ obj.co_consts,
885
+ obj.co_names,
886
+ obj.co_varnames,
887
+ obj.co_filename,
888
+ obj.co_name,
889
+ obj.co_firstlineno,
890
+ obj.co_lnotab,
891
+ obj.co_freevars,
892
+ obj.co_cellvars,
893
+ )
894
+ return types.CodeType, args
895
+
896
+
897
+ def _cell_reduce(obj):
898
+ """Cell (containing values of a function's free variables) reducer."""
899
+ try:
900
+ obj.cell_contents
901
+ except ValueError: # cell is empty
902
+ return _make_empty_cell, ()
903
+ else:
904
+ return _make_cell, (obj.cell_contents,)
905
+
906
+
907
+ def _classmethod_reduce(obj):
908
+ orig_func = obj.__func__
909
+ return type(obj), (orig_func,)
910
+
911
+
912
+ def _file_reduce(obj):
913
+ """Save a file."""
914
+ import io
915
+
916
+ if not hasattr(obj, "name") or not hasattr(obj, "mode"):
917
+ raise pickle.PicklingError(
918
+ "Cannot pickle files that do not map to an actual file"
919
+ )
920
+ if obj is sys.stdout:
921
+ return getattr, (sys, "stdout")
922
+ if obj is sys.stderr:
923
+ return getattr, (sys, "stderr")
924
+ if obj is sys.stdin:
925
+ raise pickle.PicklingError("Cannot pickle standard input")
926
+ if obj.closed:
927
+ raise pickle.PicklingError("Cannot pickle closed files")
928
+ if hasattr(obj, "isatty") and obj.isatty():
929
+ raise pickle.PicklingError("Cannot pickle files that map to tty objects")
930
+ if "r" not in obj.mode and "+" not in obj.mode:
931
+ raise pickle.PicklingError(
932
+ "Cannot pickle files that are not opened for reading: %s" % obj.mode
933
+ )
934
+
935
+ name = obj.name
936
+
937
+ retval = io.StringIO()
938
+
939
+ try:
940
+ # Read the whole file
941
+ curloc = obj.tell()
942
+ obj.seek(0)
943
+ contents = obj.read()
944
+ obj.seek(curloc)
945
+ except OSError as e:
946
+ raise pickle.PicklingError(
947
+ "Cannot pickle file %s as it cannot be read" % name
948
+ ) from e
949
+ retval.write(contents)
950
+ retval.seek(curloc)
951
+
952
+ retval.name = name
953
+ return _file_reconstructor, (retval,)
954
+
955
+
956
+ def _getset_descriptor_reduce(obj):
957
+ return getattr, (obj.__objclass__, obj.__name__)
958
+
959
+
960
+ def _mappingproxy_reduce(obj):
961
+ return types.MappingProxyType, (dict(obj),)
962
+
963
+
964
+ def _memoryview_reduce(obj):
965
+ return bytes, (obj.tobytes(),)
966
+
967
+
968
+ def _module_reduce(obj):
969
+ if _should_pickle_by_reference(obj):
970
+ return subimport, (obj.__name__,)
971
+ else:
972
+ # Some external libraries can populate the "__builtins__" entry of a
973
+ # module's `__dict__` with unpicklable objects (see #316). For that
974
+ # reason, we do not attempt to pickle the "__builtins__" entry, and
975
+ # restore a default value for it at unpickling time.
976
+ state = obj.__dict__.copy()
977
+ state.pop("__builtins__", None)
978
+ return dynamic_subimport, (obj.__name__, state)
979
+
980
+
981
+ def _method_reduce(obj):
982
+ return (types.MethodType, (obj.__func__, obj.__self__))
983
+
984
+
985
+ def _logger_reduce(obj):
986
+ return logging.getLogger, (obj.name,)
987
+
988
+
989
+ def _root_logger_reduce(obj):
990
+ return logging.getLogger, ()
991
+
992
+
993
+ def _property_reduce(obj):
994
+ return property, (obj.fget, obj.fset, obj.fdel, obj.__doc__)
995
+
996
+
997
+ def _weakset_reduce(obj):
998
+ return weakref.WeakSet, (list(obj),)
999
+
1000
+
1001
+ def _dynamic_class_reduce(obj):
1002
+ """Save a class that can't be referenced as a module attribute.
1003
+
1004
+ This method is used to serialize classes that are defined inside
1005
+ functions, or that otherwise can't be serialized as attribute lookups
1006
+ from importable modules.
1007
+ """
1008
+ if Enum is not None and issubclass(obj, Enum):
1009
+ return (
1010
+ _make_skeleton_enum,
1011
+ _enum_getnewargs(obj),
1012
+ _enum_getstate(obj),
1013
+ None,
1014
+ None,
1015
+ _class_setstate,
1016
+ )
1017
+ else:
1018
+ return (
1019
+ _make_skeleton_class,
1020
+ _class_getnewargs(obj),
1021
+ _class_getstate(obj),
1022
+ None,
1023
+ None,
1024
+ _class_setstate,
1025
+ )
1026
+
1027
+
1028
+ def _class_reduce(obj):
1029
+ """Select the reducer depending on the dynamic nature of the class obj."""
1030
+ if obj is type(None): # noqa
1031
+ return type, (None,)
1032
+ elif obj is type(Ellipsis):
1033
+ return type, (Ellipsis,)
1034
+ elif obj is type(NotImplemented):
1035
+ return type, (NotImplemented,)
1036
+ elif obj in _BUILTIN_TYPE_NAMES:
1037
+ return _builtin_type, (_BUILTIN_TYPE_NAMES[obj],)
1038
+ elif not _should_pickle_by_reference(obj):
1039
+ return _dynamic_class_reduce(obj)
1040
+ return NotImplemented
1041
+
1042
+
1043
+ def _dict_keys_reduce(obj):
1044
+ # Safer not to ship the full dict as sending the rest might
1045
+ # be unintended and could potentially cause leaking of
1046
+ # sensitive information
1047
+ return _make_dict_keys, (list(obj),)
1048
+
1049
+
1050
+ def _dict_values_reduce(obj):
1051
+ # Safer not to ship the full dict as sending the rest might
1052
+ # be unintended and could potentially cause leaking of
1053
+ # sensitive information
1054
+ return _make_dict_values, (list(obj),)
1055
+
1056
+
1057
+ def _dict_items_reduce(obj):
1058
+ return _make_dict_items, (dict(obj),)
1059
+
1060
+
1061
+ def _odict_keys_reduce(obj):
1062
+ # Safer not to ship the full dict as sending the rest might
1063
+ # be unintended and could potentially cause leaking of
1064
+ # sensitive information
1065
+ return _make_dict_keys, (list(obj), True)
1066
+
1067
+
1068
+ def _odict_values_reduce(obj):
1069
+ # Safer not to ship the full dict as sending the rest might
1070
+ # be unintended and could potentially cause leaking of
1071
+ # sensitive information
1072
+ return _make_dict_values, (list(obj), True)
1073
+
1074
+
1075
+ def _odict_items_reduce(obj):
1076
+ return _make_dict_items, (dict(obj), True)
1077
+
1078
+
1079
+ def _dataclass_field_base_reduce(obj):
1080
+ return _get_dataclass_field_type_sentinel, (obj.name,)
1081
+
1082
+
1083
+ # COLLECTIONS OF OBJECTS STATE SETTERS
1084
+ # ------------------------------------
1085
+ # state setters are called at unpickling time, once the object is created and
1086
+ # it has to be updated to how it was at pickling time.
1087
+
1088
+
1089
+ def _function_setstate(obj, state):
1090
+ """Update the state of a dynamic function.
1091
+
1092
+ As __closure__ and __globals__ are readonly attributes of a function, we
1093
+ cannot rely on the native setstate routine of pickle.load_build, that calls
1094
+ setattr on items of the slotstate. Instead, we have to modify them inplace.
1095
+ """
1096
+ state, slotstate = state
1097
+ obj.__dict__.update(state)
1098
+
1099
+ obj_globals = slotstate.pop("__globals__")
1100
+ obj_closure = slotstate.pop("__closure__")
1101
+ # _cloudpickle_submodules is a set of submodules that must be loaded for
1102
+ # the pickled function to work correctly at unpickling time. Now that these
1103
+ # submodules are depickled (hence imported), they can be removed from the
1104
+ # object's state (the object state only served as a reference holder to
1105
+ # these submodules)
1106
+ slotstate.pop("_cloudpickle_submodules")
1107
+
1108
+ obj.__globals__.update(obj_globals)
1109
+ obj.__globals__["__builtins__"] = __builtins__
1110
+
1111
+ if obj_closure is not None:
1112
+ for i, cell in enumerate(obj_closure):
1113
+ try:
1114
+ value = cell.cell_contents
1115
+ except ValueError: # cell is empty
1116
+ continue
1117
+ obj.__closure__[i].cell_contents = value
1118
+
1119
+ for k, v in slotstate.items():
1120
+ setattr(obj, k, v)
1121
+
1122
+
1123
+ def _class_setstate(obj, state):
1124
+ state, slotstate = state
1125
+ registry = None
1126
+ for attrname, attr in state.items():
1127
+ if attrname == "_abc_impl":
1128
+ registry = attr
1129
+ else:
1130
+ setattr(obj, attrname, attr)
1131
+ if registry is not None:
1132
+ for subclass in registry:
1133
+ obj.register(subclass)
1134
+
1135
+ return obj
1136
+
1137
+
1138
+ # COLLECTION OF DATACLASS UTILITIES
1139
+ # ---------------------------------
1140
+ # There are some internal sentinel values whose identity must be preserved when
1141
+ # unpickling dataclass fields. Each sentinel value has a unique name that we can
1142
+ # use to retrieve its identity at unpickling time.
1143
+
1144
+
1145
+ _DATACLASSE_FIELD_TYPE_SENTINELS = {
1146
+ dataclasses._FIELD.name: dataclasses._FIELD,
1147
+ dataclasses._FIELD_CLASSVAR.name: dataclasses._FIELD_CLASSVAR,
1148
+ dataclasses._FIELD_INITVAR.name: dataclasses._FIELD_INITVAR,
1149
+ }
1150
+
1151
+
1152
+ def _get_dataclass_field_type_sentinel(name):
1153
+ return _DATACLASSE_FIELD_TYPE_SENTINELS[name]
1154
+
1155
+
1156
+ class Pickler(pickle.Pickler):
1157
+ # set of reducers defined and used by cloudpickle (private)
1158
+ _dispatch_table = {}
1159
+ _dispatch_table[classmethod] = _classmethod_reduce
1160
+ _dispatch_table[io.TextIOWrapper] = _file_reduce
1161
+ _dispatch_table[logging.Logger] = _logger_reduce
1162
+ _dispatch_table[logging.RootLogger] = _root_logger_reduce
1163
+ _dispatch_table[memoryview] = _memoryview_reduce
1164
+ _dispatch_table[property] = _property_reduce
1165
+ _dispatch_table[staticmethod] = _classmethod_reduce
1166
+ _dispatch_table[CellType] = _cell_reduce
1167
+ _dispatch_table[types.CodeType] = _code_reduce
1168
+ _dispatch_table[types.GetSetDescriptorType] = _getset_descriptor_reduce
1169
+ _dispatch_table[types.ModuleType] = _module_reduce
1170
+ _dispatch_table[types.MethodType] = _method_reduce
1171
+ _dispatch_table[types.MappingProxyType] = _mappingproxy_reduce
1172
+ _dispatch_table[weakref.WeakSet] = _weakset_reduce
1173
+ _dispatch_table[typing.TypeVar] = _typevar_reduce
1174
+ _dispatch_table[_collections_abc.dict_keys] = _dict_keys_reduce
1175
+ _dispatch_table[_collections_abc.dict_values] = _dict_values_reduce
1176
+ _dispatch_table[_collections_abc.dict_items] = _dict_items_reduce
1177
+ _dispatch_table[type(OrderedDict().keys())] = _odict_keys_reduce
1178
+ _dispatch_table[type(OrderedDict().values())] = _odict_values_reduce
1179
+ _dispatch_table[type(OrderedDict().items())] = _odict_items_reduce
1180
+ _dispatch_table[abc.abstractmethod] = _classmethod_reduce
1181
+ _dispatch_table[abc.abstractclassmethod] = _classmethod_reduce
1182
+ _dispatch_table[abc.abstractstaticmethod] = _classmethod_reduce
1183
+ _dispatch_table[abc.abstractproperty] = _property_reduce
1184
+ _dispatch_table[dataclasses._FIELD_BASE] = _dataclass_field_base_reduce
1185
+
1186
+ dispatch_table = ChainMap(_dispatch_table, copyreg.dispatch_table)
1187
+
1188
+ # function reducers are defined as instance methods of cloudpickle.Pickler
1189
+ # objects, as they rely on a cloudpickle.Pickler attribute (globals_ref)
1190
+ def _dynamic_function_reduce(self, func):
1191
+ """Reduce a function that is not pickleable via attribute lookup."""
1192
+ newargs = self._function_getnewargs(func)
1193
+ state = _function_getstate(func)
1194
+ return (_make_function, newargs, state, None, None, _function_setstate)
1195
+
1196
+ def _function_reduce(self, obj):
1197
+ """Reducer for function objects.
1198
+
1199
+ If obj is a top-level attribute of a file-backed module, this reducer
1200
+ returns NotImplemented, making the cloudpickle.Pickler fall back to
1201
+ traditional pickle.Pickler routines to save obj. Otherwise, it reduces
1202
+ obj using a custom cloudpickle reducer designed specifically to handle
1203
+ dynamic functions.
1204
+ """
1205
+ if _should_pickle_by_reference(obj):
1206
+ return NotImplemented
1207
+ else:
1208
+ return self._dynamic_function_reduce(obj)
1209
+
1210
+ def _function_getnewargs(self, func):
1211
+ code = func.__code__
1212
+
1213
+ # base_globals represents the future global namespace of func at
1214
+ # unpickling time. Looking it up and storing it in
1215
+ # cloudpickle.Pickler.globals_ref allow functions sharing the same
1216
+ # globals at pickling time to also share them once unpickled, on one
1217
+ # condition: since globals_ref is an attribute of a cloudpickle.Pickler
1218
+ # instance, and that a new cloudpickle.Pickler is created each time
1219
+ # cloudpickle.dump or cloudpickle.dumps is called, functions also need
1220
+ # to be saved within the same invocation of
1221
+ # cloudpickle.dump/cloudpickle.dumps (for example:
1222
+ # cloudpickle.dumps([f1, f2])). There is no such limitation when using
1223
+ # cloudpickle.Pickler.dump, as long as the multiple invocations are
1224
+ # bound to the same cloudpickle.Pickler instance.
1225
+ base_globals = self.globals_ref.setdefault(id(func.__globals__), {})
1226
+
1227
+ if base_globals == {}:
1228
+ # Add module attributes used to resolve relative imports
1229
+ # instructions inside func.
1230
+ for k in ["__package__", "__name__", "__path__", "__file__"]:
1231
+ if k in func.__globals__:
1232
+ base_globals[k] = func.__globals__[k]
1233
+
1234
+ # Do not bind the free variables before the function is created to
1235
+ # avoid infinite recursion.
1236
+ if func.__closure__ is None:
1237
+ closure = None
1238
+ else:
1239
+ closure = tuple(_make_empty_cell() for _ in range(len(code.co_freevars)))
1240
+
1241
+ return code, base_globals, None, None, closure
1242
+
1243
+ def dump(self, obj):
1244
+ try:
1245
+ return super().dump(obj)
1246
+ except RuntimeError as e:
1247
+ if len(e.args) > 0 and "recursion" in e.args[0]:
1248
+ msg = "Could not pickle object as excessively deep recursion required."
1249
+ raise pickle.PicklingError(msg) from e
1250
+ else:
1251
+ raise
1252
+
1253
+ def __init__(self, file, protocol=None, buffer_callback=None):
1254
+ if protocol is None:
1255
+ protocol = DEFAULT_PROTOCOL
1256
+ super().__init__(file, protocol=protocol, buffer_callback=buffer_callback)
1257
+ # map functions __globals__ attribute ids, to ensure that functions
1258
+ # sharing the same global namespace at pickling time also share
1259
+ # their global namespace at unpickling time.
1260
+ self.globals_ref = {}
1261
+ self.proto = int(protocol)
1262
+
1263
+ if not PYPY:
1264
+ # pickle.Pickler is the C implementation of the CPython pickler and
1265
+ # therefore we rely on reduce_override method to customize the pickler
1266
+ # behavior.
1267
+
1268
+ # `cloudpickle.Pickler.dispatch` is only left for backward
1269
+ # compatibility - note that when using protocol 5,
1270
+ # `cloudpickle.Pickler.dispatch` is not an extension of
1271
+ # `pickle._Pickler.dispatch` dictionary, because `cloudpickle.Pickler`
1272
+ # subclasses the C-implemented `pickle.Pickler`, which does not expose
1273
+ # a `dispatch` attribute. Earlier versions of `cloudpickle.Pickler`
1274
+ # used `cloudpickle.Pickler.dispatch` as a class-level attribute
1275
+ # storing all reducers implemented by cloudpickle, but the attribute
1276
+ # name was not a great choice because it would collide with a
1277
+ # similarly named attribute in the pure-Python `pickle._Pickler`
1278
+ # implementation in the standard library.
1279
+ dispatch = dispatch_table
1280
+
1281
+ # Implementation of the reducer_override callback, in order to
1282
+ # efficiently serialize dynamic functions and classes by subclassing
1283
+ # the C-implemented `pickle.Pickler`.
1284
+ # TODO: decorrelate reducer_override (which is tied to CPython's
1285
+ # implementation - would it make sense to backport it to PyPy?) and
1286
+ # pickle's protocol 5, which is implementation agnostic. Currently, the
1287
+ # availability of both notions coincides in CPython's pickle, but that may
1288
+ # no longer be the case once PyPy implements protocol 5.
1289
+
1290
+ def reducer_override(self, obj):
1291
+ """Type-agnostic reducing callback for function and classes.
1292
+
1293
+ For performance reasons, subclasses of the C `pickle.Pickler` class
1294
+ cannot register custom reducers for functions and classes in the
1295
+ dispatch_table attribute. Reducers for such types must instead
1296
+ be implemented via the special `reducer_override` method.
1297
+
1298
+ Note that this method will be called for any object except a few
1299
+ builtin-types (int, lists, dicts etc.), which differs from reducers
1300
+ in the Pickler's dispatch_table, each of them being invoked for
1301
+ objects of a specific type only.
1302
+
1303
+ This property comes in handy for classes: although most classes are
1304
+ instances of the ``type`` metaclass, some of them can be instances
1305
+ of other custom metaclasses (such as enum.EnumMeta for example). In
1306
+ particular, the metaclass will likely not be known in advance, and
1307
+ thus cannot be special-cased using an entry in the dispatch_table.
1308
+ reducer_override, among other things, allows us to register a
1309
+ reducer that will be called for any class, independently of its
1310
+ type.
1311
+
1312
+ Notes:
1313
+
1314
+ * reducer_override has the priority over dispatch_table-registered
1315
+ reducers.
1316
+ * reducer_override can be used to fix other limitations of
1317
+ cloudpickle for other types that suffered from type-specific
1318
+ reducers, such as Exceptions. See
1319
+ https://github.com/cloudpipe/cloudpickle/issues/248
1320
+ """
1321
+ t = type(obj)
1322
+ try:
1323
+ is_anyclass = issubclass(t, type)
1324
+ except TypeError: # t is not a class (old Boost; see SF #502085)
1325
+ is_anyclass = False
1326
+
1327
+ if is_anyclass:
1328
+ return _class_reduce(obj)
1329
+ elif isinstance(obj, types.FunctionType):
1330
+ return self._function_reduce(obj)
1331
+ else:
1332
+ # fallback to save_global, including the Pickler's
1333
+ # dispatch_table
1334
+ return NotImplemented
1335
+
1336
+ else:
1337
+ # When reducer_override is not available, hack the pure-Python
1338
+ # Pickler's types.FunctionType and type savers. Note: the type saver
1339
+ # must override Pickler.save_global, because pickle.py contains a
1340
+ # hard-coded call to save_global when pickling meta-classes.
1341
+ dispatch = pickle.Pickler.dispatch.copy()
1342
+
1343
+ def _save_reduce_pickle5(
1344
+ self,
1345
+ func,
1346
+ args,
1347
+ state=None,
1348
+ listitems=None,
1349
+ dictitems=None,
1350
+ state_setter=None,
1351
+ obj=None,
1352
+ ):
1353
+ save = self.save
1354
+ write = self.write
1355
+ self.save_reduce(
1356
+ func,
1357
+ args,
1358
+ state=None,
1359
+ listitems=listitems,
1360
+ dictitems=dictitems,
1361
+ obj=obj,
1362
+ )
1363
+ # backport of the Python 3.8 state_setter pickle operations
1364
+ save(state_setter)
1365
+ save(obj) # simple BINGET opcode as obj is already memoized.
1366
+ save(state)
1367
+ write(pickle.TUPLE2)
1368
+ # Trigger a state_setter(obj, state) function call.
1369
+ write(pickle.REDUCE)
1370
+ # The purpose of state_setter is to carry out an
1371
+ # in-place modification of obj. We do not care about what the
1372
+ # method might return, so its output is eventually removed from
1373
+ # the stack.
1374
+ write(pickle.POP)
1375
+
1376
+ def save_global(self, obj, name=None, pack=struct.pack):
1377
+ """Main dispatch method.
1378
+
1379
+ The name of this method is somewhat misleading: all types get
1380
+ dispatched here.
1381
+ """
1382
+ if obj is type(None): # noqa
1383
+ return self.save_reduce(type, (None,), obj=obj)
1384
+ elif obj is type(Ellipsis):
1385
+ return self.save_reduce(type, (Ellipsis,), obj=obj)
1386
+ elif obj is type(NotImplemented):
1387
+ return self.save_reduce(type, (NotImplemented,), obj=obj)
1388
+ elif obj in _BUILTIN_TYPE_NAMES:
1389
+ return self.save_reduce(
1390
+ _builtin_type, (_BUILTIN_TYPE_NAMES[obj],), obj=obj
1391
+ )
1392
+
1393
+ if name is not None:
1394
+ super().save_global(obj, name=name)
1395
+ elif not _should_pickle_by_reference(obj, name=name):
1396
+ self._save_reduce_pickle5(*_dynamic_class_reduce(obj), obj=obj)
1397
+ else:
1398
+ super().save_global(obj, name=name)
1399
+
1400
+ dispatch[type] = save_global
1401
+
1402
+ def save_function(self, obj, name=None):
1403
+ """Registered with the dispatch to handle all function types.
1404
+
1405
+ Determines what kind of function obj is (e.g. lambda, defined at
1406
+ interactive prompt, etc) and handles the pickling appropriately.
1407
+ """
1408
+ if _should_pickle_by_reference(obj, name=name):
1409
+ return super().save_global(obj, name=name)
1410
+ elif PYPY and isinstance(obj.__code__, builtin_code_type):
1411
+ return self.save_pypy_builtin_func(obj)
1412
+ else:
1413
+ return self._save_reduce_pickle5(
1414
+ *self._dynamic_function_reduce(obj), obj=obj
1415
+ )
1416
+
1417
+ def save_pypy_builtin_func(self, obj):
1418
+ """Save pypy equivalent of builtin functions.
1419
+
1420
+ PyPy does not have the concept of builtin-functions. Instead,
1421
+ builtin-functions are simple function instances, but with a
1422
+ builtin-code attribute.
1423
+ Most of the time, builtin functions should be pickled by attribute.
1424
+ But PyPy has flaky support for __qualname__, so some builtin
1425
+ functions such as float.__new__ will be classified as dynamic. For
1426
+ this reason only, we created this special routine. Because
1427
+ builtin-functions are not expected to have closure or globals,
1428
+ there is no additional hack (compared to the one already implemented
1429
+ in pickle) to protect ourselves from reference cycles. A simple
1430
+ (reconstructor, newargs, obj.__dict__) tuple is save_reduced. Note
1431
+ also that PyPy improved their support for __qualname__ in v3.6, so
1432
+ this routine should be removed when cloudpickle supports only PyPy
1433
+ 3.6 and later.
1434
+ """
1435
+ rv = (
1436
+ types.FunctionType,
1437
+ (obj.__code__, {}, obj.__name__, obj.__defaults__, obj.__closure__),
1438
+ obj.__dict__,
1439
+ )
1440
+ self.save_reduce(*rv, obj=obj)
1441
+
1442
+ dispatch[types.FunctionType] = save_function
1443
+
1444
+
1445
+ # Shorthands similar to pickle.dump/pickle.dumps
1446
+
1447
+
1448
+ def dump(obj, file, protocol=None, buffer_callback=None):
1449
+ """Serialize obj as bytes streamed into file
1450
+
1451
+ protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to
1452
+ pickle.HIGHEST_PROTOCOL. This setting favors maximum communication
1453
+ speed between processes running the same Python version.
1454
+
1455
+ Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure
1456
+ compatibility with older versions of Python (although this is not always
1457
+ guaranteed to work because cloudpickle relies on some internal
1458
+ implementation details that can change from one Python version to the
1459
+ next).
1460
+ """
1461
+ Pickler(file, protocol=protocol, buffer_callback=buffer_callback).dump(obj)
1462
+
1463
+
1464
+ def dumps(obj, protocol=None, buffer_callback=None):
1465
+ """Serialize obj as a string of bytes allocated in memory
1466
+
1467
+ protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to
1468
+ pickle.HIGHEST_PROTOCOL. This setting favors maximum communication
1469
+ speed between processes running the same Python version.
1470
+
1471
+ Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure
1472
+ compatibility with older versions of Python (although this is not always
1473
+ guaranteed to work because cloudpickle relies on some internal
1474
+ implementation details that can change from one Python version to the
1475
+ next).
1476
+ """
1477
+ with io.BytesIO() as file:
1478
+ cp = Pickler(file, protocol=protocol, buffer_callback=buffer_callback)
1479
+ cp.dump(obj)
1480
+ return file.getvalue()
1481
+
1482
+
1483
+ # Include pickles unloading functions in this namespace for convenience.
1484
+ load, loads = pickle.load, pickle.loads
1485
+
1486
+ # Backward compat alias.
1487
+ CloudPickler = Pickler
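Editorial note, not part of the uploaded file: the module above defines the cloudpickle Pickler together with the dump/dumps helpers, and aliases load/loads to the standard pickle functions. A minimal, hypothetical round-trip sketch, assuming the vendored package is importable as joblib.externals.cloudpickle (the path shown in the file header above):

    import pickle
    from joblib.externals import cloudpickle

    # A lambda has no importable qualified name, so it is serialized by value
    # through the dynamic-function reducers defined above.
    double = lambda x: 2 * x
    payload = cloudpickle.dumps(double)

    # The output is a regular pickle stream, so the standard-library loader
    # can reconstruct it, as long as this cloudpickle module is importable at
    # load time.
    restored = pickle.loads(payload)
    assert restored(21) == 42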
env-llmeval/lib/python3.10/site-packages/joblib/externals/cloudpickle/cloudpickle_fast.py ADDED
@@ -0,0 +1,13 @@
1
+ """Compatibility module.
2
+
3
+ It can be necessary to load files generated by previous versions of cloudpickle
4
+ that rely on symbols being defined under the `cloudpickle.cloudpickle_fast`
5
+ namespace.
6
+
7
+ See: tests/test_backward_compat.py
8
+ """
9
+ from . import cloudpickle
10
+
11
+
12
+ def __getattr__(name):
13
+ return getattr(cloudpickle, name)
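Editorial note, not part of the uploaded file: the compatibility shim above only forwards attribute lookups via the module-level __getattr__, so code importing from the old cloudpickle_fast path keeps working. A small, hypothetical sketch of that behaviour:

    from joblib.externals.cloudpickle import cloudpickle_fast

    # `dumps` is not defined in cloudpickle_fast itself; the module-level
    # __getattr__ above forwards the lookup to the main cloudpickle module.
    data = cloudpickle_fast.dumps({"answer": 42})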
env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.17 kB). View file
 
env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/_base.cpython-310.pyc ADDED
Binary file (741 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/initializers.cpython-310.pyc ADDED
Binary file (2.38 kB). View file
 
env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/process_executor.cpython-310.pyc ADDED
Binary file (33 kB). View file
 
env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/__init__.py ADDED
@@ -0,0 +1,14 @@
1
+ import os
2
+ from multiprocessing import synchronize
3
+
4
+ from .context import get_context
5
+
6
+
7
+ def _make_name():
8
+ return f"/loky-{os.getpid()}-{next(synchronize.SemLock._rand)}"
9
+
10
+
11
+ # monkey patch the name creation for multiprocessing
12
+ synchronize.SemLock._make_name = staticmethod(_make_name)
13
+
14
+ __all__ = ["get_context"]
env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (539 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/_posix_reduction.cpython-310.pyc ADDED
Binary file (1.81 kB). View file
 
env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/popen_loky_posix.cpython-310.pyc ADDED
Binary file (5.05 kB). View file
 
env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/process.cpython-310.pyc ADDED
Binary file (2.11 kB). View file
 
env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/_posix_reduction.py ADDED
@@ -0,0 +1,67 @@
1
+ ###############################################################################
2
+ # Extra reducers for Unix based system and connections objects
3
+ #
4
+ # author: Thomas Moreau and Olivier Grisel
5
+ #
6
+ # adapted from multiprocessing/reduction.py (17/02/2017)
7
+ # * Add adapted reduction for LokyProcesses and socket/Connection
8
+ #
9
+ import os
10
+ import socket
11
+ import _socket
12
+ from multiprocessing.connection import Connection
13
+ from multiprocessing.context import get_spawning_popen
14
+
15
+ from .reduction import register
16
+
17
+ HAVE_SEND_HANDLE = (
18
+ hasattr(socket, "CMSG_LEN")
19
+ and hasattr(socket, "SCM_RIGHTS")
20
+ and hasattr(socket.socket, "sendmsg")
21
+ )
22
+
23
+
24
+ def _mk_inheritable(fd):
25
+ os.set_inheritable(fd, True)
26
+ return fd
27
+
28
+
29
+ def DupFd(fd):
30
+ """Return a wrapper for an fd."""
31
+ popen_obj = get_spawning_popen()
32
+ if popen_obj is not None:
33
+ return popen_obj.DupFd(popen_obj.duplicate_for_child(fd))
34
+ elif HAVE_SEND_HANDLE:
35
+ from multiprocessing import resource_sharer
36
+
37
+ return resource_sharer.DupFd(fd)
38
+ else:
39
+ raise TypeError(
40
+ "Cannot pickle connection object. This object can only be "
41
+ "passed when spawning a new process"
42
+ )
43
+
44
+
45
+ def _reduce_socket(s):
46
+ df = DupFd(s.fileno())
47
+ return _rebuild_socket, (df, s.family, s.type, s.proto)
48
+
49
+
50
+ def _rebuild_socket(df, family, type, proto):
51
+ fd = df.detach()
52
+ return socket.fromfd(fd, family, type, proto)
53
+
54
+
55
+ def rebuild_connection(df, readable, writable):
56
+ fd = df.detach()
57
+ return Connection(fd, readable, writable)
58
+
59
+
60
+ def reduce_connection(conn):
61
+ df = DupFd(conn.fileno())
62
+ return rebuild_connection, (df, conn.readable, conn.writable)
63
+
64
+
65
+ register(socket.socket, _reduce_socket)
66
+ register(_socket.socket, _reduce_socket)
67
+ register(Connection, reduce_connection)
env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/_win_reduction.py ADDED
@@ -0,0 +1,18 @@
1
+ ###############################################################################
2
+ # Extra reducers for Windows system and connections objects
3
+ #
4
+ # author: Thomas Moreau and Olivier Grisel
5
+ #
6
+ # adapted from multiprocessing/reduction.py (17/02/2017)
7
+ # * Add adapted reduction for LokyProcesses and socket/PipeConnection
8
+ #
9
+ import socket
10
+ from multiprocessing import connection
11
+ from multiprocessing.reduction import _reduce_socket
12
+
13
+ from .reduction import register
14
+
15
+ # register reduction for win32 communication objects
16
+ register(socket.socket, _reduce_socket)
17
+ register(connection.Connection, connection.reduce_connection)
18
+ register(connection.PipeConnection, connection.reduce_pipe_connection)
env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/context.py ADDED
@@ -0,0 +1,378 @@
1
+ ###############################################################################
2
+ # Basic context management with LokyContext
3
+ #
4
+ # author: Thomas Moreau and Olivier Grisel
5
+ #
6
+ # adapted from multiprocessing/context.py
7
+ # * Create a context ensuring loky uses only objects that are compatible
8
+ # * Add LokyContext to the list of context of multiprocessing so loky can be
9
+ # used with multiprocessing.set_start_method
10
+ # * Implement a CFS-aware and physical-core aware cpu_count function.
11
+ #
12
+ import os
13
+ import sys
14
+ import math
15
+ import subprocess
16
+ import traceback
17
+ import warnings
18
+ import multiprocessing as mp
19
+ from multiprocessing import get_context as mp_get_context
20
+ from multiprocessing.context import BaseContext
21
+
22
+
23
+ from .process import LokyProcess, LokyInitMainProcess
24
+
25
+ # Apparently, on older Python versions, loky cannot use 61 workers on Windows
26
+ # but only 60: ¯\_(ツ)_/¯
27
+ if sys.version_info >= (3, 8):
28
+ from concurrent.futures.process import _MAX_WINDOWS_WORKERS
29
+
30
+ if sys.version_info < (3, 10):
31
+ _MAX_WINDOWS_WORKERS = _MAX_WINDOWS_WORKERS - 1
32
+ else:
33
+ # compat for versions before 3.8 which do not define this.
34
+ _MAX_WINDOWS_WORKERS = 60
35
+
36
+ START_METHODS = ["loky", "loky_init_main", "spawn"]
37
+ if sys.platform != "win32":
38
+ START_METHODS += ["fork", "forkserver"]
39
+
40
+ _DEFAULT_START_METHOD = None
41
+
42
+ # Cache for the number of physical cores to avoid repeating subprocess calls.
43
+ # It should not change during the lifetime of the program.
44
+ physical_cores_cache = None
45
+
46
+
47
+ def get_context(method=None):
48
+ # Try to overload the default context
49
+ method = method or _DEFAULT_START_METHOD or "loky"
50
+ if method == "fork":
51
+ # If 'fork' is explicitly requested, warn user about potential issues.
52
+ warnings.warn(
53
+ "`fork` start method should not be used with "
54
+ "`loky` as it does not respect POSIX. Try using "
55
+ "`spawn` or `loky` instead.",
56
+ UserWarning,
57
+ )
58
+ try:
59
+ return mp_get_context(method)
60
+ except ValueError:
61
+ raise ValueError(
62
+ f"Unknown context '{method}'. Value should be in "
63
+ f"{START_METHODS}."
64
+ )
65
+
66
+
67
+ def set_start_method(method, force=False):
68
+ global _DEFAULT_START_METHOD
69
+ if _DEFAULT_START_METHOD is not None and not force:
70
+ raise RuntimeError("context has already been set")
71
+ assert method is None or method in START_METHODS, (
72
+ f"'{method}' is not a valid start_method. It should be in "
73
+ f"{START_METHODS}"
74
+ )
75
+
76
+ _DEFAULT_START_METHOD = method
77
+
78
+
79
+ def get_start_method():
80
+ return _DEFAULT_START_METHOD
81
+
82
+
83
+ def cpu_count(only_physical_cores=False):
84
+ """Return the number of CPUs the current process can use.
85
+
86
+ The returned number of CPUs accounts for:
87
+ * the number of CPUs in the system, as given by
88
+ ``multiprocessing.cpu_count``;
89
+ * the CPU affinity settings of the current process
90
+ (available on some Unix systems);
91
+ * Cgroup CPU bandwidth limit (available on Linux only, typically
92
+ set by docker and similar container orchestration systems);
93
+ * the value of the LOKY_MAX_CPU_COUNT environment variable if defined.
94
+ and is given as the minimum of these constraints.
95
+
96
+ If ``only_physical_cores`` is True, return the number of physical cores
97
+ instead of the number of logical cores (hyperthreading / SMT). Note that
98
+ this option is not enforced if the number of usable cores is controlled in
99
+ any other way such as: process affinity, Cgroup restricted CPU bandwidth
100
+ or the LOKY_MAX_CPU_COUNT environment variable. If the number of physical
101
+ cores is not found, return the number of logical cores.
102
+
103
+ Note that on Windows, the returned number of CPUs cannot exceed 61 (or 60 for
104
+ Python < 3.10), see:
105
+ https://bugs.python.org/issue26903.
106
+
107
+ It is also always greater than or equal to 1.
108
+ """
109
+ # Note: os.cpu_count() is allowed to return None in its docstring
110
+ os_cpu_count = os.cpu_count() or 1
111
+ if sys.platform == "win32":
112
+ # On Windows, attempting to use more than 61 CPUs would result in an
113
+ # OS-level error. See https://bugs.python.org/issue26903. According to
114
+ # https://learn.microsoft.com/en-us/windows/win32/procthread/processor-groups
115
+ # it might be possible to go beyond with a lot of extra work but this
116
+ # does not look easy.
117
+ os_cpu_count = min(os_cpu_count, _MAX_WINDOWS_WORKERS)
118
+
119
+ cpu_count_user = _cpu_count_user(os_cpu_count)
120
+ aggregate_cpu_count = max(min(os_cpu_count, cpu_count_user), 1)
121
+
122
+ if not only_physical_cores:
123
+ return aggregate_cpu_count
124
+
125
+ if cpu_count_user < os_cpu_count:
126
+ # Respect user setting
127
+ return max(cpu_count_user, 1)
128
+
129
+ cpu_count_physical, exception = _count_physical_cores()
130
+ if cpu_count_physical != "not found":
131
+ return cpu_count_physical
132
+
133
+ # Fallback to default behavior
134
+ if exception is not None:
135
+ # warns only the first time
136
+ warnings.warn(
137
+ "Could not find the number of physical cores for the "
138
+ f"following reason:\n{exception}\n"
139
+ "Returning the number of logical cores instead. You can "
140
+ "silence this warning by setting LOKY_MAX_CPU_COUNT to "
141
+ "the number of cores you want to use."
142
+ )
143
+ traceback.print_tb(exception.__traceback__)
144
+
145
+ return aggregate_cpu_count
146
+
147
+
148
+ def _cpu_count_cgroup(os_cpu_count):
149
+ # Cgroup CPU bandwidth limit available in Linux since 2.6 kernel
150
+ cpu_max_fname = "/sys/fs/cgroup/cpu.max"
151
+ cfs_quota_fname = "/sys/fs/cgroup/cpu/cpu.cfs_quota_us"
152
+ cfs_period_fname = "/sys/fs/cgroup/cpu/cpu.cfs_period_us"
153
+ if os.path.exists(cpu_max_fname):
154
+ # cgroup v2
155
+ # https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html
156
+ with open(cpu_max_fname) as fh:
157
+ cpu_quota_us, cpu_period_us = fh.read().strip().split()
158
+ elif os.path.exists(cfs_quota_fname) and os.path.exists(cfs_period_fname):
159
+ # cgroup v1
160
+ # https://www.kernel.org/doc/html/latest/scheduler/sched-bwc.html#management
161
+ with open(cfs_quota_fname) as fh:
162
+ cpu_quota_us = fh.read().strip()
163
+ with open(cfs_period_fname) as fh:
164
+ cpu_period_us = fh.read().strip()
165
+ else:
166
+ # No Cgroup CPU bandwidth limit (e.g. non-Linux platform)
167
+ cpu_quota_us = "max"
168
+ cpu_period_us = 100_000 # unused, for consistency with default values
169
+
170
+ if cpu_quota_us == "max":
171
+ # No active Cgroup quota on a Cgroup-capable platform
172
+ return os_cpu_count
173
+ else:
174
+ cpu_quota_us = int(cpu_quota_us)
175
+ cpu_period_us = int(cpu_period_us)
176
+ if cpu_quota_us > 0 and cpu_period_us > 0:
177
+ return math.ceil(cpu_quota_us / cpu_period_us)
178
+ else: # pragma: no cover
179
+ # Setting a negative cpu_quota_us value is a valid way to disable
180
+ # cgroup CPU bandwidth limits
181
+ return os_cpu_count
182
+
183
+
184
+ def _cpu_count_affinity(os_cpu_count):
185
+ # Number of available CPUs given affinity settings
186
+ if hasattr(os, "sched_getaffinity"):
187
+ try:
188
+ return len(os.sched_getaffinity(0))
189
+ except NotImplementedError:
190
+ pass
191
+
192
+ # On PyPy and possibly other platforms, os.sched_getaffinity does not exist
193
+ # or raises NotImplementedError, let's try with psutil if installed.
194
+ try:
195
+ import psutil
196
+
197
+ p = psutil.Process()
198
+ if hasattr(p, "cpu_affinity"):
199
+ return len(p.cpu_affinity())
200
+
201
+ except ImportError: # pragma: no cover
202
+ if (
203
+ sys.platform == "linux"
204
+ and os.environ.get("LOKY_MAX_CPU_COUNT") is None
205
+ ):
206
+ # PyPy does not implement os.sched_getaffinity on Linux which
207
+ # can cause severe oversubscription problems. Better warn the
208
+ # user in this particularly pathological case which can wreak
209
+ # havoc, typically on CI workers.
210
+ warnings.warn(
211
+ "Failed to inspect CPU affinity constraints on this system. "
212
+ "Please install psutil or explicitly set LOKY_MAX_CPU_COUNT."
213
+ )
214
+
215
+ # This can happen for platforms that do not implement any kind of CPU
216
+ # affinity such as macOS-based platforms.
217
+ return os_cpu_count
218
+
219
+
220
+ def _cpu_count_user(os_cpu_count):
221
+ """Number of user defined available CPUs"""
222
+ cpu_count_affinity = _cpu_count_affinity(os_cpu_count)
223
+
224
+ cpu_count_cgroup = _cpu_count_cgroup(os_cpu_count)
225
+
226
+ # User defined soft-limit passed as a loky specific environment variable.
227
+ cpu_count_loky = int(os.environ.get("LOKY_MAX_CPU_COUNT", os_cpu_count))
228
+
229
+ return min(cpu_count_affinity, cpu_count_cgroup, cpu_count_loky)
230
+
231
+
232
+ def _count_physical_cores():
233
+ """Return a tuple (number of physical cores, exception)
234
+
235
+ If the number of physical cores is found, exception is set to None.
236
+ If it has not been found, return ("not found", exception).
237
+
238
+ The number of physical cores is cached to avoid repeating subprocess calls.
239
+ """
240
+ exception = None
241
+
242
+ # First check if the value is cached
243
+ global physical_cores_cache
244
+ if physical_cores_cache is not None:
245
+ return physical_cores_cache, exception
246
+
247
+ # Not cached yet, find it
248
+ try:
249
+ if sys.platform == "linux":
250
+ cpu_info = subprocess.run(
251
+ "lscpu --parse=core".split(), capture_output=True, text=True
252
+ )
253
+ cpu_info = cpu_info.stdout.splitlines()
254
+ cpu_info = {line for line in cpu_info if not line.startswith("#")}
255
+ cpu_count_physical = len(cpu_info)
256
+ elif sys.platform == "win32":
257
+ cpu_info = subprocess.run(
258
+ "wmic CPU Get NumberOfCores /Format:csv".split(),
259
+ capture_output=True,
260
+ text=True,
261
+ )
262
+ cpu_info = cpu_info.stdout.splitlines()
263
+ cpu_info = [
264
+ l.split(",")[1]
265
+ for l in cpu_info
266
+ if (l and l != "Node,NumberOfCores")
267
+ ]
268
+ cpu_count_physical = sum(map(int, cpu_info))
269
+ elif sys.platform == "darwin":
270
+ cpu_info = subprocess.run(
271
+ "sysctl -n hw.physicalcpu".split(),
272
+ capture_output=True,
273
+ text=True,
274
+ )
275
+ cpu_info = cpu_info.stdout
276
+ cpu_count_physical = int(cpu_info)
277
+ else:
278
+ raise NotImplementedError(f"unsupported platform: {sys.platform}")
279
+
280
+ # if cpu_count_physical < 1, we did not find a valid value
281
+ if cpu_count_physical < 1:
282
+ raise ValueError(f"found {cpu_count_physical} physical cores < 1")
283
+
284
+ except Exception as e:
285
+ exception = e
286
+ cpu_count_physical = "not found"
287
+
288
+ # Put the result in cache
289
+ physical_cores_cache = cpu_count_physical
290
+
291
+ return cpu_count_physical, exception
292
+
293
+
294
+ class LokyContext(BaseContext):
295
+ """Context relying on the LokyProcess."""
296
+
297
+ _name = "loky"
298
+ Process = LokyProcess
299
+ cpu_count = staticmethod(cpu_count)
300
+
301
+ def Queue(self, maxsize=0, reducers=None):
302
+ """Returns a queue object"""
303
+ from .queues import Queue
304
+
305
+ return Queue(maxsize, reducers=reducers, ctx=self.get_context())
306
+
307
+ def SimpleQueue(self, reducers=None):
308
+ """Returns a queue object"""
309
+ from .queues import SimpleQueue
310
+
311
+ return SimpleQueue(reducers=reducers, ctx=self.get_context())
312
+
313
+ if sys.platform != "win32":
314
+ """For Unix platforms, use our custom implementation of synchronize
315
+ ensuring that we use the loky.backend.resource_tracker to clean up
316
+ the semaphores in case of a worker crash.
317
+ """
318
+
319
+ def Semaphore(self, value=1):
320
+ """Returns a semaphore object"""
321
+ from .synchronize import Semaphore
322
+
323
+ return Semaphore(value=value)
324
+
325
+ def BoundedSemaphore(self, value):
326
+ """Returns a bounded semaphore object"""
327
+ from .synchronize import BoundedSemaphore
328
+
329
+ return BoundedSemaphore(value)
330
+
331
+ def Lock(self):
332
+ """Returns a lock object"""
333
+ from .synchronize import Lock
334
+
335
+ return Lock()
336
+
337
+ def RLock(self):
338
+ """Returns a recurrent lock object"""
339
+ from .synchronize import RLock
340
+
341
+ return RLock()
342
+
343
+ def Condition(self, lock=None):
344
+ """Returns a condition object"""
345
+ from .synchronize import Condition
346
+
347
+ return Condition(lock)
348
+
349
+ def Event(self):
350
+ """Returns an event object"""
351
+ from .synchronize import Event
352
+
353
+ return Event()
354
+
355
+
356
+ class LokyInitMainContext(LokyContext):
357
+ """Extra context with LokyProcess, which does load the main module
358
+
359
+ This context is used for compatibility in the case ``cloudpickle`` is not
360
+ present on the running system. This makes it possible to load functions defined in
361
+ the ``main`` module, using proper safeguards. The declaration of the
362
+ ``executor`` should be protected by ``if __name__ == "__main__":`` and the
363
+ functions and variables used from main should be defined outside this block.
364
+
365
+ This mimics the default behavior of multiprocessing under Windows and the
366
+ behavior of the ``spawn`` start method on a posix system.
367
+ For more details, see the end of the following section of python doc
368
+ https://docs.python.org/3/library/multiprocessing.html#multiprocessing-programming
369
+ """
370
+
371
+ _name = "loky_init_main"
372
+ Process = LokyInitMainProcess
373
+
374
+
375
+ # Register loky context so it works with multiprocessing.get_context
376
+ ctx_loky = LokyContext()
377
+ mp.context._concrete_contexts["loky"] = ctx_loky
378
+ mp.context._concrete_contexts["loky_init_main"] = LokyInitMainContext()
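Editorial note, not part of the uploaded file: a short, hypothetical sketch of how the context module above is typically queried, assuming the vendored import path joblib.externals.loky shown in the file header:

    import multiprocessing as mp
    from joblib.externals.loky.backend.context import cpu_count, get_context

    # CPU budget after the affinity, cgroup and LOKY_MAX_CPU_COUNT constraints.
    n_logical = cpu_count()
    # Physical core count; falls back to the logical count when it cannot be
    # determined (see _count_physical_cores above).
    n_physical = cpu_count(only_physical_cores=True)

    # The "loky" start method is registered with multiprocessing above, so the
    # same LokyContext instance is reachable from both entry points.
    ctx = get_context("loky")
    assert mp.get_context("loky") is ctx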
env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/fork_exec.py ADDED
@@ -0,0 +1,43 @@
1
+ ###############################################################################
2
+ # Launch a subprocess using forkexec and make sure only the needed fds are
3
+ # shared between the two processes.
4
+ #
5
+ # author: Thomas Moreau and Olivier Grisel
6
+ #
7
+ import os
8
+ import sys
9
+
10
+
11
+ def close_fds(keep_fds): # pragma: no cover
12
+ """Close all the file descriptors except those in keep_fds."""
13
+
14
+ # Make sure to keep stdout and stderr open for logging purpose
15
+ keep_fds = {*keep_fds, 1, 2}
16
+
17
+ # We try to retrieve all the open fds
18
+ try:
19
+ open_fds = {int(fd) for fd in os.listdir("/proc/self/fd")}
20
+ except FileNotFoundError:
21
+ import resource
22
+
23
+ max_nfds = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
24
+ open_fds = {*range(max_nfds)}
25
+
26
+ for i in open_fds - keep_fds:
27
+ try:
28
+ os.close(i)
29
+ except OSError:
30
+ pass
31
+
32
+
33
+ def fork_exec(cmd, keep_fds, env=None):
34
+ # copy the environment variables to set in the child process
35
+ env = env or {}
36
+ child_env = {**os.environ, **env}
37
+
38
+ pid = os.fork()
39
+ if pid == 0: # pragma: no cover
40
+ close_fds(keep_fds)
41
+ os.execve(sys.executable, cmd, child_env)
42
+ else:
43
+ return pid
env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/popen_loky_posix.py ADDED
@@ -0,0 +1,193 @@
1
+ ###############################################################################
2
+ # Popen for LokyProcess.
3
+ #
4
+ # author: Thomas Moreau and Olivier Grisel
5
+ #
6
+ import os
7
+ import sys
8
+ import signal
9
+ import pickle
10
+ from io import BytesIO
11
+ from multiprocessing import util, process
12
+ from multiprocessing.connection import wait
13
+ from multiprocessing.context import set_spawning_popen
14
+
15
+ from . import reduction, resource_tracker, spawn
16
+
17
+
18
+ __all__ = ["Popen"]
19
+
20
+
21
+ #
22
+ # Wrapper for an fd used while launching a process
23
+ #
24
+
25
+
26
+ class _DupFd:
27
+ def __init__(self, fd):
28
+ self.fd = reduction._mk_inheritable(fd)
29
+
30
+ def detach(self):
31
+ return self.fd
32
+
33
+
34
+ #
35
+ # Start child process using subprocess.Popen
36
+ #
37
+
38
+
39
+ class Popen:
40
+ method = "loky"
41
+ DupFd = _DupFd
42
+
43
+ def __init__(self, process_obj):
44
+ sys.stdout.flush()
45
+ sys.stderr.flush()
46
+ self.returncode = None
47
+ self._fds = []
48
+ self._launch(process_obj)
49
+
50
+ def duplicate_for_child(self, fd):
51
+ self._fds.append(fd)
52
+ return reduction._mk_inheritable(fd)
53
+
54
+ def poll(self, flag=os.WNOHANG):
55
+ if self.returncode is None:
56
+ while True:
57
+ try:
58
+ pid, sts = os.waitpid(self.pid, flag)
59
+ except OSError:
60
+ # Child process not yet created. See #1731717
61
+ # e.errno == errno.ECHILD == 10
62
+ return None
63
+ else:
64
+ break
65
+ if pid == self.pid:
66
+ if os.WIFSIGNALED(sts):
67
+ self.returncode = -os.WTERMSIG(sts)
68
+ else:
69
+ assert os.WIFEXITED(sts)
70
+ self.returncode = os.WEXITSTATUS(sts)
71
+ return self.returncode
72
+
73
+ def wait(self, timeout=None):
74
+ if self.returncode is None:
75
+ if timeout is not None:
76
+ if not wait([self.sentinel], timeout):
77
+ return None
78
+ # This shouldn't block if wait() returned successfully.
79
+ return self.poll(os.WNOHANG if timeout == 0.0 else 0)
80
+ return self.returncode
81
+
82
+ def terminate(self):
83
+ if self.returncode is None:
84
+ try:
85
+ os.kill(self.pid, signal.SIGTERM)
86
+ except ProcessLookupError:
87
+ pass
88
+ except OSError:
89
+ if self.wait(timeout=0.1) is None:
90
+ raise
91
+
92
+ def _launch(self, process_obj):
93
+
94
+ tracker_fd = resource_tracker._resource_tracker.getfd()
95
+
96
+ fp = BytesIO()
97
+ set_spawning_popen(self)
98
+ try:
99
+ prep_data = spawn.get_preparation_data(
100
+ process_obj._name,
101
+ getattr(process_obj, "init_main_module", True),
102
+ )
103
+ reduction.dump(prep_data, fp)
104
+ reduction.dump(process_obj, fp)
105
+
106
+ finally:
107
+ set_spawning_popen(None)
108
+
109
+ try:
110
+ parent_r, child_w = os.pipe()
111
+ child_r, parent_w = os.pipe()
112
+ # for fd in self._fds:
113
+ # _mk_inheritable(fd)
114
+
115
+ cmd_python = [sys.executable]
116
+ cmd_python += ["-m", self.__module__]
117
+ cmd_python += ["--process-name", str(process_obj.name)]
118
+ cmd_python += ["--pipe", str(reduction._mk_inheritable(child_r))]
119
+ reduction._mk_inheritable(child_w)
120
+ reduction._mk_inheritable(tracker_fd)
121
+ self._fds += [child_r, child_w, tracker_fd]
122
+ if sys.version_info >= (3, 8) and os.name == "posix":
123
+ mp_tracker_fd = prep_data["mp_tracker_args"]["fd"]
124
+ self.duplicate_for_child(mp_tracker_fd)
125
+
126
+ from .fork_exec import fork_exec
127
+
128
+ pid = fork_exec(cmd_python, self._fds, env=process_obj.env)
129
+ util.debug(
130
+ f"launched python with pid {pid} and cmd:\n{cmd_python}"
131
+ )
132
+ self.sentinel = parent_r
133
+
134
+ method = "getbuffer"
135
+ if not hasattr(fp, method):
136
+ method = "getvalue"
137
+ with os.fdopen(parent_w, "wb") as f:
138
+ f.write(getattr(fp, method)())
139
+ self.pid = pid
140
+ finally:
141
+ if parent_r is not None:
142
+ util.Finalize(self, os.close, (parent_r,))
143
+ for fd in (child_r, child_w):
144
+ if fd is not None:
145
+ os.close(fd)
146
+
147
+ @staticmethod
148
+ def thread_is_spawning():
149
+ return True
150
+
151
+
152
+ if __name__ == "__main__":
153
+ import argparse
154
+
155
+ parser = argparse.ArgumentParser("Command line parser")
156
+ parser.add_argument(
157
+ "--pipe", type=int, required=True, help="File handle for the pipe"
158
+ )
159
+ parser.add_argument(
160
+ "--process-name",
161
+ type=str,
162
+ default=None,
163
+ help="Identifier for debugging purpose",
164
+ )
165
+
166
+ args = parser.parse_args()
167
+
168
+ info = {}
169
+ exitcode = 1
170
+ try:
171
+ with os.fdopen(args.pipe, "rb") as from_parent:
172
+ process.current_process()._inheriting = True
173
+ try:
174
+ prep_data = pickle.load(from_parent)
175
+ spawn.prepare(prep_data)
176
+ process_obj = pickle.load(from_parent)
177
+ finally:
178
+ del process.current_process()._inheriting
179
+
180
+ exitcode = process_obj._bootstrap()
181
+ except Exception:
182
+ print("\n\n" + "-" * 80)
183
+ print(f"{args.process_name} failed with traceback: ")
184
+ print("-" * 80)
185
+ import traceback
186
+
187
+ print(traceback.format_exc())
188
+ print("\n" + "-" * 80)
189
+ finally:
190
+ if from_parent is not None:
191
+ from_parent.close()
192
+
193
+ sys.exit(exitcode)
env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/popen_loky_win32.py ADDED
@@ -0,0 +1,173 @@
1
+ import os
2
+ import sys
3
+ import msvcrt
4
+ import _winapi
5
+ from pickle import load
6
+ from multiprocessing import process, util
7
+ from multiprocessing.context import set_spawning_popen
8
+ from multiprocessing.popen_spawn_win32 import Popen as _Popen
9
+
10
+ from . import reduction, spawn
11
+
12
+
13
+ __all__ = ["Popen"]
14
+
15
+ #
16
+ #
17
+ #
18
+
19
+
20
+ def _path_eq(p1, p2):
21
+ return p1 == p2 or os.path.normcase(p1) == os.path.normcase(p2)
22
+
23
+
24
+ WINENV = hasattr(sys, "_base_executable") and not _path_eq(
25
+ sys.executable, sys._base_executable
26
+ )
27
+
28
+
29
+ def _close_handles(*handles):
30
+ for handle in handles:
31
+ _winapi.CloseHandle(handle)
32
+
33
+
34
+ #
35
+ # We define a Popen class similar to the one from subprocess, but
36
+ # whose constructor takes a process object as its argument.
37
+ #
38
+
39
+
40
+ class Popen(_Popen):
41
+ """
42
+ Start a subprocess to run the code of a process object.
43
+
44
+ We differ from the cpython implementation in the way we handle environment
45
+ variables: we modify them in the child processes before importing any
46
+ library, in order to control the number of threads in C-level
47
+ threadpools.
48
+
49
+ We also use the loky preparation data, in particular to handle main_module
50
+ inits and the loky resource tracker.
51
+ """
52
+
53
+ method = "loky"
54
+
55
+ def __init__(self, process_obj):
56
+ prep_data = spawn.get_preparation_data(
57
+ process_obj._name, getattr(process_obj, "init_main_module", True)
58
+ )
59
+
60
+ # read end of pipe will be duplicated by the child process
61
+ # -- see spawn_main() in spawn.py.
62
+ #
63
+ # bpo-33929: Previously, the read end of pipe was "stolen" by the child
64
+ # process, but it leaked a handle if the child process had been
65
+ # terminated before it could steal the handle from the parent process.
66
+ rhandle, whandle = _winapi.CreatePipe(None, 0)
67
+ wfd = msvcrt.open_osfhandle(whandle, 0)
68
+ cmd = get_command_line(parent_pid=os.getpid(), pipe_handle=rhandle)
69
+
70
+ python_exe = spawn.get_executable()
71
+
72
+ # copy the environment variables to set in the child process
73
+ child_env = {**os.environ, **process_obj.env}
74
+
75
+ # bpo-35797: When running in a venv, we bypass the redirect
76
+ # executor and launch our base Python.
77
+ if WINENV and _path_eq(python_exe, sys.executable):
78
+ cmd[0] = python_exe = sys._base_executable
79
+ child_env["__PYVENV_LAUNCHER__"] = sys.executable
80
+
81
+ cmd = " ".join(f'"{x}"' for x in cmd)
82
+
83
+ with open(wfd, "wb") as to_child:
84
+ # start process
85
+ try:
86
+ hp, ht, pid, _ = _winapi.CreateProcess(
87
+ python_exe,
88
+ cmd,
89
+ None,
90
+ None,
91
+ False,
92
+ 0,
93
+ child_env,
94
+ None,
95
+ None,
96
+ )
97
+ _winapi.CloseHandle(ht)
98
+ except BaseException:
99
+ _winapi.CloseHandle(rhandle)
100
+ raise
101
+
102
+ # set attributes of self
103
+ self.pid = pid
104
+ self.returncode = None
105
+ self._handle = hp
106
+ self.sentinel = int(hp)
107
+ self.finalizer = util.Finalize(
108
+ self, _close_handles, (self.sentinel, int(rhandle))
109
+ )
110
+
111
+ # send information to child
112
+ set_spawning_popen(self)
113
+ try:
114
+ reduction.dump(prep_data, to_child)
115
+ reduction.dump(process_obj, to_child)
116
+ finally:
117
+ set_spawning_popen(None)
118
+
119
+
120
+ def get_command_line(pipe_handle, parent_pid, **kwds):
121
+ """Returns prefix of command line used for spawning a child process."""
122
+ if getattr(sys, "frozen", False):
123
+ return [sys.executable, "--multiprocessing-fork", pipe_handle]
124
+ else:
125
+ prog = (
126
+ "from joblib.externals.loky.backend.popen_loky_win32 import main; "
127
+ f"main(pipe_handle={pipe_handle}, parent_pid={parent_pid})"
128
+ )
129
+ opts = util._args_from_interpreter_flags()
130
+ return [
131
+ spawn.get_executable(),
132
+ *opts,
133
+ "-c",
134
+ prog,
135
+ "--multiprocessing-fork",
136
+ ]
137
+
138
+
139
+ def is_forking(argv):
140
+ """Return whether commandline indicates we are forking."""
141
+ if len(argv) >= 2 and argv[1] == "--multiprocessing-fork":
142
+ return True
143
+ else:
144
+ return False
145
+
146
+
147
+ def main(pipe_handle, parent_pid=None):
148
+ """Run code specified by data received over pipe."""
149
+ assert is_forking(sys.argv), "Not forking"
150
+
151
+ if parent_pid is not None:
152
+ source_process = _winapi.OpenProcess(
153
+ _winapi.SYNCHRONIZE | _winapi.PROCESS_DUP_HANDLE, False, parent_pid
154
+ )
155
+ else:
156
+ source_process = None
157
+ new_handle = reduction.duplicate(
158
+ pipe_handle, source_process=source_process
159
+ )
160
+ fd = msvcrt.open_osfhandle(new_handle, os.O_RDONLY)
161
+ parent_sentinel = source_process
162
+
163
+ with os.fdopen(fd, "rb", closefd=True) as from_parent:
164
+ process.current_process()._inheriting = True
165
+ try:
166
+ preparation_data = load(from_parent)
167
+ spawn.prepare(preparation_data, parent_sentinel)
168
+ self = load(from_parent)
169
+ finally:
170
+ del process.current_process()._inheriting
171
+
172
+ exitcode = self._bootstrap(parent_sentinel)
173
+ sys.exit(exitcode)
env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/process.py ADDED
@@ -0,0 +1,85 @@
1
+ ###############################################################################
2
+ # LokyProcess implementation
3
+ #
4
+ # authors: Thomas Moreau and Olivier Grisel
5
+ #
6
+ # based on multiprocessing/process.py (17/02/2017)
7
+ #
8
+ import sys
9
+ from multiprocessing.context import assert_spawning
10
+ from multiprocessing.process import BaseProcess
11
+
12
+
13
+ class LokyProcess(BaseProcess):
14
+ _start_method = "loky"
15
+
16
+ def __init__(
17
+ self,
18
+ group=None,
19
+ target=None,
20
+ name=None,
21
+ args=(),
22
+ kwargs={},
23
+ daemon=None,
24
+ init_main_module=False,
25
+ env=None,
26
+ ):
27
+ super().__init__(
28
+ group=group,
29
+ target=target,
30
+ name=name,
31
+ args=args,
32
+ kwargs=kwargs,
33
+ daemon=daemon,
34
+ )
35
+ self.env = {} if env is None else env
36
+ self.authkey = self.authkey
37
+ self.init_main_module = init_main_module
38
+
39
+ @staticmethod
40
+ def _Popen(process_obj):
41
+ if sys.platform == "win32":
42
+ from .popen_loky_win32 import Popen
43
+ else:
44
+ from .popen_loky_posix import Popen
45
+ return Popen(process_obj)
46
+
47
+
48
+ class LokyInitMainProcess(LokyProcess):
49
+ _start_method = "loky_init_main"
50
+
51
+ def __init__(
52
+ self,
53
+ group=None,
54
+ target=None,
55
+ name=None,
56
+ args=(),
57
+ kwargs={},
58
+ daemon=None,
59
+ ):
60
+ super().__init__(
61
+ group=group,
62
+ target=target,
63
+ name=name,
64
+ args=args,
65
+ kwargs=kwargs,
66
+ daemon=daemon,
67
+ init_main_module=True,
68
+ )
69
+
70
+
71
+ #
72
+ # We subclass bytes to avoid accidental transmission of auth keys over the network
73
+ #
74
+
75
+
76
+ class AuthenticationKey(bytes):
77
+ def __reduce__(self):
78
+ try:
79
+ assert_spawning(self)
80
+ except RuntimeError:
81
+ raise TypeError(
82
+ "Pickling an AuthenticationKey object is "
83
+ "disallowed for security reasons"
84
+ )
85
+ return AuthenticationKey, (bytes(self),)
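Editorial note, not part of the uploaded file: a tiny, hypothetical sketch of the security behaviour of AuthenticationKey defined above; pickling it outside of the process-spawning machinery is rejected:

    import pickle
    from joblib.externals.loky.backend.process import AuthenticationKey

    key = AuthenticationKey(b"secret")
    try:
        pickle.dumps(key)  # __reduce__ calls assert_spawning and refuses
    except TypeError as exc:
        print("refused as expected:", exc)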
env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/queues.py ADDED
@@ -0,0 +1,236 @@
1
+ ###############################################################################
2
+ # Queue and SimpleQueue implementation for loky
3
+ #
4
+ # authors: Thomas Moreau, Olivier Grisel
5
+ #
6
+ # based on multiprocessing/queues.py (16/02/2017)
7
+ # * Add some custom reducers for the Queues/SimpleQueue to tweak the
8
+ # pickling process. (overload Queue._feed/SimpleQueue.put)
9
+ #
10
+ import os
11
+ import sys
12
+ import errno
13
+ import weakref
14
+ import threading
15
+ from multiprocessing import util
16
+ from multiprocessing.queues import (
17
+ Full,
18
+ Queue as mp_Queue,
19
+ SimpleQueue as mp_SimpleQueue,
20
+ _sentinel,
21
+ )
22
+ from multiprocessing.context import assert_spawning
23
+
24
+ from .reduction import dumps
25
+
26
+
27
+ __all__ = ["Queue", "SimpleQueue", "Full"]
28
+
29
+
30
+ class Queue(mp_Queue):
31
+ def __init__(self, maxsize=0, reducers=None, ctx=None):
32
+ super().__init__(maxsize=maxsize, ctx=ctx)
33
+ self._reducers = reducers
34
+
35
+ # Use custom queue set/get state to be able to reduce the custom reducers
36
+ def __getstate__(self):
37
+ assert_spawning(self)
38
+ return (
39
+ self._ignore_epipe,
40
+ self._maxsize,
41
+ self._reader,
42
+ self._writer,
43
+ self._reducers,
44
+ self._rlock,
45
+ self._wlock,
46
+ self._sem,
47
+ self._opid,
48
+ )
49
+
50
+ def __setstate__(self, state):
51
+ (
52
+ self._ignore_epipe,
53
+ self._maxsize,
54
+ self._reader,
55
+ self._writer,
56
+ self._reducers,
57
+ self._rlock,
58
+ self._wlock,
59
+ self._sem,
60
+ self._opid,
61
+ ) = state
62
+ if sys.version_info >= (3, 9):
63
+ self._reset()
64
+ else:
65
+ self._after_fork()
66
+
67
+ # Overload _start_thread to correctly call our custom _feed
68
+ def _start_thread(self):
69
+ util.debug("Queue._start_thread()")
70
+
71
+ # Start thread which transfers data from buffer to pipe
72
+ self._buffer.clear()
73
+ self._thread = threading.Thread(
74
+ target=Queue._feed,
75
+ args=(
76
+ self._buffer,
77
+ self._notempty,
78
+ self._send_bytes,
79
+ self._wlock,
80
+ self._writer.close,
81
+ self._reducers,
82
+ self._ignore_epipe,
83
+ self._on_queue_feeder_error,
84
+ self._sem,
85
+ ),
86
+ name="QueueFeederThread",
87
+ )
88
+ self._thread.daemon = True
89
+
90
+ util.debug("doing self._thread.start()")
91
+ self._thread.start()
92
+ util.debug("... done self._thread.start()")
93
+
94
+ # On process exit we will wait for data to be flushed to pipe.
95
+ #
96
+ # However, if this process created the queue then all
97
+ # processes which use the queue will be descendants of this
98
+ # process. Therefore waiting for the queue to be flushed
99
+ # is pointless once all the child processes have been joined.
100
+ created_by_this_process = self._opid == os.getpid()
101
+ if not self._joincancelled and not created_by_this_process:
102
+ self._jointhread = util.Finalize(
103
+ self._thread,
104
+ Queue._finalize_join,
105
+ [weakref.ref(self._thread)],
106
+ exitpriority=-5,
107
+ )
108
+
109
+ # Send sentinel to the thread queue object when garbage collected
110
+ self._close = util.Finalize(
111
+ self,
112
+ Queue._finalize_close,
113
+ [self._buffer, self._notempty],
114
+ exitpriority=10,
115
+ )
116
+
117
+ # Overload the _feed methods to use our custom pickling strategy.
118
+ @staticmethod
119
+ def _feed(
120
+ buffer,
121
+ notempty,
122
+ send_bytes,
123
+ writelock,
124
+ close,
125
+ reducers,
126
+ ignore_epipe,
127
+ onerror,
128
+ queue_sem,
129
+ ):
130
+ util.debug("starting thread to feed data to pipe")
131
+ nacquire = notempty.acquire
132
+ nrelease = notempty.release
133
+ nwait = notempty.wait
134
+ bpopleft = buffer.popleft
135
+ sentinel = _sentinel
136
+ if sys.platform != "win32":
137
+ wacquire = writelock.acquire
138
+ wrelease = writelock.release
139
+ else:
140
+ wacquire = None
141
+
142
+ while True:
143
+ try:
144
+ nacquire()
145
+ try:
146
+ if not buffer:
147
+ nwait()
148
+ finally:
149
+ nrelease()
150
+ try:
151
+ while True:
152
+ obj = bpopleft()
153
+ if obj is sentinel:
154
+ util.debug("feeder thread got sentinel -- exiting")
155
+ close()
156
+ return
157
+
158
+ # serialize the data before acquiring the lock
159
+ obj_ = dumps(obj, reducers=reducers)
160
+ if wacquire is None:
161
+ send_bytes(obj_)
162
+ else:
163
+ wacquire()
164
+ try:
165
+ send_bytes(obj_)
166
+ finally:
167
+ wrelease()
168
+ # Remove references early to avoid leaking memory
169
+ del obj, obj_
170
+ except IndexError:
171
+ pass
172
+ except BaseException as e:
173
+ if ignore_epipe and getattr(e, "errno", 0) == errno.EPIPE:
174
+ return
175
+ # Since this runs in a daemon thread the resources it uses
176
+ # may become unusable while the process is cleaning up.
177
+ # We ignore errors which happen after the process has
178
+ # started to clean up.
179
+ if util.is_exiting():
180
+ util.info(f"error in queue thread: {e}")
181
+ return
182
+ else:
183
+ queue_sem.release()
184
+ onerror(e, obj)
185
+
186
+ def _on_queue_feeder_error(self, e, obj):
187
+ """
188
+ Private API hook called when feeding data in the background thread
189
+ raises an exception. For overriding by concurrent.futures.
190
+ """
191
+ import traceback
192
+
193
+ traceback.print_exc()
194
+
195
+
196
+ class SimpleQueue(mp_SimpleQueue):
197
+ def __init__(self, reducers=None, ctx=None):
198
+ super().__init__(ctx=ctx)
199
+
200
+ # Add the possibility to use custom reducers
201
+ self._reducers = reducers
202
+
203
+ def close(self):
204
+ self._reader.close()
205
+ self._writer.close()
206
+
207
+ # Use custom queue set/get state to be able to reduce the custom reducers
208
+ def __getstate__(self):
209
+ assert_spawning(self)
210
+ return (
211
+ self._reader,
212
+ self._writer,
213
+ self._reducers,
214
+ self._rlock,
215
+ self._wlock,
216
+ )
217
+
218
+ def __setstate__(self, state):
219
+ (
220
+ self._reader,
221
+ self._writer,
222
+ self._reducers,
223
+ self._rlock,
224
+ self._wlock,
225
+ ) = state
226
+
227
+ # Overload put to use our customizable reducer
228
+ def put(self, obj):
229
+ # serialize the data before acquiring the lock
230
+ obj = dumps(obj, reducers=self._reducers)
231
+ if self._wlock is None:
232
+ # writes to a message oriented win32 pipe are atomic
233
+ self._writer.send_bytes(obj)
234
+ else:
235
+ with self._wlock:
236
+ self._writer.send_bytes(obj)
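A hedged sketch of the reducers hook above; MyPayload and reduce_my_payload are hypothetical names, and the queue is built on an explicit spawn context because the multiprocessing base class requires one.

from multiprocessing import get_context
from joblib.externals.loky.backend.queues import SimpleQueue  # assumed vendored path


class MyPayload:
    def __init__(self, data):
        self.data = data


def reduce_my_payload(obj):
    # Custom reduce function: rebuild the payload from its raw data only.
    return MyPayload, (obj.data,)


ctx = get_context("spawn")
queue = SimpleQueue(reducers={MyPayload: reduce_my_payload}, ctx=ctx)
queue.put(MyPayload([1, 2, 3]))  # serialized through dumps(..., reducers=...)
print(queue.get().data)          # [1, 2, 3]
queue.close()

The same reducers mapping can be passed to Queue, in which case the custom _feed thread shown above applies it when flushing the buffer to the pipe.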
env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/reduction.py ADDED
@@ -0,0 +1,224 @@
1
+ ###############################################################################
2
+ # Customizable Pickler with some basic reducers
3
+ #
4
+ # author: Thomas Moreau
5
+ #
6
+ # adapted from multiprocessing/reduction.py (17/02/2017)
7
+ # * Replace the ForkingPickler with a similar _LokyPickler,
8
+ # * Add CustomizableLokyPickler to allow customizing pickling process
9
+ # on the fly.
10
+ #
11
+ import copyreg
12
+ import io
13
+ import functools
14
+ import types
15
+ import sys
16
+ import os
17
+
18
+ from multiprocessing import util
19
+ from pickle import loads, HIGHEST_PROTOCOL
20
+
21
+ ###############################################################################
22
+ # Enable custom pickling in Loky.
23
+
24
+ _dispatch_table = {}
25
+
26
+
27
+ def register(type_, reduce_function):
28
+ _dispatch_table[type_] = reduce_function
29
+
30
+
31
+ ###############################################################################
32
+ # Register extra pickling routines to improve pickling support in loky
33
+
34
+
35
+ # make methods picklable
36
+ def _reduce_method(m):
37
+ if m.__self__ is None:
38
+ return getattr, (m.__class__, m.__func__.__name__)
39
+ else:
40
+ return getattr, (m.__self__, m.__func__.__name__)
41
+
42
+
43
+ class _C:
44
+ def f(self):
45
+ pass
46
+
47
+ @classmethod
48
+ def h(cls):
49
+ pass
50
+
51
+
52
+ register(type(_C().f), _reduce_method)
53
+ register(type(_C.h), _reduce_method)
54
+
55
+
56
+ if not hasattr(sys, "pypy_version_info"):
57
+ # PyPy uses functions instead of method_descriptors and wrapper_descriptors
58
+ def _reduce_method_descriptor(m):
59
+ return getattr, (m.__objclass__, m.__name__)
60
+
61
+ register(type(list.append), _reduce_method_descriptor)
62
+ register(type(int.__add__), _reduce_method_descriptor)
63
+
64
+
65
+ # Make partial functions picklable
66
+ def _reduce_partial(p):
67
+ return _rebuild_partial, (p.func, p.args, p.keywords or {})
68
+
69
+
70
+ def _rebuild_partial(func, args, keywords):
71
+ return functools.partial(func, *args, **keywords)
72
+
73
+
74
+ register(functools.partial, _reduce_partial)
75
+
76
+ if sys.platform != "win32":
77
+ from ._posix_reduction import _mk_inheritable # noqa: F401
78
+ else:
79
+ from . import _win_reduction # noqa: F401
80
+
81
+ # global variable to change the pickler behavior
82
+ try:
83
+ from joblib.externals import cloudpickle # noqa: F401
84
+
85
+ DEFAULT_ENV = "cloudpickle"
86
+ except ImportError:
87
+ # If cloudpickle is not present, fallback to pickle
88
+ DEFAULT_ENV = "pickle"
89
+
90
+ ENV_LOKY_PICKLER = os.environ.get("LOKY_PICKLER", DEFAULT_ENV)
91
+ _LokyPickler = None
92
+ _loky_pickler_name = None
93
+
94
+
95
+ def set_loky_pickler(loky_pickler=None):
96
+ global _LokyPickler, _loky_pickler_name
97
+
98
+ if loky_pickler is None:
99
+ loky_pickler = ENV_LOKY_PICKLER
100
+
101
+ loky_pickler_cls = None
102
+
103
+ # The default loky_pickler is cloudpickle
104
+ if loky_pickler in ["", None]:
105
+ loky_pickler = "cloudpickle"
106
+
107
+ if loky_pickler == _loky_pickler_name:
108
+ return
109
+
110
+ if loky_pickler == "cloudpickle":
111
+ from joblib.externals.cloudpickle import CloudPickler as loky_pickler_cls
112
+ else:
113
+ try:
114
+ from importlib import import_module
115
+
116
+ module_pickle = import_module(loky_pickler)
117
+ loky_pickler_cls = module_pickle.Pickler
118
+ except (ImportError, AttributeError) as e:
119
+ extra_info = (
120
+ "\nThis error occurred while setting loky_pickler to"
121
+ f" '{loky_pickler}', as required by the env variable "
122
+ "LOKY_PICKLER or the function set_loky_pickler."
123
+ )
124
+ e.args = (e.args[0] + extra_info,) + e.args[1:]
125
+ e.msg = e.args[0]
126
+ raise e
127
+
128
+ util.debug(
129
+ f"Using '{loky_pickler if loky_pickler else 'cloudpickle'}' for "
130
+ "serialization."
131
+ )
132
+
133
+ class CustomizablePickler(loky_pickler_cls):
134
+ _loky_pickler_cls = loky_pickler_cls
135
+
136
+ def _set_dispatch_table(self, dispatch_table):
137
+ for ancestor_class in self._loky_pickler_cls.mro():
138
+ dt_attribute = getattr(ancestor_class, "dispatch_table", None)
139
+ if isinstance(dt_attribute, types.MemberDescriptorType):
140
+ # Ancestor class (typically _pickle.Pickler) has a
141
+ # member_descriptor for its "dispatch_table" attribute. Use
142
+ # it to set the dispatch_table as a member instead of a
143
+ # dynamic attribute in the __dict__ of the instance,
144
+ # otherwise it will not be taken into account by the C
145
+ # implementation of the dump method if a subclass defines a
146
+ # class-level dispatch_table attribute as was done in
147
+ # cloudpickle 1.6.0:
148
+ # https://github.com/joblib/loky/pull/260
149
+ dt_attribute.__set__(self, dispatch_table)
150
+ break
151
+
152
+ # On top of the member descriptor assignment, also use setattr so that code
153
+ # that directly accesses self.dispatch_table gets a consistent view
154
+ # of the same table.
155
+ self.dispatch_table = dispatch_table
156
+
157
+ def __init__(self, writer, reducers=None, protocol=HIGHEST_PROTOCOL):
158
+ loky_pickler_cls.__init__(self, writer, protocol=protocol)
159
+ if reducers is None:
160
+ reducers = {}
161
+
162
+ if hasattr(self, "dispatch_table"):
163
+ # Force a copy that we will update without mutating any
164
+ # class-level defined dispatch_table.
165
+ loky_dt = dict(self.dispatch_table)
166
+ else:
167
+ # Use standard reducers as bases
168
+ loky_dt = copyreg.dispatch_table.copy()
169
+
170
+ # Register loky specific reducers
171
+ loky_dt.update(_dispatch_table)
172
+
173
+ # Set the new dispatch table, taking care of the fact that we
174
+ # need to use the member_descriptor when we inherit from a
175
+ # subclass of the C implementation of the Pickler base class
176
+ # with a class-level dispatch_table attribute.
177
+ self._set_dispatch_table(loky_dt)
178
+
179
+ # Register the reducers
180
+ for type, reduce_func in reducers.items():
181
+ self.register(type, reduce_func)
182
+
183
+ def register(self, type, reduce_func):
184
+ """Attach a reducer function to a given type in the dispatch table."""
185
+ self.dispatch_table[type] = reduce_func
186
+
187
+ _LokyPickler = CustomizablePickler
188
+ _loky_pickler_name = loky_pickler
189
+
190
+
191
+ def get_loky_pickler_name():
192
+ global _loky_pickler_name
193
+ return _loky_pickler_name
194
+
195
+
196
+ def get_loky_pickler():
197
+ global _LokyPickler
198
+ return _LokyPickler
199
+
200
+
201
+ # Set it to its default value
202
+ set_loky_pickler()
203
+
204
+
205
+ def dump(obj, file, reducers=None, protocol=None):
206
+ """Replacement for pickle.dump() using _LokyPickler."""
207
+ global _LokyPickler
208
+ _LokyPickler(file, reducers=reducers, protocol=protocol).dump(obj)
209
+
210
+
211
+ def dumps(obj, reducers=None, protocol=None):
212
+ global _LokyPickler
213
+
214
+ buf = io.BytesIO()
215
+ dump(obj, buf, reducers=reducers, protocol=protocol)
216
+ return buf.getbuffer()
217
+
218
+
219
+ __all__ = ["dump", "dumps", "loads", "register", "set_loky_pickler"]
220
+
221
+ if sys.platform == "win32":
222
+ from multiprocessing.reduction import duplicate
223
+
224
+ __all__ += ["duplicate"]
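A hedged round-trip sketch for the module-level register(), dumps() and loads() helpers defined above; Point and reduce_point are hypothetical names. Reducers added through register() end up in the dispatch table of every pickler instance that dump()/dumps() creates afterwards.

from joblib.externals.loky.backend.reduction import register, dumps, loads  # assumed vendored path


class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y


def reduce_point(p):
    # Rebuild a Point from its two coordinates.
    return Point, (p.x, p.y)


register(Point, reduce_point)

payload = dumps(Point(1, 2))   # a memoryview over the pickled bytes
restored = loads(payload)
print(restored.x, restored.y)  # 1 2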
env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/resource_tracker.py ADDED
@@ -0,0 +1,378 @@
1
+ ###############################################################################
2
+ # Server process to keep track of unlinked resources, like folders and
3
+ # semaphores and clean them.
4
+ #
5
+ # author: Thomas Moreau
6
+ #
7
+ # adapted from multiprocessing/semaphore_tracker.py (17/02/2017)
8
+ # * include custom spawnv_passfds to start the process
9
+ # * add some VERBOSE logging
10
+ #
11
+ # TODO: multiprocessing.resource_tracker was contributed to Python 3.8 so
12
+ # once loky drops support for Python 3.7 it might be possible to stop
13
+ # maintaining this loky-specific fork. As a consequence, it might also be
14
+ # possible to stop maintaining the loky.backend.synchronize fork of
15
+ # multiprocessing.synchronize.
16
+
17
+ #
18
+ # On Unix we run a server process which keeps track of unlinked
19
+ # resources. The server ignores SIGINT and SIGTERM and reads from a
20
+ # pipe. The resource_tracker implements a reference counting scheme: each time
21
+ # a Python process anticipates the shared usage of a resource by another
22
+ # process, it signals the resource_tracker of this shared usage, and in return,
23
+ # the resource_tracker increments the resource's reference count by 1.
24
+ # Similarly, when access to a resource is closed by a Python process, the
25
+ # process notifies the resource_tracker by asking it to decrement the
26
+ # resource's reference count by 1. When the reference count drops to 0, the
27
+ # resource_tracker attempts to clean up the underlying resource.
28
+
29
+ # Finally, every other process connected to the resource tracker has a copy of
30
+ # the writable end of the pipe used to communicate with it, so the resource
31
+ # tracker gets EOF when all other processes have exited. Then the
32
+ # resource_tracker process unlinks any remaining leaked resources (with
33
+ # reference count above 0)
34
+
35
+ # For semaphores, this is important because the system only supports a limited
36
+ # number of named semaphores, and they will not be automatically removed till
37
+ # the next reboot. Without this resource tracker process, "killall python"
38
+ # would probably leave unlinked semaphores.
39
+
40
+ # Note that this behavior differs from CPython's resource_tracker, which only
41
+ # implements a list of shared resources, and not a proper refcounting scheme.
42
+ # Also, CPython's resource tracker will only attempt to clean up those shared
43
+ # resources once all processes connected to the resource tracker have exited.
44
+
45
+
46
+ import os
47
+ import shutil
48
+ import sys
49
+ import signal
50
+ import warnings
51
+ import threading
52
+ from _multiprocessing import sem_unlink
53
+ from multiprocessing import util
54
+
55
+ from . import spawn
56
+
57
+ if sys.platform == "win32":
58
+ import _winapi
59
+ import msvcrt
60
+ from multiprocessing.reduction import duplicate
61
+
62
+
63
+ __all__ = ["ensure_running", "register", "unregister"]
64
+
65
+ _HAVE_SIGMASK = hasattr(signal, "pthread_sigmask")
66
+ _IGNORED_SIGNALS = (signal.SIGINT, signal.SIGTERM)
67
+
68
+ _CLEANUP_FUNCS = {"folder": shutil.rmtree, "file": os.unlink}
69
+
70
+ if os.name == "posix":
71
+ _CLEANUP_FUNCS["semlock"] = sem_unlink
72
+
73
+
74
+ VERBOSE = False
75
+
76
+
77
+ class ResourceTracker:
78
+ def __init__(self):
79
+ self._lock = threading.Lock()
80
+ self._fd = None
81
+ self._pid = None
82
+
83
+ def getfd(self):
84
+ self.ensure_running()
85
+ return self._fd
86
+
87
+ def ensure_running(self):
88
+ """Make sure that resource tracker process is running.
89
+
90
+ This can be run from any process. Usually a child process will use
91
+ the resource created by its parent."""
92
+ with self._lock:
93
+ if self._fd is not None:
94
+ # resource tracker was launched before, is it still running?
95
+ if self._check_alive():
96
+ # => still alive
97
+ return
98
+ # => dead, launch it again
99
+ os.close(self._fd)
100
+ if os.name == "posix":
101
+ try:
102
+ # At this point, the resource_tracker process has been
103
+ # killed or crashed. Let's remove the process entry
104
+ # from the process table to avoid zombie processes.
105
+ os.waitpid(self._pid, 0)
106
+ except OSError:
107
+ # The process was terminated or is a child from an
108
+ # ancestor of the current process.
109
+ pass
110
+ self._fd = None
111
+ self._pid = None
112
+
113
+ warnings.warn(
114
+ "resource_tracker: process died unexpectedly, "
115
+ "relaunching. Some folders/semaphores might "
116
+ "leak."
117
+ )
118
+
119
+ fds_to_pass = []
120
+ try:
121
+ fds_to_pass.append(sys.stderr.fileno())
122
+ except Exception:
123
+ pass
124
+
125
+ r, w = os.pipe()
126
+ if sys.platform == "win32":
127
+ _r = duplicate(msvcrt.get_osfhandle(r), inheritable=True)
128
+ os.close(r)
129
+ r = _r
130
+
131
+ cmd = f"from {main.__module__} import main; main({r}, {VERBOSE})"
132
+ try:
133
+ fds_to_pass.append(r)
134
+ # process will outlive us, so there is no need to wait on the pid
135
+ exe = spawn.get_executable()
136
+ args = [exe, *util._args_from_interpreter_flags(), "-c", cmd]
137
+ util.debug(f"launching resource tracker: {args}")
138
+ # bpo-33613: Register a signal mask that will block the
139
+ # signals. This signal mask will be inherited by the child
140
+ # that is going to be spawned and will protect the child from a
141
+ # race condition that can make the child die before it
142
+ # registers signal handlers for SIGINT and SIGTERM. The mask is
143
+ # unregistered after spawning the child.
144
+ try:
145
+ if _HAVE_SIGMASK:
146
+ signal.pthread_sigmask(
147
+ signal.SIG_BLOCK, _IGNORED_SIGNALS
148
+ )
149
+ pid = spawnv_passfds(exe, args, fds_to_pass)
150
+ finally:
151
+ if _HAVE_SIGMASK:
152
+ signal.pthread_sigmask(
153
+ signal.SIG_UNBLOCK, _IGNORED_SIGNALS
154
+ )
155
+ except BaseException:
156
+ os.close(w)
157
+ raise
158
+ else:
159
+ self._fd = w
160
+ self._pid = pid
161
+ finally:
162
+ if sys.platform == "win32":
163
+ _winapi.CloseHandle(r)
164
+ else:
165
+ os.close(r)
166
+
167
+ def _check_alive(self):
168
+ """Check for the existence of the resource tracker process."""
169
+ try:
170
+ self._send("PROBE", "", "")
171
+ except BrokenPipeError:
172
+ return False
173
+ else:
174
+ return True
175
+
176
+ def register(self, name, rtype):
177
+ """Register a named resource, and increment its refcount."""
178
+ self.ensure_running()
179
+ self._send("REGISTER", name, rtype)
180
+
181
+ def unregister(self, name, rtype):
182
+ """Unregister a named resource with resource tracker."""
183
+ self.ensure_running()
184
+ self._send("UNREGISTER", name, rtype)
185
+
186
+ def maybe_unlink(self, name, rtype):
187
+ """Decrement the refcount of a resource, and delete it if it hits 0"""
188
+ self.ensure_running()
189
+ self._send("MAYBE_UNLINK", name, rtype)
190
+
191
+ def _send(self, cmd, name, rtype):
192
+ if len(name) > 512:
193
+ # posix guarantees that writes to a pipe of less than PIPE_BUF
194
+ # bytes are atomic, and that PIPE_BUF >= 512
195
+ raise ValueError("name too long")
196
+ msg = f"{cmd}:{name}:{rtype}\n".encode("ascii")
197
+ nbytes = os.write(self._fd, msg)
198
+ assert nbytes == len(msg)
199
+
200
+
201
+ _resource_tracker = ResourceTracker()
202
+ ensure_running = _resource_tracker.ensure_running
203
+ register = _resource_tracker.register
204
+ maybe_unlink = _resource_tracker.maybe_unlink
205
+ unregister = _resource_tracker.unregister
206
+ getfd = _resource_tracker.getfd
207
+
208
+
209
+ def main(fd, verbose=0):
210
+ """Run resource tracker."""
211
+ # protect the process from ^C and "killall python" etc
212
+ if verbose:
213
+ util.log_to_stderr(level=util.DEBUG)
214
+
215
+ signal.signal(signal.SIGINT, signal.SIG_IGN)
216
+ signal.signal(signal.SIGTERM, signal.SIG_IGN)
217
+
218
+ if _HAVE_SIGMASK:
219
+ signal.pthread_sigmask(signal.SIG_UNBLOCK, _IGNORED_SIGNALS)
220
+
221
+ for f in (sys.stdin, sys.stdout):
222
+ try:
223
+ f.close()
224
+ except Exception:
225
+ pass
226
+
227
+ if verbose:
228
+ util.debug("Main resource tracker is running")
229
+
230
+ registry = {rtype: {} for rtype in _CLEANUP_FUNCS.keys()}
231
+ try:
232
+ # keep track of registered/unregistered resources
233
+ if sys.platform == "win32":
234
+ fd = msvcrt.open_osfhandle(fd, os.O_RDONLY)
235
+ with open(fd, "rb") as f:
236
+ while True:
237
+ line = f.readline()
238
+ if line == b"": # EOF
239
+ break
240
+ try:
241
+ splitted = line.strip().decode("ascii").split(":")
242
+ # name can potentially contain separator symbols (for
243
+ # instance folders on Windows)
244
+ cmd, name, rtype = (
245
+ splitted[0],
246
+ ":".join(splitted[1:-1]),
247
+ splitted[-1],
248
+ )
249
+
250
+ if cmd == "PROBE":
251
+ continue
252
+
253
+ if rtype not in _CLEANUP_FUNCS:
254
+ raise ValueError(
255
+ f"Cannot register {name} for automatic cleanup: "
256
+ f"unknown resource type ({rtype}). Resource type "
257
+ "should be one of the following: "
258
+ f"{list(_CLEANUP_FUNCS.keys())}"
259
+ )
260
+
261
+ if cmd == "REGISTER":
262
+ if name not in registry[rtype]:
263
+ registry[rtype][name] = 1
264
+ else:
265
+ registry[rtype][name] += 1
266
+
267
+ if verbose:
268
+ util.debug(
269
+ "[ResourceTracker] incremented refcount of "
270
+ f"{rtype} {name} "
271
+ f"(current {registry[rtype][name]})"
272
+ )
273
+ elif cmd == "UNREGISTER":
274
+ del registry[rtype][name]
275
+ if verbose:
276
+ util.debug(
277
+ f"[ResourceTracker] unregister {name} {rtype}: "
278
+ f"registry({len(registry)})"
279
+ )
280
+ elif cmd == "MAYBE_UNLINK":
281
+ registry[rtype][name] -= 1
282
+ if verbose:
283
+ util.debug(
284
+ "[ResourceTracker] decremented refcount of "
285
+ f"{rtype} {name} "
286
+ f"(current {registry[rtype][name]})"
287
+ )
288
+
289
+ if registry[rtype][name] == 0:
290
+ del registry[rtype][name]
291
+ try:
292
+ if verbose:
293
+ util.debug(
294
+ f"[ResourceTracker] unlink {name}"
295
+ )
296
+ _CLEANUP_FUNCS[rtype](name)
297
+ except Exception as e:
298
+ warnings.warn(
299
+ f"resource_tracker: {name}: {e!r}"
300
+ )
301
+
302
+ else:
303
+ raise RuntimeError(f"unrecognized command {cmd!r}")
304
+ except BaseException:
305
+ try:
306
+ sys.excepthook(*sys.exc_info())
307
+ except BaseException:
308
+ pass
309
+ finally:
310
+ # all processes have terminated; cleanup any remaining resources
311
+ def _unlink_resources(rtype_registry, rtype):
312
+ if rtype_registry:
313
+ try:
314
+ warnings.warn(
315
+ "resource_tracker: There appear to be "
316
+ f"{len(rtype_registry)} leaked {rtype} objects to "
317
+ "clean up at shutdown"
318
+ )
319
+ except Exception:
320
+ pass
321
+ for name in rtype_registry:
322
+ # For some reason the process which created and registered this
323
+ # resource has failed to unregister it. Presumably it has
324
+ # died. We therefore clean it up.
325
+ try:
326
+ _CLEANUP_FUNCS[rtype](name)
327
+ if verbose:
328
+ util.debug(f"[ResourceTracker] unlink {name}")
329
+ except Exception as e:
330
+ warnings.warn(f"resource_tracker: {name}: {e!r}")
331
+
332
+ for rtype, rtype_registry in registry.items():
333
+ if rtype == "folder":
334
+ continue
335
+ else:
336
+ _unlink_resources(rtype_registry, rtype)
337
+
338
+ # The default cleanup routine for folders deletes everything inside
339
+ # those folders recursively (which can include other resources tracked
340
+ # by the resource tracker). To limit the risk of the resource tracker
341
+ # attempting to delete a resource twice (once as part of a tracked
342
+ # folder, and once as a resource), we delete the folders after all
343
+ # other resource types.
344
+ if "folder" in registry:
345
+ _unlink_resources(registry["folder"], "folder")
346
+
347
+ if verbose:
348
+ util.debug("resource tracker shut down")
349
+
350
+
351
+ #
352
+ # Start a program with only specified fds kept open
353
+ #
354
+
355
+
356
+ def spawnv_passfds(path, args, passfds):
357
+ passfds = sorted(passfds)
358
+ if sys.platform != "win32":
359
+ errpipe_read, errpipe_write = os.pipe()
360
+ try:
361
+ from .reduction import _mk_inheritable
362
+ from .fork_exec import fork_exec
363
+
364
+ _pass = [_mk_inheritable(fd) for fd in passfds]
365
+ return fork_exec(args, _pass)
366
+ finally:
367
+ os.close(errpipe_read)
368
+ os.close(errpipe_write)
369
+ else:
370
+ cmd = " ".join(f'"{x}"' for x in args)
371
+ try:
372
+ _, ht, pid, _ = _winapi.CreateProcess(
373
+ path, cmd, None, None, True, 0, None, None, None
374
+ )
375
+ _winapi.CloseHandle(ht)
376
+ except BaseException:
377
+ pass
378
+ return pid
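A hedged sketch of the refcounting protocol described in the header comment, using the module-level helpers exposed above; the temporary folder exists only for the illustration and the actual deletion is performed asynchronously by the tracker process.

import tempfile
from joblib.externals.loky.backend import resource_tracker  # assumed vendored path

folder = tempfile.mkdtemp(prefix="loky_demo_")
# Refcount goes to 1; the tracker process is spawned on first use.
resource_tracker.register(folder, "folder")
# ... the folder could now be shared with worker processes, each of which
# would call register()/maybe_unlink() in turn ...
# Refcount drops to 0, so the tracker removes the folder (asynchronously).
resource_tracker.maybe_unlink(folder, "folder")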
env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/spawn.py ADDED
@@ -0,0 +1,250 @@
1
+ ###############################################################################
2
+ # Prepares and processes the data to set up the new process environment
3
+ #
4
+ # author: Thomas Moreau and Olivier Grisel
5
+ #
6
+ # adapted from multiprocessing/spawn.py (17/02/2017)
7
+ # * Improve logging data
8
+ #
9
+ import os
10
+ import sys
11
+ import runpy
12
+ import textwrap
13
+ import types
14
+ from multiprocessing import process, util
15
+
16
+
17
+ if sys.platform != "win32":
18
+ WINEXE = False
19
+ WINSERVICE = False
20
+ else:
21
+ import msvcrt
22
+ from multiprocessing.reduction import duplicate
23
+
24
+ WINEXE = sys.platform == "win32" and getattr(sys, "frozen", False)
25
+ WINSERVICE = sys.executable.lower().endswith("pythonservice.exe")
26
+
27
+ if WINSERVICE:
28
+ _python_exe = os.path.join(sys.exec_prefix, "python.exe")
29
+ else:
30
+ _python_exe = sys.executable
31
+
32
+
33
+ def get_executable():
34
+ return _python_exe
35
+
36
+
37
+ def _check_not_importing_main():
38
+ if getattr(process.current_process(), "_inheriting", False):
39
+ raise RuntimeError(
40
+ textwrap.dedent(
41
+ """\
42
+ An attempt has been made to start a new process before the
43
+ current process has finished its bootstrapping phase.
44
+
45
+ This probably means that you are not using fork to start your
46
+ child processes and you have forgotten to use the proper idiom
47
+ in the main module:
48
+
49
+ if __name__ == '__main__':
50
+ freeze_support()
51
+ ...
52
+
53
+ The "freeze_support()" line can be omitted if the program
54
+ is not going to be frozen to produce an executable."""
55
+ )
56
+ )
57
+
58
+
59
+ def get_preparation_data(name, init_main_module=True):
60
+ """Return info about parent needed by child to unpickle process object."""
61
+ _check_not_importing_main()
62
+ d = dict(
63
+ log_to_stderr=util._log_to_stderr,
64
+ authkey=bytes(process.current_process().authkey),
65
+ name=name,
66
+ sys_argv=sys.argv,
67
+ orig_dir=process.ORIGINAL_DIR,
68
+ dir=os.getcwd(),
69
+ )
70
+
71
+ # Send sys_path and make sure the current directory will not be changed
72
+ d["sys_path"] = [p if p != "" else process.ORIGINAL_DIR for p in sys.path]
73
+
74
+ # Make sure to pass the information if the multiprocessing logger is active
75
+ if util._logger is not None:
76
+ d["log_level"] = util._logger.getEffectiveLevel()
77
+ if util._logger.handlers:
78
+ h = util._logger.handlers[0]
79
+ d["log_fmt"] = h.formatter._fmt
80
+
81
+ # Tell the child how to communicate with the resource_tracker
82
+ from .resource_tracker import _resource_tracker
83
+
84
+ _resource_tracker.ensure_running()
85
+ d["tracker_args"] = {"pid": _resource_tracker._pid}
86
+ if sys.platform == "win32":
87
+ d["tracker_args"]["fh"] = msvcrt.get_osfhandle(_resource_tracker._fd)
88
+ else:
89
+ d["tracker_args"]["fd"] = _resource_tracker._fd
90
+
91
+ if sys.version_info >= (3, 8) and os.name == "posix":
92
+ # joblib/loky#242: allow loky processes to retrieve the resource
93
+ # tracker of their parent in case the child process unpickles
94
+ # shared_memory objects that are still tracked by multiprocessing's
95
+ # resource_tracker by default.
96
+ # XXX: this is a workaround that may be error prone: in the future, it
97
+ # would be better to have loky subclass multiprocessing's shared_memory
98
+ # to force registration of shared_memory segments via loky's
99
+ # resource_tracker.
100
+ from multiprocessing.resource_tracker import (
101
+ _resource_tracker as mp_resource_tracker,
102
+ )
103
+
104
+ # multiprocessing's resource_tracker must be running before the loky
105
+ # process is created (otherwise the child won't be able to use it if it
106
+ # is created later on)
107
+ mp_resource_tracker.ensure_running()
108
+ d["mp_tracker_args"] = {
109
+ "fd": mp_resource_tracker._fd,
110
+ "pid": mp_resource_tracker._pid,
111
+ }
112
+
113
+ # Figure out whether to initialise main in the subprocess as a module
114
+ # or through direct execution (or to leave it alone entirely)
115
+ if init_main_module:
116
+ main_module = sys.modules["__main__"]
117
+ try:
118
+ main_mod_name = getattr(main_module.__spec__, "name", None)
119
+ except BaseException:
120
+ main_mod_name = None
121
+ if main_mod_name is not None:
122
+ d["init_main_from_name"] = main_mod_name
123
+ elif sys.platform != "win32" or (not WINEXE and not WINSERVICE):
124
+ main_path = getattr(main_module, "__file__", None)
125
+ if main_path is not None:
126
+ if (
127
+ not os.path.isabs(main_path)
128
+ and process.ORIGINAL_DIR is not None
129
+ ):
130
+ main_path = os.path.join(process.ORIGINAL_DIR, main_path)
131
+ d["init_main_from_path"] = os.path.normpath(main_path)
132
+
133
+ return d
134
+
135
+
136
+ #
137
+ # Prepare current process
138
+ #
139
+ old_main_modules = []
140
+
141
+
142
+ def prepare(data, parent_sentinel=None):
143
+ """Try to get current process ready to unpickle process object."""
144
+ if "name" in data:
145
+ process.current_process().name = data["name"]
146
+
147
+ if "authkey" in data:
148
+ process.current_process().authkey = data["authkey"]
149
+
150
+ if "log_to_stderr" in data and data["log_to_stderr"]:
151
+ util.log_to_stderr()
152
+
153
+ if "log_level" in data:
154
+ util.get_logger().setLevel(data["log_level"])
155
+
156
+ if "log_fmt" in data:
157
+ import logging
158
+
159
+ util.get_logger().handlers[0].setFormatter(
160
+ logging.Formatter(data["log_fmt"])
161
+ )
162
+
163
+ if "sys_path" in data:
164
+ sys.path = data["sys_path"]
165
+
166
+ if "sys_argv" in data:
167
+ sys.argv = data["sys_argv"]
168
+
169
+ if "dir" in data:
170
+ os.chdir(data["dir"])
171
+
172
+ if "orig_dir" in data:
173
+ process.ORIGINAL_DIR = data["orig_dir"]
174
+
175
+ if "mp_tracker_args" in data:
176
+ from multiprocessing.resource_tracker import (
177
+ _resource_tracker as mp_resource_tracker,
178
+ )
179
+
180
+ mp_resource_tracker._fd = data["mp_tracker_args"]["fd"]
181
+ mp_resource_tracker._pid = data["mp_tracker_args"]["pid"]
182
+ if "tracker_args" in data:
183
+ from .resource_tracker import _resource_tracker
184
+
185
+ _resource_tracker._pid = data["tracker_args"]["pid"]
186
+ if sys.platform == "win32":
187
+ handle = data["tracker_args"]["fh"]
188
+ handle = duplicate(handle, source_process=parent_sentinel)
189
+ _resource_tracker._fd = msvcrt.open_osfhandle(handle, os.O_RDONLY)
190
+ else:
191
+ _resource_tracker._fd = data["tracker_args"]["fd"]
192
+
193
+ if "init_main_from_name" in data:
194
+ _fixup_main_from_name(data["init_main_from_name"])
195
+ elif "init_main_from_path" in data:
196
+ _fixup_main_from_path(data["init_main_from_path"])
197
+
198
+
199
+ # Multiprocessing module helpers to fix up the main module in
200
+ # spawned subprocesses
201
+ def _fixup_main_from_name(mod_name):
202
+ # __main__.py files for packages, directories, zip archives, etc, run
203
+ # their "main only" code unconditionally, so we don't even try to
204
+ # populate anything in __main__, nor do we make any changes to
205
+ # __main__ attributes
206
+ current_main = sys.modules["__main__"]
207
+ if mod_name == "__main__" or mod_name.endswith(".__main__"):
208
+ return
209
+
210
+ # If this process was forked, __main__ may already be populated
211
+ if getattr(current_main.__spec__, "name", None) == mod_name:
212
+ return
213
+
214
+ # Otherwise, __main__ may contain some non-main code where we need to
215
+ # support unpickling it properly. We rerun it as __mp_main__ and make
216
+ # the normal __main__ an alias to that
217
+ old_main_modules.append(current_main)
218
+ main_module = types.ModuleType("__mp_main__")
219
+ main_content = runpy.run_module(
220
+ mod_name, run_name="__mp_main__", alter_sys=True
221
+ )
222
+ main_module.__dict__.update(main_content)
223
+ sys.modules["__main__"] = sys.modules["__mp_main__"] = main_module
224
+
225
+
226
+ def _fixup_main_from_path(main_path):
227
+ # If this process was forked, __main__ may already be populated
228
+ current_main = sys.modules["__main__"]
229
+
230
+ # Unfortunately, the main ipython launch script historically had no
231
+ # "if __name__ == '__main__'" guard, so we work around that
232
+ # by treating it like a __main__.py file
233
+ # See https://github.com/ipython/ipython/issues/4698
234
+ main_name = os.path.splitext(os.path.basename(main_path))[0]
235
+ if main_name == "ipython":
236
+ return
237
+
238
+ # Otherwise, if __file__ already has the setting we expect,
239
+ # there's nothing more to do
240
+ if getattr(current_main, "__file__", None) == main_path:
241
+ return
242
+
243
+ # If the parent process has sent a path through rather than a module
244
+ # name we assume it is an executable script that may contain
245
+ # non-main code that needs to be executed
246
+ old_main_modules.append(current_main)
247
+ main_module = types.ModuleType("__mp_main__")
248
+ main_content = runpy.run_path(main_path, run_name="__mp_main__")
249
+ main_module.__dict__.update(main_content)
250
+ sys.modules["__main__"] = sys.modules["__mp_main__"] = main_module
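Since _check_not_importing_main above insists on the __main__ guard idiom, here is a hedged sketch of a script that respects it, assuming the standalone loky package (the same code ships vendored under joblib.externals.loky).

from loky import get_reusable_executor


def square(x):
    return x * x


if __name__ == "__main__":
    # Without this guard, a spawned child re-importing the script would try
    # to start workers itself and trip _check_not_importing_main.
    executor = get_reusable_executor(max_workers=2)
    print(list(executor.map(square, range(8))))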
env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/synchronize.py ADDED
@@ -0,0 +1,409 @@
1
+ ###############################################################################
2
+ # Synchronization primitives based on our SemLock implementation
3
+ #
4
+ # author: Thomas Moreau and Olivier Grisel
5
+ #
6
+ # adapted from multiprocessing/synchronize.py (17/02/2017)
7
+ # * Remove the ctx argument for compatibility reasons
8
+ # * Register a cleanup function with the loky resource_tracker to remove the
9
+ # semaphore when the process dies instead.
10
+ #
11
+ # TODO: investigate which Python version is required to be able to use
12
+ # multiprocessing.resource_tracker and therefore multiprocessing.synchronize
13
+ # instead of a loky-specific fork.
14
+
15
+ import os
16
+ import sys
17
+ import tempfile
18
+ import threading
19
+ import _multiprocessing
20
+ from time import time as _time
21
+ from multiprocessing import process, util
22
+ from multiprocessing.context import assert_spawning
23
+
24
+ from . import resource_tracker
25
+
26
+ __all__ = [
27
+ "Lock",
28
+ "RLock",
29
+ "Semaphore",
30
+ "BoundedSemaphore",
31
+ "Condition",
32
+ "Event",
33
+ ]
34
+ # Try to import the mp.synchronize module cleanly, if it fails
35
+ # raise ImportError for platforms lacking a working sem_open implementation.
36
+ # See issue 3770
37
+ try:
38
+ from _multiprocessing import SemLock as _SemLock
39
+ from _multiprocessing import sem_unlink
40
+ except ImportError:
41
+ raise ImportError(
42
+ "This platform lacks a functioning sem_open"
43
+ " implementation, therefore, the required"
44
+ " synchronization primitives needed will not"
45
+ " function, see issue 3770."
46
+ )
47
+
48
+ #
49
+ # Constants
50
+ #
51
+
52
+ RECURSIVE_MUTEX, SEMAPHORE = range(2)
53
+ SEM_VALUE_MAX = _multiprocessing.SemLock.SEM_VALUE_MAX
54
+
55
+
56
+ #
57
+ # Base class for semaphores and mutexes; wraps `_multiprocessing.SemLock`
58
+ #
59
+
60
+
61
+ class SemLock:
62
+
63
+ _rand = tempfile._RandomNameSequence()
64
+
65
+ def __init__(self, kind, value, maxvalue, name=None):
66
+ # unlink_now is only used on win32 or when we are using fork.
67
+ unlink_now = False
68
+ if name is None:
69
+ # Try to find an unused name for the SemLock instance.
70
+ for _ in range(100):
71
+ try:
72
+ self._semlock = _SemLock(
73
+ kind, value, maxvalue, SemLock._make_name(), unlink_now
74
+ )
75
+ except FileExistsError: # pragma: no cover
76
+ pass
77
+ else:
78
+ break
79
+ else: # pragma: no cover
80
+ raise FileExistsError("cannot find name for semaphore")
81
+ else:
82
+ self._semlock = _SemLock(kind, value, maxvalue, name, unlink_now)
83
+ self.name = name
84
+ util.debug(
85
+ f"created semlock with handle {self._semlock.handle} and name "
86
+ f'"{self.name}"'
87
+ )
88
+
89
+ self._make_methods()
90
+
91
+ def _after_fork(obj):
92
+ obj._semlock._after_fork()
93
+
94
+ util.register_after_fork(self, _after_fork)
95
+
96
+ # When the object is garbage collected or the
97
+ # process shuts down we unlink the semaphore name
98
+ resource_tracker.register(self._semlock.name, "semlock")
99
+ util.Finalize(
100
+ self, SemLock._cleanup, (self._semlock.name,), exitpriority=0
101
+ )
102
+
103
+ @staticmethod
104
+ def _cleanup(name):
105
+ try:
106
+ sem_unlink(name)
107
+ except FileNotFoundError:
108
+ # Already unlinked, possibly by user code: ignore and make sure to
109
+ # unregister the semaphore from the resource tracker.
110
+ pass
111
+ finally:
112
+ resource_tracker.unregister(name, "semlock")
113
+
114
+ def _make_methods(self):
115
+ self.acquire = self._semlock.acquire
116
+ self.release = self._semlock.release
117
+
118
+ def __enter__(self):
119
+ return self._semlock.acquire()
120
+
121
+ def __exit__(self, *args):
122
+ return self._semlock.release()
123
+
124
+ def __getstate__(self):
125
+ assert_spawning(self)
126
+ sl = self._semlock
127
+ h = sl.handle
128
+ return (h, sl.kind, sl.maxvalue, sl.name)
129
+
130
+ def __setstate__(self, state):
131
+ self._semlock = _SemLock._rebuild(*state)
132
+ util.debug(
133
+ f'recreated blocker with handle {state[0]!r} and name "{state[3]}"'
134
+ )
135
+ self._make_methods()
136
+
137
+ @staticmethod
138
+ def _make_name():
139
+ # OSX does not support long names for semaphores
140
+ return f"/loky-{os.getpid()}-{next(SemLock._rand)}"
141
+
142
+
143
+ #
144
+ # Semaphore
145
+ #
146
+
147
+
148
+ class Semaphore(SemLock):
149
+ def __init__(self, value=1):
150
+ SemLock.__init__(self, SEMAPHORE, value, SEM_VALUE_MAX)
151
+
152
+ def get_value(self):
153
+ if sys.platform == "darwin":
154
+ raise NotImplementedError("OSX does not implement sem_getvalue")
155
+ return self._semlock._get_value()
156
+
157
+ def __repr__(self):
158
+ try:
159
+ value = self._semlock._get_value()
160
+ except Exception:
161
+ value = "unknown"
162
+ return f"<{self.__class__.__name__}(value={value})>"
163
+
164
+
165
+ #
166
+ # Bounded semaphore
167
+ #
168
+
169
+
170
+ class BoundedSemaphore(Semaphore):
171
+ def __init__(self, value=1):
172
+ SemLock.__init__(self, SEMAPHORE, value, value)
173
+
174
+ def __repr__(self):
175
+ try:
176
+ value = self._semlock._get_value()
177
+ except Exception:
178
+ value = "unknown"
179
+ return (
180
+ f"<{self.__class__.__name__}(value={value}, "
181
+ f"maxvalue={self._semlock.maxvalue})>"
182
+ )
183
+
184
+
185
+ #
186
+ # Non-recursive lock
187
+ #
188
+
189
+
190
+ class Lock(SemLock):
191
+ def __init__(self):
192
+ super().__init__(SEMAPHORE, 1, 1)
193
+
194
+ def __repr__(self):
195
+ try:
196
+ if self._semlock._is_mine():
197
+ name = process.current_process().name
198
+ if threading.current_thread().name != "MainThread":
199
+ name = f"{name}|{threading.current_thread().name}"
200
+ elif self._semlock._get_value() == 1:
201
+ name = "None"
202
+ elif self._semlock._count() > 0:
203
+ name = "SomeOtherThread"
204
+ else:
205
+ name = "SomeOtherProcess"
206
+ except Exception:
207
+ name = "unknown"
208
+ return f"<{self.__class__.__name__}(owner={name})>"
209
+
210
+
211
+ #
212
+ # Recursive lock
213
+ #
214
+
215
+
216
+ class RLock(SemLock):
217
+ def __init__(self):
218
+ super().__init__(RECURSIVE_MUTEX, 1, 1)
219
+
220
+ def __repr__(self):
221
+ try:
222
+ if self._semlock._is_mine():
223
+ name = process.current_process().name
224
+ if threading.current_thread().name != "MainThread":
225
+ name = f"{name}|{threading.current_thread().name}"
226
+ count = self._semlock._count()
227
+ elif self._semlock._get_value() == 1:
228
+ name, count = "None", 0
229
+ elif self._semlock._count() > 0:
230
+ name, count = "SomeOtherThread", "nonzero"
231
+ else:
232
+ name, count = "SomeOtherProcess", "nonzero"
233
+ except Exception:
234
+ name, count = "unknown", "unknown"
235
+ return f"<{self.__class__.__name__}({name}, {count})>"
236
+
237
+
238
+ #
239
+ # Condition variable
240
+ #
241
+
242
+
243
+ class Condition:
244
+ def __init__(self, lock=None):
245
+ self._lock = lock or RLock()
246
+ self._sleeping_count = Semaphore(0)
247
+ self._woken_count = Semaphore(0)
248
+ self._wait_semaphore = Semaphore(0)
249
+ self._make_methods()
250
+
251
+ def __getstate__(self):
252
+ assert_spawning(self)
253
+ return (
254
+ self._lock,
255
+ self._sleeping_count,
256
+ self._woken_count,
257
+ self._wait_semaphore,
258
+ )
259
+
260
+ def __setstate__(self, state):
261
+ (
262
+ self._lock,
263
+ self._sleeping_count,
264
+ self._woken_count,
265
+ self._wait_semaphore,
266
+ ) = state
267
+ self._make_methods()
268
+
269
+ def __enter__(self):
270
+ return self._lock.__enter__()
271
+
272
+ def __exit__(self, *args):
273
+ return self._lock.__exit__(*args)
274
+
275
+ def _make_methods(self):
276
+ self.acquire = self._lock.acquire
277
+ self.release = self._lock.release
278
+
279
+ def __repr__(self):
280
+ try:
281
+ num_waiters = (
282
+ self._sleeping_count._semlock._get_value()
283
+ - self._woken_count._semlock._get_value()
284
+ )
285
+ except Exception:
286
+ num_waiters = "unknown"
287
+ return f"<{self.__class__.__name__}({self._lock}, {num_waiters})>"
288
+
289
+ def wait(self, timeout=None):
290
+ assert (
291
+ self._lock._semlock._is_mine()
292
+ ), "must acquire() condition before using wait()"
293
+
294
+ # indicate that this thread is going to sleep
295
+ self._sleeping_count.release()
296
+
297
+ # release lock
298
+ count = self._lock._semlock._count()
299
+ for _ in range(count):
300
+ self._lock.release()
301
+
302
+ try:
303
+ # wait for notification or timeout
304
+ return self._wait_semaphore.acquire(True, timeout)
305
+ finally:
306
+ # indicate that this thread has woken
307
+ self._woken_count.release()
308
+
309
+ # reacquire lock
310
+ for _ in range(count):
311
+ self._lock.acquire()
312
+
313
+ def notify(self):
314
+ assert self._lock._semlock._is_mine(), "lock is not owned"
315
+ assert not self._wait_semaphore.acquire(False)
316
+
317
+ # to take account of timeouts since last notify() we subtract
318
+ # woken_count from sleeping_count and rezero woken_count
319
+ while self._woken_count.acquire(False):
320
+ res = self._sleeping_count.acquire(False)
321
+ assert res
322
+
323
+ if self._sleeping_count.acquire(False): # try grabbing a sleeper
324
+ self._wait_semaphore.release() # wake up one sleeper
325
+ self._woken_count.acquire() # wait for the sleeper to wake
326
+
327
+ # rezero _wait_semaphore in case a timeout just happened
328
+ self._wait_semaphore.acquire(False)
329
+
330
+ def notify_all(self):
331
+ assert self._lock._semlock._is_mine(), "lock is not owned"
332
+ assert not self._wait_semaphore.acquire(False)
333
+
334
+ # to take account of timeouts since last notify*() we subtract
335
+ # woken_count from sleeping_count and rezero woken_count
336
+ while self._woken_count.acquire(False):
337
+ res = self._sleeping_count.acquire(False)
338
+ assert res
339
+
340
+ sleepers = 0
341
+ while self._sleeping_count.acquire(False):
342
+ self._wait_semaphore.release() # wake up one sleeper
343
+ sleepers += 1
344
+
345
+ if sleepers:
346
+ for _ in range(sleepers):
347
+ self._woken_count.acquire() # wait for a sleeper to wake
348
+
349
+ # rezero wait_semaphore in case some timeouts just happened
350
+ while self._wait_semaphore.acquire(False):
351
+ pass
352
+
353
+ def wait_for(self, predicate, timeout=None):
354
+ result = predicate()
355
+ if result:
356
+ return result
357
+ if timeout is not None:
358
+ endtime = _time() + timeout
359
+ else:
360
+ endtime = None
361
+ waittime = None
362
+ while not result:
363
+ if endtime is not None:
364
+ waittime = endtime - _time()
365
+ if waittime <= 0:
366
+ break
367
+ self.wait(waittime)
368
+ result = predicate()
369
+ return result
370
+
371
+
372
+ #
373
+ # Event
374
+ #
375
+
376
+
377
+ class Event:
378
+ def __init__(self):
379
+ self._cond = Condition(Lock())
380
+ self._flag = Semaphore(0)
381
+
382
+ def is_set(self):
383
+ with self._cond:
384
+ if self._flag.acquire(False):
385
+ self._flag.release()
386
+ return True
387
+ return False
388
+
389
+ def set(self):
390
+ with self._cond:
391
+ self._flag.acquire(False)
392
+ self._flag.release()
393
+ self._cond.notify_all()
394
+
395
+ def clear(self):
396
+ with self._cond:
397
+ self._flag.acquire(False)
398
+
399
+ def wait(self, timeout=None):
400
+ with self._cond:
401
+ if self._flag.acquire(False):
402
+ self._flag.release()
403
+ else:
404
+ self._cond.wait(timeout)
405
+
406
+ if self._flag.acquire(False):
407
+ self._flag.release()
408
+ return True
409
+ return False
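Finally, a hedged single-process sketch of the Event defined above on top of Condition and Semaphore; it assumes a platform with a working sem_open implementation (see the ImportError guard near the top of this file).

from joblib.externals.loky.backend.synchronize import Event  # assumed vendored path

event = Event()
print(event.is_set())            # False
print(event.wait(timeout=0.1))   # False: the flag is not set, so the wait times out
event.set()
print(event.is_set())            # True
print(event.wait())              # True: returns immediately once the flag is set
event.clear()
print(event.is_set())            # False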