applied-ai-018 commited on
Commit
6db9ec4
·
verified ·
1 Parent(s): 37c8cde

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. ckpts/universal/global_step40/zero/19.input_layernorm.weight/exp_avg_sq.pt +3 -0
  2. ckpts/universal/global_step40/zero/19.input_layernorm.weight/fp32.pt +3 -0
  3. ckpts/universal/global_step40/zero/20.mlp.dense_4h_to_h.weight/exp_avg.pt +3 -0
  4. ckpts/universal/global_step40/zero/20.mlp.dense_4h_to_h.weight/exp_avg_sq.pt +3 -0
  5. ckpts/universal/global_step40/zero/20.mlp.dense_4h_to_h.weight/fp32.pt +3 -0
  6. venv/lib/python3.10/site-packages/multiprocess/__pycache__/__info__.cpython-310.pyc +0 -0
  7. venv/lib/python3.10/site-packages/multiprocess/__pycache__/__init__.cpython-310.pyc +0 -0
  8. venv/lib/python3.10/site-packages/multiprocess/__pycache__/connection.cpython-310.pyc +0 -0
  9. venv/lib/python3.10/site-packages/multiprocess/__pycache__/context.cpython-310.pyc +0 -0
  10. venv/lib/python3.10/site-packages/multiprocess/__pycache__/forkserver.cpython-310.pyc +0 -0
  11. venv/lib/python3.10/site-packages/multiprocess/__pycache__/heap.cpython-310.pyc +0 -0
  12. venv/lib/python3.10/site-packages/multiprocess/__pycache__/managers.cpython-310.pyc +0 -0
  13. venv/lib/python3.10/site-packages/multiprocess/__pycache__/pool.cpython-310.pyc +0 -0
  14. venv/lib/python3.10/site-packages/multiprocess/__pycache__/popen_fork.cpython-310.pyc +0 -0
  15. venv/lib/python3.10/site-packages/multiprocess/__pycache__/popen_forkserver.cpython-310.pyc +0 -0
  16. venv/lib/python3.10/site-packages/multiprocess/__pycache__/popen_spawn_posix.cpython-310.pyc +0 -0
  17. venv/lib/python3.10/site-packages/multiprocess/__pycache__/popen_spawn_win32.cpython-310.pyc +0 -0
  18. venv/lib/python3.10/site-packages/multiprocess/__pycache__/process.cpython-310.pyc +0 -0
  19. venv/lib/python3.10/site-packages/multiprocess/__pycache__/queues.cpython-310.pyc +0 -0
  20. venv/lib/python3.10/site-packages/multiprocess/__pycache__/reduction.cpython-310.pyc +0 -0
  21. venv/lib/python3.10/site-packages/multiprocess/__pycache__/resource_sharer.cpython-310.pyc +0 -0
  22. venv/lib/python3.10/site-packages/multiprocess/__pycache__/resource_tracker.cpython-310.pyc +0 -0
  23. venv/lib/python3.10/site-packages/multiprocess/__pycache__/shared_memory.cpython-310.pyc +0 -0
  24. venv/lib/python3.10/site-packages/multiprocess/__pycache__/sharedctypes.cpython-310.pyc +0 -0
  25. venv/lib/python3.10/site-packages/multiprocess/__pycache__/spawn.cpython-310.pyc +0 -0
  26. venv/lib/python3.10/site-packages/multiprocess/__pycache__/synchronize.cpython-310.pyc +0 -0
  27. venv/lib/python3.10/site-packages/multiprocess/__pycache__/util.cpython-310.pyc +0 -0
  28. venv/lib/python3.10/site-packages/multiprocess/context.py +376 -0
  29. venv/lib/python3.10/site-packages/multiprocess/dummy/__init__.py +126 -0
  30. venv/lib/python3.10/site-packages/multiprocess/dummy/__pycache__/__init__.cpython-310.pyc +0 -0
  31. venv/lib/python3.10/site-packages/multiprocess/dummy/__pycache__/connection.cpython-310.pyc +0 -0
  32. venv/lib/python3.10/site-packages/multiprocess/dummy/connection.py +75 -0
  33. venv/lib/python3.10/site-packages/multiprocess/managers.py +1378 -0
  34. venv/lib/python3.10/site-packages/multiprocess/popen_fork.py +83 -0
  35. venv/lib/python3.10/site-packages/multiprocess/popen_spawn_posix.py +72 -0
  36. venv/lib/python3.10/site-packages/multiprocess/popen_spawn_win32.py +131 -0
  37. venv/lib/python3.10/site-packages/multiprocess/process.py +438 -0
  38. venv/lib/python3.10/site-packages/multiprocess/reduction.py +284 -0
  39. venv/lib/python3.10/site-packages/multiprocess/resource_sharer.py +154 -0
  40. venv/lib/python3.10/site-packages/multiprocess/tests/__init__.py +0 -0
  41. venv/lib/python3.10/site-packages/multiprocess/tests/__main__.py +34 -0
  42. venv/lib/python3.10/site-packages/multiprocess/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  43. venv/lib/python3.10/site-packages/multiprocess/tests/__pycache__/__main__.cpython-310.pyc +0 -0
  44. venv/lib/python3.10/site-packages/multiprocess/tests/__pycache__/mp_fork_bomb.cpython-310.pyc +0 -0
  45. venv/lib/python3.10/site-packages/multiprocess/tests/__pycache__/mp_preload.cpython-310.pyc +0 -0
  46. venv/lib/python3.10/site-packages/multiprocess/tests/__pycache__/test_multiprocessing_fork.cpython-310.pyc +0 -0
  47. venv/lib/python3.10/site-packages/multiprocess/tests/__pycache__/test_multiprocessing_forkserver.cpython-310.pyc +0 -0
  48. venv/lib/python3.10/site-packages/multiprocess/tests/__pycache__/test_multiprocessing_main_handling.cpython-310.pyc +0 -0
  49. venv/lib/python3.10/site-packages/multiprocess/tests/__pycache__/test_multiprocessing_spawn.cpython-310.pyc +0 -0
  50. venv/lib/python3.10/site-packages/multiprocess/tests/mp_fork_bomb.py +18 -0
ckpts/universal/global_step40/zero/19.input_layernorm.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ced0f2d1fdbd76cf895d36727809de67b20139df7662cd022b883efe9ecb5622
3
+ size 9387
ckpts/universal/global_step40/zero/19.input_layernorm.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3bdbea9caf0e7e7b5bb293f6687533d0d585bd9059633a9c31d6d0a2e1680028
3
+ size 9293
ckpts/universal/global_step40/zero/20.mlp.dense_4h_to_h.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7beded3102893a73e3cd4abef5837f706bc2f9b1537f51dbda8aa73d5209deed
3
+ size 33555612
ckpts/universal/global_step40/zero/20.mlp.dense_4h_to_h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3676a7f071672905f8ad6b2c66ff7213221cf0254c6f145dbccd47940b460d8b
3
+ size 33555627
ckpts/universal/global_step40/zero/20.mlp.dense_4h_to_h.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dd96afbbffe38b02733be55c4e3ecc8f22e0e28a982e3a3dae31519806e16c87
3
+ size 33555533
venv/lib/python3.10/site-packages/multiprocess/__pycache__/__info__.cpython-310.pyc ADDED
Binary file (7.91 kB). View file
 
venv/lib/python3.10/site-packages/multiprocess/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.43 kB). View file
 
venv/lib/python3.10/site-packages/multiprocess/__pycache__/connection.cpython-310.pyc ADDED
Binary file (25.7 kB). View file
 
venv/lib/python3.10/site-packages/multiprocess/__pycache__/context.cpython-310.pyc ADDED
Binary file (12.9 kB). View file
 
venv/lib/python3.10/site-packages/multiprocess/__pycache__/forkserver.cpython-310.pyc ADDED
Binary file (8.46 kB). View file
 
venv/lib/python3.10/site-packages/multiprocess/__pycache__/heap.cpython-310.pyc ADDED
Binary file (7.71 kB). View file
 
venv/lib/python3.10/site-packages/multiprocess/__pycache__/managers.cpython-310.pyc ADDED
Binary file (40.8 kB). View file
 
venv/lib/python3.10/site-packages/multiprocess/__pycache__/pool.cpython-310.pyc ADDED
Binary file (25.2 kB). View file
 
venv/lib/python3.10/site-packages/multiprocess/__pycache__/popen_fork.cpython-310.pyc ADDED
Binary file (2.56 kB). View file
 
venv/lib/python3.10/site-packages/multiprocess/__pycache__/popen_forkserver.cpython-310.pyc ADDED
Binary file (2.5 kB). View file
 
venv/lib/python3.10/site-packages/multiprocess/__pycache__/popen_spawn_posix.cpython-310.pyc ADDED
Binary file (2.39 kB). View file
 
venv/lib/python3.10/site-packages/multiprocess/__pycache__/popen_spawn_win32.cpython-310.pyc ADDED
Binary file (3.51 kB). View file
 
venv/lib/python3.10/site-packages/multiprocess/__pycache__/process.cpython-310.pyc ADDED
Binary file (11.3 kB). View file
 
venv/lib/python3.10/site-packages/multiprocess/__pycache__/queues.cpython-310.pyc ADDED
Binary file (10.4 kB). View file
 
venv/lib/python3.10/site-packages/multiprocess/__pycache__/reduction.cpython-310.pyc ADDED
Binary file (8.46 kB). View file
 
venv/lib/python3.10/site-packages/multiprocess/__pycache__/resource_sharer.cpython-310.pyc ADDED
Binary file (5.34 kB). View file
 
venv/lib/python3.10/site-packages/multiprocess/__pycache__/resource_tracker.cpython-310.pyc ADDED
Binary file (5.59 kB). View file
 
venv/lib/python3.10/site-packages/multiprocess/__pycache__/shared_memory.cpython-310.pyc ADDED
Binary file (14.5 kB). View file
 
venv/lib/python3.10/site-packages/multiprocess/__pycache__/sharedctypes.cpython-310.pyc ADDED
Binary file (7.14 kB). View file
 
venv/lib/python3.10/site-packages/multiprocess/__pycache__/spawn.cpython-310.pyc ADDED
Binary file (6.77 kB). View file
 
venv/lib/python3.10/site-packages/multiprocess/__pycache__/synchronize.cpython-310.pyc ADDED
Binary file (11.5 kB). View file
 
venv/lib/python3.10/site-packages/multiprocess/__pycache__/util.cpython-310.pyc ADDED
Binary file (11.5 kB). View file
 
venv/lib/python3.10/site-packages/multiprocess/context.py ADDED
@@ -0,0 +1,376 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import sys
3
+ import threading
4
+
5
+ from . import process
6
+ from . import reduction
7
+
8
+ __all__ = ()
9
+
10
+ #
11
+ # Exceptions
12
+ #
13
+
14
+ class ProcessError(Exception):
15
+ pass
16
+
17
+ class BufferTooShort(ProcessError):
18
+ pass
19
+
20
+ class TimeoutError(ProcessError):
21
+ pass
22
+
23
+ class AuthenticationError(ProcessError):
24
+ pass
25
+
26
+ #
27
+ # Base type for contexts. Bound methods of an instance of this type are included in __all__ of __init__.py
28
+ #
29
+
30
+ class BaseContext(object):
31
+
32
+ ProcessError = ProcessError
33
+ BufferTooShort = BufferTooShort
34
+ TimeoutError = TimeoutError
35
+ AuthenticationError = AuthenticationError
36
+
37
+ current_process = staticmethod(process.current_process)
38
+ parent_process = staticmethod(process.parent_process)
39
+ active_children = staticmethod(process.active_children)
40
+
41
+ def cpu_count(self):
42
+ '''Returns the number of CPUs in the system'''
43
+ num = os.cpu_count()
44
+ if num is None:
45
+ raise NotImplementedError('cannot determine number of cpus')
46
+ else:
47
+ return num
48
+
49
+ def Manager(self):
50
+ '''Returns a manager associated with a running server process
51
+
52
+ The managers methods such as `Lock()`, `Condition()` and `Queue()`
53
+ can be used to create shared objects.
54
+ '''
55
+ from .managers import SyncManager
56
+ m = SyncManager(ctx=self.get_context())
57
+ m.start()
58
+ return m
59
+
60
+ def Pipe(self, duplex=True):
61
+ '''Returns two connection object connected by a pipe'''
62
+ from .connection import Pipe
63
+ return Pipe(duplex)
64
+
65
+ def Lock(self):
66
+ '''Returns a non-recursive lock object'''
67
+ from .synchronize import Lock
68
+ return Lock(ctx=self.get_context())
69
+
70
+ def RLock(self):
71
+ '''Returns a recursive lock object'''
72
+ from .synchronize import RLock
73
+ return RLock(ctx=self.get_context())
74
+
75
+ def Condition(self, lock=None):
76
+ '''Returns a condition object'''
77
+ from .synchronize import Condition
78
+ return Condition(lock, ctx=self.get_context())
79
+
80
+ def Semaphore(self, value=1):
81
+ '''Returns a semaphore object'''
82
+ from .synchronize import Semaphore
83
+ return Semaphore(value, ctx=self.get_context())
84
+
85
+ def BoundedSemaphore(self, value=1):
86
+ '''Returns a bounded semaphore object'''
87
+ from .synchronize import BoundedSemaphore
88
+ return BoundedSemaphore(value, ctx=self.get_context())
89
+
90
+ def Event(self):
91
+ '''Returns an event object'''
92
+ from .synchronize import Event
93
+ return Event(ctx=self.get_context())
94
+
95
+ def Barrier(self, parties, action=None, timeout=None):
96
+ '''Returns a barrier object'''
97
+ from .synchronize import Barrier
98
+ return Barrier(parties, action, timeout, ctx=self.get_context())
99
+
100
+ def Queue(self, maxsize=0):
101
+ '''Returns a queue object'''
102
+ from .queues import Queue
103
+ return Queue(maxsize, ctx=self.get_context())
104
+
105
+ def JoinableQueue(self, maxsize=0):
106
+ '''Returns a queue object'''
107
+ from .queues import JoinableQueue
108
+ return JoinableQueue(maxsize, ctx=self.get_context())
109
+
110
+ def SimpleQueue(self):
111
+ '''Returns a queue object'''
112
+ from .queues import SimpleQueue
113
+ return SimpleQueue(ctx=self.get_context())
114
+
115
+ def Pool(self, processes=None, initializer=None, initargs=(),
116
+ maxtasksperchild=None):
117
+ '''Returns a process pool object'''
118
+ from .pool import Pool
119
+ return Pool(processes, initializer, initargs, maxtasksperchild,
120
+ context=self.get_context())
121
+
122
+ def RawValue(self, typecode_or_type, *args):
123
+ '''Returns a shared object'''
124
+ from .sharedctypes import RawValue
125
+ return RawValue(typecode_or_type, *args)
126
+
127
+ def RawArray(self, typecode_or_type, size_or_initializer):
128
+ '''Returns a shared array'''
129
+ from .sharedctypes import RawArray
130
+ return RawArray(typecode_or_type, size_or_initializer)
131
+
132
+ def Value(self, typecode_or_type, *args, lock=True):
133
+ '''Returns a synchronized shared object'''
134
+ from .sharedctypes import Value
135
+ return Value(typecode_or_type, *args, lock=lock,
136
+ ctx=self.get_context())
137
+
138
+ def Array(self, typecode_or_type, size_or_initializer, *, lock=True):
139
+ '''Returns a synchronized shared array'''
140
+ from .sharedctypes import Array
141
+ return Array(typecode_or_type, size_or_initializer, lock=lock,
142
+ ctx=self.get_context())
143
+
144
+ def freeze_support(self):
145
+ '''Check whether this is a fake forked process in a frozen executable.
146
+ If so then run code specified by commandline and exit.
147
+ '''
148
+ if sys.platform == 'win32' and getattr(sys, 'frozen', False):
149
+ from .spawn import freeze_support
150
+ freeze_support()
151
+
152
+ def get_logger(self):
153
+ '''Return package logger -- if it does not already exist then
154
+ it is created.
155
+ '''
156
+ from .util import get_logger
157
+ return get_logger()
158
+
159
+ def log_to_stderr(self, level=None):
160
+ '''Turn on logging and add a handler which prints to stderr'''
161
+ from .util import log_to_stderr
162
+ return log_to_stderr(level)
163
+
164
+ def allow_connection_pickling(self):
165
+ '''Install support for sending connections and sockets
166
+ between processes
167
+ '''
168
+ # This is undocumented. In previous versions of multiprocessing
169
+ # its only effect was to make socket objects inheritable on Windows.
170
+ from . import connection
171
+
172
+ def set_executable(self, executable):
173
+ '''Sets the path to a python.exe or pythonw.exe binary used to run
174
+ child processes instead of sys.executable when using the 'spawn'
175
+ start method. Useful for people embedding Python.
176
+ '''
177
+ from .spawn import set_executable
178
+ set_executable(executable)
179
+
180
+ def set_forkserver_preload(self, module_names):
181
+ '''Set list of module names to try to load in forkserver process.
182
+ This is really just a hint.
183
+ '''
184
+ from .forkserver import set_forkserver_preload
185
+ set_forkserver_preload(module_names)
186
+
187
+ def get_context(self, method=None):
188
+ if method is None:
189
+ return self
190
+ try:
191
+ ctx = _concrete_contexts[method]
192
+ except KeyError:
193
+ raise ValueError('cannot find context for %r' % method) from None
194
+ ctx._check_available()
195
+ return ctx
196
+
197
+ def get_start_method(self, allow_none=False):
198
+ return self._name
199
+
200
+ def set_start_method(self, method, force=False):
201
+ raise ValueError('cannot set start method of concrete context')
202
+
203
+ @property
204
+ def reducer(self):
205
+ '''Controls how objects will be reduced to a form that can be
206
+ shared with other processes.'''
207
+ return globals().get('reduction')
208
+
209
+ @reducer.setter
210
+ def reducer(self, reduction):
211
+ globals()['reduction'] = reduction
212
+
213
+ def _check_available(self):
214
+ pass
215
+
216
+ #
217
+ # Type of default context -- underlying context can be set at most once
218
+ #
219
+
220
+ class Process(process.BaseProcess):
221
+ _start_method = None
222
+ @staticmethod
223
+ def _Popen(process_obj):
224
+ return _default_context.get_context().Process._Popen(process_obj)
225
+
226
+ @staticmethod
227
+ def _after_fork():
228
+ return _default_context.get_context().Process._after_fork()
229
+
230
+ class DefaultContext(BaseContext):
231
+ Process = Process
232
+
233
+ def __init__(self, context):
234
+ self._default_context = context
235
+ self._actual_context = None
236
+
237
+ def get_context(self, method=None):
238
+ if method is None:
239
+ if self._actual_context is None:
240
+ self._actual_context = self._default_context
241
+ return self._actual_context
242
+ else:
243
+ return super().get_context(method)
244
+
245
+ def set_start_method(self, method, force=False):
246
+ if self._actual_context is not None and not force:
247
+ raise RuntimeError('context has already been set')
248
+ if method is None and force:
249
+ self._actual_context = None
250
+ return
251
+ self._actual_context = self.get_context(method)
252
+
253
+ def get_start_method(self, allow_none=False):
254
+ if self._actual_context is None:
255
+ if allow_none:
256
+ return None
257
+ self._actual_context = self._default_context
258
+ return self._actual_context._name
259
+
260
+ def get_all_start_methods(self):
261
+ if sys.platform == 'win32':
262
+ return ['spawn']
263
+ else:
264
+ methods = ['spawn', 'fork'] if sys.platform == 'darwin' else ['fork', 'spawn']
265
+ if reduction.HAVE_SEND_HANDLE:
266
+ methods.append('forkserver')
267
+ return methods
268
+
269
+
270
+ #
271
+ # Context types for fixed start method
272
+ #
273
+
274
+ if sys.platform != 'win32':
275
+
276
+ class ForkProcess(process.BaseProcess):
277
+ _start_method = 'fork'
278
+ @staticmethod
279
+ def _Popen(process_obj):
280
+ from .popen_fork import Popen
281
+ return Popen(process_obj)
282
+
283
+ class SpawnProcess(process.BaseProcess):
284
+ _start_method = 'spawn'
285
+ @staticmethod
286
+ def _Popen(process_obj):
287
+ from .popen_spawn_posix import Popen
288
+ return Popen(process_obj)
289
+
290
+ @staticmethod
291
+ def _after_fork():
292
+ # process is spawned, nothing to do
293
+ pass
294
+
295
+ class ForkServerProcess(process.BaseProcess):
296
+ _start_method = 'forkserver'
297
+ @staticmethod
298
+ def _Popen(process_obj):
299
+ from .popen_forkserver import Popen
300
+ return Popen(process_obj)
301
+
302
+ class ForkContext(BaseContext):
303
+ _name = 'fork'
304
+ Process = ForkProcess
305
+
306
+ class SpawnContext(BaseContext):
307
+ _name = 'spawn'
308
+ Process = SpawnProcess
309
+
310
+ class ForkServerContext(BaseContext):
311
+ _name = 'forkserver'
312
+ Process = ForkServerProcess
313
+ def _check_available(self):
314
+ if not reduction.HAVE_SEND_HANDLE:
315
+ raise ValueError('forkserver start method not available')
316
+
317
+ _concrete_contexts = {
318
+ 'fork': ForkContext(),
319
+ 'spawn': SpawnContext(),
320
+ 'forkserver': ForkServerContext(),
321
+ }
322
+ if sys.platform == 'darwin':
323
+ # bpo-33725: running arbitrary code after fork() is no longer reliable
324
+ # on macOS since macOS 10.14 (Mojave). Use spawn by default instead.
325
+ _default_context = DefaultContext(_concrete_contexts['fork']) #FIXME: spawn
326
+ else:
327
+ _default_context = DefaultContext(_concrete_contexts['fork'])
328
+
329
+ else:
330
+
331
+ class SpawnProcess(process.BaseProcess):
332
+ _start_method = 'spawn'
333
+ @staticmethod
334
+ def _Popen(process_obj):
335
+ from .popen_spawn_win32 import Popen
336
+ return Popen(process_obj)
337
+
338
+ @staticmethod
339
+ def _after_fork():
340
+ # process is spawned, nothing to do
341
+ pass
342
+
343
+ class SpawnContext(BaseContext):
344
+ _name = 'spawn'
345
+ Process = SpawnProcess
346
+
347
+ _concrete_contexts = {
348
+ 'spawn': SpawnContext(),
349
+ }
350
+ _default_context = DefaultContext(_concrete_contexts['spawn'])
351
+
352
+ #
353
+ # Force the start method
354
+ #
355
+
356
+ def _force_start_method(method):
357
+ _default_context._actual_context = _concrete_contexts[method]
358
+
359
+ #
360
+ # Check that the current thread is spawning a child process
361
+ #
362
+
363
+ _tls = threading.local()
364
+
365
+ def get_spawning_popen():
366
+ return getattr(_tls, 'spawning_popen', None)
367
+
368
+ def set_spawning_popen(popen):
369
+ _tls.spawning_popen = popen
370
+
371
+ def assert_spawning(obj):
372
+ if get_spawning_popen() is None:
373
+ raise RuntimeError(
374
+ '%s objects should only be shared between processes'
375
+ ' through inheritance' % type(obj).__name__
376
+ )
venv/lib/python3.10/site-packages/multiprocess/dummy/__init__.py ADDED
@@ -0,0 +1,126 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # Support for the API of the multiprocessing package using threads
3
+ #
4
+ # multiprocessing/dummy/__init__.py
5
+ #
6
+ # Copyright (c) 2006-2008, R Oudkerk
7
+ # Licensed to PSF under a Contributor Agreement.
8
+ #
9
+
10
+ __all__ = [
11
+ 'Process', 'current_process', 'active_children', 'freeze_support',
12
+ 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition',
13
+ 'Event', 'Barrier', 'Queue', 'Manager', 'Pipe', 'Pool', 'JoinableQueue'
14
+ ]
15
+
16
+ #
17
+ # Imports
18
+ #
19
+
20
+ import threading
21
+ import sys
22
+ import weakref
23
+ import array
24
+
25
+ from .connection import Pipe
26
+ from threading import Lock, RLock, Semaphore, BoundedSemaphore
27
+ from threading import Event, Condition, Barrier
28
+ from queue import Queue
29
+
30
+ #
31
+ #
32
+ #
33
+
34
+ class DummyProcess(threading.Thread):
35
+
36
+ def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):
37
+ threading.Thread.__init__(self, group, target, name, args, kwargs)
38
+ self._pid = None
39
+ self._children = weakref.WeakKeyDictionary()
40
+ self._start_called = False
41
+ self._parent = current_process()
42
+
43
+ def start(self):
44
+ if self._parent is not current_process():
45
+ raise RuntimeError(
46
+ "Parent is {0!r} but current_process is {1!r}".format(
47
+ self._parent, current_process()))
48
+ self._start_called = True
49
+ if hasattr(self._parent, '_children'):
50
+ self._parent._children[self] = None
51
+ threading.Thread.start(self)
52
+
53
+ @property
54
+ def exitcode(self):
55
+ if self._start_called and not self.is_alive():
56
+ return 0
57
+ else:
58
+ return None
59
+
60
+ #
61
+ #
62
+ #
63
+
64
+ Process = DummyProcess
65
+ current_process = threading.current_thread
66
+ current_process()._children = weakref.WeakKeyDictionary()
67
+
68
+ def active_children():
69
+ children = current_process()._children
70
+ for p in list(children):
71
+ if not p.is_alive():
72
+ children.pop(p, None)
73
+ return list(children)
74
+
75
+ def freeze_support():
76
+ pass
77
+
78
+ #
79
+ #
80
+ #
81
+
82
+ class Namespace(object):
83
+ def __init__(self, /, **kwds):
84
+ self.__dict__.update(kwds)
85
+ def __repr__(self):
86
+ items = list(self.__dict__.items())
87
+ temp = []
88
+ for name, value in items:
89
+ if not name.startswith('_'):
90
+ temp.append('%s=%r' % (name, value))
91
+ temp.sort()
92
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(temp))
93
+
94
+ dict = dict
95
+ list = list
96
+
97
+ def Array(typecode, sequence, lock=True):
98
+ return array.array(typecode, sequence)
99
+
100
+ class Value(object):
101
+ def __init__(self, typecode, value, lock=True):
102
+ self._typecode = typecode
103
+ self._value = value
104
+
105
+ @property
106
+ def value(self):
107
+ return self._value
108
+
109
+ @value.setter
110
+ def value(self, value):
111
+ self._value = value
112
+
113
+ def __repr__(self):
114
+ return '<%s(%r, %r)>'%(type(self).__name__,self._typecode,self._value)
115
+
116
+ def Manager():
117
+ return sys.modules[__name__]
118
+
119
+ def shutdown():
120
+ pass
121
+
122
+ def Pool(processes=None, initializer=None, initargs=()):
123
+ from ..pool import ThreadPool
124
+ return ThreadPool(processes, initializer, initargs)
125
+
126
+ JoinableQueue = Queue
venv/lib/python3.10/site-packages/multiprocess/dummy/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (3.91 kB). View file
 
venv/lib/python3.10/site-packages/multiprocess/dummy/__pycache__/connection.cpython-310.pyc ADDED
Binary file (2.61 kB). View file
 
venv/lib/python3.10/site-packages/multiprocess/dummy/connection.py ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # Analogue of `multiprocessing.connection` which uses queues instead of sockets
3
+ #
4
+ # multiprocessing/dummy/connection.py
5
+ #
6
+ # Copyright (c) 2006-2008, R Oudkerk
7
+ # Licensed to PSF under a Contributor Agreement.
8
+ #
9
+
10
+ __all__ = [ 'Client', 'Listener', 'Pipe' ]
11
+
12
+ from queue import Queue
13
+
14
+
15
+ families = [None]
16
+
17
+
18
+ class Listener(object):
19
+
20
+ def __init__(self, address=None, family=None, backlog=1):
21
+ self._backlog_queue = Queue(backlog)
22
+
23
+ def accept(self):
24
+ return Connection(*self._backlog_queue.get())
25
+
26
+ def close(self):
27
+ self._backlog_queue = None
28
+
29
+ @property
30
+ def address(self):
31
+ return self._backlog_queue
32
+
33
+ def __enter__(self):
34
+ return self
35
+
36
+ def __exit__(self, exc_type, exc_value, exc_tb):
37
+ self.close()
38
+
39
+
40
+ def Client(address):
41
+ _in, _out = Queue(), Queue()
42
+ address.put((_out, _in))
43
+ return Connection(_in, _out)
44
+
45
+
46
+ def Pipe(duplex=True):
47
+ a, b = Queue(), Queue()
48
+ return Connection(a, b), Connection(b, a)
49
+
50
+
51
+ class Connection(object):
52
+
53
+ def __init__(self, _in, _out):
54
+ self._out = _out
55
+ self._in = _in
56
+ self.send = self.send_bytes = _out.put
57
+ self.recv = self.recv_bytes = _in.get
58
+
59
+ def poll(self, timeout=0.0):
60
+ if self._in.qsize() > 0:
61
+ return True
62
+ if timeout <= 0.0:
63
+ return False
64
+ with self._in.not_empty:
65
+ self._in.not_empty.wait(timeout)
66
+ return self._in.qsize() > 0
67
+
68
+ def close(self):
69
+ pass
70
+
71
+ def __enter__(self):
72
+ return self
73
+
74
+ def __exit__(self, exc_type, exc_value, exc_tb):
75
+ self.close()
venv/lib/python3.10/site-packages/multiprocess/managers.py ADDED
@@ -0,0 +1,1378 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # Module providing manager classes for dealing
3
+ # with shared objects
4
+ #
5
+ # multiprocessing/managers.py
6
+ #
7
+ # Copyright (c) 2006-2008, R Oudkerk
8
+ # Licensed to PSF under a Contributor Agreement.
9
+ #
10
+
11
+ __all__ = [ 'BaseManager', 'SyncManager', 'BaseProxy', 'Token' ]
12
+
13
+ #
14
+ # Imports
15
+ #
16
+
17
+ import sys
18
+ import threading
19
+ import signal
20
+ import array
21
+ import queue
22
+ import time
23
+ import types
24
+ import os
25
+ from os import getpid
26
+
27
+ from traceback import format_exc
28
+
29
+ from . import connection
30
+ from .context import reduction, get_spawning_popen, ProcessError
31
+ from . import pool
32
+ from . import process
33
+ from . import util
34
+ from . import get_context
35
+ try:
36
+ from . import shared_memory
37
+ except ImportError:
38
+ HAS_SHMEM = False
39
+ else:
40
+ HAS_SHMEM = True
41
+ __all__.append('SharedMemoryManager')
42
+
43
+ #
44
+ # Register some things for pickling
45
+ #
46
+
47
+ def reduce_array(a):
48
+ return array.array, (a.typecode, a.tobytes())
49
+ reduction.register(array.array, reduce_array)
50
+
51
+ view_types = [type(getattr({}, name)()) for name in ('items','keys','values')]
52
+ if view_types[0] is not list: # only needed in Py3.0
53
+ def rebuild_as_list(obj):
54
+ return list, (list(obj),)
55
+ for view_type in view_types:
56
+ reduction.register(view_type, rebuild_as_list)
57
+
58
+ #
59
+ # Type for identifying shared objects
60
+ #
61
+
62
class Token(object):
    '''
    Type to uniquely identify a shared object
    '''
    __slots__ = ('typeid', 'address', 'id')

    def __init__(self, typeid, address, id):
        self.typeid = typeid
        self.address = address
        self.id = id

    def __getstate__(self):
        # __slots__ classes have no __dict__, so pickle via a plain tuple
        return (self.typeid, self.address, self.id)

    def __setstate__(self, state):
        self.typeid, self.address, self.id = state

    def __repr__(self):
        return '%s(typeid=%r, address=%r, id=%r)' % (
            self.__class__.__name__, self.typeid, self.address, self.id)
80
+
81
+ #
82
+ # Function for communication with a manager's server process
83
+ #
84
+
85
def dispatch(c, id, methodname, args=(), kwds={}):
    '''
    Send a message to manager using connection `c` and return response
    '''
    c.send((id, methodname, args, kwds))
    kind, result = c.recv()
    # Anything other than a plain return is converted into an exception.
    if kind != '#RETURN':
        raise convert_to_error(kind, result)
    return result
94
+
95
+ def convert_to_error(kind, result):
96
+ if kind == '#ERROR':
97
+ return result
98
+ elif kind in ('#TRACEBACK', '#UNSERIALIZABLE'):
99
+ if not isinstance(result, str):
100
+ raise TypeError(
101
+ "Result {0!r} (kind '{1}') type is {2}, not str".format(
102
+ result, kind, type(result)))
103
+ if kind == '#UNSERIALIZABLE':
104
+ return RemoteError('Unserializable message: %s\n' % result)
105
+ else:
106
+ return RemoteError(result)
107
+ else:
108
+ return ValueError('Unrecognized message type {!r}'.format(kind))
109
+
110
class RemoteError(Exception):
    '''Exception wrapping a traceback string from the manager process.'''
    def __str__(self):
        bar = '-' * 75
        return '\n%s\n%s%s' % (bar, str(self.args[0]), bar)
113
+
114
+ #
115
+ # Functions for finding the method names of an object
116
+ #
117
+
118
def all_methods(obj):
    '''
    Return a list of names of methods of `obj`
    '''
    # "Method" here means any callable attribute, dunders included.
    return [name for name in dir(obj) if callable(getattr(obj, name))]
128
+
129
def public_methods(obj):
    '''
    Return a list of names of methods of `obj` which do not start with '_'
    '''
    return [name for name in all_methods(obj) if not name.startswith('_')]
134
+
135
+ #
136
+ # Server which is run in a process controlled by a manager
137
+ #
138
+
139
class Server(object):
    '''
    Server class which runs in a process controlled by a manager object
    '''
    # Methods a remote client may invoke directly on the server itself
    # (checked in _handle_request before dispatch).
    public = ['shutdown', 'create', 'accept_connection', 'get_methods',
              'debug_info', 'number_of_objects', 'dummy', 'incref', 'decref']

    def __init__(self, registry, address, authkey, serializer):
        if not isinstance(authkey, bytes):
            raise TypeError(
                "Authkey {0!r} is type {1!s}, not bytes".format(
                    authkey, type(authkey)))
        self.registry = registry
        self.authkey = process.AuthenticationString(authkey)
        Listener, Client = listener_client[serializer]

        # do authentication later
        self.listener = Listener(address=address, backlog=16)
        self.address = self.listener.address

        # ident -> (obj, exposed-method-set, method_to_typeid); '0' is a
        # reserved dummy entry.
        self.id_to_obj = {'0': (None, ())}
        # ident -> number of external (proxy) references
        self.id_to_refcount = {}
        # idents stashed while only the manager itself holds a reference
        self.id_to_local_proxy_obj = {}
        self.mutex = threading.Lock()

    def serve_forever(self):
        '''
        Run the server forever
        '''
        self.stop_event = threading.Event()
        process.current_process()._manager_server = self
        try:
            accepter = threading.Thread(target=self.accepter)
            accepter.daemon = True
            accepter.start()
            try:
                # Poll the stop event so KeyboardInterrupt can be caught.
                while not self.stop_event.is_set():
                    self.stop_event.wait(1)
            except (KeyboardInterrupt, SystemExit):
                pass
        finally:
            if sys.stdout != sys.__stdout__: # what about stderr?
                util.debug('resetting stdout, stderr')
                sys.stdout = sys.__stdout__
                sys.stderr = sys.__stderr__
            sys.exit(0)

    def accepter(self):
        # Accept-loop thread: one daemon thread per incoming connection.
        while True:
            try:
                c = self.listener.accept()
            except OSError:
                continue
            t = threading.Thread(target=self.handle_request, args=(c,))
            t.daemon = True
            t.start()

    def _handle_request(self, c):
        # Authenticate both directions, then run a single `public` command.
        request = None
        try:
            connection.deliver_challenge(c, self.authkey)
            connection.answer_challenge(c, self.authkey)
            request = c.recv()
            ignore, funcname, args, kwds = request
            assert funcname in self.public, '%r unrecognized' % funcname
            func = getattr(self, funcname)
        except Exception:
            msg = ('#TRACEBACK', format_exc())
        else:
            try:
                result = func(c, *args, **kwds)
            except Exception:
                msg = ('#TRACEBACK', format_exc())
            else:
                msg = ('#RETURN', result)

        try:
            c.send(msg)
        except Exception as e:
            # Best effort: report the failure back, then just log it.
            try:
                c.send(('#TRACEBACK', format_exc()))
            except Exception:
                pass
            util.info('Failure to send message: %r', msg)
            util.info(' ... request was %r', request)
            util.info(' ... exception was %r', e)

    def handle_request(self, conn):
        '''
        Handle a new connection
        '''
        try:
            self._handle_request(conn)
        except SystemExit:
            # Server.serve_client() calls sys.exit(0) on EOF
            pass
        finally:
            conn.close()

    def serve_client(self, conn):
        '''
        Handle requests from the proxies in a particular process/thread
        '''
        util.debug('starting server thread to service %r',
                   threading.current_thread().name)

        # Hoist hot-loop lookups to locals.
        recv = conn.recv
        send = conn.send
        id_to_obj = self.id_to_obj

        while not self.stop_event.is_set():

            try:
                methodname = obj = None
                request = recv()
                ident, methodname, args, kwds = request
                try:
                    obj, exposed, gettypeid = id_to_obj[ident]
                except KeyError as ke:
                    # Fall back to objects only referenced internally
                    # (nested proxies); re-raise the original KeyError.
                    try:
                        obj, exposed, gettypeid = \
                            self.id_to_local_proxy_obj[ident]
                    except KeyError:
                        raise ke

                if methodname not in exposed:
                    raise AttributeError(
                        'method %r of %r object is not in exposed=%r' %
                        (methodname, type(obj), exposed)
                        )

                function = getattr(obj, methodname)

                try:
                    res = function(*args, **kwds)
                except Exception as e:
                    msg = ('#ERROR', e)
                else:
                    # If the result type is itself managed, wrap it in a
                    # fresh shared object and reply with a proxy token.
                    typeid = gettypeid and gettypeid.get(methodname, None)
                    if typeid:
                        rident, rexposed = self.create(conn, typeid, res)
                        token = Token(typeid, self.address, rident)
                        msg = ('#PROXY', (rexposed, token))
                    else:
                        msg = ('#RETURN', res)

            except AttributeError:
                if methodname is None:
                    msg = ('#TRACEBACK', format_exc())
                else:
                    # Unexposed method: try the __str__/__repr__/#GETVALUE
                    # fallbacks before giving up.
                    try:
                        fallback_func = self.fallback_mapping[methodname]
                        result = fallback_func(
                            self, conn, ident, obj, *args, **kwds
                            )
                        msg = ('#RETURN', result)
                    except Exception:
                        msg = ('#TRACEBACK', format_exc())

            except EOFError:
                util.debug('got EOF -- exiting thread serving %r',
                           threading.current_thread().name)
                sys.exit(0)

            except Exception:
                msg = ('#TRACEBACK', format_exc())

            try:
                try:
                    send(msg)
                except Exception:
                    # Result could not be pickled; tell the client why.
                    send(('#UNSERIALIZABLE', format_exc()))
            except Exception as e:
                util.info('exception in thread serving %r',
                          threading.current_thread().name)
                util.info(' ... message was %r', msg)
                util.info(' ... exception was %r', e)
                conn.close()
                sys.exit(1)

    # Fallbacks used when a method is not in `exposed` (see serve_client).
    def fallback_getvalue(self, conn, ident, obj):
        return obj

    def fallback_str(self, conn, ident, obj):
        return str(obj)

    def fallback_repr(self, conn, ident, obj):
        return repr(obj)

    fallback_mapping = {
        '__str__':fallback_str,
        '__repr__':fallback_repr,
        '#GETVALUE':fallback_getvalue
        }

    def dummy(self, c):
        # No-op used by BaseManager.connect() to verify the connection.
        pass

    def debug_info(self, c):
        '''
        Return some info --- useful to spot problems with refcounting
        '''
        # Perhaps include debug info about 'c'?
        with self.mutex:
            result = []
            keys = list(self.id_to_refcount.keys())
            keys.sort()
            for ident in keys:
                if ident != '0':
                    result.append('  %s:       refcount=%s\n    %s' %
                                  (ident, self.id_to_refcount[ident],
                                   str(self.id_to_obj[ident][0])[:75]))
            return '\n'.join(result)

    def number_of_objects(self, c):
        '''
        Number of shared objects
        '''
        # Doesn't use (len(self.id_to_obj) - 1) as we shouldn't count ident='0'
        return len(self.id_to_refcount)

    def shutdown(self, c):
        '''
        Shutdown this process
        '''
        try:
            util.debug('manager received shutdown message')
            c.send(('#RETURN', None))
        except:
            import traceback
            traceback.print_exc()
        finally:
            # Setting the event wakes serve_forever, which exits the process.
            self.stop_event.set()

    def create(self, c, typeid, /, *args, **kwds):
        '''
        Create a new shared object and return its id
        '''
        with self.mutex:
            callable, exposed, method_to_typeid, proxytype = \
                self.registry[typeid]

            if callable is None:
                # Registered without a factory: caller passes the object.
                if kwds or (len(args) != 1):
                    raise ValueError(
                        "Without callable, must have one non-keyword argument")
                obj = args[0]
            else:
                obj = callable(*args, **kwds)

            if exposed is None:
                exposed = public_methods(obj)
            if method_to_typeid is not None:
                if not isinstance(method_to_typeid, dict):
                    raise TypeError(
                        "Method_to_typeid {0!r}: type {1!s}, not dict".format(
                            method_to_typeid, type(method_to_typeid)))
                exposed = list(exposed) + list(method_to_typeid)

            ident = '%x' % id(obj)  # convert to string because xmlrpclib
                                    # only has 32 bit signed integers
            util.debug('%r callable returned object with id %r', typeid, ident)

            self.id_to_obj[ident] = (obj, set(exposed), method_to_typeid)
            if ident not in self.id_to_refcount:
                self.id_to_refcount[ident] = 0

        self.incref(c, ident)
        return ident, tuple(exposed)

    def get_methods(self, c, token):
        '''
        Return the methods of the shared object indicated by token
        '''
        return tuple(self.id_to_obj[token.id][1])

    def accept_connection(self, c, name):
        '''
        Spawn a new thread to serve this connection
        '''
        threading.current_thread().name = name
        c.send(('#RETURN', None))
        # Runs the request loop in the current (per-connection) thread.
        self.serve_client(c)

    def incref(self, c, ident):
        with self.mutex:
            try:
                self.id_to_refcount[ident] += 1
            except KeyError as ke:
                # If no external references exist but an internal (to the
                # manager) still does and a new external reference is created
                # from it, restore the manager's tracking of it from the
                # previously stashed internal ref.
                if ident in self.id_to_local_proxy_obj:
                    self.id_to_refcount[ident] = 1
                    self.id_to_obj[ident] = \
                        self.id_to_local_proxy_obj[ident]
                    obj, exposed, gettypeid = self.id_to_obj[ident]
                    util.debug('Server re-enabled tracking & INCREF %r', ident)
                else:
                    raise ke

    def decref(self, c, ident):
        # Internally-held objects are not refcounted externally; skip.
        if ident not in self.id_to_refcount and \
            ident in self.id_to_local_proxy_obj:
            util.debug('Server DECREF skipping %r', ident)
            return

        with self.mutex:
            if self.id_to_refcount[ident] <= 0:
                raise AssertionError(
                    "Id {0!s} ({1!r}) has refcount {2:n}, not 1+".format(
                        ident, self.id_to_obj[ident],
                        self.id_to_refcount[ident]))
            self.id_to_refcount[ident] -= 1
            if self.id_to_refcount[ident] == 0:
                del self.id_to_refcount[ident]

        if ident not in self.id_to_refcount:
            # Two-step process in case the object turns out to contain other
            # proxy objects (e.g. a managed list of managed lists).
            # Otherwise, deleting self.id_to_obj[ident] would trigger the
            # deleting of the stored value (another managed object) which would
            # in turn attempt to acquire the mutex that is already held here.
            self.id_to_obj[ident] = (None, (), None)  # thread-safe
            util.debug('disposing of obj with id %r', ident)
            with self.mutex:
                del self.id_to_obj[ident]
467
+
468
+
469
+ #
470
+ # Class to represent state of a manager
471
+ #
472
+
473
class State(object):
    # Mutable holder for a manager's lifecycle state; `value` holds one of
    # the INITIAL/STARTED/SHUTDOWN constants below.
    __slots__ = ['value']
    INITIAL = 0
    STARTED = 1
    SHUTDOWN = 2
478
+
479
+ #
480
+ # Mapping from serializer name to Listener and Client types
481
+ #
482
+
483
# Maps serializer name -> (Listener factory, Client factory) used for all
# manager/proxy connections.
listener_client = { #XXX: register dill?
    'pickle' : (connection.Listener, connection.Client),
    'xmlrpclib' : (connection.XmlListener, connection.XmlClient)
    }
487
+
488
+ #
489
+ # Definition of BaseManager
490
+ #
491
+
492
class BaseManager(object):
    '''
    Base class for managers
    '''
    # typeid -> (callable, exposed, method_to_typeid, proxytype); subclasses
    # copy-on-write this in register().
    _registry = {}
    _Server = Server

    def __init__(self, address=None, authkey=None, serializer='pickle',
                 ctx=None):
        if authkey is None:
            authkey = process.current_process().authkey
        self._address = address     # XXX not final address if eg ('', 0)
        self._authkey = process.AuthenticationString(authkey)
        self._state = State()
        self._state.value = State.INITIAL
        self._serializer = serializer
        self._Listener, self._Client = listener_client[serializer]
        self._ctx = ctx or get_context()

    def get_server(self):
        '''
        Return server object with serve_forever() method and address attribute
        '''
        if self._state.value != State.INITIAL:
            if self._state.value == State.STARTED:
                raise ProcessError("Already started server")
            elif self._state.value == State.SHUTDOWN:
                raise ProcessError("Manager has shut down")
            else:
                raise ProcessError(
                    "Unknown state {!r}".format(self._state.value))
        return Server(self._registry, self._address,
                      self._authkey, self._serializer)

    def connect(self):
        '''
        Connect manager object to the server process
        '''
        Listener, Client = listener_client[self._serializer]
        conn = Client(self._address, authkey=self._authkey)
        # 'dummy' is a no-op round trip that validates address and authkey.
        dispatch(conn, None, 'dummy')
        self._state.value = State.STARTED

    def start(self, initializer=None, initargs=()):
        '''
        Spawn a server process for this manager object
        '''
        if self._state.value != State.INITIAL:
            if self._state.value == State.STARTED:
                raise ProcessError("Already started server")
            elif self._state.value == State.SHUTDOWN:
                raise ProcessError("Manager has shut down")
            else:
                raise ProcessError(
                    "Unknown state {!r}".format(self._state.value))

        if initializer is not None and not callable(initializer):
            raise TypeError('initializer must be a callable')

        # pipe over which we will retrieve address of server
        reader, writer = connection.Pipe(duplex=False)

        # spawn process which runs a server
        self._process = self._ctx.Process(
            target=type(self)._run_server,
            args=(self._registry, self._address, self._authkey,
                  self._serializer, writer, initializer, initargs),
            )
        ident = ':'.join(str(i) for i in self._process._identity)
        self._process.name = type(self).__name__  + '-' + ident
        self._process.start()

        # get address of server
        writer.close()
        self._address = reader.recv()
        reader.close()

        # register a finalizer
        self._state.value = State.STARTED
        self.shutdown = util.Finalize(
            self, type(self)._finalize_manager,
            args=(self._process, self._address, self._authkey,
                  self._state, self._Client),
            exitpriority=0
            )

    @classmethod
    def _run_server(cls, registry, address, authkey, serializer, writer,
                    initializer=None, initargs=()):
        '''
        Create a server, report its address and run it
        '''
        # bpo-36368: protect server process from KeyboardInterrupt signals
        signal.signal(signal.SIGINT, signal.SIG_IGN)

        if initializer is not None:
            initializer(*initargs)

        # create server
        server = cls._Server(registry, address, authkey, serializer)

        # inform parent process of the server's address
        writer.send(server.address)
        writer.close()

        # run the manager
        util.info('manager serving at %r', server.address)
        server.serve_forever()

    def _create(self, typeid, /, *args, **kwds):
        '''
        Create a new shared object; return the token and exposed tuple
        '''
        assert self._state.value == State.STARTED, 'server not yet started'
        conn = self._Client(self._address, authkey=self._authkey)
        try:
            id, exposed = dispatch(conn, None, 'create', (typeid,)+args, kwds)
        finally:
            conn.close()
        return Token(typeid, self._address, id), exposed

    def join(self, timeout=None):
        '''
        Join the manager process (if it has been spawned)
        '''
        if self._process is not None:
            self._process.join(timeout)
            if not self._process.is_alive():
                self._process = None

    def _debug_info(self):
        '''
        Return some info about the servers shared objects and connections
        '''
        conn = self._Client(self._address, authkey=self._authkey)
        try:
            return dispatch(conn, None, 'debug_info')
        finally:
            conn.close()

    def _number_of_objects(self):
        '''
        Return the number of shared objects
        '''
        conn = self._Client(self._address, authkey=self._authkey)
        try:
            return dispatch(conn, None, 'number_of_objects')
        finally:
            conn.close()

    def __enter__(self):
        # Lazily start the server on first context entry.
        if self._state.value == State.INITIAL:
            self.start()
        if self._state.value != State.STARTED:
            if self._state.value == State.INITIAL:
                raise ProcessError("Unable to start server")
            elif self._state.value == State.SHUTDOWN:
                raise ProcessError("Manager has shut down")
            else:
                raise ProcessError(
                    "Unknown state {!r}".format(self._state.value))
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.shutdown()

    @staticmethod
    def _finalize_manager(process, address, authkey, state, _Client):
        '''
        Shutdown the manager process; will be registered as a finalizer
        '''
        if process.is_alive():
            util.info('sending shutdown message to manager')
            try:
                conn = _Client(address, authkey=authkey)
                try:
                    dispatch(conn, None, 'shutdown')
                finally:
                    conn.close()
            except Exception:
                pass

            # Graceful shutdown first; escalate to terminate() if needed.
            process.join(timeout=1.0)
            if process.is_alive():
                util.info('manager still alive')
                if hasattr(process, 'terminate'):
                    util.info('trying to `terminate()` manager process')
                    process.terminate()
                    process.join(timeout=1.0)
                    if process.is_alive():
                        util.info('manager still alive after terminate')

        state.value = State.SHUTDOWN
        try:
            del BaseProxy._address_to_local[address]
        except KeyError:
            pass

    @property
    def address(self):
        return self._address

    @classmethod
    def register(cls, typeid, callable=None, proxytype=None, exposed=None,
                 method_to_typeid=None, create_method=True):
        '''
        Register a typeid with the manager type
        '''
        # Copy-on-write so subclass registrations don't leak into the base.
        if '_registry' not in cls.__dict__:
            cls._registry = cls._registry.copy()

        if proxytype is None:
            proxytype = AutoProxy

        exposed = exposed or getattr(proxytype, '_exposed_', None)

        method_to_typeid = method_to_typeid or \
                           getattr(proxytype, '_method_to_typeid_', None)

        if method_to_typeid:
            for key, value in list(method_to_typeid.items()): # isinstance?
                assert type(key) is str, '%r is not a string' % key
                assert type(value) is str, '%r is not a string' % value

        cls._registry[typeid] = (
            callable, exposed, method_to_typeid, proxytype
            )

        if create_method:
            # Attach a factory method named after the typeid, e.g. m.list().
            def temp(self, /, *args, **kwds):
                util.debug('requesting creation of a shared %r object', typeid)
                token, exp = self._create(typeid, *args, **kwds)
                proxy = proxytype(
                    token, self._serializer, manager=self,
                    authkey=self._authkey, exposed=exp
                    )
                # Drop the extra reference that create() took on our behalf.
                conn = self._Client(token.address, authkey=self._authkey)
                dispatch(conn, None, 'decref', (token.id,))
                return proxy
            temp.__name__ = typeid
            setattr(cls, typeid, temp)
733
+
734
+ #
735
+ # Subclass of set which get cleared after a fork
736
+ #
737
+
738
class ProcessLocalSet(set):
    # A set that empties itself in a child after fork and pickles as an
    # empty set, so proxy-reference bookkeeping stays per-process.
    def __init__(self):
        util.register_after_fork(self, lambda obj: obj.clear())
    def __reduce__(self):
        return type(self), ()
743
+
744
+ #
745
+ # Definition of BaseProxy
746
+ #
747
+
748
class BaseProxy(object):
    '''
    A base for proxies of shared objects
    '''
    # manager address -> (thread-local storage, set of referenced idents),
    # shared by every proxy in this process.
    _address_to_local = {}
    _mutex = util.ForkAwareThreadLock()

    def __init__(self, token, serializer, manager=None,
                 authkey=None, exposed=None, incref=True, manager_owned=False):
        with BaseProxy._mutex:
            tls_idset = BaseProxy._address_to_local.get(token.address, None)
            if tls_idset is None:
                tls_idset = util.ForkAwareLocal(), ProcessLocalSet()
                BaseProxy._address_to_local[token.address] = tls_idset

        # self._tls is used to record the connection used by this
        # thread to communicate with the manager at token.address
        self._tls = tls_idset[0]

        # self._idset is used to record the identities of all shared
        # objects for which the current process owns references and
        # which are in the manager at token.address
        self._idset = tls_idset[1]

        self._token = token
        self._id = self._token.id
        self._manager = manager
        self._serializer = serializer
        self._Client = listener_client[serializer][1]

        # Should be set to True only when a proxy object is being created
        # on the manager server; primary use case: nested proxy objects.
        # RebuildProxy detects when a proxy is being created on the manager
        # and sets this value appropriately.
        self._owned_by_manager = manager_owned

        if authkey is not None:
            self._authkey = process.AuthenticationString(authkey)
        elif self._manager is not None:
            self._authkey = self._manager._authkey
        else:
            self._authkey = process.current_process().authkey

        if incref:
            self._incref()

        util.register_after_fork(self, BaseProxy._after_fork)

    def _connect(self):
        # Open (and cache in thread-local storage) this thread's connection
        # to the manager server.
        util.debug('making connection to manager')
        name = process.current_process().name
        if threading.current_thread().name != 'MainThread':
            name += '|' + threading.current_thread().name
        conn = self._Client(self._token.address, authkey=self._authkey)
        dispatch(conn, None, 'accept_connection', (name,))
        self._tls.connection = conn

    def _callmethod(self, methodname, args=(), kwds={}):
        '''
        Try to call a method of the referent and return a copy of the result
        '''
        try:
            conn = self._tls.connection
        except AttributeError:
            # First call from this thread: establish the connection lazily.
            util.debug('thread %r does not own a connection',
                       threading.current_thread().name)
            self._connect()
            conn = self._tls.connection

        conn.send((self._id, methodname, args, kwds))
        kind, result = conn.recv()

        if kind == '#RETURN':
            return result
        elif kind == '#PROXY':
            # The server created a nested shared object; wrap it locally.
            exposed, token = result
            proxytype = self._manager._registry[token.typeid][-1]
            token.address = self._token.address
            proxy = proxytype(
                token, self._serializer, manager=self._manager,
                authkey=self._authkey, exposed=exposed
                )
            # Drop the extra reference create() took on our behalf.
            conn = self._Client(token.address, authkey=self._authkey)
            dispatch(conn, None, 'decref', (token.id,))
            return proxy
        raise convert_to_error(kind, result)

    def _getvalue(self):
        '''
        Get a copy of the value of the referent
        '''
        return self._callmethod('#GETVALUE')

    def _incref(self):
        if self._owned_by_manager:
            util.debug('owned_by_manager skipped INCREF of %r', self._token.id)
            return

        conn = self._Client(self._token.address, authkey=self._authkey)
        dispatch(conn, None, 'incref', (self._id,))
        util.debug('INCREF %r', self._token.id)

        self._idset.add(self._id)

        state = self._manager and self._manager._state

        # Matching decref happens when this proxy is garbage collected
        # or at interpreter exit.
        self._close = util.Finalize(
            self, BaseProxy._decref,
            args=(self._token, self._authkey, state,
                  self._tls, self._idset, self._Client),
            exitpriority=10
            )

    @staticmethod
    def _decref(token, authkey, state, tls, idset, _Client):
        idset.discard(token.id)

        # check whether manager is still alive
        if state is None or state.value == State.STARTED:
            # tell manager this process no longer cares about referent
            try:
                util.debug('DECREF %r', token.id)
                conn = _Client(token.address, authkey=authkey)
                dispatch(conn, None, 'decref', (token.id,))
            except Exception as e:
                util.debug('... decref failed %s', e)

        else:
            util.debug('DECREF %r -- manager already shutdown', token.id)

        # check whether we can close this thread's connection because
        # the process owns no more references to objects for this manager
        if not idset and hasattr(tls, 'connection'):
            util.debug('thread %r has no more proxies so closing conn',
                       threading.current_thread().name)
            tls.connection.close()
            del tls.connection

    def _after_fork(self):
        # The child gets its own reference; the parent's manager object is
        # not valid here.
        self._manager = None
        try:
            self._incref()
        except Exception as e:
            # the proxy may just be for a manager which has shutdown
            util.info('incref failed: %s' % e)

    def __reduce__(self):
        kwds = {}
        if get_spawning_popen() is not None:
            # Pass the authkey along only when pickling for process spawn.
            kwds['authkey'] = self._authkey

        if getattr(self, '_isauto', False):
            kwds['exposed'] = self._exposed_
            return (RebuildProxy,
                    (AutoProxy, self._token, self._serializer, kwds))
        else:
            return (RebuildProxy,
                    (type(self), self._token, self._serializer, kwds))

    def __deepcopy__(self, memo):
        return self._getvalue()

    def __repr__(self):
        return '<%s object, typeid %r at %#x>' % \
               (type(self).__name__, self._token.typeid, id(self))

    def __str__(self):
        '''
        Return representation of the referent (or a fall-back if that fails)
        '''
        try:
            return self._callmethod('__repr__')
        except Exception:
            return repr(self)[:-1] + "; '__str__()' failed>"
922
+
923
+ #
924
+ # Function used for unpickling
925
+ #
926
+
927
def RebuildProxy(func, token, serializer, kwds):
    '''
    Function used for unpickling proxy objects.
    '''
    server = getattr(process.current_process(), '_manager_server', None)
    if server and server.address == token.address:
        # Unpickling inside the manager process itself: mark the proxy as
        # manager-owned and stash an internal reference to the referent.
        util.debug('Rebuild a proxy owned by manager, token=%r', token)
        kwds['manager_owned'] = True
        if token.id not in server.id_to_local_proxy_obj:
            server.id_to_local_proxy_obj[token.id] = \
                server.id_to_obj[token.id]
    # Skip the incref while a child is still inheriting state from its
    # parent (the parent's reference already covers it).
    incref = (
        kwds.pop('incref', True) and
        not getattr(process.current_process(), '_inheriting', False)
        )
    return func(token, serializer, incref=incref, **kwds)
943
+
944
+ #
945
+ # Functions to create proxies and proxy types
946
+ #
947
+
948
def MakeProxyType(name, exposed, _cache={}):
    '''
    Return a proxy type whose methods are given by `exposed`
    '''
    # NOTE: the mutable default `_cache` is deliberate -- it memoizes
    # generated proxy types across calls.
    exposed = tuple(exposed)
    key = (name, exposed)
    if key in _cache:
        return _cache[key]

    namespace = {}
    for meth in exposed:
        # Each exposed name becomes a method forwarding to _callmethod.
        source = ('def %s(self, /, *args, **kwds):\n'
                  '    return self._callmethod(%r, args, kwds)' % (meth, meth))
        exec(source, namespace)

    proxy_type = type(name, (BaseProxy,), namespace)
    proxy_type._exposed_ = exposed
    _cache[key] = proxy_type
    return proxy_type
968
+
969
+
970
def AutoProxy(token, serializer, manager=None, authkey=None,
              exposed=None, incref=True, manager_owned=False):
    '''
    Return an auto-proxy for `token`
    '''
    _Client = listener_client[serializer][1]

    if exposed is None:
        # Ask the server which methods the referent exposes.
        conn = _Client(token.address, authkey=authkey)
        try:
            exposed = dispatch(conn, None, 'get_methods', (token,))
        finally:
            conn.close()

    if authkey is None and manager is not None:
        authkey = manager._authkey
    if authkey is None:
        authkey = process.current_process().authkey

    # Generate (or reuse) a proxy class matching the exposed methods.
    ProxyType = MakeProxyType('AutoProxy[%s]' % token.typeid, exposed)
    proxy = ProxyType(token, serializer, manager=manager, authkey=authkey,
                      incref=incref, manager_owned=manager_owned)
    # Flag checked by BaseProxy.__reduce__ so repickling goes via AutoProxy.
    proxy._isauto = True
    return proxy
994
+
995
+ #
996
+ # Types/callables which we will register with SyncManager
997
+ #
998
+
999
class Namespace(object):
    '''Simple attribute container; underscore-prefixed attrs are hidden
    from the repr.'''
    def __init__(self, /, **kwds):
        self.__dict__.update(kwds)
    def __repr__(self):
        public = sorted(
            '%s=%r' % (name, value)
            for name, value in self.__dict__.items()
            if not name.startswith('_')
            )
        return '%s(%s)' % (self.__class__.__name__, ', '.join(public))
1010
+
1011
class Value(object):
    '''Container for a single value; the referent behind SyncManager.Value.'''
    def __init__(self, typecode, value, lock=True):
        # `lock` is accepted for API compatibility but synchronization is
        # provided by the manager, not here.
        self._typecode = typecode
        self._value = value
    def get(self):
        return self._value
    def set(self, value):
        self._value = value
    def __repr__(self):
        return '%s(%r, %r)' % (type(self).__name__, self._typecode, self._value)
    value = property(get, set)
1022
+
1023
def Array(typecode, sequence, lock=True):
    '''Return a plain array.array referent (`lock` kept for API parity).'''
    return array.array(typecode, sequence)
1025
+
1026
+ #
1027
+ # Proxy types used by SyncManager
1028
+ #
1029
+
1030
class IteratorProxy(BaseProxy):
    # Proxy for iterators/generators; delegates the iterator protocol
    # (plus generator send/throw/close) to the referent.
    _exposed_ = ('__next__', 'send', 'throw', 'close')
    def __iter__(self):
        return self
    def __next__(self, *args):
        return self._callmethod('__next__', args)
    def send(self, *args):
        return self._callmethod('send', args)
    def throw(self, *args):
        return self._callmethod('throw', args)
    def close(self, *args):
        return self._callmethod('close', args)
1042
+
1043
+
1044
class AcquirerProxy(BaseProxy):
    '''Proxy base for lock-like referents; usable as a context manager.'''
    _exposed_ = ('acquire', 'release')
    def acquire(self, blocking=True, timeout=None):
        # Only forward `timeout` when given, so referents whose acquire()
        # takes a single argument keep working.
        if timeout is None:
            args = (blocking,)
        else:
            args = (blocking, timeout)
        return self._callmethod('acquire', args)
    def release(self):
        return self._callmethod('release')
    def __enter__(self):
        return self._callmethod('acquire')
    def __exit__(self, exc_type, exc_val, exc_tb):
        return self._callmethod('release')
1055
+
1056
+
1057
class ConditionProxy(AcquirerProxy):
    # Proxy for threading.Condition-like referents.
    _exposed_ = ('acquire', 'release', 'wait', 'notify', 'notify_all')
    def wait(self, timeout=None):
        return self._callmethod('wait', (timeout,))
    def notify(self, n=1):
        return self._callmethod('notify', (n,))
    def notify_all(self):
        return self._callmethod('notify_all')
    def wait_for(self, predicate, timeout=None):
        # Local re-implementation of Condition.wait_for: the predicate is
        # evaluated in this process, only wait() goes to the server.
        result = predicate()
        if result:
            return result
        if timeout is not None:
            # getattr fallback keeps compatibility where time.monotonic
            # is unavailable.
            endtime = getattr(time,'monotonic',time.time)() + timeout
        else:
            endtime = None
            waittime = None
        while not result:
            if endtime is not None:
                waittime = endtime - getattr(time,'monotonic',time.time)()
                if waittime <= 0:
                    break
            self.wait(waittime)
            result = predicate()
        return result
1082
+
1083
+
1084
class EventProxy(BaseProxy):
    # Proxy for threading.Event-like referents; pure delegation.
    _exposed_ = ('is_set', 'set', 'clear', 'wait')
    def is_set(self):
        return self._callmethod('is_set')
    def set(self):
        return self._callmethod('set')
    def clear(self):
        return self._callmethod('clear')
    def wait(self, timeout=None):
        return self._callmethod('wait', (timeout,))
1094
+
1095
+
1096
+ class BarrierProxy(BaseProxy):
1097
+ _exposed_ = ('__getattribute__', 'wait', 'abort', 'reset')
1098
+ def wait(self, timeout=None):
1099
+ return self._callmethod('wait', (timeout,))
1100
+ def abort(self):
1101
+ return self._callmethod('abort')
1102
+ def reset(self):
1103
+ return self._callmethod('reset')
1104
+ @property
1105
+ def parties(self):
1106
+ return self._callmethod('__getattribute__', ('parties',))
1107
+ @property
1108
+ def n_waiting(self):
1109
+ return self._callmethod('__getattribute__', ('n_waiting',))
1110
+ @property
1111
+ def broken(self):
1112
+ return self._callmethod('__getattribute__', ('broken',))
1113
+
1114
+
1115
+ class NamespaceProxy(BaseProxy):
1116
+ _exposed_ = ('__getattribute__', '__setattr__', '__delattr__')
1117
+ def __getattr__(self, key):
1118
+ if key[0] == '_':
1119
+ return object.__getattribute__(self, key)
1120
+ callmethod = object.__getattribute__(self, '_callmethod')
1121
+ return callmethod('__getattribute__', (key,))
1122
+ def __setattr__(self, key, value):
1123
+ if key[0] == '_':
1124
+ return object.__setattr__(self, key, value)
1125
+ callmethod = object.__getattribute__(self, '_callmethod')
1126
+ return callmethod('__setattr__', (key, value))
1127
+ def __delattr__(self, key):
1128
+ if key[0] == '_':
1129
+ return object.__delattr__(self, key)
1130
+ callmethod = object.__getattribute__(self, '_callmethod')
1131
+ return callmethod('__delattr__', (key,))
1132
+
1133
+
1134
+ class ValueProxy(BaseProxy):
1135
+ _exposed_ = ('get', 'set')
1136
+ def get(self):
1137
+ return self._callmethod('get')
1138
+ def set(self, value):
1139
+ return self._callmethod('set', (value,))
1140
+ value = property(get, set)
1141
+
1142
+ __class_getitem__ = classmethod(types.GenericAlias)
1143
+
1144
+
1145
+ BaseListProxy = MakeProxyType('BaseListProxy', (
1146
+ '__add__', '__contains__', '__delitem__', '__getitem__', '__len__',
1147
+ '__mul__', '__reversed__', '__rmul__', '__setitem__',
1148
+ 'append', 'count', 'extend', 'index', 'insert', 'pop', 'remove',
1149
+ 'reverse', 'sort', '__imul__'
1150
+ ))
1151
+ class ListProxy(BaseListProxy):
1152
+ def __iadd__(self, value):
1153
+ self._callmethod('extend', (value,))
1154
+ return self
1155
+ def __imul__(self, value):
1156
+ self._callmethod('__imul__', (value,))
1157
+ return self
1158
+
1159
+
1160
+ DictProxy = MakeProxyType('DictProxy', (
1161
+ '__contains__', '__delitem__', '__getitem__', '__iter__', '__len__',
1162
+ '__setitem__', 'clear', 'copy', 'get', 'items',
1163
+ 'keys', 'pop', 'popitem', 'setdefault', 'update', 'values'
1164
+ ))
1165
+ DictProxy._method_to_typeid_ = {
1166
+ '__iter__': 'Iterator',
1167
+ }
1168
+
1169
+
1170
+ ArrayProxy = MakeProxyType('ArrayProxy', (
1171
+ '__len__', '__getitem__', '__setitem__'
1172
+ ))
1173
+
1174
+
1175
+ BasePoolProxy = MakeProxyType('PoolProxy', (
1176
+ 'apply', 'apply_async', 'close', 'imap', 'imap_unordered', 'join',
1177
+ 'map', 'map_async', 'starmap', 'starmap_async', 'terminate',
1178
+ ))
1179
+ BasePoolProxy._method_to_typeid_ = {
1180
+ 'apply_async': 'AsyncResult',
1181
+ 'map_async': 'AsyncResult',
1182
+ 'starmap_async': 'AsyncResult',
1183
+ 'imap': 'Iterator',
1184
+ 'imap_unordered': 'Iterator'
1185
+ }
1186
+ class PoolProxy(BasePoolProxy):
1187
+ def __enter__(self):
1188
+ return self
1189
+ def __exit__(self, exc_type, exc_val, exc_tb):
1190
+ self.terminate()
1191
+
1192
+ #
1193
+ # Definition of SyncManager
1194
+ #
1195
+
1196
+ class SyncManager(BaseManager):
1197
+ '''
1198
+ Subclass of `BaseManager` which supports a number of shared object types.
1199
+
1200
+ The types registered are those intended for the synchronization
1201
+ of threads, plus `dict`, `list` and `Namespace`.
1202
+
1203
+ The `multiprocess.Manager()` function creates started instances of
1204
+ this class.
1205
+ '''
1206
+
1207
+ SyncManager.register('Queue', queue.Queue)
1208
+ SyncManager.register('JoinableQueue', queue.Queue)
1209
+ SyncManager.register('Event', threading.Event, EventProxy)
1210
+ SyncManager.register('Lock', threading.Lock, AcquirerProxy)
1211
+ SyncManager.register('RLock', threading.RLock, AcquirerProxy)
1212
+ SyncManager.register('Semaphore', threading.Semaphore, AcquirerProxy)
1213
+ SyncManager.register('BoundedSemaphore', threading.BoundedSemaphore,
1214
+ AcquirerProxy)
1215
+ SyncManager.register('Condition', threading.Condition, ConditionProxy)
1216
+ SyncManager.register('Barrier', threading.Barrier, BarrierProxy)
1217
+ SyncManager.register('Pool', pool.Pool, PoolProxy)
1218
+ SyncManager.register('list', list, ListProxy)
1219
+ SyncManager.register('dict', dict, DictProxy)
1220
+ SyncManager.register('Value', Value, ValueProxy)
1221
+ SyncManager.register('Array', Array, ArrayProxy)
1222
+ SyncManager.register('Namespace', Namespace, NamespaceProxy)
1223
+
1224
+ # types returned by methods of PoolProxy
1225
+ SyncManager.register('Iterator', proxytype=IteratorProxy, create_method=False)
1226
+ SyncManager.register('AsyncResult', create_method=False)
1227
+
1228
+ #
1229
+ # Definition of SharedMemoryManager and SharedMemoryServer
1230
+ #
1231
+
1232
+ if HAS_SHMEM:
1233
+ class _SharedMemoryTracker:
1234
+ "Manages one or more shared memory segments."
1235
+
1236
+ def __init__(self, name, segment_names=[]):
1237
+ self.shared_memory_context_name = name
1238
+ self.segment_names = segment_names
1239
+
1240
+ def register_segment(self, segment_name):
1241
+ "Adds the supplied shared memory block name to tracker."
1242
+ util.debug(f"Register segment {segment_name!r} in pid {getpid()}")
1243
+ self.segment_names.append(segment_name)
1244
+
1245
+ def destroy_segment(self, segment_name):
1246
+ """Calls unlink() on the shared memory block with the supplied name
1247
+ and removes it from the list of blocks being tracked."""
1248
+ util.debug(f"Destroy segment {segment_name!r} in pid {getpid()}")
1249
+ self.segment_names.remove(segment_name)
1250
+ segment = shared_memory.SharedMemory(segment_name)
1251
+ segment.close()
1252
+ segment.unlink()
1253
+
1254
+ def unlink(self):
1255
+ "Calls destroy_segment() on all tracked shared memory blocks."
1256
+ for segment_name in self.segment_names[:]:
1257
+ self.destroy_segment(segment_name)
1258
+
1259
+ def __del__(self):
1260
+ util.debug(f"Call {self.__class__.__name__}.__del__ in {getpid()}")
1261
+ self.unlink()
1262
+
1263
+ def __getstate__(self):
1264
+ return (self.shared_memory_context_name, self.segment_names)
1265
+
1266
+ def __setstate__(self, state):
1267
+ self.__init__(*state)
1268
+
1269
+
1270
+ class SharedMemoryServer(Server):
1271
+
1272
+ public = Server.public + \
1273
+ ['track_segment', 'release_segment', 'list_segments']
1274
+
1275
+ def __init__(self, *args, **kwargs):
1276
+ Server.__init__(self, *args, **kwargs)
1277
+ address = self.address
1278
+ # The address of Linux abstract namespaces can be bytes
1279
+ if isinstance(address, bytes):
1280
+ address = os.fsdecode(address)
1281
+ self.shared_memory_context = \
1282
+ _SharedMemoryTracker(f"shm_{address}_{getpid()}")
1283
+ util.debug(f"SharedMemoryServer started by pid {getpid()}")
1284
+
1285
+ def create(self, c, typeid, /, *args, **kwargs):
1286
+ """Create a new distributed-shared object (not backed by a shared
1287
+ memory block) and return its id to be used in a Proxy Object."""
1288
+ # Unless set up as a shared proxy, don't make shared_memory_context
1289
+ # a standard part of kwargs. This makes things easier for supplying
1290
+ # simple functions.
1291
+ if hasattr(self.registry[typeid][-1], "_shared_memory_proxy"):
1292
+ kwargs['shared_memory_context'] = self.shared_memory_context
1293
+ return Server.create(self, c, typeid, *args, **kwargs)
1294
+
1295
+ def shutdown(self, c):
1296
+ "Call unlink() on all tracked shared memory, terminate the Server."
1297
+ self.shared_memory_context.unlink()
1298
+ return Server.shutdown(self, c)
1299
+
1300
+ def track_segment(self, c, segment_name):
1301
+ "Adds the supplied shared memory block name to Server's tracker."
1302
+ self.shared_memory_context.register_segment(segment_name)
1303
+
1304
+ def release_segment(self, c, segment_name):
1305
+ """Calls unlink() on the shared memory block with the supplied name
1306
+ and removes it from the tracker instance inside the Server."""
1307
+ self.shared_memory_context.destroy_segment(segment_name)
1308
+
1309
+ def list_segments(self, c):
1310
+ """Returns a list of names of shared memory blocks that the Server
1311
+ is currently tracking."""
1312
+ return self.shared_memory_context.segment_names
1313
+
1314
+
1315
+ class SharedMemoryManager(BaseManager):
1316
+ """Like SyncManager but uses SharedMemoryServer instead of Server.
1317
+
1318
+ It provides methods for creating and returning SharedMemory instances
1319
+ and for creating a list-like object (ShareableList) backed by shared
1320
+ memory. It also provides methods that create and return Proxy Objects
1321
+ that support synchronization across processes (i.e. multi-process-safe
1322
+ locks and semaphores).
1323
+ """
1324
+
1325
+ _Server = SharedMemoryServer
1326
+
1327
+ def __init__(self, *args, **kwargs):
1328
+ if os.name == "posix":
1329
+ # bpo-36867: Ensure the resource_tracker is running before
1330
+ # launching the manager process, so that concurrent
1331
+ # shared_memory manipulation both in the manager and in the
1332
+ # current process does not create two resource_tracker
1333
+ # processes.
1334
+ from . import resource_tracker
1335
+ resource_tracker.ensure_running()
1336
+ BaseManager.__init__(self, *args, **kwargs)
1337
+ util.debug(f"{self.__class__.__name__} created by pid {getpid()}")
1338
+
1339
+ def __del__(self):
1340
+ util.debug(f"{self.__class__.__name__}.__del__ by pid {getpid()}")
1341
+ pass
1342
+
1343
+ def get_server(self):
1344
+ 'Better than monkeypatching for now; merge into Server ultimately'
1345
+ if self._state.value != State.INITIAL:
1346
+ if self._state.value == State.STARTED:
1347
+ raise ProcessError("Already started SharedMemoryServer")
1348
+ elif self._state.value == State.SHUTDOWN:
1349
+ raise ProcessError("SharedMemoryManager has shut down")
1350
+ else:
1351
+ raise ProcessError(
1352
+ "Unknown state {!r}".format(self._state.value))
1353
+ return self._Server(self._registry, self._address,
1354
+ self._authkey, self._serializer)
1355
+
1356
+ def SharedMemory(self, size):
1357
+ """Returns a new SharedMemory instance with the specified size in
1358
+ bytes, to be tracked by the manager."""
1359
+ with self._Client(self._address, authkey=self._authkey) as conn:
1360
+ sms = shared_memory.SharedMemory(None, create=True, size=size)
1361
+ try:
1362
+ dispatch(conn, None, 'track_segment', (sms.name,))
1363
+ except BaseException as e:
1364
+ sms.unlink()
1365
+ raise e
1366
+ return sms
1367
+
1368
+ def ShareableList(self, sequence):
1369
+ """Returns a new ShareableList instance populated with the values
1370
+ from the input sequence, to be tracked by the manager."""
1371
+ with self._Client(self._address, authkey=self._authkey) as conn:
1372
+ sl = shared_memory.ShareableList(sequence)
1373
+ try:
1374
+ dispatch(conn, None, 'track_segment', (sl.shm.name,))
1375
+ except BaseException as e:
1376
+ sl.shm.unlink()
1377
+ raise e
1378
+ return sl
venv/lib/python3.10/site-packages/multiprocess/popen_fork.py ADDED
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import signal
3
+
4
+ from . import util
5
+
6
+ __all__ = ['Popen']
7
+
8
+ #
9
+ # Start child process using fork
10
+ #
11
+
12
+ class Popen(object):
13
+ method = 'fork'
14
+
15
+ def __init__(self, process_obj):
16
+ util._flush_std_streams()
17
+ self.returncode = None
18
+ self.finalizer = None
19
+ self._launch(process_obj)
20
+
21
+ def duplicate_for_child(self, fd):
22
+ return fd
23
+
24
+ def poll(self, flag=os.WNOHANG):
25
+ if self.returncode is None:
26
+ try:
27
+ pid, sts = os.waitpid(self.pid, flag)
28
+ except OSError:
29
+ # Child process not yet created. See #1731717
30
+ # e.errno == errno.ECHILD == 10
31
+ return None
32
+ if pid == self.pid:
33
+ self.returncode = os.waitstatus_to_exitcode(sts)
34
+ return self.returncode
35
+
36
+ def wait(self, timeout=None):
37
+ if self.returncode is None:
38
+ if timeout is not None:
39
+ from multiprocess.connection import wait
40
+ if not wait([self.sentinel], timeout):
41
+ return None
42
+ # This shouldn't block if wait() returned successfully.
43
+ return self.poll(os.WNOHANG if timeout == 0.0 else 0)
44
+ return self.returncode
45
+
46
+ def _send_signal(self, sig):
47
+ if self.returncode is None:
48
+ try:
49
+ os.kill(self.pid, sig)
50
+ except ProcessLookupError:
51
+ pass
52
+ except OSError:
53
+ if self.wait(timeout=0.1) is None:
54
+ raise
55
+
56
+ def terminate(self):
57
+ self._send_signal(signal.SIGTERM)
58
+
59
+ def kill(self):
60
+ self._send_signal(signal.SIGKILL)
61
+
62
+ def _launch(self, process_obj):
63
+ code = 1
64
+ parent_r, child_w = os.pipe()
65
+ child_r, parent_w = os.pipe()
66
+ self.pid = os.fork()
67
+ if self.pid == 0:
68
+ try:
69
+ os.close(parent_r)
70
+ os.close(parent_w)
71
+ code = process_obj._bootstrap(parent_sentinel=child_r)
72
+ finally:
73
+ os._exit(code)
74
+ else:
75
+ os.close(child_w)
76
+ os.close(child_r)
77
+ self.finalizer = util.Finalize(self, util.close_fds,
78
+ (parent_r, parent_w,))
79
+ self.sentinel = parent_r
80
+
81
+ def close(self):
82
+ if self.finalizer is not None:
83
+ self.finalizer()
venv/lib/python3.10/site-packages/multiprocess/popen_spawn_posix.py ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import io
2
+ import os
3
+
4
+ from .context import reduction, set_spawning_popen
5
+ from . import popen_fork
6
+ from . import spawn
7
+ from . import util
8
+
9
+ __all__ = ['Popen']
10
+
11
+
12
+ #
13
+ # Wrapper for an fd used while launching a process
14
+ #
15
+
16
+ class _DupFd(object):
17
+ def __init__(self, fd):
18
+ self.fd = fd
19
+ def detach(self):
20
+ return self.fd
21
+
22
+ #
23
+ # Start child process using a fresh interpreter
24
+ #
25
+
26
+ class Popen(popen_fork.Popen):
27
+ method = 'spawn'
28
+ DupFd = _DupFd
29
+
30
+ def __init__(self, process_obj):
31
+ self._fds = []
32
+ super().__init__(process_obj)
33
+
34
+ def duplicate_for_child(self, fd):
35
+ self._fds.append(fd)
36
+ return fd
37
+
38
+ def _launch(self, process_obj):
39
+ from . import resource_tracker
40
+ tracker_fd = resource_tracker.getfd()
41
+ self._fds.append(tracker_fd)
42
+ prep_data = spawn.get_preparation_data(process_obj._name)
43
+ fp = io.BytesIO()
44
+ set_spawning_popen(self)
45
+ try:
46
+ reduction.dump(prep_data, fp)
47
+ reduction.dump(process_obj, fp)
48
+ finally:
49
+ set_spawning_popen(None)
50
+
51
+ parent_r = child_w = child_r = parent_w = None
52
+ try:
53
+ parent_r, child_w = os.pipe()
54
+ child_r, parent_w = os.pipe()
55
+ cmd = spawn.get_command_line(tracker_fd=tracker_fd,
56
+ pipe_handle=child_r)
57
+ self._fds.extend([child_r, child_w])
58
+ self.pid = util.spawnv_passfds(spawn.get_executable(),
59
+ cmd, self._fds)
60
+ self.sentinel = parent_r
61
+ with open(parent_w, 'wb', closefd=False) as f:
62
+ f.write(fp.getbuffer())
63
+ finally:
64
+ fds_to_close = []
65
+ for fd in (parent_r, parent_w):
66
+ if fd is not None:
67
+ fds_to_close.append(fd)
68
+ self.finalizer = util.Finalize(self, util.close_fds, fds_to_close)
69
+
70
+ for fd in (child_r, child_w):
71
+ if fd is not None:
72
+ os.close(fd)
venv/lib/python3.10/site-packages/multiprocess/popen_spawn_win32.py ADDED
@@ -0,0 +1,131 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import msvcrt
3
+ import signal
4
+ import sys
5
+ import _winapi
6
+
7
+ from .context import reduction, get_spawning_popen, set_spawning_popen
8
+ from . import spawn
9
+ from . import util
10
+
11
+ __all__ = ['Popen']
12
+
13
+ #
14
+ #
15
+ #
16
+
17
+ TERMINATE = 0x10000
18
+ WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False))
19
+ WINSERVICE = sys.executable.lower().endswith("pythonservice.exe")
20
+
21
+
22
+ def _path_eq(p1, p2):
23
+ return p1 == p2 or os.path.normcase(p1) == os.path.normcase(p2)
24
+
25
+ WINENV = not _path_eq(sys.executable, sys._base_executable)
26
+
27
+
28
+ def _close_handles(*handles):
29
+ for handle in handles:
30
+ _winapi.CloseHandle(handle)
31
+
32
+
33
+ #
34
+ # We define a Popen class similar to the one from subprocess, but
35
+ # whose constructor takes a process object as its argument.
36
+ #
37
+
38
+ class Popen(object):
39
+ '''
40
+ Start a subprocess to run the code of a process object
41
+ '''
42
+ method = 'spawn'
43
+
44
+ def __init__(self, process_obj):
45
+ prep_data = spawn.get_preparation_data(process_obj._name)
46
+
47
+ # read end of pipe will be duplicated by the child process
48
+ # -- see spawn_main() in spawn.py.
49
+ #
50
+ # bpo-33929: Previously, the read end of pipe was "stolen" by the child
51
+ # process, but it leaked a handle if the child process had been
52
+ # terminated before it could steal the handle from the parent process.
53
+ rhandle, whandle = _winapi.CreatePipe(None, 0)
54
+ wfd = msvcrt.open_osfhandle(whandle, 0)
55
+ cmd = spawn.get_command_line(parent_pid=os.getpid(),
56
+ pipe_handle=rhandle)
57
+ cmd = ' '.join('"%s"' % x for x in cmd)
58
+
59
+ python_exe = spawn.get_executable()
60
+
61
+ # bpo-35797: When running in a venv, we bypass the redirect
62
+ # executor and launch our base Python.
63
+ if WINENV and _path_eq(python_exe, sys.executable):
64
+ python_exe = sys._base_executable
65
+ env = os.environ.copy()
66
+ env["__PYVENV_LAUNCHER__"] = sys.executable
67
+ else:
68
+ env = None
69
+
70
+ with open(wfd, 'wb', closefd=True) as to_child:
71
+ # start process
72
+ try:
73
+ hp, ht, pid, tid = _winapi.CreateProcess(
74
+ python_exe, cmd,
75
+ None, None, False, 0, env, None, None)
76
+ _winapi.CloseHandle(ht)
77
+ except:
78
+ _winapi.CloseHandle(rhandle)
79
+ raise
80
+
81
+ # set attributes of self
82
+ self.pid = pid
83
+ self.returncode = None
84
+ self._handle = hp
85
+ self.sentinel = int(hp)
86
+ self.finalizer = util.Finalize(self, _close_handles,
87
+ (self.sentinel, int(rhandle)))
88
+
89
+ # send information to child
90
+ set_spawning_popen(self)
91
+ try:
92
+ reduction.dump(prep_data, to_child)
93
+ reduction.dump(process_obj, to_child)
94
+ finally:
95
+ set_spawning_popen(None)
96
+
97
+ def duplicate_for_child(self, handle):
98
+ assert self is get_spawning_popen()
99
+ return reduction.duplicate(handle, self.sentinel)
100
+
101
+ def wait(self, timeout=None):
102
+ if self.returncode is None:
103
+ if timeout is None:
104
+ msecs = _winapi.INFINITE
105
+ else:
106
+ msecs = max(0, int(timeout * 1000 + 0.5))
107
+
108
+ res = _winapi.WaitForSingleObject(int(self._handle), msecs)
109
+ if res == _winapi.WAIT_OBJECT_0:
110
+ code = _winapi.GetExitCodeProcess(self._handle)
111
+ if code == TERMINATE:
112
+ code = -signal.SIGTERM
113
+ self.returncode = code
114
+
115
+ return self.returncode
116
+
117
+ def poll(self):
118
+ return self.wait(timeout=0)
119
+
120
+ def terminate(self):
121
+ if self.returncode is None:
122
+ try:
123
+ _winapi.TerminateProcess(int(self._handle), TERMINATE)
124
+ except OSError:
125
+ if self.wait(timeout=1.0) is None:
126
+ raise
127
+
128
+ kill = terminate
129
+
130
+ def close(self):
131
+ self.finalizer()
venv/lib/python3.10/site-packages/multiprocess/process.py ADDED
@@ -0,0 +1,438 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # Module providing the `Process` class which emulates `threading.Thread`
3
+ #
4
+ # multiprocessing/process.py
5
+ #
6
+ # Copyright (c) 2006-2008, R Oudkerk
7
+ # Licensed to PSF under a Contributor Agreement.
8
+ #
9
+
10
+ __all__ = ['BaseProcess', 'current_process', 'active_children',
11
+ 'parent_process']
12
+
13
+ #
14
+ # Imports
15
+ #
16
+
17
+ import os
18
+ import sys
19
+ import signal
20
+ import itertools
21
+ import threading
22
+ from _weakrefset import WeakSet
23
+
24
+ #
25
+ #
26
+ #
27
+
28
+ try:
29
+ ORIGINAL_DIR = os.path.abspath(os.getcwd())
30
+ except OSError:
31
+ ORIGINAL_DIR = None
32
+
33
+ #
34
+ # Public functions
35
+ #
36
+
37
+ def current_process():
38
+ '''
39
+ Return process object representing the current process
40
+ '''
41
+ return _current_process
42
+
43
+ def active_children():
44
+ '''
45
+ Return list of process objects corresponding to live child processes
46
+ '''
47
+ _cleanup()
48
+ return list(_children)
49
+
50
+
51
+ def parent_process():
52
+ '''
53
+ Return process object representing the parent process
54
+ '''
55
+ return _parent_process
56
+
57
+ #
58
+ #
59
+ #
60
+
61
+ def _cleanup():
62
+ # check for processes which have finished
63
+ for p in list(_children):
64
+ if p._popen.poll() is not None:
65
+ _children.discard(p)
66
+
67
+ #
68
+ # The `Process` class
69
+ #
70
+
71
+ class BaseProcess(object):
72
+ '''
73
+ Process objects represent activity that is run in a separate process
74
+
75
+ The class is analogous to `threading.Thread`
76
+ '''
77
+ def _Popen(self):
78
+ raise NotImplementedError
79
+
80
+ def __init__(self, group=None, target=None, name=None, args=(), kwargs={},
81
+ *, daemon=None):
82
+ assert group is None, 'group argument must be None for now'
83
+ count = next(_process_counter)
84
+ self._identity = _current_process._identity + (count,)
85
+ self._config = _current_process._config.copy()
86
+ self._parent_pid = os.getpid()
87
+ self._parent_name = _current_process.name
88
+ self._popen = None
89
+ self._closed = False
90
+ self._target = target
91
+ self._args = tuple(args)
92
+ self._kwargs = dict(kwargs)
93
+ self._name = name or type(self).__name__ + '-' + \
94
+ ':'.join(str(i) for i in self._identity)
95
+ if daemon is not None:
96
+ self.daemon = daemon
97
+ _dangling.add(self)
98
+
99
+ def _check_closed(self):
100
+ if self._closed:
101
+ raise ValueError("process object is closed")
102
+
103
+ def run(self):
104
+ '''
105
+ Method to be run in sub-process; can be overridden in sub-class
106
+ '''
107
+ if self._target:
108
+ self._target(*self._args, **self._kwargs)
109
+
110
+ def start(self):
111
+ '''
112
+ Start child process
113
+ '''
114
+ self._check_closed()
115
+ assert self._popen is None, 'cannot start a process twice'
116
+ assert self._parent_pid == os.getpid(), \
117
+ 'can only start a process object created by current process'
118
+ assert not _current_process._config.get('daemon'), \
119
+ 'daemonic processes are not allowed to have children'
120
+ _cleanup()
121
+ self._popen = self._Popen(self)
122
+ self._sentinel = self._popen.sentinel
123
+ # Avoid a refcycle if the target function holds an indirect
124
+ # reference to the process object (see bpo-30775)
125
+ del self._target, self._args, self._kwargs
126
+ _children.add(self)
127
+
128
+ def terminate(self):
129
+ '''
130
+ Terminate process; sends SIGTERM signal or uses TerminateProcess()
131
+ '''
132
+ self._check_closed()
133
+ self._popen.terminate()
134
+
135
+ def kill(self):
136
+ '''
137
+ Terminate process; sends SIGKILL signal or uses TerminateProcess()
138
+ '''
139
+ self._check_closed()
140
+ self._popen.kill()
141
+
142
+ def join(self, timeout=None):
143
+ '''
144
+ Wait until child process terminates
145
+ '''
146
+ self._check_closed()
147
+ assert self._parent_pid == os.getpid(), 'can only join a child process'
148
+ assert self._popen is not None, 'can only join a started process'
149
+ res = self._popen.wait(timeout)
150
+ if res is not None:
151
+ _children.discard(self)
152
+
153
+ def is_alive(self):
154
+ '''
155
+ Return whether process is alive
156
+ '''
157
+ self._check_closed()
158
+ if self is _current_process:
159
+ return True
160
+ assert self._parent_pid == os.getpid(), 'can only test a child process'
161
+
162
+ if self._popen is None:
163
+ return False
164
+
165
+ returncode = self._popen.poll()
166
+ if returncode is None:
167
+ return True
168
+ else:
169
+ _children.discard(self)
170
+ return False
171
+
172
+ def close(self):
173
+ '''
174
+ Close the Process object.
175
+
176
+ This method releases resources held by the Process object. It is
177
+ an error to call this method if the child process is still running.
178
+ '''
179
+ if self._popen is not None:
180
+ if self._popen.poll() is None:
181
+ raise ValueError("Cannot close a process while it is still running. "
182
+ "You should first call join() or terminate().")
183
+ self._popen.close()
184
+ self._popen = None
185
+ del self._sentinel
186
+ _children.discard(self)
187
+ self._closed = True
188
+
189
+ @property
190
+ def name(self):
191
+ return self._name
192
+
193
+ @name.setter
194
+ def name(self, name):
195
+ assert isinstance(name, str), 'name must be a string'
196
+ self._name = name
197
+
198
+ @property
199
+ def daemon(self):
200
+ '''
201
+ Return whether process is a daemon
202
+ '''
203
+ return self._config.get('daemon', False)
204
+
205
+ @daemon.setter
206
+ def daemon(self, daemonic):
207
+ '''
208
+ Set whether process is a daemon
209
+ '''
210
+ assert self._popen is None, 'process has already started'
211
+ self._config['daemon'] = daemonic
212
+
213
+ @property
214
+ def authkey(self):
215
+ return self._config['authkey']
216
+
217
+ @authkey.setter
218
+ def authkey(self, authkey):
219
+ '''
220
+ Set authorization key of process
221
+ '''
222
+ self._config['authkey'] = AuthenticationString(authkey)
223
+
224
+ @property
225
+ def exitcode(self):
226
+ '''
227
+ Return exit code of process or `None` if it has yet to stop
228
+ '''
229
+ self._check_closed()
230
+ if self._popen is None:
231
+ return self._popen
232
+ return self._popen.poll()
233
+
234
+ @property
235
+ def ident(self):
236
+ '''
237
+ Return identifier (PID) of process or `None` if it has yet to start
238
+ '''
239
+ self._check_closed()
240
+ if self is _current_process:
241
+ return os.getpid()
242
+ else:
243
+ return self._popen and self._popen.pid
244
+
245
+ pid = ident
246
+
247
+ @property
248
+ def sentinel(self):
249
+ '''
250
+ Return a file descriptor (Unix) or handle (Windows) suitable for
251
+ waiting for process termination.
252
+ '''
253
+ self._check_closed()
254
+ try:
255
+ return self._sentinel
256
+ except AttributeError:
257
+ raise ValueError("process not started") from None
258
+
259
+ def __repr__(self):
260
+ exitcode = None
261
+ if self is _current_process:
262
+ status = 'started'
263
+ elif self._closed:
264
+ status = 'closed'
265
+ elif self._parent_pid != os.getpid():
266
+ status = 'unknown'
267
+ elif self._popen is None:
268
+ status = 'initial'
269
+ else:
270
+ exitcode = self._popen.poll()
271
+ if exitcode is not None:
272
+ status = 'stopped'
273
+ else:
274
+ status = 'started'
275
+
276
+ info = [type(self).__name__, 'name=%r' % self._name]
277
+ if self._popen is not None:
278
+ info.append('pid=%s' % self._popen.pid)
279
+ info.append('parent=%s' % self._parent_pid)
280
+ info.append(status)
281
+ if exitcode is not None:
282
+ exitcode = _exitcode_to_name.get(exitcode, exitcode)
283
+ info.append('exitcode=%s' % exitcode)
284
+ if self.daemon:
285
+ info.append('daemon')
286
+ return '<%s>' % ' '.join(info)
287
+
288
+ ##
289
+
290
+ def _bootstrap(self, parent_sentinel=None):
291
+ from . import util, context
292
+ global _current_process, _parent_process, _process_counter, _children
293
+
294
+ try:
295
+ if self._start_method is not None:
296
+ context._force_start_method(self._start_method)
297
+ _process_counter = itertools.count(1)
298
+ _children = set()
299
+ util._close_stdin()
300
+ old_process = _current_process
301
+ _current_process = self
302
+ _parent_process = _ParentProcess(
303
+ self._parent_name, self._parent_pid, parent_sentinel)
304
+ if threading._HAVE_THREAD_NATIVE_ID:
305
+ threading.main_thread()._set_native_id()
306
+ try:
307
+ self._after_fork()
308
+ finally:
309
+ # delay finalization of the old process object until after
310
+ # _run_after_forkers() is executed
311
+ del old_process
312
+ util.info('child process calling self.run()')
313
+ try:
314
+ self.run()
315
+ exitcode = 0
316
+ finally:
317
+ util._exit_function()
318
+ except SystemExit as e:
319
+ if e.code is None:
320
+ exitcode = 0
321
+ elif isinstance(e.code, int):
322
+ exitcode = e.code
323
+ else:
324
+ sys.stderr.write(str(e.code) + '\n')
325
+ exitcode = 1
326
+ except:
327
+ exitcode = 1
328
+ import traceback
329
+ sys.stderr.write('Process %s:\n' % self.name)
330
+ traceback.print_exc()
331
+ finally:
332
+ threading._shutdown()
333
+ util.info('process exiting with exitcode %d' % exitcode)
334
+ util._flush_std_streams()
335
+
336
+ return exitcode
337
+
338
+ @staticmethod
339
+ def _after_fork():
340
+ from . import util
341
+ util._finalizer_registry.clear()
342
+ util._run_after_forkers()
343
+
344
+
345
+ #
346
+ # We subclass bytes to avoid accidental transmission of auth keys over network
347
+ #
348
+
349
+ class AuthenticationString(bytes):
350
+ def __reduce__(self):
351
+ from .context import get_spawning_popen
352
+ if get_spawning_popen() is None:
353
+ raise TypeError(
354
+ 'Pickling an AuthenticationString object is '
355
+ 'disallowed for security reasons'
356
+ )
357
+ return AuthenticationString, (bytes(self),)
358
+
359
+
360
+ #
361
+ # Create object representing the parent process
362
+ #
363
+
364
+ class _ParentProcess(BaseProcess):
365
+
366
+ def __init__(self, name, pid, sentinel):
367
+ self._identity = ()
368
+ self._name = name
369
+ self._pid = pid
370
+ self._parent_pid = None
371
+ self._popen = None
372
+ self._closed = False
373
+ self._sentinel = sentinel
374
+ self._config = {}
375
+
376
+ def is_alive(self):
377
+ from multiprocess.connection import wait
378
+ return not wait([self._sentinel], timeout=0)
379
+
380
+ @property
381
+ def ident(self):
382
+ return self._pid
383
+
384
+ def join(self, timeout=None):
385
+ '''
386
+ Wait until parent process terminates
387
+ '''
388
+ from multiprocess.connection import wait
389
+ wait([self._sentinel], timeout=timeout)
390
+
391
+ pid = ident
392
+
393
+ #
394
+ # Create object representing the main process
395
+ #
396
+
397
+ class _MainProcess(BaseProcess):
398
+
399
+ def __init__(self):
400
+ self._identity = ()
401
+ self._name = 'MainProcess'
402
+ self._parent_pid = None
403
+ self._popen = None
404
+ self._closed = False
405
+ self._config = {'authkey': AuthenticationString(os.urandom(32)),
406
+ 'semprefix': '/mp'}
407
+ # Note that some versions of FreeBSD only allow named
408
+ # semaphores to have names of up to 14 characters. Therefore
409
+ # we choose a short prefix.
410
+ #
411
+ # On MacOSX in a sandbox it may be necessary to use a
412
+ # different prefix -- see #19478.
413
+ #
414
+ # Everything in self._config will be inherited by descendant
415
+ # processes.
416
+
417
+ def close(self):
418
+ pass
419
+
420
+
421
+ _parent_process = None
422
+ _current_process = _MainProcess()
423
+ _process_counter = itertools.count(1)
424
+ _children = set()
425
+ del _MainProcess
426
+
427
+ #
428
+ # Give names to some return codes
429
+ #
430
+
431
+ _exitcode_to_name = {}
432
+
433
+ for name, signum in list(signal.__dict__.items()):
434
+ if name[:3]=='SIG' and '_' not in name:
435
+ _exitcode_to_name[-signum] = f'-{name}'
436
+
437
+ # For debug and leak testing
438
+ _dangling = WeakSet()
venv/lib/python3.10/site-packages/multiprocess/reduction.py ADDED
@@ -0,0 +1,284 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # Module which deals with pickling of objects.
3
+ #
4
+ # multiprocessing/reduction.py
5
+ #
6
+ # Copyright (c) 2006-2008, R Oudkerk
7
+ # Licensed to PSF under a Contributor Agreement.
8
+ #
9
+
10
+ from abc import ABCMeta
11
+ import copyreg
12
+ import functools
13
+ import io
14
+ import os
15
+ try:
16
+ import dill as pickle
17
+ except ImportError:
18
+ import pickle
19
+ import socket
20
+ import sys
21
+
22
+ from . import context
23
+
24
+ __all__ = ['send_handle', 'recv_handle', 'ForkingPickler', 'register', 'dump']
25
+
26
+
27
+ HAVE_SEND_HANDLE = (sys.platform == 'win32' or
28
+ (hasattr(socket, 'CMSG_LEN') and
29
+ hasattr(socket, 'SCM_RIGHTS') and
30
+ hasattr(socket.socket, 'sendmsg')))
31
+
32
+ #
33
+ # Pickler subclass
34
+ #
35
+
36
class ForkingPickler(pickle.Pickler):
    '''Pickler subclass used by multiprocess.'''

    # Reducers registered via register(); shared by all instances.
    _extra_reducers = {}
    _copyreg_dispatch_table = copyreg.dispatch_table

    def __init__(self, *args, **kwds):
        super().__init__(*args, **kwds)
        # Per-instance dispatch table: copyreg's global table extended
        # with the reducers registered on this class.
        table = self._copyreg_dispatch_table.copy()
        table.update(self._extra_reducers)
        self.dispatch_table = table

    @classmethod
    def register(cls, type, reduce):
        '''Register a reduce function for a type.'''
        cls._extra_reducers[type] = reduce

    @classmethod
    def dumps(cls, obj, protocol=None, *args, **kwds):
        '''Pickle *obj* and return the pickled bytes as a buffer.'''
        sink = io.BytesIO()
        pickler = cls(sink, protocol, *args, **kwds)
        pickler.dump(obj)
        return sink.getbuffer()

    loads = pickle.loads
58
+
59
# Module-level convenience: register a reducer with ForkingPickler.
register = ForkingPickler.register

def dump(obj, file, protocol=None, *args, **kwds):
    '''Replacement for pickle.dump() using ForkingPickler.'''
    # Extra positional/keyword args are forwarded to the pickler
    # constructor (e.g. options understood by the underlying pickler).
    ForkingPickler(file, protocol, *args, **kwds).dump(obj)
64
+
65
+ #
66
+ # Platform specific definitions
67
+ #
68
+
69
if sys.platform == 'win32':
    # Windows: handles are moved between processes with DuplicateHandle.
    __all__ += ['DupHandle', 'duplicate', 'steal_handle']
    import _winapi

    def duplicate(handle, target_process=None, inheritable=False,
                  *, source_process=None):
        '''Duplicate a handle.  (target_process is a handle not a pid!)'''
        # Default both ends of the duplication to the current process.
        current_process = _winapi.GetCurrentProcess()
        if source_process is None:
            source_process = current_process
        if target_process is None:
            target_process = current_process
        return _winapi.DuplicateHandle(
            source_process, handle, target_process,
            0, inheritable, _winapi.DUPLICATE_SAME_ACCESS)

    def steal_handle(source_pid, handle):
        '''Steal a handle from process identified by source_pid.'''
        source_process_handle = _winapi.OpenProcess(
            _winapi.PROCESS_DUP_HANDLE, False, source_pid)
        try:
            # DUPLICATE_CLOSE_SOURCE removes the handle from the source
            # process while copying it into this one.
            return _winapi.DuplicateHandle(
                source_process_handle, handle,
                _winapi.GetCurrentProcess(), 0, False,
                _winapi.DUPLICATE_SAME_ACCESS | _winapi.DUPLICATE_CLOSE_SOURCE)
        finally:
            _winapi.CloseHandle(source_process_handle)

    def send_handle(conn, handle, destination_pid):
        '''Send a handle over a local connection.'''
        # The DupHandle wrapper is pickled across the connection; the
        # receiver calls .detach() to claim the handle.
        dh = DupHandle(handle, _winapi.DUPLICATE_SAME_ACCESS, destination_pid)
        conn.send(dh)

    def recv_handle(conn):
        '''Receive a handle over a local connection.'''
        return conn.recv().detach()

    class DupHandle(object):
        '''Picklable wrapper for a handle.'''
        def __init__(self, handle, access, pid=None):
            if pid is None:
                # We just duplicate the handle in the current process and
                # let the receiving process steal the handle.
                pid = os.getpid()
            proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False, pid)
            try:
                self._handle = _winapi.DuplicateHandle(
                    _winapi.GetCurrentProcess(),
                    handle, proc, access, False, 0)
            finally:
                _winapi.CloseHandle(proc)
            self._access = access
            self._pid = pid

        def detach(self):
            '''Get the handle.  This should only be called once.'''
            # retrieve handle from process which currently owns it
            if self._pid == os.getpid():
                # The handle has already been duplicated for this process.
                return self._handle
            # We must steal the handle from the process whose pid is self._pid.
            proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False,
                                       self._pid)
            try:
                return _winapi.DuplicateHandle(
                    proc, self._handle, _winapi.GetCurrentProcess(),
                    self._access, False, _winapi.DUPLICATE_CLOSE_SOURCE)
            finally:
                _winapi.CloseHandle(proc)

else:
    # Unix: fds travel over AF_UNIX sockets as SCM_RIGHTS ancillary data.
    __all__ += ['DupFd', 'sendfds', 'recvfds']
    import array

    # On MacOSX we should acknowledge receipt of fds -- see Issue14669
    ACKNOWLEDGE = sys.platform == 'darwin'

    def sendfds(sock, fds):
        '''Send an array of fds over an AF_UNIX socket.'''
        fds = array.array('i', fds)
        # The one-byte payload carries len(fds) mod 256 as a checksum
        # that recvfds() verifies.
        msg = bytes([len(fds) % 256])
        sock.sendmsg([msg], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, fds)])
        if ACKNOWLEDGE and sock.recv(1) != b'A':
            raise RuntimeError('did not receive acknowledgement of fd')

    def recvfds(sock, size):
        '''Receive an array of fds over an AF_UNIX socket.'''
        a = array.array('i')
        bytes_size = a.itemsize * size
        msg, ancdata, flags, addr = sock.recvmsg(1, socket.CMSG_SPACE(bytes_size))
        if not msg and not ancdata:
            raise EOFError
        try:
            if ACKNOWLEDGE:
                # Tell the sender the fds arrived (see ACKNOWLEDGE above).
                sock.send(b'A')
            if len(ancdata) != 1:
                raise RuntimeError('received %d items of ancdata' %
                                   len(ancdata))
            cmsg_level, cmsg_type, cmsg_data = ancdata[0]
            if (cmsg_level == socket.SOL_SOCKET and
                cmsg_type == socket.SCM_RIGHTS):
                if len(cmsg_data) % a.itemsize != 0:
                    raise ValueError
                a.frombytes(cmsg_data)
                # Cross-check against the mod-256 count sent by sendfds().
                if len(a) % 256 != msg[0]:
                    raise AssertionError(
                        "Len is {0:n} but msg[0] is {1!r}".format(
                            len(a), msg[0]))
                return list(a)
        except (ValueError, IndexError):
            pass
        raise RuntimeError('Invalid data received')

    def send_handle(conn, handle, destination_pid):
        '''Send a handle over a local connection.'''
        # Wrap the connection's fd in a socket object so sendmsg() is
        # available; destination_pid is unused on Unix.
        with socket.fromfd(conn.fileno(), socket.AF_UNIX, socket.SOCK_STREAM) as s:
            sendfds(s, [handle])

    def recv_handle(conn):
        '''Receive a handle over a local connection.'''
        with socket.fromfd(conn.fileno(), socket.AF_UNIX, socket.SOCK_STREAM) as s:
            return recvfds(s, 1)[0]

    def DupFd(fd):
        '''Return a wrapper for an fd.'''
        popen_obj = context.get_spawning_popen()
        if popen_obj is not None:
            # Pickling for a child being spawned: let the Popen object
            # arrange for the fd to be inherited by that child.
            return popen_obj.DupFd(popen_obj.duplicate_for_child(fd))
        elif HAVE_SEND_HANDLE:
            # Otherwise hand the fd to the resource-sharer background
            # thread, which serves it to whichever process unpickles.
            from . import resource_sharer
            return resource_sharer.DupFd(fd)
        else:
            raise ValueError('SCM_RIGHTS appears not to be available')
204
+
205
+ #
206
+ # Try making some callable types picklable
207
+ #
208
+
209
def _reduce_method(m):
    """Reduce a bound method to a getattr round-trip.

    A method bound to an instance is rebuilt by looking its name up on
    that instance; one with no __self__ is looked up on its class.
    """
    owner = m.__class__ if m.__self__ is None else m.__self__
    return getattr, (owner, m.__func__.__name__)
214
class _C:
    # Throwaway class used only to obtain the bound-method type below.
    def f(self):
        pass
# Make instance methods (type of _C().f) picklable via _reduce_method.
register(type(_C().f), _reduce_method)
218
+
219
+
220
def _reduce_method_descriptor(desc):
    """Reduce a method descriptor (e.g. list.append) to an attribute
    lookup on its defining class."""
    return getattr, (desc.__objclass__, desc.__name__)
222
+ register(type(list.append), _reduce_method_descriptor)
223
+ register(type(int.__add__), _reduce_method_descriptor)
224
+
225
+
226
def _rebuild_partial(func, args, keywords):
    """Recreate a functools.partial from its components."""
    return functools.partial(func, *args, **keywords)


def _reduce_partial(p):
    """Reduce a functools.partial to (_rebuild_partial, components)."""
    # p.keywords may be None on some implementations; normalise to {}.
    return _rebuild_partial, (p.func, p.args, p.keywords or {})
230
+ register(functools.partial, _reduce_partial)
231
+
232
+ #
233
+ # Make sockets picklable
234
+ #
235
+
236
# Pickling a socket hands the underlying handle/fd to the receiver.
if sys.platform == 'win32':
    def _reduce_socket(s):
        from .resource_sharer import DupSocket
        return _rebuild_socket, (DupSocket(s),)
    def _rebuild_socket(ds):
        return ds.detach()
    register(socket.socket, _reduce_socket)

else:
    def _reduce_socket(s):
        # Duplicate the fd so the socket stays valid until unpickled.
        df = DupFd(s.fileno())
        return _rebuild_socket, (df, s.family, s.type, s.proto)
    def _rebuild_socket(df, family, type, proto):
        fd = df.detach()
        return socket.socket(family, type, proto, fileno=fd)
    register(socket.socket, _reduce_socket)
252
+
253
+
254
class AbstractReducer(metaclass=ABCMeta):
    '''Abstract base class for use in implementing a Reduction class
    suitable for use in replacing the standard reduction mechanism
    used in multiprocess.'''
    # Re-export the module-level machinery as class attributes so a
    # subclass can override any individual piece of the protocol.
    ForkingPickler = ForkingPickler
    register = register
    dump = dump
    send_handle = send_handle
    recv_handle = recv_handle

    # Platform-specific helpers mirror the module-level definitions.
    if sys.platform == 'win32':
        steal_handle = steal_handle
        duplicate = duplicate
        DupHandle = DupHandle
    else:
        sendfds = sendfds
        recvfds = recvfds
        DupFd = DupFd

    _reduce_method = _reduce_method
    _reduce_method_descriptor = _reduce_method_descriptor
    _rebuild_partial = _rebuild_partial
    _reduce_socket = _reduce_socket
    _rebuild_socket = _rebuild_socket

    def __init__(self, *args):
        # Re-register the default reducers; registration mutates
        # ForkingPickler._extra_reducers at module level.
        register(type(_C().f), _reduce_method)
        register(type(list.append), _reduce_method_descriptor)
        register(type(int.__add__), _reduce_method_descriptor)
        register(functools.partial, _reduce_partial)
        register(socket.socket, _reduce_socket)
venv/lib/python3.10/site-packages/multiprocess/resource_sharer.py ADDED
@@ -0,0 +1,154 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # We use a background thread for sharing fds on Unix, and for sharing sockets on
3
+ # Windows.
4
+ #
5
+ # A client which wants to pickle a resource registers it with the resource
6
+ # sharer and gets an identifier in return. The unpickling process will connect
7
+ # to the resource sharer, sends the identifier and its pid, and then receives
8
+ # the resource.
9
+ #
10
+
11
+ import os
12
+ import signal
13
+ import socket
14
+ import sys
15
+ import threading
16
+
17
+ from . import process
18
+ from .context import reduction
19
+ from . import util
20
+
21
# Only stop() is public; everything else here is internal plumbing.
__all__ = ['stop']
22
+
23
+
24
if sys.platform == 'win32':
    __all__ += ['DupSocket']

    class DupSocket(object):
        '''Picklable wrapper for a socket.'''
        def __init__(self, sock):
            new_sock = sock.dup()
            def send(conn, pid):
                # Share the duplicated socket with the requesting pid.
                share = new_sock.share(pid)
                conn.send_bytes(share)
            # The resource sharer serves `send` when the id is claimed
            # and calls new_sock.close once it has been handed over.
            self._id = _resource_sharer.register(send, new_sock.close)

        def detach(self):
            '''Get the socket.  This should only be called once.'''
            with _resource_sharer.get_connection(self._id) as conn:
                share = conn.recv_bytes()
                return socket.fromshare(share)

else:
    __all__ += ['DupFd']

    class DupFd(object):
        '''Wrapper for fd which can be used at any time.'''
        def __init__(self, fd):
            # Duplicate now so the wrapper stays valid even if the
            # original fd is closed before the receiver fetches it.
            new_fd = os.dup(fd)
            def send(conn, pid):
                reduction.send_handle(conn, new_fd, pid)
            def close():
                os.close(new_fd)
            self._id = _resource_sharer.register(send, close)

        def detach(self):
            '''Get the fd.  This should only be called once.'''
            with _resource_sharer.get_connection(self._id) as conn:
                return reduction.recv_handle(conn)
59
+
60
+
61
class _ResourceSharer(object):
    '''Manager for resources using background thread.'''
    def __init__(self):
        self._key = 0                 # monotonically increasing resource id
        self._cache = {}              # key -> (send, close) callbacks
        self._lock = threading.Lock()
        self._listener = None         # created lazily by _start()
        self._address = None          # address clients connect to
        self._thread = None           # background serving thread
        # A forked child must not reuse the parent's cache/listener.
        util.register_after_fork(self, _ResourceSharer._afterfork)

    def register(self, send, close):
        '''Register resource, returning an identifier.'''
        with self._lock:
            if self._address is None:
                # Start the listener and serving thread on first use.
                self._start()
            self._key += 1
            self._cache[self._key] = (send, close)
            return (self._address, self._key)

    @staticmethod
    def get_connection(ident):
        '''Return connection from which to receive identified resource.'''
        from .connection import Client
        address, key = ident
        c = Client(address, authkey=process.current_process().authkey)
        # Tell the server which resource we want and who we are.
        c.send((key, os.getpid()))
        return c

    def stop(self, timeout=None):
        '''Stop the background thread and clear registered resources.'''
        from .connection import Client
        with self._lock:
            if self._address is not None:
                # A None message makes _serve() break out of its loop.
                c = Client(self._address,
                           authkey=process.current_process().authkey)
                c.send(None)
                c.close()
                self._thread.join(timeout)
                if self._thread.is_alive():
                    util.sub_warning('_ResourceSharer thread did '
                                     'not stop when asked')
                self._listener.close()
                self._thread = None
                self._address = None
                self._listener = None
                # Release every resource that was never claimed.
                for key, (send, close) in self._cache.items():
                    close()
                self._cache.clear()

    def _afterfork(self):
        # Running in the forked child: the cached resources and the
        # listener belong to the parent, so close our duplicates and
        # reset all state.
        for key, (send, close) in self._cache.items():
            close()
        self._cache.clear()
        # The lock may have been held at fork time; reinitialise it.
        self._lock._at_fork_reinit()
        if self._listener is not None:
            self._listener.close()
        self._listener = None
        self._address = None
        self._thread = None

    def _start(self):
        from .connection import Listener
        assert self._listener is None, "Already have Listener"
        util.debug('starting listener and thread for sending handles')
        self._listener = Listener(authkey=process.current_process().authkey)
        self._address = self._listener.address
        t = threading.Thread(target=self._serve)
        t.daemon = True   # must not keep the interpreter alive at exit
        t.start()
        self._thread = t

    def _serve(self):
        # Block all signals in this thread so they are delivered to the
        # main thread instead.
        if hasattr(signal, 'pthread_sigmask'):
            signal.pthread_sigmask(signal.SIG_BLOCK, signal.valid_signals())
        while 1:
            try:
                with self._listener.accept() as conn:
                    msg = conn.recv()
                    if msg is None:          # sentinel sent by stop()
                        break
                    key, destination_pid = msg
                    # pop(): each resource can be claimed exactly once.
                    send, close = self._cache.pop(key)
                    try:
                        send(conn, destination_pid)
                    finally:
                        close()
            except:
                if not util.is_exiting():
                    sys.excepthook(*sys.exc_info())
151
+
152
+
153
# Module-level singleton used by DupFd/DupSocket; its background thread
# is only started lazily, on the first register() call.
_resource_sharer = _ResourceSharer()
stop = _resource_sharer.stop
venv/lib/python3.10/site-packages/multiprocess/tests/__init__.py ADDED
The diff for this file is too large to render. See raw diff
 
venv/lib/python3.10/site-packages/multiprocess/tests/__main__.py ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env python
#
# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
# Copyright (c) 2018-2024 The Uncertainty Quantification Foundation.
# License: 3-clause BSD.  The full license text is available at:
#  - https://github.com/uqfoundation/multiprocess/blob/master/LICENSE
"""Run every test script in this directory as a separate subprocess."""

import glob
import os
import sys
import subprocess as sp

# Prefer the interpreter reported by pox (if installed); otherwise fall
# back to the interpreter currently running this script.
python = sys.executable
try:
    import pox
    python = pox.which_python(version=True) or python
except ImportError:
    pass

# Popen needs shell=True on Windows.
shell = sys.platform[:3] == 'win'

# Collect the suite: __init__.py first, then every test_*.py that is
# not itself a "__main__"-style runner.
suite = os.path.dirname(__file__) or os.path.curdir
tests = glob.glob(suite + os.path.sep + 'test_*.py')
tests = glob.glob(suite + os.path.sep + '__init__.py') + \
        [i for i in tests if 'main' not in i]


if __name__ == '__main__':

    failed = 0
    for test in tests:
        # A nonzero exit status from any script marks the run failed.
        returncode = sp.Popen([python, test], shell=shell).wait()
        if returncode:
            failed = 1
    print('')
    exit(failed)
venv/lib/python3.10/site-packages/multiprocess/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (172 kB). View file
 
venv/lib/python3.10/site-packages/multiprocess/tests/__pycache__/__main__.cpython-310.pyc ADDED
Binary file (888 Bytes). View file
 
venv/lib/python3.10/site-packages/multiprocess/tests/__pycache__/mp_fork_bomb.cpython-310.pyc ADDED
Binary file (533 Bytes). View file
 
venv/lib/python3.10/site-packages/multiprocess/tests/__pycache__/mp_preload.cpython-310.pyc ADDED
Binary file (599 Bytes). View file
 
venv/lib/python3.10/site-packages/multiprocess/tests/__pycache__/test_multiprocessing_fork.cpython-310.pyc ADDED
Binary file (634 Bytes). View file
 
venv/lib/python3.10/site-packages/multiprocess/tests/__pycache__/test_multiprocessing_forkserver.cpython-310.pyc ADDED
Binary file (583 Bytes). View file
 
venv/lib/python3.10/site-packages/multiprocess/tests/__pycache__/test_multiprocessing_main_handling.cpython-310.pyc ADDED
Binary file (10.7 kB). View file
 
venv/lib/python3.10/site-packages/multiprocess/tests/__pycache__/test_multiprocessing_spawn.cpython-310.pyc ADDED
Binary file (477 Bytes). View file
 
venv/lib/python3.10/site-packages/multiprocess/tests/mp_fork_bomb.py ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Test fixture: deliberately spawns a child at import/top level.
import multiprocessing, sys

def foo():
    # Child-process payload; output is checked by the calling test.
    print("123")

# Because "if __name__ == '__main__'" is missing this will not work
# correctly on Windows.  However, we should get a RuntimeError rather
# than the Windows equivalent of a fork bomb.
# NOTE: the missing guard is intentional -- do not "fix" it; the test
# exists to verify spawn-based start methods refuse to re-execute the
# parent script endlessly.

# Start method comes from argv[1] when given, else 'spawn'.
if len(sys.argv) > 1:
    multiprocessing.set_start_method(sys.argv[1])
else:
    multiprocessing.set_start_method('spawn')

p = multiprocessing.Process(target=foo)
p.start()
p.join()
# Propagate the child's exit status as this script's exit status.
sys.exit(p.exitcode)
+ sys.exit(p.exitcode)