applied-ai-018 committed
Commit 37c8cde · verified · 1 Parent(s): ae5b593

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full set.
Files changed (50)
  1. ckpts/universal/global_step40/zero/13.input_layernorm.weight/exp_avg.pt +3 -0
  2. ckpts/universal/global_step40/zero/13.input_layernorm.weight/exp_avg_sq.pt +3 -0
  3. ckpts/universal/global_step40/zero/13.input_layernorm.weight/fp32.pt +3 -0
  4. ckpts/universal/global_step40/zero/19.mlp.dense_4h_to_h.weight/exp_avg_sq.pt +3 -0
  5. ckpts/universal/global_step40/zero/25.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt +3 -0
  6. ckpts/universal/global_step40/zero/25.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt +3 -0
  7. ckpts/universal/global_step40/zero/25.mlp.dense_h_to_4h_swiglu.weight/fp32.pt +3 -0
  8. ckpts/universal/global_step40/zero/7.mlp.dense_h_to_4h.weight/fp32.pt +3 -0
  9. venv/lib/python3.10/site-packages/multiprocess/__info__.py +221 -0
  10. venv/lib/python3.10/site-packages/multiprocess/__init__.py +66 -0
  11. venv/lib/python3.10/site-packages/multiprocess/connection.py +976 -0
  12. venv/lib/python3.10/site-packages/multiprocess/forkserver.py +347 -0
  13. venv/lib/python3.10/site-packages/multiprocess/heap.py +337 -0
  14. venv/lib/python3.10/site-packages/multiprocess/pool.py +957 -0
  15. venv/lib/python3.10/site-packages/multiprocess/popen_forkserver.py +74 -0
  16. venv/lib/python3.10/site-packages/multiprocess/queues.py +382 -0
  17. venv/lib/python3.10/site-packages/multiprocess/resource_tracker.py +242 -0
  18. venv/lib/python3.10/site-packages/multiprocess/shared_memory.py +534 -0
  19. venv/lib/python3.10/site-packages/multiprocess/sharedctypes.py +240 -0
  20. venv/lib/python3.10/site-packages/multiprocess/spawn.py +297 -0
  21. venv/lib/python3.10/site-packages/multiprocess/synchronize.py +400 -0
  22. venv/lib/python3.10/site-packages/multiprocess/util.py +489 -0
  23. venv/lib/python3.10/site-packages/pynvml-11.5.0.dist-info/INSTALLER +1 -0
  24. venv/lib/python3.10/site-packages/pynvml-11.5.0.dist-info/LICENSE.txt +27 -0
  25. venv/lib/python3.10/site-packages/pynvml-11.5.0.dist-info/METADATA +233 -0
  26. venv/lib/python3.10/site-packages/pynvml-11.5.0.dist-info/RECORD +14 -0
  27. venv/lib/python3.10/site-packages/pynvml-11.5.0.dist-info/WHEEL +5 -0
  28. venv/lib/python3.10/site-packages/pynvml-11.5.0.dist-info/top_level.txt +1 -0
  29. venv/lib/python3.10/site-packages/tzdata/zoneinfo/Australia/ACT +0 -0
  30. venv/lib/python3.10/site-packages/tzdata/zoneinfo/Australia/Adelaide +0 -0
  31. venv/lib/python3.10/site-packages/tzdata/zoneinfo/Australia/Brisbane +0 -0
  32. venv/lib/python3.10/site-packages/tzdata/zoneinfo/Australia/Broken_Hill +0 -0
  33. venv/lib/python3.10/site-packages/tzdata/zoneinfo/Australia/Canberra +0 -0
  34. venv/lib/python3.10/site-packages/tzdata/zoneinfo/Australia/Currie +0 -0
  35. venv/lib/python3.10/site-packages/tzdata/zoneinfo/Australia/Darwin +0 -0
  36. venv/lib/python3.10/site-packages/tzdata/zoneinfo/Australia/Eucla +0 -0
  37. venv/lib/python3.10/site-packages/tzdata/zoneinfo/Australia/Hobart +0 -0
  38. venv/lib/python3.10/site-packages/tzdata/zoneinfo/Australia/LHI +0 -0
  39. venv/lib/python3.10/site-packages/tzdata/zoneinfo/Australia/Lindeman +0 -0
  40. venv/lib/python3.10/site-packages/tzdata/zoneinfo/Australia/Lord_Howe +0 -0
  41. venv/lib/python3.10/site-packages/tzdata/zoneinfo/Australia/Melbourne +0 -0
  42. venv/lib/python3.10/site-packages/tzdata/zoneinfo/Australia/NSW +0 -0
  43. venv/lib/python3.10/site-packages/tzdata/zoneinfo/Australia/North +0 -0
  44. venv/lib/python3.10/site-packages/tzdata/zoneinfo/Australia/Perth +0 -0
  45. venv/lib/python3.10/site-packages/tzdata/zoneinfo/Australia/Queensland +0 -0
  46. venv/lib/python3.10/site-packages/tzdata/zoneinfo/Australia/South +0 -0
  47. venv/lib/python3.10/site-packages/tzdata/zoneinfo/Australia/Sydney +0 -0
  48. venv/lib/python3.10/site-packages/tzdata/zoneinfo/Australia/Tasmania +0 -0
  49. venv/lib/python3.10/site-packages/tzdata/zoneinfo/Australia/Victoria +0 -0
  50. venv/lib/python3.10/site-packages/tzdata/zoneinfo/Australia/West +0 -0
ckpts/universal/global_step40/zero/13.input_layernorm.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0e58579a19cd8a542f0f8fe1a267f259b6069bad4460f3da684f6bd2fe20c9ef
+ size 9372
ckpts/universal/global_step40/zero/13.input_layernorm.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:edf6dee77765092eec35f4c9d9085c3622740c774fc2a2dcda242c355d560556
+ size 9387
ckpts/universal/global_step40/zero/13.input_layernorm.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0b64509ff1e1e015541fdec54e80f16c54054d323aa10e2e67c17d3b4e7f449e
+ size 9293
ckpts/universal/global_step40/zero/19.mlp.dense_4h_to_h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1ffc648657f01dfc3dfd1a5143ef38acc69bdfc188e514716df2607c7bf2b2aa
+ size 33555627
ckpts/universal/global_step40/zero/25.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7b47d44cdf30672e4df2a627ee98e23cfdfce5a052d30a0ff3e5df7d91732ee5
+ size 33555612
ckpts/universal/global_step40/zero/25.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:98455ce729b62717de26b190cae4a287405d9d42f995ef476044f4ab286f5716
+ size 33555627
ckpts/universal/global_step40/zero/25.mlp.dense_h_to_4h_swiglu.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:36d28e2498559b63a0346fb3c5412f5be819a19523cedb46fbca16dd7d146740
+ size 33555533
ckpts/universal/global_step40/zero/7.mlp.dense_h_to_4h.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:455786ac99da98017083059d20c12ae9ead708522af2cea64a43b50f0b40ac4b
+ size 33555533
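The checkpoint entries above are Git LFS pointer files rather than the tensors themselves: each records only the pointer spec version, the SHA-256 object ID, and the payload size in bytes. As a rough illustration (not part of this commit), a pointer in this format can be read with a few lines of Python; the helper name below is hypothetical:

# Minimal sketch: read a Git LFS pointer file (format shown above) into a dict.
def parse_lfs_pointer(path):
    fields = {}
    with open(path, "r", encoding="utf-8") as fh:
        for line in fh:
            key, _, value = line.strip().partition(" ")
            if key:
                fields[key] = value
    # e.g. {'version': 'https://git-lfs.github.com/spec/v1',
    #       'oid': 'sha256:0e58...', 'size': '9372'}
    return fields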
venv/lib/python3.10/site-packages/multiprocess/__info__.py ADDED
@@ -0,0 +1,221 @@
+ #!/usr/bin/env python
+ #
+ # Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
+ # Copyright (c) 2024 The Uncertainty Quantification Foundation.
+ # License: 3-clause BSD. The full license text is available at:
+ #  - https://github.com/uqfoundation/multiprocess/blob/master/LICENSE
+ '''
+ -----------------------------------------------------------------
+ multiprocess: better multiprocessing and multithreading in Python
+ -----------------------------------------------------------------
+
+ About Multiprocess
+ ==================
+
+ ``multiprocess`` is a fork of ``multiprocessing``. ``multiprocess`` extends ``multiprocessing`` to provide enhanced serialization, using `dill`. ``multiprocess`` leverages ``multiprocessing`` to support the spawning of processes using the API of the Python standard library's ``threading`` module. ``multiprocessing`` has been distributed as part of the standard library since Python 2.6.
+
+ ``multiprocess`` is part of ``pathos``, a Python framework for heterogeneous computing.
+ ``multiprocess`` is in active development, so any user feedback, bug reports, comments,
+ or suggestions are highly appreciated. A list of issues is located at https://github.com/uqfoundation/multiprocess/issues, with a legacy list maintained at https://uqfoundation.github.io/project/pathos/query.
+
+
+ Major Features
+ ==============
+
+ ``multiprocess`` enables:
+
+ - objects to be transferred between processes using pipes or multi-producer/multi-consumer queues
+ - objects to be shared between processes using a server process or (for simple data) shared memory
+
+ ``multiprocess`` provides:
+
+ - equivalents of all the synchronization primitives in ``threading``
+ - a ``Pool`` class to facilitate submitting tasks to worker processes
+ - enhanced serialization, using ``dill``
+
+
+ Current Release
+ ===============
+
+ The latest released version of ``multiprocess`` is available from:
+
+     https://pypi.org/project/multiprocess
+
+ ``multiprocess`` is distributed under a 3-clause BSD license, and is a fork of ``multiprocessing``.
+
+
+ Development Version
+ ===================
+
+ You can get the latest development version with all the shiny new features at:
+
+     https://github.com/uqfoundation
+
+ If you have a new contribution, please submit a pull request.
+
+
+ Installation
+ ============
+
+ ``multiprocess`` can be installed with ``pip``::
+
+     $ pip install multiprocess
+
+ For Python 2, a C compiler is required to build the included extension module from source. Python 3 and binary installs do not require a C compiler.
+
+
+ Requirements
+ ============
+
+ ``multiprocess`` requires:
+
+ - ``python`` (or ``pypy``), **>=3.8**
+ - ``setuptools``, **>=42**
+ - ``dill``, **>=0.3.8**
+
+
+ Basic Usage
+ ===========
+
+ The ``multiprocess.Process`` class follows the API of ``threading.Thread``.
+ For example ::
+
+     from multiprocess import Process, Queue
+
+     def f(q):
+         q.put('hello world')
+
+     if __name__ == '__main__':
+         q = Queue()
+         p = Process(target=f, args=[q])
+         p.start()
+         print (q.get())
+         p.join()
+
+ Synchronization primitives like locks, semaphores and conditions are
+ available, for example ::
+
+     >>> from multiprocess import Condition
+     >>> c = Condition()
+     >>> print (c)
+     <Condition(<RLock(None, 0)>), 0>
+     >>> c.acquire()
+     True
+     >>> print (c)
+     <Condition(<RLock(MainProcess, 1)>), 0>
+
+ One can also use a manager to create shared objects either in shared
+ memory or in a server process, for example ::
+
+     >>> from multiprocess import Manager
+     >>> manager = Manager()
+     >>> l = manager.list(range(10))
+     >>> l.reverse()
+     >>> print (l)
+     [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
+     >>> print (repr(l))
+     <Proxy[list] object at 0x00E1B3B0>
+
+ Tasks can be offloaded to a pool of worker processes in various ways,
+ for example ::
+
+     >>> from multiprocess import Pool
+     >>> def f(x): return x*x
+     ...
+     >>> p = Pool(4)
+     >>> result = p.map_async(f, range(10))
+     >>> print (result.get(timeout=1))
+     [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
+
+ When ``dill`` is installed, serialization is extended to most objects,
+ for example ::
+
+     >>> from multiprocess import Pool
+     >>> p = Pool(4)
+     >>> print (p.map(lambda x: (lambda y:y**2)(x) + x, xrange(10)))
+     [0, 2, 6, 12, 20, 30, 42, 56, 72, 90]
+
+
+ More Information
+ ================
+
+ Probably the best way to get started is to look at the documentation at
+ http://multiprocess.rtfd.io. Also see ``multiprocess.tests`` for scripts that
+ demonstrate how ``multiprocess`` can be used to leverage multiple processes
+ to execute Python in parallel. You can run the test suite with
+ ``python -m multiprocess.tests``. As ``multiprocess`` conforms to the
+ ``multiprocessing`` interface, the examples and documentation found at
+ http://docs.python.org/library/multiprocessing.html also apply to
+ ``multiprocess`` if one will ``import multiprocessing as multiprocess``.
+ See https://github.com/uqfoundation/multiprocess/tree/master/py3.12/examples
+ for a set of examples that demonstrate some basic use cases and benchmarking
+ for running Python code in parallel. Please feel free to submit a ticket on
+ github, or ask a question on stackoverflow (**@Mike McKerns**). If you would
+ like to share how you use ``multiprocess`` in your work, please send an email
+ (to **mmckerns at uqfoundation dot org**).
+
+
+ Citation
+ ========
+
+ If you use ``multiprocess`` to do research that leads to publication, we ask that you
+ acknowledge use of ``multiprocess`` by citing the following in your publication::
+
+     M.M. McKerns, L. Strand, T. Sullivan, A. Fang, M.A.G. Aivazis,
+     "Building a framework for predictive science", Proceedings of
+     the 10th Python in Science Conference, 2011;
+     http://arxiv.org/pdf/1202.1056
+
+     Michael McKerns and Michael Aivazis,
+     "pathos: a framework for heterogeneous computing", 2010- ;
+     https://uqfoundation.github.io/project/pathos
+
+ Please see https://uqfoundation.github.io/project/pathos or
+ http://arxiv.org/pdf/1202.1056 for further information.
+
+ '''
+
+ __all__ = []
+ __version__ = '0.70.16'
+ __author__ = 'Mike McKerns'
+
+ __license__ = '''
+ Copyright (c) 2008-2016 California Institute of Technology.
+ Copyright (c) 2016-2024 The Uncertainty Quantification Foundation.
+ All rights reserved.
+
+ This software forks the python package "multiprocessing". Licence and
+ copyright information for multiprocessing can be found in "COPYING".
+
+ This software is available subject to the conditions and terms laid
+ out below. By downloading and using this software you are agreeing
+ to the following conditions.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+     - Redistributions of source code must retain the above copyright
+       notice, this list of conditions and the following disclaimer.
+
+     - Redistributions in binary form must reproduce the above copyright
+       notice, this list of conditions and the following disclaimer in the
+       documentation and/or other materials provided with the distribution.
+
+     - Neither the names of the copyright holders nor the names of any of
+       the contributors may be used to endorse or promote products derived
+       from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ '''
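The final docstring example above is Python-2 era (``xrange``, space before the ``print`` call). As a hedged illustration only, not part of the packaged file, the same dill-backed behaviour under Python 3 looks like:

# Illustrative Python 3 adaptation of the docstring's lambda example.
from multiprocess import Pool

if __name__ == '__main__':
    with Pool(4) as p:
        # works because multiprocess serializes the lambda with dill
        print(p.map(lambda x: x * x + x, range(10)))
        # -> [0, 2, 6, 12, 20, 30, 42, 56, 72, 90]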
venv/lib/python3.10/site-packages/multiprocess/__init__.py ADDED
@@ -0,0 +1,66 @@
+ #
+ # Package analogous to 'threading.py' but using processes
+ #
+ # multiprocessing/__init__.py
+ #
+ # This package is intended to duplicate the functionality (and much of
+ # the API) of threading.py but uses processes instead of threads. A
+ # subpackage 'multiprocessing.dummy' has the same API but is a simple
+ # wrapper for 'threading'.
+ #
+ # Original: Copyright (c) 2006-2008, R Oudkerk
+ # Original: Licensed to PSF under a Contributor Agreement.
+ # Forked by Mike McKerns, to support enhanced serialization.
+
+ # author, version, license, and long description
+ try: # the package is installed
+     from .__info__ import __version__, __author__, __doc__, __license__
+ except: # pragma: no cover
+     import os
+     import sys
+     root = os.path.dirname(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
+     sys.path.append(root)
+     # get distribution meta info
+     from version import (__version__, __author__,
+                          get_license_text, get_readme_as_rst)
+     __license__ = get_license_text(os.path.join(root, 'LICENSE'))
+     __license__ = "\n%s" % __license__
+     __doc__ = get_readme_as_rst(os.path.join(root, 'README.md'))
+     del os, sys, root, get_license_text, get_readme_as_rst
+
+
+ import sys
+ from . import context
+
+ #
+ # Copy stuff from default context
+ #
+
+ __all__ = [x for x in dir(context._default_context) if not x.startswith('_')]
+ globals().update((name, getattr(context._default_context, name)) for name in __all__)
+
+ #
+ # XXX These should not really be documented or public.
+ #
+
+ SUBDEBUG = 5
+ SUBWARNING = 25
+
+ #
+ # Alias for main module -- will be reset by bootstrapping child processes
+ #
+
+ if '__main__' in sys.modules:
+     sys.modules['__mp_main__'] = sys.modules['__main__']
+
+
+ def license():
+     """print license"""
+     print (__license__)
+     return
+
+ def citation():
+     """print citation"""
+     print (__doc__[-491:-118])
+     return
+
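Because ``__init__`` copies every public attribute of the default context into the package namespace, ``multiprocess`` exposes the same top-level entry points as the stdlib ``multiprocessing`` module. A hedged usage sketch (the names mirror the stdlib API, nothing specific to this commit):

# Rough sketch: the stdlib-style top-level API is available on the package itself.
import multiprocess as mp

def f(q):
    q.put('hello from a child process')

if __name__ == '__main__':
    ctx = mp.get_context('spawn')        # same context API as multiprocessing
    q = ctx.Queue()
    p = ctx.Process(target=f, args=(q,))
    p.start()
    print(q.get())
    p.join()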
venv/lib/python3.10/site-packages/multiprocess/connection.py ADDED
@@ -0,0 +1,976 @@
1
+ #
2
+ # A higher level module for using sockets (or Windows named pipes)
3
+ #
4
+ # multiprocessing/connection.py
5
+ #
6
+ # Copyright (c) 2006-2008, R Oudkerk
7
+ # Licensed to PSF under a Contributor Agreement.
8
+ #
9
+
10
+ __all__ = [ 'Client', 'Listener', 'Pipe', 'wait' ]
11
+
12
+ import io
13
+ import os
14
+ import sys
15
+ import socket
16
+ import struct
17
+ import time
18
+ import tempfile
19
+ import itertools
20
+
21
+ try:
22
+ import _multiprocess as _multiprocessing
23
+ except ImportError:
24
+ import _multiprocessing
25
+
26
+ from . import util
27
+
28
+ from . import AuthenticationError, BufferTooShort
29
+ from .context import reduction
30
+ _ForkingPickler = reduction.ForkingPickler
31
+
32
+ try:
33
+ import _winapi
34
+ from _winapi import WAIT_OBJECT_0, WAIT_ABANDONED_0, WAIT_TIMEOUT, INFINITE
35
+ except ImportError:
36
+ if sys.platform == 'win32':
37
+ raise
38
+ _winapi = None
39
+
40
+ #
41
+ #
42
+ #
43
+
44
+ BUFSIZE = 8192
45
+ # A very generous timeout when it comes to local connections...
46
+ CONNECTION_TIMEOUT = 20.
47
+
48
+ _mmap_counter = itertools.count()
49
+
50
+ default_family = 'AF_INET'
51
+ families = ['AF_INET']
52
+
53
+ if hasattr(socket, 'AF_UNIX'):
54
+ default_family = 'AF_UNIX'
55
+ families += ['AF_UNIX']
56
+
57
+ if sys.platform == 'win32':
58
+ default_family = 'AF_PIPE'
59
+ families += ['AF_PIPE']
60
+
61
+
62
+ def _init_timeout(timeout=CONNECTION_TIMEOUT):
63
+ return getattr(time,'monotonic',time.time)() + timeout
64
+
65
+ def _check_timeout(t):
66
+ return getattr(time,'monotonic',time.time)() > t
67
+
68
+ #
69
+ #
70
+ #
71
+
72
+ def arbitrary_address(family):
73
+ '''
74
+ Return an arbitrary free address for the given family
75
+ '''
76
+ if family == 'AF_INET':
77
+ return ('localhost', 0)
78
+ elif family == 'AF_UNIX':
79
+ return tempfile.mktemp(prefix='listener-', dir=util.get_temp_dir())
80
+ elif family == 'AF_PIPE':
81
+ return tempfile.mktemp(prefix=r'\\.\pipe\pyc-%d-%d-' %
82
+ (os.getpid(), next(_mmap_counter)), dir="")
83
+ else:
84
+ raise ValueError('unrecognized family')
85
+
86
+ def _validate_family(family):
87
+ '''
88
+ Checks if the family is valid for the current environment.
89
+ '''
90
+ if sys.platform != 'win32' and family == 'AF_PIPE':
91
+ raise ValueError('Family %s is not recognized.' % family)
92
+
93
+ if sys.platform == 'win32' and family == 'AF_UNIX':
94
+ # double check
95
+ if not hasattr(socket, family):
96
+ raise ValueError('Family %s is not recognized.' % family)
97
+
98
+ def address_type(address):
99
+ '''
100
+ Return the types of the address
101
+
102
+ This can be 'AF_INET', 'AF_UNIX', or 'AF_PIPE'
103
+ '''
104
+ if type(address) == tuple:
105
+ return 'AF_INET'
106
+ elif type(address) is str and address.startswith('\\\\'):
107
+ return 'AF_PIPE'
108
+ elif type(address) is str or util.is_abstract_socket_namespace(address):
109
+ return 'AF_UNIX'
110
+ else:
111
+ raise ValueError('address type of %r unrecognized' % address)
112
+
113
+ #
114
+ # Connection classes
115
+ #
116
+
117
+ class _ConnectionBase:
118
+ _handle = None
119
+
120
+ def __init__(self, handle, readable=True, writable=True):
121
+ handle = handle.__index__()
122
+ if handle < 0:
123
+ raise ValueError("invalid handle")
124
+ if not readable and not writable:
125
+ raise ValueError(
126
+ "at least one of `readable` and `writable` must be True")
127
+ self._handle = handle
128
+ self._readable = readable
129
+ self._writable = writable
130
+
131
+ # XXX should we use util.Finalize instead of a __del__?
132
+
133
+ def __del__(self):
134
+ if self._handle is not None:
135
+ self._close()
136
+
137
+ def _check_closed(self):
138
+ if self._handle is None:
139
+ raise OSError("handle is closed")
140
+
141
+ def _check_readable(self):
142
+ if not self._readable:
143
+ raise OSError("connection is write-only")
144
+
145
+ def _check_writable(self):
146
+ if not self._writable:
147
+ raise OSError("connection is read-only")
148
+
149
+ def _bad_message_length(self):
150
+ if self._writable:
151
+ self._readable = False
152
+ else:
153
+ self.close()
154
+ raise OSError("bad message length")
155
+
156
+ @property
157
+ def closed(self):
158
+ """True if the connection is closed"""
159
+ return self._handle is None
160
+
161
+ @property
162
+ def readable(self):
163
+ """True if the connection is readable"""
164
+ return self._readable
165
+
166
+ @property
167
+ def writable(self):
168
+ """True if the connection is writable"""
169
+ return self._writable
170
+
171
+ def fileno(self):
172
+ """File descriptor or handle of the connection"""
173
+ self._check_closed()
174
+ return self._handle
175
+
176
+ def close(self):
177
+ """Close the connection"""
178
+ if self._handle is not None:
179
+ try:
180
+ self._close()
181
+ finally:
182
+ self._handle = None
183
+
184
+ def send_bytes(self, buf, offset=0, size=None):
185
+ """Send the bytes data from a bytes-like object"""
186
+ self._check_closed()
187
+ self._check_writable()
188
+ m = memoryview(buf)
189
+ # HACK for byte-indexing of non-bytewise buffers (e.g. array.array)
190
+ if m.itemsize > 1:
191
+ m = memoryview(bytes(m))
192
+ n = len(m)
193
+ if offset < 0:
194
+ raise ValueError("offset is negative")
195
+ if n < offset:
196
+ raise ValueError("buffer length < offset")
197
+ if size is None:
198
+ size = n - offset
199
+ elif size < 0:
200
+ raise ValueError("size is negative")
201
+ elif offset + size > n:
202
+ raise ValueError("buffer length < offset + size")
203
+ self._send_bytes(m[offset:offset + size])
204
+
205
+ def send(self, obj):
206
+ """Send a (picklable) object"""
207
+ self._check_closed()
208
+ self._check_writable()
209
+ self._send_bytes(_ForkingPickler.dumps(obj))
210
+
211
+ def recv_bytes(self, maxlength=None):
212
+ """
213
+ Receive bytes data as a bytes object.
214
+ """
215
+ self._check_closed()
216
+ self._check_readable()
217
+ if maxlength is not None and maxlength < 0:
218
+ raise ValueError("negative maxlength")
219
+ buf = self._recv_bytes(maxlength)
220
+ if buf is None:
221
+ self._bad_message_length()
222
+ return buf.getvalue()
223
+
224
+ def recv_bytes_into(self, buf, offset=0):
225
+ """
226
+ Receive bytes data into a writeable bytes-like object.
227
+ Return the number of bytes read.
228
+ """
229
+ self._check_closed()
230
+ self._check_readable()
231
+ with memoryview(buf) as m:
232
+ # Get bytesize of arbitrary buffer
233
+ itemsize = m.itemsize
234
+ bytesize = itemsize * len(m)
235
+ if offset < 0:
236
+ raise ValueError("negative offset")
237
+ elif offset > bytesize:
238
+ raise ValueError("offset too large")
239
+ result = self._recv_bytes()
240
+ size = result.tell()
241
+ if bytesize < offset + size:
242
+ raise BufferTooShort(result.getvalue())
243
+ # Message can fit in dest
244
+ result.seek(0)
245
+ result.readinto(m[offset // itemsize :
246
+ (offset + size) // itemsize])
247
+ return size
248
+
249
+ def recv(self):
250
+ """Receive a (picklable) object"""
251
+ self._check_closed()
252
+ self._check_readable()
253
+ buf = self._recv_bytes()
254
+ return _ForkingPickler.loads(buf.getbuffer())
255
+
256
+ def poll(self, timeout=0.0):
257
+ """Whether there is any input available to be read"""
258
+ self._check_closed()
259
+ self._check_readable()
260
+ return self._poll(timeout)
261
+
262
+ def __enter__(self):
263
+ return self
264
+
265
+ def __exit__(self, exc_type, exc_value, exc_tb):
266
+ self.close()
267
+
268
+
269
+ if _winapi:
270
+
271
+ class PipeConnection(_ConnectionBase):
272
+ """
273
+ Connection class based on a Windows named pipe.
274
+ Overlapped I/O is used, so the handles must have been created
275
+ with FILE_FLAG_OVERLAPPED.
276
+ """
277
+ _got_empty_message = False
278
+
279
+ def _close(self, _CloseHandle=_winapi.CloseHandle):
280
+ _CloseHandle(self._handle)
281
+
282
+ def _send_bytes(self, buf):
283
+ ov, err = _winapi.WriteFile(self._handle, buf, overlapped=True)
284
+ try:
285
+ if err == _winapi.ERROR_IO_PENDING:
286
+ waitres = _winapi.WaitForMultipleObjects(
287
+ [ov.event], False, INFINITE)
288
+ assert waitres == WAIT_OBJECT_0
289
+ except:
290
+ ov.cancel()
291
+ raise
292
+ finally:
293
+ nwritten, err = ov.GetOverlappedResult(True)
294
+ assert err == 0
295
+ assert nwritten == len(buf)
296
+
297
+ def _recv_bytes(self, maxsize=None):
298
+ if self._got_empty_message:
299
+ self._got_empty_message = False
300
+ return io.BytesIO()
301
+ else:
302
+ bsize = 128 if maxsize is None else min(maxsize, 128)
303
+ try:
304
+ ov, err = _winapi.ReadFile(self._handle, bsize,
305
+ overlapped=True)
306
+ try:
307
+ if err == _winapi.ERROR_IO_PENDING:
308
+ waitres = _winapi.WaitForMultipleObjects(
309
+ [ov.event], False, INFINITE)
310
+ assert waitres == WAIT_OBJECT_0
311
+ except:
312
+ ov.cancel()
313
+ raise
314
+ finally:
315
+ nread, err = ov.GetOverlappedResult(True)
316
+ if err == 0:
317
+ f = io.BytesIO()
318
+ f.write(ov.getbuffer())
319
+ return f
320
+ elif err == _winapi.ERROR_MORE_DATA:
321
+ return self._get_more_data(ov, maxsize)
322
+ except OSError as e:
323
+ if e.winerror == _winapi.ERROR_BROKEN_PIPE:
324
+ raise EOFError
325
+ else:
326
+ raise
327
+ raise RuntimeError("shouldn't get here; expected KeyboardInterrupt")
328
+
329
+ def _poll(self, timeout):
330
+ if (self._got_empty_message or
331
+ _winapi.PeekNamedPipe(self._handle)[0] != 0):
332
+ return True
333
+ return bool(wait([self], timeout))
334
+
335
+ def _get_more_data(self, ov, maxsize):
336
+ buf = ov.getbuffer()
337
+ f = io.BytesIO()
338
+ f.write(buf)
339
+ left = _winapi.PeekNamedPipe(self._handle)[1]
340
+ assert left > 0
341
+ if maxsize is not None and len(buf) + left > maxsize:
342
+ self._bad_message_length()
343
+ ov, err = _winapi.ReadFile(self._handle, left, overlapped=True)
344
+ rbytes, err = ov.GetOverlappedResult(True)
345
+ assert err == 0
346
+ assert rbytes == left
347
+ f.write(ov.getbuffer())
348
+ return f
349
+
350
+
351
+ class Connection(_ConnectionBase):
352
+ """
353
+ Connection class based on an arbitrary file descriptor (Unix only), or
354
+ a socket handle (Windows).
355
+ """
356
+
357
+ if _winapi:
358
+ def _close(self, _close=_multiprocessing.closesocket):
359
+ _close(self._handle)
360
+ _write = _multiprocessing.send
361
+ _read = _multiprocessing.recv
362
+ else:
363
+ def _close(self, _close=os.close):
364
+ _close(self._handle)
365
+ _write = os.write
366
+ _read = os.read
367
+
368
+ def _send(self, buf, write=_write):
369
+ remaining = len(buf)
370
+ while True:
371
+ n = write(self._handle, buf)
372
+ remaining -= n
373
+ if remaining == 0:
374
+ break
375
+ buf = buf[n:]
376
+
377
+ def _recv(self, size, read=_read):
378
+ buf = io.BytesIO()
379
+ handle = self._handle
380
+ remaining = size
381
+ while remaining > 0:
382
+ chunk = read(handle, remaining)
383
+ n = len(chunk)
384
+ if n == 0:
385
+ if remaining == size:
386
+ raise EOFError
387
+ else:
388
+ raise OSError("got end of file during message")
389
+ buf.write(chunk)
390
+ remaining -= n
391
+ return buf
392
+
393
+ def _send_bytes(self, buf):
394
+ n = len(buf)
395
+ if n > 0x7fffffff:
396
+ pre_header = struct.pack("!i", -1)
397
+ header = struct.pack("!Q", n)
398
+ self._send(pre_header)
399
+ self._send(header)
400
+ self._send(buf)
401
+ else:
402
+ # For wire compatibility with 3.7 and lower
403
+ header = struct.pack("!i", n)
404
+ if n > 16384:
405
+ # The payload is large so Nagle's algorithm won't be triggered
406
+ # and we'd better avoid the cost of concatenation.
407
+ self._send(header)
408
+ self._send(buf)
409
+ else:
410
+ # Issue #20540: concatenate before sending, to avoid delays due
411
+ # to Nagle's algorithm on a TCP socket.
412
+ # Also note we want to avoid sending a 0-length buffer separately,
413
+ # to avoid "broken pipe" errors if the other end closed the pipe.
414
+ self._send(header + buf)
415
+
416
+ def _recv_bytes(self, maxsize=None):
417
+ buf = self._recv(4)
418
+ size, = struct.unpack("!i", buf.getvalue())
419
+ if size == -1:
420
+ buf = self._recv(8)
421
+ size, = struct.unpack("!Q", buf.getvalue())
422
+ if maxsize is not None and size > maxsize:
423
+ return None
424
+ return self._recv(size)
425
+
426
+ def _poll(self, timeout):
427
+ r = wait([self], timeout)
428
+ return bool(r)
429
+
430
+
431
+ #
432
+ # Public functions
433
+ #
434
+
435
+ class Listener(object):
436
+ '''
437
+ Returns a listener object.
438
+
439
+ This is a wrapper for a bound socket which is 'listening' for
440
+ connections, or for a Windows named pipe.
441
+ '''
442
+ def __init__(self, address=None, family=None, backlog=1, authkey=None):
443
+ family = family or (address and address_type(address)) \
444
+ or default_family
445
+ address = address or arbitrary_address(family)
446
+
447
+ _validate_family(family)
448
+ if family == 'AF_PIPE':
449
+ self._listener = PipeListener(address, backlog)
450
+ else:
451
+ self._listener = SocketListener(address, family, backlog)
452
+
453
+ if authkey is not None and not isinstance(authkey, bytes):
454
+ raise TypeError('authkey should be a byte string')
455
+
456
+ self._authkey = authkey
457
+
458
+ def accept(self):
459
+ '''
460
+ Accept a connection on the bound socket or named pipe of `self`.
461
+
462
+ Returns a `Connection` object.
463
+ '''
464
+ if self._listener is None:
465
+ raise OSError('listener is closed')
466
+ c = self._listener.accept()
467
+ if self._authkey:
468
+ deliver_challenge(c, self._authkey)
469
+ answer_challenge(c, self._authkey)
470
+ return c
471
+
472
+ def close(self):
473
+ '''
474
+ Close the bound socket or named pipe of `self`.
475
+ '''
476
+ listener = self._listener
477
+ if listener is not None:
478
+ self._listener = None
479
+ listener.close()
480
+
481
+ @property
482
+ def address(self):
483
+ return self._listener._address
484
+
485
+ @property
486
+ def last_accepted(self):
487
+ return self._listener._last_accepted
488
+
489
+ def __enter__(self):
490
+ return self
491
+
492
+ def __exit__(self, exc_type, exc_value, exc_tb):
493
+ self.close()
494
+
495
+
496
+ def Client(address, family=None, authkey=None):
497
+ '''
498
+ Returns a connection to the address of a `Listener`
499
+ '''
500
+ family = family or address_type(address)
501
+ _validate_family(family)
502
+ if family == 'AF_PIPE':
503
+ c = PipeClient(address)
504
+ else:
505
+ c = SocketClient(address)
506
+
507
+ if authkey is not None and not isinstance(authkey, bytes):
508
+ raise TypeError('authkey should be a byte string')
509
+
510
+ if authkey is not None:
511
+ answer_challenge(c, authkey)
512
+ deliver_challenge(c, authkey)
513
+
514
+ return c
515
+
516
+
517
+ if sys.platform != 'win32':
518
+
519
+ def Pipe(duplex=True):
520
+ '''
521
+ Returns pair of connection objects at either end of a pipe
522
+ '''
523
+ if duplex:
524
+ s1, s2 = socket.socketpair()
525
+ s1.setblocking(True)
526
+ s2.setblocking(True)
527
+ c1 = Connection(s1.detach())
528
+ c2 = Connection(s2.detach())
529
+ else:
530
+ fd1, fd2 = os.pipe()
531
+ c1 = Connection(fd1, writable=False)
532
+ c2 = Connection(fd2, readable=False)
533
+
534
+ return c1, c2
535
+
536
+ else:
537
+
538
+ def Pipe(duplex=True):
539
+ '''
540
+ Returns pair of connection objects at either end of a pipe
541
+ '''
542
+ address = arbitrary_address('AF_PIPE')
543
+ if duplex:
544
+ openmode = _winapi.PIPE_ACCESS_DUPLEX
545
+ access = _winapi.GENERIC_READ | _winapi.GENERIC_WRITE
546
+ obsize, ibsize = BUFSIZE, BUFSIZE
547
+ else:
548
+ openmode = _winapi.PIPE_ACCESS_INBOUND
549
+ access = _winapi.GENERIC_WRITE
550
+ obsize, ibsize = 0, BUFSIZE
551
+
552
+ h1 = _winapi.CreateNamedPipe(
553
+ address, openmode | _winapi.FILE_FLAG_OVERLAPPED |
554
+ _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE,
555
+ _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE |
556
+ _winapi.PIPE_WAIT,
557
+ 1, obsize, ibsize, _winapi.NMPWAIT_WAIT_FOREVER,
558
+ # default security descriptor: the handle cannot be inherited
559
+ _winapi.NULL
560
+ )
561
+ h2 = _winapi.CreateFile(
562
+ address, access, 0, _winapi.NULL, _winapi.OPEN_EXISTING,
563
+ _winapi.FILE_FLAG_OVERLAPPED, _winapi.NULL
564
+ )
565
+ _winapi.SetNamedPipeHandleState(
566
+ h2, _winapi.PIPE_READMODE_MESSAGE, None, None
567
+ )
568
+
569
+ overlapped = _winapi.ConnectNamedPipe(h1, overlapped=True)
570
+ _, err = overlapped.GetOverlappedResult(True)
571
+ assert err == 0
572
+
573
+ c1 = PipeConnection(h1, writable=duplex)
574
+ c2 = PipeConnection(h2, readable=duplex)
575
+
576
+ return c1, c2
577
+
578
+ #
579
+ # Definitions for connections based on sockets
580
+ #
581
+
582
+ class SocketListener(object):
583
+ '''
584
+ Representation of a socket which is bound to an address and listening
585
+ '''
586
+ def __init__(self, address, family, backlog=1):
587
+ self._socket = socket.socket(getattr(socket, family))
588
+ try:
589
+ # SO_REUSEADDR has different semantics on Windows (issue #2550).
590
+ if os.name == 'posix':
591
+ self._socket.setsockopt(socket.SOL_SOCKET,
592
+ socket.SO_REUSEADDR, 1)
593
+ self._socket.setblocking(True)
594
+ self._socket.bind(address)
595
+ self._socket.listen(backlog)
596
+ self._address = self._socket.getsockname()
597
+ except OSError:
598
+ self._socket.close()
599
+ raise
600
+ self._family = family
601
+ self._last_accepted = None
602
+
603
+ if family == 'AF_UNIX' and not util.is_abstract_socket_namespace(address):
604
+ # Linux abstract socket namespaces do not need to be explicitly unlinked
605
+ self._unlink = util.Finalize(
606
+ self, os.unlink, args=(address,), exitpriority=0
607
+ )
608
+ else:
609
+ self._unlink = None
610
+
611
+ def accept(self):
612
+ s, self._last_accepted = self._socket.accept()
613
+ s.setblocking(True)
614
+ return Connection(s.detach())
615
+
616
+ def close(self):
617
+ try:
618
+ self._socket.close()
619
+ finally:
620
+ unlink = self._unlink
621
+ if unlink is not None:
622
+ self._unlink = None
623
+ unlink()
624
+
625
+
626
+ def SocketClient(address):
627
+ '''
628
+ Return a connection object connected to the socket given by `address`
629
+ '''
630
+ family = address_type(address)
631
+ with socket.socket( getattr(socket, family) ) as s:
632
+ s.setblocking(True)
633
+ s.connect(address)
634
+ return Connection(s.detach())
635
+
636
+ #
637
+ # Definitions for connections based on named pipes
638
+ #
639
+
640
+ if sys.platform == 'win32':
641
+
642
+ class PipeListener(object):
643
+ '''
644
+ Representation of a named pipe
645
+ '''
646
+ def __init__(self, address, backlog=None):
647
+ self._address = address
648
+ self._handle_queue = [self._new_handle(first=True)]
649
+
650
+ self._last_accepted = None
651
+ util.sub_debug('listener created with address=%r', self._address)
652
+ self.close = util.Finalize(
653
+ self, PipeListener._finalize_pipe_listener,
654
+ args=(self._handle_queue, self._address), exitpriority=0
655
+ )
656
+
657
+ def _new_handle(self, first=False):
658
+ flags = _winapi.PIPE_ACCESS_DUPLEX | _winapi.FILE_FLAG_OVERLAPPED
659
+ if first:
660
+ flags |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE
661
+ return _winapi.CreateNamedPipe(
662
+ self._address, flags,
663
+ _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE |
664
+ _winapi.PIPE_WAIT,
665
+ _winapi.PIPE_UNLIMITED_INSTANCES, BUFSIZE, BUFSIZE,
666
+ _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL
667
+ )
668
+
669
+ def accept(self):
670
+ self._handle_queue.append(self._new_handle())
671
+ handle = self._handle_queue.pop(0)
672
+ try:
673
+ ov = _winapi.ConnectNamedPipe(handle, overlapped=True)
674
+ except OSError as e:
675
+ if e.winerror != _winapi.ERROR_NO_DATA:
676
+ raise
677
+ # ERROR_NO_DATA can occur if a client has already connected,
678
+ # written data and then disconnected -- see Issue 14725.
679
+ else:
680
+ try:
681
+ res = _winapi.WaitForMultipleObjects(
682
+ [ov.event], False, INFINITE)
683
+ except:
684
+ ov.cancel()
685
+ _winapi.CloseHandle(handle)
686
+ raise
687
+ finally:
688
+ _, err = ov.GetOverlappedResult(True)
689
+ assert err == 0
690
+ return PipeConnection(handle)
691
+
692
+ @staticmethod
693
+ def _finalize_pipe_listener(queue, address):
694
+ util.sub_debug('closing listener with address=%r', address)
695
+ for handle in queue:
696
+ _winapi.CloseHandle(handle)
697
+
698
+ def PipeClient(address):
699
+ '''
700
+ Return a connection object connected to the pipe given by `address`
701
+ '''
702
+ t = _init_timeout()
703
+ while 1:
704
+ try:
705
+ _winapi.WaitNamedPipe(address, 1000)
706
+ h = _winapi.CreateFile(
707
+ address, _winapi.GENERIC_READ | _winapi.GENERIC_WRITE,
708
+ 0, _winapi.NULL, _winapi.OPEN_EXISTING,
709
+ _winapi.FILE_FLAG_OVERLAPPED, _winapi.NULL
710
+ )
711
+ except OSError as e:
712
+ if e.winerror not in (_winapi.ERROR_SEM_TIMEOUT,
713
+ _winapi.ERROR_PIPE_BUSY) or _check_timeout(t):
714
+ raise
715
+ else:
716
+ break
717
+ else:
718
+ raise
719
+
720
+ _winapi.SetNamedPipeHandleState(
721
+ h, _winapi.PIPE_READMODE_MESSAGE, None, None
722
+ )
723
+ return PipeConnection(h)
724
+
725
+ #
726
+ # Authentication stuff
727
+ #
728
+
729
+ MESSAGE_LENGTH = 20
730
+
731
+ CHALLENGE = b'#CHALLENGE#'
732
+ WELCOME = b'#WELCOME#'
733
+ FAILURE = b'#FAILURE#'
734
+
735
+ def deliver_challenge(connection, authkey):
736
+ import hmac
737
+ if not isinstance(authkey, bytes):
738
+ raise ValueError(
739
+ "Authkey must be bytes, not {0!s}".format(type(authkey)))
740
+ message = os.urandom(MESSAGE_LENGTH)
741
+ connection.send_bytes(CHALLENGE + message)
742
+ digest = hmac.new(authkey, message, 'md5').digest()
743
+ response = connection.recv_bytes(256) # reject large message
744
+ if response == digest:
745
+ connection.send_bytes(WELCOME)
746
+ else:
747
+ connection.send_bytes(FAILURE)
748
+ raise AuthenticationError('digest received was wrong')
749
+
750
+ def answer_challenge(connection, authkey):
751
+ import hmac
752
+ if not isinstance(authkey, bytes):
753
+ raise ValueError(
754
+ "Authkey must be bytes, not {0!s}".format(type(authkey)))
755
+ message = connection.recv_bytes(256) # reject large message
756
+ assert message[:len(CHALLENGE)] == CHALLENGE, 'message = %r' % message
757
+ message = message[len(CHALLENGE):]
758
+ digest = hmac.new(authkey, message, 'md5').digest()
759
+ connection.send_bytes(digest)
760
+ response = connection.recv_bytes(256) # reject large message
761
+ if response != WELCOME:
762
+ raise AuthenticationError('digest sent was rejected')
763
+
764
+ #
765
+ # Support for using xmlrpclib for serialization
766
+ #
767
+
768
+ class ConnectionWrapper(object):
769
+ def __init__(self, conn, dumps, loads):
770
+ self._conn = conn
771
+ self._dumps = dumps
772
+ self._loads = loads
773
+ for attr in ('fileno', 'close', 'poll', 'recv_bytes', 'send_bytes'):
774
+ obj = getattr(conn, attr)
775
+ setattr(self, attr, obj)
776
+ def send(self, obj):
777
+ s = self._dumps(obj)
778
+ self._conn.send_bytes(s)
779
+ def recv(self):
780
+ s = self._conn.recv_bytes()
781
+ return self._loads(s)
782
+
783
+ def _xml_dumps(obj):
784
+ return xmlrpclib.dumps((obj,), None, None, None, 1).encode('utf-8')
785
+
786
+ def _xml_loads(s):
787
+ (obj,), method = xmlrpclib.loads(s.decode('utf-8'))
788
+ return obj
789
+
790
+ class XmlListener(Listener):
791
+ def accept(self):
792
+ global xmlrpclib
793
+ import xmlrpc.client as xmlrpclib
794
+ obj = Listener.accept(self)
795
+ return ConnectionWrapper(obj, _xml_dumps, _xml_loads)
796
+
797
+ def XmlClient(*args, **kwds):
798
+ global xmlrpclib
799
+ import xmlrpc.client as xmlrpclib
800
+ return ConnectionWrapper(Client(*args, **kwds), _xml_dumps, _xml_loads)
801
+
802
+ #
803
+ # Wait
804
+ #
805
+
806
+ if sys.platform == 'win32':
807
+
808
+ def _exhaustive_wait(handles, timeout):
809
+ # Return ALL handles which are currently signalled. (Only
810
+ # returning the first signalled might create starvation issues.)
811
+ L = list(handles)
812
+ ready = []
813
+ while L:
814
+ res = _winapi.WaitForMultipleObjects(L, False, timeout)
815
+ if res == WAIT_TIMEOUT:
816
+ break
817
+ elif WAIT_OBJECT_0 <= res < WAIT_OBJECT_0 + len(L):
818
+ res -= WAIT_OBJECT_0
819
+ elif WAIT_ABANDONED_0 <= res < WAIT_ABANDONED_0 + len(L):
820
+ res -= WAIT_ABANDONED_0
821
+ else:
822
+ raise RuntimeError('Should not get here')
823
+ ready.append(L[res])
824
+ L = L[res+1:]
825
+ timeout = 0
826
+ return ready
827
+
828
+ _ready_errors = {_winapi.ERROR_BROKEN_PIPE, _winapi.ERROR_NETNAME_DELETED}
829
+
830
+ def wait(object_list, timeout=None):
831
+ '''
832
+ Wait till an object in object_list is ready/readable.
833
+
834
+ Returns list of those objects in object_list which are ready/readable.
835
+ '''
836
+ if timeout is None:
837
+ timeout = INFINITE
838
+ elif timeout < 0:
839
+ timeout = 0
840
+ else:
841
+ timeout = int(timeout * 1000 + 0.5)
842
+
843
+ object_list = list(object_list)
844
+ waithandle_to_obj = {}
845
+ ov_list = []
846
+ ready_objects = set()
847
+ ready_handles = set()
848
+
849
+ try:
850
+ for o in object_list:
851
+ try:
852
+ fileno = getattr(o, 'fileno')
853
+ except AttributeError:
854
+ waithandle_to_obj[o.__index__()] = o
855
+ else:
856
+ # start an overlapped read of length zero
857
+ try:
858
+ ov, err = _winapi.ReadFile(fileno(), 0, True)
859
+ except OSError as e:
860
+ ov, err = None, e.winerror
861
+ if err not in _ready_errors:
862
+ raise
863
+ if err == _winapi.ERROR_IO_PENDING:
864
+ ov_list.append(ov)
865
+ waithandle_to_obj[ov.event] = o
866
+ else:
867
+ # If o.fileno() is an overlapped pipe handle and
868
+ # err == 0 then there is a zero length message
869
+ # in the pipe, but it HAS NOT been consumed...
870
+ if ov and sys.getwindowsversion()[:2] >= (6, 2):
871
+ # ... except on Windows 8 and later, where
872
+ # the message HAS been consumed.
873
+ try:
874
+ _, err = ov.GetOverlappedResult(False)
875
+ except OSError as e:
876
+ err = e.winerror
877
+ if not err and hasattr(o, '_got_empty_message'):
878
+ o._got_empty_message = True
879
+ ready_objects.add(o)
880
+ timeout = 0
881
+
882
+ ready_handles = _exhaustive_wait(waithandle_to_obj.keys(), timeout)
883
+ finally:
884
+ # request that overlapped reads stop
885
+ for ov in ov_list:
886
+ ov.cancel()
887
+
888
+ # wait for all overlapped reads to stop
889
+ for ov in ov_list:
890
+ try:
891
+ _, err = ov.GetOverlappedResult(True)
892
+ except OSError as e:
893
+ err = e.winerror
894
+ if err not in _ready_errors:
895
+ raise
896
+ if err != _winapi.ERROR_OPERATION_ABORTED:
897
+ o = waithandle_to_obj[ov.event]
898
+ ready_objects.add(o)
899
+ if err == 0:
900
+ # If o.fileno() is an overlapped pipe handle then
901
+ # a zero length message HAS been consumed.
902
+ if hasattr(o, '_got_empty_message'):
903
+ o._got_empty_message = True
904
+
905
+ ready_objects.update(waithandle_to_obj[h] for h in ready_handles)
906
+ return [o for o in object_list if o in ready_objects]
907
+
908
+ else:
909
+
910
+ import selectors
911
+
912
+ # poll/select have the advantage of not requiring any extra file
913
+ # descriptor, contrarily to epoll/kqueue (also, they require a single
914
+ # syscall).
915
+ if hasattr(selectors, 'PollSelector'):
916
+ _WaitSelector = selectors.PollSelector
917
+ else:
918
+ _WaitSelector = selectors.SelectSelector
919
+
920
+ def wait(object_list, timeout=None):
921
+ '''
922
+ Wait till an object in object_list is ready/readable.
923
+
924
+ Returns list of those objects in object_list which are ready/readable.
925
+ '''
926
+ with _WaitSelector() as selector:
927
+ for obj in object_list:
928
+ selector.register(obj, selectors.EVENT_READ)
929
+
930
+ if timeout is not None:
931
+ deadline = getattr(time,'monotonic',time.time)() + timeout
932
+
933
+ while True:
934
+ ready = selector.select(timeout)
935
+ if ready:
936
+ return [key.fileobj for (key, events) in ready]
937
+ else:
938
+ if timeout is not None:
939
+ timeout = deadline - getattr(time,'monotonic',time.time)()
940
+ if timeout < 0:
941
+ return ready
942
+
943
+ #
944
+ # Make connection and socket objects sharable if possible
945
+ #
946
+
947
+ if sys.platform == 'win32':
948
+ def reduce_connection(conn):
949
+ handle = conn.fileno()
950
+ with socket.fromfd(handle, socket.AF_INET, socket.SOCK_STREAM) as s:
951
+ from . import resource_sharer
952
+ ds = resource_sharer.DupSocket(s)
953
+ return rebuild_connection, (ds, conn.readable, conn.writable)
954
+ def rebuild_connection(ds, readable, writable):
955
+ sock = ds.detach()
956
+ return Connection(sock.detach(), readable, writable)
957
+ reduction.register(Connection, reduce_connection)
958
+
959
+ def reduce_pipe_connection(conn):
960
+ access = ((_winapi.FILE_GENERIC_READ if conn.readable else 0) |
961
+ (_winapi.FILE_GENERIC_WRITE if conn.writable else 0))
962
+ dh = reduction.DupHandle(conn.fileno(), access)
963
+ return rebuild_pipe_connection, (dh, conn.readable, conn.writable)
964
+ def rebuild_pipe_connection(dh, readable, writable):
965
+ handle = dh.detach()
966
+ return PipeConnection(handle, readable, writable)
967
+ reduction.register(PipeConnection, reduce_pipe_connection)
968
+
969
+ else:
970
+ def reduce_connection(conn):
971
+ df = reduction.DupFd(conn.fileno())
972
+ return rebuild_connection, (df, conn.readable, conn.writable)
973
+ def rebuild_connection(df, readable, writable):
974
+ fd = df.detach()
975
+ return Connection(fd, readable, writable)
976
+ reduction.register(Connection, reduce_connection)
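``connection.py``'s public surface is ``Client``, ``Listener``, ``Pipe``, and ``wait``. As a rough sketch only (the address and authkey below are made-up illustrative values), the listener/client handshake defined above is used like this, with the two functions run in separate processes:

# Illustrative use of the Listener/Client pair defined above (values are examples).
from multiprocess.connection import Listener, Client

ADDRESS = ('localhost', 6000)        # hypothetical example address
AUTHKEY = b'not-a-real-secret'       # hypothetical example key

def serve_once():
    with Listener(ADDRESS, authkey=AUTHKEY) as listener:
        with listener.accept() as conn:
            conn.send({'status': 'ok'})

def ask():
    with Client(ADDRESS, authkey=AUTHKEY) as conn:
        return conn.recv()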
venv/lib/python3.10/site-packages/multiprocess/forkserver.py ADDED
@@ -0,0 +1,347 @@
1
+ import errno
2
+ import os
3
+ import selectors
4
+ import signal
5
+ import socket
6
+ import struct
7
+ import sys
8
+ import threading
9
+ import warnings
10
+
11
+ from . import connection
12
+ from . import process
13
+ from .context import reduction
14
+ from . import resource_tracker
15
+ from . import spawn
16
+ from . import util
17
+
18
+ __all__ = ['ensure_running', 'get_inherited_fds', 'connect_to_new_process',
19
+ 'set_forkserver_preload']
20
+
21
+ #
22
+ #
23
+ #
24
+
25
+ MAXFDS_TO_SEND = 256
26
+ SIGNED_STRUCT = struct.Struct('q') # large enough for pid_t
27
+
28
+ #
29
+ # Forkserver class
30
+ #
31
+
32
+ class ForkServer(object):
33
+
34
+ def __init__(self):
35
+ self._forkserver_address = None
36
+ self._forkserver_alive_fd = None
37
+ self._forkserver_pid = None
38
+ self._inherited_fds = None
39
+ self._lock = threading.Lock()
40
+ self._preload_modules = ['__main__']
41
+
42
+ def _stop(self):
43
+ # Method used by unit tests to stop the server
44
+ with self._lock:
45
+ self._stop_unlocked()
46
+
47
+ def _stop_unlocked(self):
48
+ if self._forkserver_pid is None:
49
+ return
50
+
51
+ # close the "alive" file descriptor asks the server to stop
52
+ os.close(self._forkserver_alive_fd)
53
+ self._forkserver_alive_fd = None
54
+
55
+ os.waitpid(self._forkserver_pid, 0)
56
+ self._forkserver_pid = None
57
+
58
+ if not util.is_abstract_socket_namespace(self._forkserver_address):
59
+ os.unlink(self._forkserver_address)
60
+ self._forkserver_address = None
61
+
62
+ def set_forkserver_preload(self, modules_names):
63
+ '''Set list of module names to try to load in forkserver process.'''
64
+ if not all(type(mod) is str for mod in self._preload_modules):
65
+ raise TypeError('module_names must be a list of strings')
66
+ self._preload_modules = modules_names
67
+
68
+ def get_inherited_fds(self):
69
+ '''Return list of fds inherited from parent process.
70
+
71
+ This returns None if the current process was not started by fork
72
+ server.
73
+ '''
74
+ return self._inherited_fds
75
+
76
+ def connect_to_new_process(self, fds):
77
+ '''Request forkserver to create a child process.
78
+
79
+ Returns a pair of fds (status_r, data_w). The calling process can read
80
+ the child process's pid and (eventually) its returncode from status_r.
81
+ The calling process should write to data_w the pickled preparation and
82
+ process data.
83
+ '''
84
+ self.ensure_running()
85
+ if len(fds) + 4 >= MAXFDS_TO_SEND:
86
+ raise ValueError('too many fds')
87
+ with socket.socket(socket.AF_UNIX) as client:
88
+ client.connect(self._forkserver_address)
89
+ parent_r, child_w = os.pipe()
90
+ child_r, parent_w = os.pipe()
91
+ allfds = [child_r, child_w, self._forkserver_alive_fd,
92
+ resource_tracker.getfd()]
93
+ allfds += fds
94
+ try:
95
+ reduction.sendfds(client, allfds)
96
+ return parent_r, parent_w
97
+ except:
98
+ os.close(parent_r)
99
+ os.close(parent_w)
100
+ raise
101
+ finally:
102
+ os.close(child_r)
103
+ os.close(child_w)
104
+
105
+ def ensure_running(self):
106
+ '''Make sure that a fork server is running.
107
+
108
+ This can be called from any process. Note that usually a child
109
+ process will just reuse the forkserver started by its parent, so
110
+ ensure_running() will do nothing.
111
+ '''
112
+ with self._lock:
113
+ resource_tracker.ensure_running()
114
+ if self._forkserver_pid is not None:
115
+ # forkserver was launched before, is it still running?
116
+ pid, status = os.waitpid(self._forkserver_pid, os.WNOHANG)
117
+ if not pid:
118
+ # still alive
119
+ return
120
+ # dead, launch it again
121
+ os.close(self._forkserver_alive_fd)
122
+ self._forkserver_address = None
123
+ self._forkserver_alive_fd = None
124
+ self._forkserver_pid = None
125
+
126
+ cmd = ('from multiprocess.forkserver import main; ' +
127
+ 'main(%d, %d, %r, **%r)')
128
+
129
+ if self._preload_modules:
130
+ desired_keys = {'main_path', 'sys_path'}
131
+ data = spawn.get_preparation_data('ignore')
132
+ data = {x: y for x, y in data.items() if x in desired_keys}
133
+ else:
134
+ data = {}
135
+
136
+ with socket.socket(socket.AF_UNIX) as listener:
137
+ address = connection.arbitrary_address('AF_UNIX')
138
+ listener.bind(address)
139
+ if not util.is_abstract_socket_namespace(address):
140
+ os.chmod(address, 0o600)
141
+ listener.listen()
142
+
143
+ # all client processes own the write end of the "alive" pipe;
144
+ # when they all terminate the read end becomes ready.
145
+ alive_r, alive_w = os.pipe()
146
+ try:
147
+ fds_to_pass = [listener.fileno(), alive_r]
148
+ cmd %= (listener.fileno(), alive_r, self._preload_modules,
149
+ data)
150
+ exe = spawn.get_executable()
151
+ args = [exe] + util._args_from_interpreter_flags()
152
+ args += ['-c', cmd]
153
+ pid = util.spawnv_passfds(exe, args, fds_to_pass)
154
+ except:
155
+ os.close(alive_w)
156
+ raise
157
+ finally:
158
+ os.close(alive_r)
159
+ self._forkserver_address = address
160
+ self._forkserver_alive_fd = alive_w
161
+ self._forkserver_pid = pid
162
+
163
+ #
164
+ #
165
+ #
166
+
167
+ def main(listener_fd, alive_r, preload, main_path=None, sys_path=None):
168
+ '''Run forkserver.'''
169
+ if preload:
170
+ if '__main__' in preload and main_path is not None:
171
+ process.current_process()._inheriting = True
172
+ try:
173
+ spawn.import_main_path(main_path)
174
+ finally:
175
+ del process.current_process()._inheriting
176
+ for modname in preload:
177
+ try:
178
+ __import__(modname)
179
+ except ImportError:
180
+ pass
181
+
182
+ util._close_stdin()
183
+
184
+ sig_r, sig_w = os.pipe()
185
+ os.set_blocking(sig_r, False)
186
+ os.set_blocking(sig_w, False)
187
+
188
+ def sigchld_handler(*_unused):
189
+ # Dummy signal handler, doesn't do anything
190
+ pass
191
+
192
+ handlers = {
193
+ # unblocking SIGCHLD allows the wakeup fd to notify our event loop
194
+ signal.SIGCHLD: sigchld_handler,
195
+ # protect the process from ^C
196
+ signal.SIGINT: signal.SIG_IGN,
197
+ }
198
+ old_handlers = {sig: signal.signal(sig, val)
199
+ for (sig, val) in handlers.items()}
200
+
201
+ # calling os.write() in the Python signal handler is racy
202
+ signal.set_wakeup_fd(sig_w)
203
+
204
+ # map child pids to client fds
205
+ pid_to_fd = {}
206
+
207
+ with socket.socket(socket.AF_UNIX, fileno=listener_fd) as listener, \
208
+ selectors.DefaultSelector() as selector:
209
+ _forkserver._forkserver_address = listener.getsockname()
210
+
211
+ selector.register(listener, selectors.EVENT_READ)
212
+ selector.register(alive_r, selectors.EVENT_READ)
213
+ selector.register(sig_r, selectors.EVENT_READ)
214
+
215
+ while True:
216
+ try:
217
+ while True:
218
+ rfds = [key.fileobj for (key, events) in selector.select()]
219
+ if rfds:
220
+ break
221
+
222
+ if alive_r in rfds:
223
+ # EOF because no more client processes left
224
+ assert os.read(alive_r, 1) == b'', "Not at EOF?"
225
+ raise SystemExit
226
+
227
+ if sig_r in rfds:
228
+ # Got SIGCHLD
229
+ os.read(sig_r, 65536) # exhaust
230
+ while True:
231
+ # Scan for child processes
232
+ try:
233
+ pid, sts = os.waitpid(-1, os.WNOHANG)
234
+ except ChildProcessError:
235
+ break
236
+ if pid == 0:
237
+ break
238
+ child_w = pid_to_fd.pop(pid, None)
239
+ if child_w is not None:
240
+ returncode = os.waitstatus_to_exitcode(sts)
241
+ # Send exit code to client process
242
+ try:
243
+ write_signed(child_w, returncode)
244
+ except BrokenPipeError:
245
+ # client vanished
246
+ pass
247
+ os.close(child_w)
248
+ else:
249
+ # This shouldn't happen really
250
+ warnings.warn('forkserver: waitpid returned '
251
+ 'unexpected pid %d' % pid)
252
+
253
+ if listener in rfds:
254
+ # Incoming fork request
255
+ with listener.accept()[0] as s:
256
+ # Receive fds from client
257
+ fds = reduction.recvfds(s, MAXFDS_TO_SEND + 1)
258
+ if len(fds) > MAXFDS_TO_SEND:
259
+ raise RuntimeError(
260
+ "Too many ({0:n}) fds to send".format(
261
+ len(fds)))
262
+ child_r, child_w, *fds = fds
263
+ s.close()
264
+ pid = os.fork()
265
+ if pid == 0:
266
+ # Child
267
+ code = 1
268
+ try:
269
+ listener.close()
270
+ selector.close()
271
+ unused_fds = [alive_r, child_w, sig_r, sig_w]
272
+ unused_fds.extend(pid_to_fd.values())
273
+ code = _serve_one(child_r, fds,
274
+ unused_fds,
275
+ old_handlers)
276
+ except Exception:
277
+ sys.excepthook(*sys.exc_info())
278
+ sys.stderr.flush()
279
+ finally:
280
+ os._exit(code)
281
+ else:
282
+ # Send pid to client process
283
+ try:
284
+ write_signed(child_w, pid)
285
+ except BrokenPipeError:
286
+ # client vanished
287
+ pass
288
+ pid_to_fd[pid] = child_w
289
+ os.close(child_r)
290
+ for fd in fds:
291
+ os.close(fd)
292
+
293
+ except OSError as e:
294
+ if e.errno != errno.ECONNABORTED:
295
+ raise
296
+
297
+
298
+ def _serve_one(child_r, fds, unused_fds, handlers):
299
+ # close unnecessary stuff and reset signal handlers
300
+ signal.set_wakeup_fd(-1)
301
+ for sig, val in handlers.items():
302
+ signal.signal(sig, val)
303
+ for fd in unused_fds:
304
+ os.close(fd)
305
+
306
+ (_forkserver._forkserver_alive_fd,
307
+ resource_tracker._resource_tracker._fd,
308
+ *_forkserver._inherited_fds) = fds
309
+
310
+ # Run process object received over pipe
311
+ parent_sentinel = os.dup(child_r)
312
+ code = spawn._main(child_r, parent_sentinel)
313
+
314
+ return code
315
+
316
+
317
+ #
318
+ # Read and write signed numbers
319
+ #
320
+
321
+ def read_signed(fd):
322
+ data = b''
323
+ length = SIGNED_STRUCT.size
324
+ while len(data) < length:
325
+ s = os.read(fd, length - len(data))
326
+ if not s:
327
+ raise EOFError('unexpected EOF')
328
+ data += s
329
+ return SIGNED_STRUCT.unpack(data)[0]
330
+
331
+ def write_signed(fd, n):
332
+ msg = SIGNED_STRUCT.pack(n)
333
+ while msg:
334
+ nbytes = os.write(fd, msg)
335
+ if nbytes == 0:
336
+ raise RuntimeError('should not get here')
337
+ msg = msg[nbytes:]
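
A minimal sketch of the wire helpers above, exercised over a plain pipe; it relies only on the two functions defined in this file (SIGNED_STRUCT itself is declared earlier in the module):

import os
from multiprocess.forkserver import read_signed, write_signed

r, w = os.pipe()
write_signed(w, -12345)           # pack with SIGNED_STRUCT and write until the buffer is drained
assert read_signed(r) == -12345   # read exactly SIGNED_STRUCT.size bytes back
os.close(r)
os.close(w)
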
338
+
339
+ #
340
+ #
341
+ #
342
+
343
+ _forkserver = ForkServer()
344
+ ensure_running = _forkserver.ensure_running
345
+ get_inherited_fds = _forkserver.get_inherited_fds
346
+ connect_to_new_process = _forkserver.connect_to_new_process
347
+ set_forkserver_preload = _forkserver.set_forkserver_preload
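
In normal use these aliases are driven through the forkserver start method rather than called directly. A hedged usage sketch (POSIX only; the context calls mirror the stdlib multiprocessing API, and the function name `job` is illustrative):

import multiprocess as mp

def job(x):
    return x * x

if __name__ == '__main__':
    ctx = mp.get_context('forkserver')        # selects this module as the process launcher
    ctx.set_forkserver_preload(['__main__'])  # feeds _preload_modules / main() above
    with ctx.Pool(2) as pool:
        print(pool.map(job, range(4)))        # children are forked from the server process
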
venv/lib/python3.10/site-packages/multiprocess/heap.py ADDED
@@ -0,0 +1,337 @@
1
+ #
2
+ # Module which supports allocation of memory from an mmap
3
+ #
4
+ # multiprocessing/heap.py
5
+ #
6
+ # Copyright (c) 2006-2008, R Oudkerk
7
+ # Licensed to PSF under a Contributor Agreement.
8
+ #
9
+
10
+ import bisect
11
+ from collections import defaultdict
12
+ import mmap
13
+ import os
14
+ import sys
15
+ import tempfile
16
+ import threading
17
+
18
+ from .context import reduction, assert_spawning
19
+ from . import util
20
+
21
+ __all__ = ['BufferWrapper']
22
+
23
+ #
24
+ # Inheritable class which wraps an mmap, and from which blocks can be allocated
25
+ #
26
+
27
+ if sys.platform == 'win32':
28
+
29
+ import _winapi
30
+
31
+ class Arena(object):
32
+ """
33
+ A shared memory area backed by anonymous memory (Windows).
34
+ """
35
+
36
+ _rand = tempfile._RandomNameSequence()
37
+
38
+ def __init__(self, size):
39
+ self.size = size
40
+ for i in range(100):
41
+ name = 'pym-%d-%s' % (os.getpid(), next(self._rand))
42
+ buf = mmap.mmap(-1, size, tagname=name)
43
+ if _winapi.GetLastError() == 0:
44
+ break
45
+ # We have reopened a preexisting mmap.
46
+ buf.close()
47
+ else:
48
+ raise FileExistsError('Cannot find name for new mmap')
49
+ self.name = name
50
+ self.buffer = buf
51
+ self._state = (self.size, self.name)
52
+
53
+ def __getstate__(self):
54
+ assert_spawning(self)
55
+ return self._state
56
+
57
+ def __setstate__(self, state):
58
+ self.size, self.name = self._state = state
59
+ # Reopen existing mmap
60
+ self.buffer = mmap.mmap(-1, self.size, tagname=self.name)
61
+ # XXX Temporarily preventing buildbot failures while determining
62
+ # XXX the correct long-term fix. See issue 23060
63
+ #assert _winapi.GetLastError() == _winapi.ERROR_ALREADY_EXISTS
64
+
65
+ else:
66
+
67
+ class Arena(object):
68
+ """
69
+ A shared memory area backed by a temporary file (POSIX).
70
+ """
71
+
72
+ if sys.platform == 'linux':
73
+ _dir_candidates = ['/dev/shm']
74
+ else:
75
+ _dir_candidates = []
76
+
77
+ def __init__(self, size, fd=-1):
78
+ self.size = size
79
+ self.fd = fd
80
+ if fd == -1:
81
+ # Arena is created anew (if fd != -1, it means we're coming
82
+ # from rebuild_arena() below)
83
+ self.fd, name = tempfile.mkstemp(
84
+ prefix='pym-%d-'%os.getpid(),
85
+ dir=self._choose_dir(size))
86
+ os.unlink(name)
87
+ util.Finalize(self, os.close, (self.fd,))
88
+ os.ftruncate(self.fd, size)
89
+ self.buffer = mmap.mmap(self.fd, self.size)
90
+
91
+ def _choose_dir(self, size):
92
+ # Choose a non-storage backed directory if possible,
93
+ # to improve performance
94
+ for d in self._dir_candidates:
95
+ st = os.statvfs(d)
96
+ if st.f_bavail * st.f_frsize >= size: # enough free space?
97
+ return d
98
+ return util.get_temp_dir()
99
+
100
+ def reduce_arena(a):
101
+ if a.fd == -1:
102
+ raise ValueError('Arena is unpicklable because '
103
+ 'forking was enabled when it was created')
104
+ return rebuild_arena, (a.size, reduction.DupFd(a.fd))
105
+
106
+ def rebuild_arena(size, dupfd):
107
+ return Arena(size, dupfd.detach())
108
+
109
+ reduction.register(Arena, reduce_arena)
110
+
111
+ #
112
+ # Class allowing allocation of chunks of memory from arenas
113
+ #
114
+
115
+ class Heap(object):
116
+
117
+ # Minimum malloc() alignment
118
+ _alignment = 8
119
+
120
+ _DISCARD_FREE_SPACE_LARGER_THAN = 4 * 1024 ** 2 # 4 MB
121
+ _DOUBLE_ARENA_SIZE_UNTIL = 4 * 1024 ** 2
122
+
123
+ def __init__(self, size=mmap.PAGESIZE):
124
+ self._lastpid = os.getpid()
125
+ self._lock = threading.Lock()
126
+ # Current arena allocation size
127
+ self._size = size
128
+ # A sorted list of available block sizes in arenas
129
+ self._lengths = []
130
+
131
+ # Free block management:
132
+ # - map each block size to a list of `(Arena, start, stop)` blocks
133
+ self._len_to_seq = {}
134
+ # - map `(Arena, start)` tuple to the `(Arena, start, stop)` block
135
+ # starting at that offset
136
+ self._start_to_block = {}
137
+ # - map `(Arena, stop)` tuple to the `(Arena, start, stop)` block
138
+ # ending at that offset
139
+ self._stop_to_block = {}
140
+
141
+ # Map arenas to their `(Arena, start, stop)` blocks in use
142
+ self._allocated_blocks = defaultdict(set)
143
+ self._arenas = []
144
+
145
+ # List of pending blocks to free - see comment in free() below
146
+ self._pending_free_blocks = []
147
+
148
+ # Statistics
149
+ self._n_mallocs = 0
150
+ self._n_frees = 0
151
+
152
+ @staticmethod
153
+ def _roundup(n, alignment):
154
+ # alignment must be a power of 2
155
+ mask = alignment - 1
156
+ return (n + mask) & ~mask
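
A quick worked check of the rounding trick above (valid for any power-of-two alignment):

alignment = 8
mask = alignment - 1                 # 0b0111
assert (13 + mask) & ~mask == 16     # 13 rounds up to the next multiple of 8
assert (16 + mask) & ~mask == 16     # already-aligned sizes are unchanged
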
157
+
158
+ def _new_arena(self, size):
159
+ # Create a new arena with at least the given *size*
160
+ length = self._roundup(max(self._size, size), mmap.PAGESIZE)
161
+ # We carve larger and larger arenas, for efficiency, until we
162
+ # reach a large-ish size (roughly L3 cache-sized)
163
+ if self._size < self._DOUBLE_ARENA_SIZE_UNTIL:
164
+ self._size *= 2
165
+ util.info('allocating a new mmap of length %d', length)
166
+ arena = Arena(length)
167
+ self._arenas.append(arena)
168
+ return (arena, 0, length)
169
+
170
+ def _discard_arena(self, arena):
171
+ # Possibly delete the given (unused) arena
172
+ length = arena.size
173
+ # Reusing an existing arena is faster than creating a new one, so
174
+ # we only reclaim space if it's large enough.
175
+ if length < self._DISCARD_FREE_SPACE_LARGER_THAN:
176
+ return
177
+ blocks = self._allocated_blocks.pop(arena)
178
+ assert not blocks
179
+ del self._start_to_block[(arena, 0)]
180
+ del self._stop_to_block[(arena, length)]
181
+ self._arenas.remove(arena)
182
+ seq = self._len_to_seq[length]
183
+ seq.remove((arena, 0, length))
184
+ if not seq:
185
+ del self._len_to_seq[length]
186
+ self._lengths.remove(length)
187
+
188
+ def _malloc(self, size):
189
+ # returns a large enough block -- it might be much larger
190
+ i = bisect.bisect_left(self._lengths, size)
191
+ if i == len(self._lengths):
192
+ return self._new_arena(size)
193
+ else:
194
+ length = self._lengths[i]
195
+ seq = self._len_to_seq[length]
196
+ block = seq.pop()
197
+ if not seq:
198
+ del self._len_to_seq[length], self._lengths[i]
199
+
200
+ (arena, start, stop) = block
201
+ del self._start_to_block[(arena, start)]
202
+ del self._stop_to_block[(arena, stop)]
203
+ return block
204
+
205
+ def _add_free_block(self, block):
206
+ # make block available and try to merge with its neighbours in the arena
207
+ (arena, start, stop) = block
208
+
209
+ try:
210
+ prev_block = self._stop_to_block[(arena, start)]
211
+ except KeyError:
212
+ pass
213
+ else:
214
+ start, _ = self._absorb(prev_block)
215
+
216
+ try:
217
+ next_block = self._start_to_block[(arena, stop)]
218
+ except KeyError:
219
+ pass
220
+ else:
221
+ _, stop = self._absorb(next_block)
222
+
223
+ block = (arena, start, stop)
224
+ length = stop - start
225
+
226
+ try:
227
+ self._len_to_seq[length].append(block)
228
+ except KeyError:
229
+ self._len_to_seq[length] = [block]
230
+ bisect.insort(self._lengths, length)
231
+
232
+ self._start_to_block[(arena, start)] = block
233
+ self._stop_to_block[(arena, stop)] = block
234
+
235
+ def _absorb(self, block):
236
+ # deregister this block so it can be merged with a neighbour
237
+ (arena, start, stop) = block
238
+ del self._start_to_block[(arena, start)]
239
+ del self._stop_to_block[(arena, stop)]
240
+
241
+ length = stop - start
242
+ seq = self._len_to_seq[length]
243
+ seq.remove(block)
244
+ if not seq:
245
+ del self._len_to_seq[length]
246
+ self._lengths.remove(length)
247
+
248
+ return start, stop
249
+
250
+ def _remove_allocated_block(self, block):
251
+ arena, start, stop = block
252
+ blocks = self._allocated_blocks[arena]
253
+ blocks.remove((start, stop))
254
+ if not blocks:
255
+ # Arena is entirely free, discard it from this process
256
+ self._discard_arena(arena)
257
+
258
+ def _free_pending_blocks(self):
259
+ # Free all the blocks in the pending list - called with the lock held.
260
+ while True:
261
+ try:
262
+ block = self._pending_free_blocks.pop()
263
+ except IndexError:
264
+ break
265
+ self._add_free_block(block)
266
+ self._remove_allocated_block(block)
267
+
268
+ def free(self, block):
269
+ # free a block returned by malloc()
270
+ # Since free() can be called asynchronously by the GC, it could happen
271
+ # that it's called while self._lock is held: in that case,
272
+ # self._lock.acquire() would deadlock (issue #12352). To avoid that, a
273
+ # trylock is used instead, and if the lock can't be acquired
274
+ # immediately, the block is added to a list of blocks to be freed
275
+ # synchronously sometimes later from malloc() or free(), by calling
276
+ # _free_pending_blocks() (appending and retrieving from a list is not
277
+ # strictly thread-safe but under CPython it's atomic thanks to the GIL).
278
+ if os.getpid() != self._lastpid:
279
+ raise ValueError(
280
+ "My pid ({0:n}) is not last pid {1:n}".format(
281
+ os.getpid(),self._lastpid))
282
+ if not self._lock.acquire(False):
283
+ # can't acquire the lock right now, add the block to the list of
284
+ # pending blocks to free
285
+ self._pending_free_blocks.append(block)
286
+ else:
287
+ # we hold the lock
288
+ try:
289
+ self._n_frees += 1
290
+ self._free_pending_blocks()
291
+ self._add_free_block(block)
292
+ self._remove_allocated_block(block)
293
+ finally:
294
+ self._lock.release()
295
+
296
+ def malloc(self, size):
297
+ # return a block of right size (possibly rounded up)
298
+ if size < 0:
299
+ raise ValueError("Size {0:n} out of range".format(size))
300
+ if sys.maxsize <= size:
301
+ raise OverflowError("Size {0:n} too large".format(size))
302
+ if os.getpid() != self._lastpid:
303
+ self.__init__() # reinitialize after fork
304
+ with self._lock:
305
+ self._n_mallocs += 1
306
+ # allow pending blocks to be marked available
307
+ self._free_pending_blocks()
308
+ size = self._roundup(max(size, 1), self._alignment)
309
+ (arena, start, stop) = self._malloc(size)
310
+ real_stop = start + size
311
+ if real_stop < stop:
312
+ # if the returned block is larger than necessary, mark
313
+ # the remainder available
314
+ self._add_free_block((arena, real_stop, stop))
315
+ self._allocated_blocks[arena].add((start, real_stop))
316
+ return (arena, start, real_stop)
317
+
318
+ #
319
+ # Class wrapping a block allocated out of a Heap -- can be inherited by child process
320
+ #
321
+
322
+ class BufferWrapper(object):
323
+
324
+ _heap = Heap()
325
+
326
+ def __init__(self, size):
327
+ if size < 0:
328
+ raise ValueError("Size {0:n} out of range".format(size))
329
+ if sys.maxsize <= size:
330
+ raise OverflowError("Size {0:n} too large".format(size))
331
+ block = BufferWrapper._heap.malloc(size)
332
+ self._state = (block, size)
333
+ util.Finalize(self, BufferWrapper._heap.free, args=(block,))
334
+
335
+ def create_memoryview(self):
336
+ (arena, start, stop), size = self._state
337
+ return memoryview(arena.buffer)[start:start+size]
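
A hedged sketch of direct use; in practice sharedctypes builds ctypes objects on top of BufferWrapper, but on its own it simply hands back a writable memoryview over a heap block:

from multiprocess.heap import BufferWrapper

wrapper = BufferWrapper(64)          # carve a 64-byte block out of the process-wide Heap
view = wrapper.create_memoryview()   # slice of the arena's mmap
view[:5] = b'hello'
print(bytes(view[:5]))               # b'hello'
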
venv/lib/python3.10/site-packages/multiprocess/pool.py ADDED
@@ -0,0 +1,957 @@
1
+ #
2
+ # Module providing the `Pool` class for managing a process pool
3
+ #
4
+ # multiprocessing/pool.py
5
+ #
6
+ # Copyright (c) 2006-2008, R Oudkerk
7
+ # Licensed to PSF under a Contributor Agreement.
8
+ #
9
+
10
+ __all__ = ['Pool', 'ThreadPool']
11
+
12
+ #
13
+ # Imports
14
+ #
15
+
16
+ import collections
17
+ import itertools
18
+ import os
19
+ import queue
20
+ import threading
21
+ import time
22
+ import traceback
23
+ import types
24
+ import warnings
25
+
26
+ # If threading is available then ThreadPool should be provided. Therefore
27
+ # we avoid top-level imports which are liable to fail on some systems.
28
+ from . import util
29
+ from . import get_context, TimeoutError
30
+ from .connection import wait
31
+
32
+ #
33
+ # Constants representing the state of a pool
34
+ #
35
+
36
+ INIT = "INIT"
37
+ RUN = "RUN"
38
+ CLOSE = "CLOSE"
39
+ TERMINATE = "TERMINATE"
40
+
41
+ #
42
+ # Miscellaneous
43
+ #
44
+
45
+ job_counter = itertools.count()
46
+
47
+ def mapstar(args):
48
+ return list(map(*args))
49
+
50
+ def starmapstar(args):
51
+ return list(itertools.starmap(args[0], args[1]))
52
+
53
+ #
54
+ # Hack to embed stringification of remote traceback in local traceback
55
+ #
56
+
57
+ class RemoteTraceback(Exception):
58
+ def __init__(self, tb):
59
+ self.tb = tb
60
+ def __str__(self):
61
+ return self.tb
62
+
63
+ class ExceptionWithTraceback:
64
+ def __init__(self, exc, tb):
65
+ tb = traceback.format_exception(type(exc), exc, tb)
66
+ tb = ''.join(tb)
67
+ self.exc = exc
68
+ self.tb = '\n"""\n%s"""' % tb
69
+ def __reduce__(self):
70
+ return rebuild_exc, (self.exc, self.tb)
71
+
72
+ def rebuild_exc(exc, tb):
73
+ exc.__cause__ = RemoteTraceback(tb)
74
+ return exc
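
A small sketch of what this hack buys the caller: when a worker raises, the exception re-raised in the parent carries the worker-side traceback text as its __cause__ (the function name `boom` is illustrative):

from multiprocess import Pool

def boom(x):
    raise ValueError('bad input: %r' % (x,))

if __name__ == '__main__':
    with Pool(2) as pool:
        try:
            pool.apply(boom, (42,))
        except ValueError as exc:
            print(exc.__cause__)     # RemoteTraceback holding the formatted remote traceback
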
75
+
76
+ #
77
+ # Code run by worker processes
78
+ #
79
+
80
+ class MaybeEncodingError(Exception):
81
+ """Wraps possible unpickleable errors, so they can be
82
+ safely sent through the socket."""
83
+
84
+ def __init__(self, exc, value):
85
+ self.exc = repr(exc)
86
+ self.value = repr(value)
87
+ super(MaybeEncodingError, self).__init__(self.exc, self.value)
88
+
89
+ def __str__(self):
90
+ return "Error sending result: '%s'. Reason: '%s'" % (self.value,
91
+ self.exc)
92
+
93
+ def __repr__(self):
94
+ return "<%s: %s>" % (self.__class__.__name__, self)
95
+
96
+
97
+ def worker(inqueue, outqueue, initializer=None, initargs=(), maxtasks=None,
98
+ wrap_exception=False):
99
+ if (maxtasks is not None) and not (isinstance(maxtasks, int)
100
+ and maxtasks >= 1):
101
+ raise AssertionError("Maxtasks {!r} is not valid".format(maxtasks))
102
+ put = outqueue.put
103
+ get = inqueue.get
104
+ if hasattr(inqueue, '_writer'):
105
+ inqueue._writer.close()
106
+ outqueue._reader.close()
107
+
108
+ if initializer is not None:
109
+ initializer(*initargs)
110
+
111
+ completed = 0
112
+ while maxtasks is None or (maxtasks and completed < maxtasks):
113
+ try:
114
+ task = get()
115
+ except (EOFError, OSError):
116
+ util.debug('worker got EOFError or OSError -- exiting')
117
+ break
118
+
119
+ if task is None:
120
+ util.debug('worker got sentinel -- exiting')
121
+ break
122
+
123
+ job, i, func, args, kwds = task
124
+ try:
125
+ result = (True, func(*args, **kwds))
126
+ except Exception as e:
127
+ if wrap_exception and func is not _helper_reraises_exception:
128
+ e = ExceptionWithTraceback(e, e.__traceback__)
129
+ result = (False, e)
130
+ try:
131
+ put((job, i, result))
132
+ except Exception as e:
133
+ wrapped = MaybeEncodingError(e, result[1])
134
+ util.debug("Possible encoding error while sending result: %s" % (
135
+ wrapped))
136
+ put((job, i, (False, wrapped)))
137
+
138
+ task = job = result = func = args = kwds = None
139
+ completed += 1
140
+ util.debug('worker exiting after %d tasks' % completed)
141
+
142
+ def _helper_reraises_exception(ex):
143
+ 'Pickle-able helper function for use by _guarded_task_generation.'
144
+ raise ex
145
+
146
+ #
147
+ # Class representing a process pool
148
+ #
149
+
150
+ class _PoolCache(dict):
151
+ """
152
+ Class that implements a cache for the Pool class that will notify
153
+ the pool management threads every time the cache is emptied. The
154
+ notification is done by the use of a queue that is provided when
155
+ instantiating the cache.
156
+ """
157
+ def __init__(self, /, *args, notifier=None, **kwds):
158
+ self.notifier = notifier
159
+ super().__init__(*args, **kwds)
160
+
161
+ def __delitem__(self, item):
162
+ super().__delitem__(item)
163
+
164
+ # Notify that the cache is empty. This is important because the
165
+ # pool keeps maintaining workers until the cache gets drained. This
166
+ # eliminates a race condition in which a task is finished after the
167
+ # pool's _handle_workers method has entered another iteration of the
168
+ # loop. In this situation, the only event that can wake up the pool
169
+ # is the cache to be emptied (no more tasks available).
170
+ if not self:
171
+ self.notifier.put(None)
172
+
173
+ class Pool(object):
174
+ '''
175
+ Class which supports an async version of applying functions to arguments.
176
+ '''
177
+ _wrap_exception = True
178
+
179
+ @staticmethod
180
+ def Process(ctx, *args, **kwds):
181
+ return ctx.Process(*args, **kwds)
182
+
183
+ def __init__(self, processes=None, initializer=None, initargs=(),
184
+ maxtasksperchild=None, context=None):
185
+ # Attributes initialized early to make sure that they exist in
186
+ # __del__() if __init__() raises an exception
187
+ self._pool = []
188
+ self._state = INIT
189
+
190
+ self._ctx = context or get_context()
191
+ self._setup_queues()
192
+ self._taskqueue = queue.SimpleQueue()
193
+ # The _change_notifier queue exists to wake up self._handle_workers()
194
+ # when the cache (self._cache) is empty or when there is a change in
195
+ # the _state variable of the thread that runs _handle_workers.
196
+ self._change_notifier = self._ctx.SimpleQueue()
197
+ self._cache = _PoolCache(notifier=self._change_notifier)
198
+ self._maxtasksperchild = maxtasksperchild
199
+ self._initializer = initializer
200
+ self._initargs = initargs
201
+
202
+ if processes is None:
203
+ processes = os.cpu_count() or 1
204
+ if processes < 1:
205
+ raise ValueError("Number of processes must be at least 1")
206
+ if maxtasksperchild is not None:
207
+ if not isinstance(maxtasksperchild, int) or maxtasksperchild <= 0:
208
+ raise ValueError("maxtasksperchild must be a positive int or None")
209
+
210
+ if initializer is not None and not callable(initializer):
211
+ raise TypeError('initializer must be a callable')
212
+
213
+ self._processes = processes
214
+ try:
215
+ self._repopulate_pool()
216
+ except Exception:
217
+ for p in self._pool:
218
+ if p.exitcode is None:
219
+ p.terminate()
220
+ for p in self._pool:
221
+ p.join()
222
+ raise
223
+
224
+ sentinels = self._get_sentinels()
225
+
226
+ self._worker_handler = threading.Thread(
227
+ target=Pool._handle_workers,
228
+ args=(self._cache, self._taskqueue, self._ctx, self.Process,
229
+ self._processes, self._pool, self._inqueue, self._outqueue,
230
+ self._initializer, self._initargs, self._maxtasksperchild,
231
+ self._wrap_exception, sentinels, self._change_notifier)
232
+ )
233
+ self._worker_handler.daemon = True
234
+ self._worker_handler._state = RUN
235
+ self._worker_handler.start()
236
+
237
+
238
+ self._task_handler = threading.Thread(
239
+ target=Pool._handle_tasks,
240
+ args=(self._taskqueue, self._quick_put, self._outqueue,
241
+ self._pool, self._cache)
242
+ )
243
+ self._task_handler.daemon = True
244
+ self._task_handler._state = RUN
245
+ self._task_handler.start()
246
+
247
+ self._result_handler = threading.Thread(
248
+ target=Pool._handle_results,
249
+ args=(self._outqueue, self._quick_get, self._cache)
250
+ )
251
+ self._result_handler.daemon = True
252
+ self._result_handler._state = RUN
253
+ self._result_handler.start()
254
+
255
+ self._terminate = util.Finalize(
256
+ self, self._terminate_pool,
257
+ args=(self._taskqueue, self._inqueue, self._outqueue, self._pool,
258
+ self._change_notifier, self._worker_handler, self._task_handler,
259
+ self._result_handler, self._cache),
260
+ exitpriority=15
261
+ )
262
+ self._state = RUN
263
+
264
+ # Copy globals as function locals to make sure that they are available
265
+ # during Python shutdown when the Pool is destroyed.
266
+ def __del__(self, _warn=warnings.warn, RUN=RUN):
267
+ if self._state == RUN:
268
+ _warn(f"unclosed running multiprocessing pool {self!r}",
269
+ ResourceWarning, source=self)
270
+ if getattr(self, '_change_notifier', None) is not None:
271
+ self._change_notifier.put(None)
272
+
273
+ def __repr__(self):
274
+ cls = self.__class__
275
+ return (f'<{cls.__module__}.{cls.__qualname__} '
276
+ f'state={self._state} '
277
+ f'pool_size={len(self._pool)}>')
278
+
279
+ def _get_sentinels(self):
280
+ task_queue_sentinels = [self._outqueue._reader]
281
+ self_notifier_sentinels = [self._change_notifier._reader]
282
+ return [*task_queue_sentinels, *self_notifier_sentinels]
283
+
284
+ @staticmethod
285
+ def _get_worker_sentinels(workers):
286
+ return [worker.sentinel for worker in
287
+ workers if hasattr(worker, "sentinel")]
288
+
289
+ @staticmethod
290
+ def _join_exited_workers(pool):
291
+ """Cleanup after any worker processes which have exited due to reaching
292
+ their specified lifetime. Returns True if any workers were cleaned up.
293
+ """
294
+ cleaned = False
295
+ for i in reversed(range(len(pool))):
296
+ worker = pool[i]
297
+ if worker.exitcode is not None:
298
+ # worker exited
299
+ util.debug('cleaning up worker %d' % i)
300
+ worker.join()
301
+ cleaned = True
302
+ del pool[i]
303
+ return cleaned
304
+
305
+ def _repopulate_pool(self):
306
+ return self._repopulate_pool_static(self._ctx, self.Process,
307
+ self._processes,
308
+ self._pool, self._inqueue,
309
+ self._outqueue, self._initializer,
310
+ self._initargs,
311
+ self._maxtasksperchild,
312
+ self._wrap_exception)
313
+
314
+ @staticmethod
315
+ def _repopulate_pool_static(ctx, Process, processes, pool, inqueue,
316
+ outqueue, initializer, initargs,
317
+ maxtasksperchild, wrap_exception):
318
+ """Bring the number of pool processes up to the specified number,
319
+ for use after reaping workers which have exited.
320
+ """
321
+ for i in range(processes - len(pool)):
322
+ w = Process(ctx, target=worker,
323
+ args=(inqueue, outqueue,
324
+ initializer,
325
+ initargs, maxtasksperchild,
326
+ wrap_exception))
327
+ w.name = w.name.replace('Process', 'PoolWorker')
328
+ w.daemon = True
329
+ w.start()
330
+ pool.append(w)
331
+ util.debug('added worker')
332
+
333
+ @staticmethod
334
+ def _maintain_pool(ctx, Process, processes, pool, inqueue, outqueue,
335
+ initializer, initargs, maxtasksperchild,
336
+ wrap_exception):
337
+ """Clean up any exited workers and start replacements for them.
338
+ """
339
+ if Pool._join_exited_workers(pool):
340
+ Pool._repopulate_pool_static(ctx, Process, processes, pool,
341
+ inqueue, outqueue, initializer,
342
+ initargs, maxtasksperchild,
343
+ wrap_exception)
344
+
345
+ def _setup_queues(self):
346
+ self._inqueue = self._ctx.SimpleQueue()
347
+ self._outqueue = self._ctx.SimpleQueue()
348
+ self._quick_put = self._inqueue._writer.send
349
+ self._quick_get = self._outqueue._reader.recv
350
+
351
+ def _check_running(self):
352
+ if self._state != RUN:
353
+ raise ValueError("Pool not running")
354
+
355
+ def apply(self, func, args=(), kwds={}):
356
+ '''
357
+ Equivalent of `func(*args, **kwds)`.
358
+ Pool must be running.
359
+ '''
360
+ return self.apply_async(func, args, kwds).get()
361
+
362
+ def map(self, func, iterable, chunksize=None):
363
+ '''
364
+ Apply `func` to each element in `iterable`, collecting the results
365
+ in a list that is returned.
366
+ '''
367
+ return self._map_async(func, iterable, mapstar, chunksize).get()
368
+
369
+ def starmap(self, func, iterable, chunksize=None):
370
+ '''
371
+ Like `map()` method but the elements of the `iterable` are expected to
372
+ be iterables as well and will be unpacked as arguments. Hence
373
+ `func` and (a, b) becomes func(a, b).
374
+ '''
375
+ return self._map_async(func, iterable, starmapstar, chunksize).get()
376
+
377
+ def starmap_async(self, func, iterable, chunksize=None, callback=None,
378
+ error_callback=None):
379
+ '''
380
+ Asynchronous version of `starmap()` method.
381
+ '''
382
+ return self._map_async(func, iterable, starmapstar, chunksize,
383
+ callback, error_callback)
384
+
385
+ def _guarded_task_generation(self, result_job, func, iterable):
386
+ '''Provides a generator of tasks for imap and imap_unordered with
387
+ appropriate handling for iterables which throw exceptions during
388
+ iteration.'''
389
+ try:
390
+ i = -1
391
+ for i, x in enumerate(iterable):
392
+ yield (result_job, i, func, (x,), {})
393
+ except Exception as e:
394
+ yield (result_job, i+1, _helper_reraises_exception, (e,), {})
395
+
396
+ def imap(self, func, iterable, chunksize=1):
397
+ '''
398
+ Equivalent of `map()` -- can be MUCH slower than `Pool.map()`.
399
+ '''
400
+ self._check_running()
401
+ if chunksize == 1:
402
+ result = IMapIterator(self)
403
+ self._taskqueue.put(
404
+ (
405
+ self._guarded_task_generation(result._job, func, iterable),
406
+ result._set_length
407
+ ))
408
+ return result
409
+ else:
410
+ if chunksize < 1:
411
+ raise ValueError(
412
+ "Chunksize must be 1+, not {0:n}".format(
413
+ chunksize))
414
+ task_batches = Pool._get_tasks(func, iterable, chunksize)
415
+ result = IMapIterator(self)
416
+ self._taskqueue.put(
417
+ (
418
+ self._guarded_task_generation(result._job,
419
+ mapstar,
420
+ task_batches),
421
+ result._set_length
422
+ ))
423
+ return (item for chunk in result for item in chunk)
424
+
425
+ def imap_unordered(self, func, iterable, chunksize=1):
426
+ '''
427
+ Like `imap()` method but ordering of results is arbitrary.
428
+ '''
429
+ self._check_running()
430
+ if chunksize == 1:
431
+ result = IMapUnorderedIterator(self)
432
+ self._taskqueue.put(
433
+ (
434
+ self._guarded_task_generation(result._job, func, iterable),
435
+ result._set_length
436
+ ))
437
+ return result
438
+ else:
439
+ if chunksize < 1:
440
+ raise ValueError(
441
+ "Chunksize must be 1+, not {0!r}".format(chunksize))
442
+ task_batches = Pool._get_tasks(func, iterable, chunksize)
443
+ result = IMapUnorderedIterator(self)
444
+ self._taskqueue.put(
445
+ (
446
+ self._guarded_task_generation(result._job,
447
+ mapstar,
448
+ task_batches),
449
+ result._set_length
450
+ ))
451
+ return (item for chunk in result for item in chunk)
452
+
453
+ def apply_async(self, func, args=(), kwds={}, callback=None,
454
+ error_callback=None):
455
+ '''
456
+ Asynchronous version of `apply()` method.
457
+ '''
458
+ self._check_running()
459
+ result = ApplyResult(self, callback, error_callback)
460
+ self._taskqueue.put(([(result._job, 0, func, args, kwds)], None))
461
+ return result
462
+
463
+ def map_async(self, func, iterable, chunksize=None, callback=None,
464
+ error_callback=None):
465
+ '''
466
+ Asynchronous version of `map()` method.
467
+ '''
468
+ return self._map_async(func, iterable, mapstar, chunksize, callback,
469
+ error_callback)
470
+
471
+ def _map_async(self, func, iterable, mapper, chunksize=None, callback=None,
472
+ error_callback=None):
473
+ '''
474
+ Helper function to implement map, starmap and their async counterparts.
475
+ '''
476
+ self._check_running()
477
+ if not hasattr(iterable, '__len__'):
478
+ iterable = list(iterable)
479
+
480
+ if chunksize is None:
481
+ chunksize, extra = divmod(len(iterable), len(self._pool) * 4)
482
+ if extra:
483
+ chunksize += 1
484
+ if len(iterable) == 0:
485
+ chunksize = 0
486
+
487
+ task_batches = Pool._get_tasks(func, iterable, chunksize)
488
+ result = MapResult(self, chunksize, len(iterable), callback,
489
+ error_callback=error_callback)
490
+ self._taskqueue.put(
491
+ (
492
+ self._guarded_task_generation(result._job,
493
+ mapper,
494
+ task_batches),
495
+ None
496
+ )
497
+ )
498
+ return result
499
+
500
+ @staticmethod
501
+ def _wait_for_updates(sentinels, change_notifier, timeout=None):
502
+ wait(sentinels, timeout=timeout)
503
+ while not change_notifier.empty():
504
+ change_notifier.get()
505
+
506
+ @classmethod
507
+ def _handle_workers(cls, cache, taskqueue, ctx, Process, processes,
508
+ pool, inqueue, outqueue, initializer, initargs,
509
+ maxtasksperchild, wrap_exception, sentinels,
510
+ change_notifier):
511
+ thread = threading.current_thread()
512
+
513
+ # Keep maintaining workers until the cache gets drained, unless the pool
514
+ # is terminated.
515
+ while thread._state == RUN or (cache and thread._state != TERMINATE):
516
+ cls._maintain_pool(ctx, Process, processes, pool, inqueue,
517
+ outqueue, initializer, initargs,
518
+ maxtasksperchild, wrap_exception)
519
+
520
+ current_sentinels = [*cls._get_worker_sentinels(pool), *sentinels]
521
+
522
+ cls._wait_for_updates(current_sentinels, change_notifier)
523
+ # send sentinel to stop workers
524
+ taskqueue.put(None)
525
+ util.debug('worker handler exiting')
526
+
527
+ @staticmethod
528
+ def _handle_tasks(taskqueue, put, outqueue, pool, cache):
529
+ thread = threading.current_thread()
530
+
531
+ for taskseq, set_length in iter(taskqueue.get, None):
532
+ task = None
533
+ try:
534
+ # iterating taskseq cannot fail
535
+ for task in taskseq:
536
+ if thread._state != RUN:
537
+ util.debug('task handler found thread._state != RUN')
538
+ break
539
+ try:
540
+ put(task)
541
+ except Exception as e:
542
+ job, idx = task[:2]
543
+ try:
544
+ cache[job]._set(idx, (False, e))
545
+ except KeyError:
546
+ pass
547
+ else:
548
+ if set_length:
549
+ util.debug('doing set_length()')
550
+ idx = task[1] if task else -1
551
+ set_length(idx + 1)
552
+ continue
553
+ break
554
+ finally:
555
+ task = taskseq = job = None
556
+ else:
557
+ util.debug('task handler got sentinel')
558
+
559
+ try:
560
+ # tell result handler to finish when cache is empty
561
+ util.debug('task handler sending sentinel to result handler')
562
+ outqueue.put(None)
563
+
564
+ # tell workers there is no more work
565
+ util.debug('task handler sending sentinel to workers')
566
+ for p in pool:
567
+ put(None)
568
+ except OSError:
569
+ util.debug('task handler got OSError when sending sentinels')
570
+
571
+ util.debug('task handler exiting')
572
+
573
+ @staticmethod
574
+ def _handle_results(outqueue, get, cache):
575
+ thread = threading.current_thread()
576
+
577
+ while 1:
578
+ try:
579
+ task = get()
580
+ except (OSError, EOFError):
581
+ util.debug('result handler got EOFError/OSError -- exiting')
582
+ return
583
+
584
+ if thread._state != RUN:
585
+ assert thread._state == TERMINATE, "Thread not in TERMINATE"
586
+ util.debug('result handler found thread._state=TERMINATE')
587
+ break
588
+
589
+ if task is None:
590
+ util.debug('result handler got sentinel')
591
+ break
592
+
593
+ job, i, obj = task
594
+ try:
595
+ cache[job]._set(i, obj)
596
+ except KeyError:
597
+ pass
598
+ task = job = obj = None
599
+
600
+ while cache and thread._state != TERMINATE:
601
+ try:
602
+ task = get()
603
+ except (OSError, EOFError):
604
+ util.debug('result handler got EOFError/OSError -- exiting')
605
+ return
606
+
607
+ if task is None:
608
+ util.debug('result handler ignoring extra sentinel')
609
+ continue
610
+ job, i, obj = task
611
+ try:
612
+ cache[job]._set(i, obj)
613
+ except KeyError:
614
+ pass
615
+ task = job = obj = None
616
+
617
+ if hasattr(outqueue, '_reader'):
618
+ util.debug('ensuring that outqueue is not full')
619
+ # If we don't make room available in outqueue then
620
+ # attempts to add the sentinel (None) to outqueue may
621
+ # block. There is guaranteed to be no more than 2 sentinels.
622
+ try:
623
+ for i in range(10):
624
+ if not outqueue._reader.poll():
625
+ break
626
+ get()
627
+ except (OSError, EOFError):
628
+ pass
629
+
630
+ util.debug('result handler exiting: len(cache)=%s, thread._state=%s',
631
+ len(cache), thread._state)
632
+
633
+ @staticmethod
634
+ def _get_tasks(func, it, size):
635
+ it = iter(it)
636
+ while 1:
637
+ x = tuple(itertools.islice(it, size))
638
+ if not x:
639
+ return
640
+ yield (func, x)
641
+
642
+ def __reduce__(self):
643
+ raise NotImplementedError(
644
+ 'pool objects cannot be passed between processes or pickled'
645
+ )
646
+
647
+ def close(self):
648
+ util.debug('closing pool')
649
+ if self._state == RUN:
650
+ self._state = CLOSE
651
+ self._worker_handler._state = CLOSE
652
+ self._change_notifier.put(None)
653
+
654
+ def terminate(self):
655
+ util.debug('terminating pool')
656
+ self._state = TERMINATE
657
+ self._terminate()
658
+
659
+ def join(self):
660
+ util.debug('joining pool')
661
+ if self._state == RUN:
662
+ raise ValueError("Pool is still running")
663
+ elif self._state not in (CLOSE, TERMINATE):
664
+ raise ValueError("In unknown state")
665
+ self._worker_handler.join()
666
+ self._task_handler.join()
667
+ self._result_handler.join()
668
+ for p in self._pool:
669
+ p.join()
670
+
671
+ @staticmethod
672
+ def _help_stuff_finish(inqueue, task_handler, size):
673
+ # task_handler may be blocked trying to put items on inqueue
674
+ util.debug('removing tasks from inqueue until task handler finished')
675
+ inqueue._rlock.acquire()
676
+ while task_handler.is_alive() and inqueue._reader.poll():
677
+ inqueue._reader.recv()
678
+ time.sleep(0)
679
+
680
+ @classmethod
681
+ def _terminate_pool(cls, taskqueue, inqueue, outqueue, pool, change_notifier,
682
+ worker_handler, task_handler, result_handler, cache):
683
+ # this is guaranteed to only be called once
684
+ util.debug('finalizing pool')
685
+
686
+ # Notify that the worker_handler state has been changed so the
687
+ # _handle_workers loop can be unblocked (and exited) in order to
688
+ # send the finalization sentinel to all the workers.
689
+ worker_handler._state = TERMINATE
690
+ change_notifier.put(None)
691
+
692
+ task_handler._state = TERMINATE
693
+
694
+ util.debug('helping task handler/workers to finish')
695
+ cls._help_stuff_finish(inqueue, task_handler, len(pool))
696
+
697
+ if (not result_handler.is_alive()) and (len(cache) != 0):
698
+ raise AssertionError(
699
+ "Cannot have cache with result_hander not alive")
700
+
701
+ result_handler._state = TERMINATE
702
+ change_notifier.put(None)
703
+ outqueue.put(None) # sentinel
704
+
705
+ # We must wait for the worker handler to exit before terminating
706
+ # workers because we don't want workers to be restarted behind our back.
707
+ util.debug('joining worker handler')
708
+ if threading.current_thread() is not worker_handler:
709
+ worker_handler.join()
710
+
711
+ # Terminate workers which haven't already finished.
712
+ if pool and hasattr(pool[0], 'terminate'):
713
+ util.debug('terminating workers')
714
+ for p in pool:
715
+ if p.exitcode is None:
716
+ p.terminate()
717
+
718
+ util.debug('joining task handler')
719
+ if threading.current_thread() is not task_handler:
720
+ task_handler.join()
721
+
722
+ util.debug('joining result handler')
723
+ if threading.current_thread() is not result_handler:
724
+ result_handler.join()
725
+
726
+ if pool and hasattr(pool[0], 'terminate'):
727
+ util.debug('joining pool workers')
728
+ for p in pool:
729
+ if p.is_alive():
730
+ # worker has not yet exited
731
+ util.debug('cleaning up worker %d' % p.pid)
732
+ p.join()
733
+
734
+ def __enter__(self):
735
+ self._check_running()
736
+ return self
737
+
738
+ def __exit__(self, exc_type, exc_val, exc_tb):
739
+ self.terminate()
740
+
741
+ #
742
+ # Class whose instances are returned by `Pool.apply_async()`
743
+ #
744
+
745
+ class ApplyResult(object):
746
+
747
+ def __init__(self, pool, callback, error_callback):
748
+ self._pool = pool
749
+ self._event = threading.Event()
750
+ self._job = next(job_counter)
751
+ self._cache = pool._cache
752
+ self._callback = callback
753
+ self._error_callback = error_callback
754
+ self._cache[self._job] = self
755
+
756
+ def ready(self):
757
+ return self._event.is_set()
758
+
759
+ def successful(self):
760
+ if not self.ready():
761
+ raise ValueError("{0!r} not ready".format(self))
762
+ return self._success
763
+
764
+ def wait(self, timeout=None):
765
+ self._event.wait(timeout)
766
+
767
+ def get(self, timeout=None):
768
+ self.wait(timeout)
769
+ if not self.ready():
770
+ raise TimeoutError
771
+ if self._success:
772
+ return self._value
773
+ else:
774
+ raise self._value
775
+
776
+ def _set(self, i, obj):
777
+ self._success, self._value = obj
778
+ if self._callback and self._success:
779
+ self._callback(self._value)
780
+ if self._error_callback and not self._success:
781
+ self._error_callback(self._value)
782
+ self._event.set()
783
+ del self._cache[self._job]
784
+ self._pool = None
785
+
786
+ __class_getitem__ = classmethod(types.GenericAlias)
787
+
788
+ AsyncResult = ApplyResult # create alias -- see #17805
789
+
790
+ #
791
+ # Class whose instances are returned by `Pool.map_async()`
792
+ #
793
+
794
+ class MapResult(ApplyResult):
795
+
796
+ def __init__(self, pool, chunksize, length, callback, error_callback):
797
+ ApplyResult.__init__(self, pool, callback,
798
+ error_callback=error_callback)
799
+ self._success = True
800
+ self._value = [None] * length
801
+ self._chunksize = chunksize
802
+ if chunksize <= 0:
803
+ self._number_left = 0
804
+ self._event.set()
805
+ del self._cache[self._job]
806
+ else:
807
+ self._number_left = length//chunksize + bool(length % chunksize)
808
+
809
+ def _set(self, i, success_result):
810
+ self._number_left -= 1
811
+ success, result = success_result
812
+ if success and self._success:
813
+ self._value[i*self._chunksize:(i+1)*self._chunksize] = result
814
+ if self._number_left == 0:
815
+ if self._callback:
816
+ self._callback(self._value)
817
+ del self._cache[self._job]
818
+ self._event.set()
819
+ self._pool = None
820
+ else:
821
+ if not success and self._success:
822
+ # only store first exception
823
+ self._success = False
824
+ self._value = result
825
+ if self._number_left == 0:
826
+ # only consider the result ready once all jobs are done
827
+ if self._error_callback:
828
+ self._error_callback(self._value)
829
+ del self._cache[self._job]
830
+ self._event.set()
831
+ self._pool = None
832
+
833
+ #
834
+ # Class whose instances are returned by `Pool.imap()`
835
+ #
836
+
837
+ class IMapIterator(object):
838
+
839
+ def __init__(self, pool):
840
+ self._pool = pool
841
+ self._cond = threading.Condition(threading.Lock())
842
+ self._job = next(job_counter)
843
+ self._cache = pool._cache
844
+ self._items = collections.deque()
845
+ self._index = 0
846
+ self._length = None
847
+ self._unsorted = {}
848
+ self._cache[self._job] = self
849
+
850
+ def __iter__(self):
851
+ return self
852
+
853
+ def next(self, timeout=None):
854
+ with self._cond:
855
+ try:
856
+ item = self._items.popleft()
857
+ except IndexError:
858
+ if self._index == self._length:
859
+ self._pool = None
860
+ raise StopIteration from None
861
+ self._cond.wait(timeout)
862
+ try:
863
+ item = self._items.popleft()
864
+ except IndexError:
865
+ if self._index == self._length:
866
+ self._pool = None
867
+ raise StopIteration from None
868
+ raise TimeoutError from None
869
+
870
+ success, value = item
871
+ if success:
872
+ return value
873
+ raise value
874
+
875
+ __next__ = next # XXX
876
+
877
+ def _set(self, i, obj):
878
+ with self._cond:
879
+ if self._index == i:
880
+ self._items.append(obj)
881
+ self._index += 1
882
+ while self._index in self._unsorted:
883
+ obj = self._unsorted.pop(self._index)
884
+ self._items.append(obj)
885
+ self._index += 1
886
+ self._cond.notify()
887
+ else:
888
+ self._unsorted[i] = obj
889
+
890
+ if self._index == self._length:
891
+ del self._cache[self._job]
892
+ self._pool = None
893
+
894
+ def _set_length(self, length):
895
+ with self._cond:
896
+ self._length = length
897
+ if self._index == self._length:
898
+ self._cond.notify()
899
+ del self._cache[self._job]
900
+ self._pool = None
901
+
902
+ #
903
+ # Class whose instances are returned by `Pool.imap_unordered()`
904
+ #
905
+
906
+ class IMapUnorderedIterator(IMapIterator):
907
+
908
+ def _set(self, i, obj):
909
+ with self._cond:
910
+ self._items.append(obj)
911
+ self._index += 1
912
+ self._cond.notify()
913
+ if self._index == self._length:
914
+ del self._cache[self._job]
915
+ self._pool = None
916
+
917
+ #
918
+ #
919
+ #
920
+
921
+ class ThreadPool(Pool):
922
+ _wrap_exception = False
923
+
924
+ @staticmethod
925
+ def Process(ctx, *args, **kwds):
926
+ from .dummy import Process
927
+ return Process(*args, **kwds)
928
+
929
+ def __init__(self, processes=None, initializer=None, initargs=()):
930
+ Pool.__init__(self, processes, initializer, initargs)
931
+
932
+ def _setup_queues(self):
933
+ self._inqueue = queue.SimpleQueue()
934
+ self._outqueue = queue.SimpleQueue()
935
+ self._quick_put = self._inqueue.put
936
+ self._quick_get = self._outqueue.get
937
+
938
+ def _get_sentinels(self):
939
+ return [self._change_notifier._reader]
940
+
941
+ @staticmethod
942
+ def _get_worker_sentinels(workers):
943
+ return []
944
+
945
+ @staticmethod
946
+ def _help_stuff_finish(inqueue, task_handler, size):
947
+ # drain inqueue, and put sentinels at its head to make workers finish
948
+ try:
949
+ while True:
950
+ inqueue.get(block=False)
951
+ except queue.Empty:
952
+ pass
953
+ for i in range(size):
954
+ inqueue.put(None)
955
+
956
+ def _wait_for_updates(self, sentinels, change_notifier, timeout):
957
+ time.sleep(timeout)
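
Taken together, a hedged usage sketch of the two pool flavours defined in this module (multiprocess mirrors the stdlib multiprocessing API; `square` is illustrative):

import multiprocess as mp
from multiprocess.pool import ThreadPool

def square(x):
    return x * x

if __name__ == '__main__':
    with mp.Pool(processes=4) as pool:                        # process-backed Pool from the default context
        print(pool.map(square, range(10)))
        print(sorted(pool.imap_unordered(square, range(10))))
        print(pool.apply_async(square, (7,)).get(timeout=5))

    with ThreadPool(4) as pool:                               # same interface, threads instead of processes
        print(pool.starmap(pow, [(2, 3), (3, 2)]))
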
venv/lib/python3.10/site-packages/multiprocess/popen_forkserver.py ADDED
@@ -0,0 +1,74 @@
1
+ import io
2
+ import os
3
+
4
+ from .context import reduction, set_spawning_popen
5
+ if not reduction.HAVE_SEND_HANDLE:
6
+ raise ImportError('No support for sending fds between processes')
7
+ from . import forkserver
8
+ from . import popen_fork
9
+ from . import spawn
10
+ from . import util
11
+
12
+
13
+ __all__ = ['Popen']
14
+
15
+ #
16
+ # Wrapper for an fd used while launching a process
17
+ #
18
+
19
+ class _DupFd(object):
20
+ def __init__(self, ind):
21
+ self.ind = ind
22
+ def detach(self):
23
+ return forkserver.get_inherited_fds()[self.ind]
24
+
25
+ #
26
+ # Start child process using a server process
27
+ #
28
+
29
+ class Popen(popen_fork.Popen):
30
+ method = 'forkserver'
31
+ DupFd = _DupFd
32
+
33
+ def __init__(self, process_obj):
34
+ self._fds = []
35
+ super().__init__(process_obj)
36
+
37
+ def duplicate_for_child(self, fd):
38
+ self._fds.append(fd)
39
+ return len(self._fds) - 1
40
+
41
+ def _launch(self, process_obj):
42
+ prep_data = spawn.get_preparation_data(process_obj._name)
43
+ buf = io.BytesIO()
44
+ set_spawning_popen(self)
45
+ try:
46
+ reduction.dump(prep_data, buf)
47
+ reduction.dump(process_obj, buf)
48
+ finally:
49
+ set_spawning_popen(None)
50
+
51
+ self.sentinel, w = forkserver.connect_to_new_process(self._fds)
52
+ # Keep a duplicate of the data pipe's write end as a sentinel of the
53
+ # parent process used by the child process.
54
+ _parent_w = os.dup(w)
55
+ self.finalizer = util.Finalize(self, util.close_fds,
56
+ (_parent_w, self.sentinel))
57
+ with open(w, 'wb', closefd=True) as f:
58
+ f.write(buf.getbuffer())
59
+ self.pid = forkserver.read_signed(self.sentinel)
60
+
61
+ def poll(self, flag=os.WNOHANG):
62
+ if self.returncode is None:
63
+ from multiprocess.connection import wait
64
+ timeout = 0 if flag == os.WNOHANG else None
65
+ if not wait([self.sentinel], timeout):
66
+ return None
67
+ try:
68
+ self.returncode = forkserver.read_signed(self.sentinel)
69
+ except (OSError, EOFError):
70
+ # This should not happen usually, but perhaps the forkserver
71
+ # process itself got killed
72
+ self.returncode = 255
73
+
74
+ return self.returncode
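
This Popen class is picked up implicitly once the forkserver start method is selected; a brief sketch of that path (POSIX only; `greet` is illustrative):

import multiprocess as mp

def greet(name):
    print('hello from', name)

if __name__ == '__main__':
    ctx = mp.get_context('forkserver')    # Process objects from this context launch via the Popen above
    p = ctx.Process(target=greet, args=('forkserver child',))
    p.start()                             # _launch() pickles the process object and ships it to the server
    p.join()
    print(p.exitcode)                     # exit code read back with forkserver.read_signed() in poll()
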
venv/lib/python3.10/site-packages/multiprocess/queues.py ADDED
@@ -0,0 +1,382 @@
1
+ #
2
+ # Module implementing queues
3
+ #
4
+ # multiprocessing/queues.py
5
+ #
6
+ # Copyright (c) 2006-2008, R Oudkerk
7
+ # Licensed to PSF under a Contributor Agreement.
8
+ #
9
+
10
+ __all__ = ['Queue', 'SimpleQueue', 'JoinableQueue']
11
+
12
+ import sys
13
+ import os
14
+ import threading
15
+ import collections
16
+ import time
17
+ import types
18
+ import weakref
19
+ import errno
20
+
21
+ from queue import Empty, Full
22
+
23
+ try:
24
+ import _multiprocess as _multiprocessing
25
+ except ImportError:
26
+ import _multiprocessing
27
+
28
+ from . import connection
29
+ from . import context
30
+ _ForkingPickler = context.reduction.ForkingPickler
31
+
32
+ from .util import debug, info, Finalize, register_after_fork, is_exiting
33
+
34
+ #
35
+ # Queue type using a pipe, buffer and thread
36
+ #
37
+
38
+ class Queue(object):
39
+
40
+ def __init__(self, maxsize=0, *, ctx):
41
+ if maxsize <= 0:
42
+ # Can raise ImportError (see issues #3770 and #23400)
43
+ from .synchronize import SEM_VALUE_MAX as maxsize
44
+ self._maxsize = maxsize
45
+ self._reader, self._writer = connection.Pipe(duplex=False)
46
+ self._rlock = ctx.Lock()
47
+ self._opid = os.getpid()
48
+ if sys.platform == 'win32':
49
+ self._wlock = None
50
+ else:
51
+ self._wlock = ctx.Lock()
52
+ self._sem = ctx.BoundedSemaphore(maxsize)
53
+ # For use by concurrent.futures
54
+ self._ignore_epipe = False
55
+ self._reset()
56
+
57
+ if sys.platform != 'win32':
58
+ register_after_fork(self, Queue._after_fork)
59
+
60
+ def __getstate__(self):
61
+ context.assert_spawning(self)
62
+ return (self._ignore_epipe, self._maxsize, self._reader, self._writer,
63
+ self._rlock, self._wlock, self._sem, self._opid)
64
+
65
+ def __setstate__(self, state):
66
+ (self._ignore_epipe, self._maxsize, self._reader, self._writer,
67
+ self._rlock, self._wlock, self._sem, self._opid) = state
68
+ self._reset()
69
+
70
+ def _after_fork(self):
71
+ debug('Queue._after_fork()')
72
+ self._reset(after_fork=True)
73
+
74
+ def _reset(self, after_fork=False):
75
+ if after_fork:
76
+ self._notempty._at_fork_reinit()
77
+ else:
78
+ self._notempty = threading.Condition(threading.Lock())
79
+ self._buffer = collections.deque()
80
+ self._thread = None
81
+ self._jointhread = None
82
+ self._joincancelled = False
83
+ self._closed = False
84
+ self._close = None
85
+ self._send_bytes = self._writer.send_bytes
86
+ self._recv_bytes = self._reader.recv_bytes
87
+ self._poll = self._reader.poll
88
+
89
+ def put(self, obj, block=True, timeout=None):
90
+ if self._closed:
91
+ raise ValueError(f"Queue {self!r} is closed")
92
+ if not self._sem.acquire(block, timeout):
93
+ raise Full
94
+
95
+ with self._notempty:
96
+ if self._thread is None:
97
+ self._start_thread()
98
+ self._buffer.append(obj)
99
+ self._notempty.notify()
100
+
101
+ def get(self, block=True, timeout=None):
102
+ if self._closed:
103
+ raise ValueError(f"Queue {self!r} is closed")
104
+ if block and timeout is None:
105
+ with self._rlock:
106
+ res = self._recv_bytes()
107
+ self._sem.release()
108
+ else:
109
+ if block:
110
+ deadline = getattr(time,'monotonic',time.time)() + timeout
111
+ if not self._rlock.acquire(block, timeout):
112
+ raise Empty
113
+ try:
114
+ if block:
115
+ timeout = deadline - getattr(time,'monotonic',time.time)()
116
+ if not self._poll(timeout):
117
+ raise Empty
118
+ elif not self._poll():
119
+ raise Empty
120
+ res = self._recv_bytes()
121
+ self._sem.release()
122
+ finally:
123
+ self._rlock.release()
124
+ # unserialize the data after having released the lock
125
+ return _ForkingPickler.loads(res)
126
+
127
+ def qsize(self):
128
+ # Raises NotImplementedError on Mac OSX because of broken sem_getvalue()
129
+ return self._maxsize - self._sem._semlock._get_value()
130
+
131
+ def empty(self):
132
+ return not self._poll()
133
+
134
+ def full(self):
135
+ return self._sem._semlock._is_zero()
136
+
137
+ def get_nowait(self):
138
+ return self.get(False)
139
+
140
+ def put_nowait(self, obj):
141
+ return self.put(obj, False)
142
+
143
+ def close(self):
144
+ self._closed = True
145
+ close = self._close
146
+ if close:
147
+ self._close = None
148
+ close()
149
+
150
+ def join_thread(self):
151
+ debug('Queue.join_thread()')
152
+ assert self._closed, "Queue {0!r} not closed".format(self)
153
+ if self._jointhread:
154
+ self._jointhread()
155
+
156
+ def cancel_join_thread(self):
157
+ debug('Queue.cancel_join_thread()')
158
+ self._joincancelled = True
159
+ try:
160
+ self._jointhread.cancel()
161
+ except AttributeError:
162
+ pass
163
+
164
+ def _start_thread(self):
165
+ debug('Queue._start_thread()')
166
+
167
+ # Start thread which transfers data from buffer to pipe
168
+ self._buffer.clear()
169
+ self._thread = threading.Thread(
170
+ target=Queue._feed,
171
+ args=(self._buffer, self._notempty, self._send_bytes,
172
+ self._wlock, self._reader.close, self._writer.close,
173
+ self._ignore_epipe, self._on_queue_feeder_error,
174
+ self._sem),
175
+ name='QueueFeederThread'
176
+ )
177
+ self._thread.daemon = True
178
+
179
+ debug('doing self._thread.start()')
180
+ self._thread.start()
181
+ debug('... done self._thread.start()')
182
+
183
+ if not self._joincancelled:
184
+ self._jointhread = Finalize(
185
+ self._thread, Queue._finalize_join,
186
+ [weakref.ref(self._thread)],
187
+ exitpriority=-5
188
+ )
189
+
190
+ # Send sentinel to the thread queue object when garbage collected
191
+ self._close = Finalize(
192
+ self, Queue._finalize_close,
193
+ [self._buffer, self._notempty],
194
+ exitpriority=10
195
+ )
196
+
197
+ @staticmethod
198
+ def _finalize_join(twr):
199
+ debug('joining queue thread')
200
+ thread = twr()
201
+ if thread is not None:
202
+ thread.join()
203
+ debug('... queue thread joined')
204
+ else:
205
+ debug('... queue thread already dead')
206
+
207
+ @staticmethod
208
+ def _finalize_close(buffer, notempty):
209
+ debug('telling queue thread to quit')
210
+ with notempty:
211
+ buffer.append(_sentinel)
212
+ notempty.notify()
213
+
214
+ @staticmethod
215
+ def _feed(buffer, notempty, send_bytes, writelock, reader_close,
216
+ writer_close, ignore_epipe, onerror, queue_sem):
217
+ debug('starting thread to feed data to pipe')
218
+ nacquire = notempty.acquire
219
+ nrelease = notempty.release
220
+ nwait = notempty.wait
221
+ bpopleft = buffer.popleft
222
+ sentinel = _sentinel
223
+ if sys.platform != 'win32':
224
+ wacquire = writelock.acquire
225
+ wrelease = writelock.release
226
+ else:
227
+ wacquire = None
228
+
229
+ while 1:
230
+ try:
231
+ nacquire()
232
+ try:
233
+ if not buffer:
234
+ nwait()
235
+ finally:
236
+ nrelease()
237
+ try:
238
+ while 1:
239
+ obj = bpopleft()
240
+ if obj is sentinel:
241
+ debug('feeder thread got sentinel -- exiting')
242
+ reader_close()
243
+ writer_close()
244
+ return
245
+
246
+ # serialize the data before acquiring the lock
247
+ obj = _ForkingPickler.dumps(obj)
248
+ if wacquire is None:
249
+ send_bytes(obj)
250
+ else:
251
+ wacquire()
252
+ try:
253
+ send_bytes(obj)
254
+ finally:
255
+ wrelease()
256
+ except IndexError:
257
+ pass
258
+ except Exception as e:
259
+ if ignore_epipe and getattr(e, 'errno', 0) == errno.EPIPE:
260
+ return
261
+ # Since this runs in a daemon thread the resources it uses
262
+ # may become unusable while the process is cleaning up.
263
+ # We ignore errors which happen after the process has
264
+ # started to cleanup.
265
+ if is_exiting():
266
+ info('error in queue thread: %s', e)
267
+ return
268
+ else:
269
+ # Since the object has not been sent in the queue, we need
270
+ # to decrease the size of the queue. The error acts as
271
+ # if the object had been silently removed from the queue
272
+ # and this step is necessary to have a properly working
273
+ # queue.
274
+ queue_sem.release()
275
+ onerror(e, obj)
276
+
277
+ @staticmethod
278
+ def _on_queue_feeder_error(e, obj):
279
+ """
280
+ Private API hook called when feeding data in the background thread
281
+ raises an exception. For overriding by concurrent.futures.
282
+ """
283
+ import traceback
284
+ traceback.print_exc()
285
+
286
+
287
+ _sentinel = object()
288
+
289
+ #
290
+ # A queue type which also supports join() and task_done() methods
291
+ #
292
+ # Note that if you do not call task_done() for each finished task then
293
+ # eventually the counter's semaphore may overflow causing Bad Things
294
+ # to happen.
295
+ #
296
+
297
+ class JoinableQueue(Queue):
298
+
299
+ def __init__(self, maxsize=0, *, ctx):
300
+ Queue.__init__(self, maxsize, ctx=ctx)
301
+ self._unfinished_tasks = ctx.Semaphore(0)
302
+ self._cond = ctx.Condition()
303
+
304
+ def __getstate__(self):
305
+ return Queue.__getstate__(self) + (self._cond, self._unfinished_tasks)
306
+
307
+ def __setstate__(self, state):
308
+ Queue.__setstate__(self, state[:-2])
309
+ self._cond, self._unfinished_tasks = state[-2:]
310
+
311
+ def put(self, obj, block=True, timeout=None):
312
+ if self._closed:
313
+ raise ValueError(f"Queue {self!r} is closed")
314
+ if not self._sem.acquire(block, timeout):
315
+ raise Full
316
+
317
+ with self._notempty, self._cond:
318
+ if self._thread is None:
319
+ self._start_thread()
320
+ self._buffer.append(obj)
321
+ self._unfinished_tasks.release()
322
+ self._notempty.notify()
323
+
324
+ def task_done(self):
325
+ with self._cond:
326
+ if not self._unfinished_tasks.acquire(False):
327
+ raise ValueError('task_done() called too many times')
328
+ if self._unfinished_tasks._semlock._is_zero():
329
+ self._cond.notify_all()
330
+
331
+ def join(self):
332
+ with self._cond:
333
+ if not self._unfinished_tasks._semlock._is_zero():
334
+ self._cond.wait()
335
+
336
+ #
337
+ # Simplified Queue type -- really just a locked pipe
338
+ #
339
+
340
+ class SimpleQueue(object):
341
+
342
+ def __init__(self, *, ctx):
343
+ self._reader, self._writer = connection.Pipe(duplex=False)
344
+ self._rlock = ctx.Lock()
345
+ self._poll = self._reader.poll
346
+ if sys.platform == 'win32':
347
+ self._wlock = None
348
+ else:
349
+ self._wlock = ctx.Lock()
350
+
351
+ def close(self):
352
+ self._reader.close()
353
+ self._writer.close()
354
+
355
+ def empty(self):
356
+ return not self._poll()
357
+
358
+ def __getstate__(self):
359
+ context.assert_spawning(self)
360
+ return (self._reader, self._writer, self._rlock, self._wlock)
361
+
362
+ def __setstate__(self, state):
363
+ (self._reader, self._writer, self._rlock, self._wlock) = state
364
+ self._poll = self._reader.poll
365
+
366
+ def get(self):
367
+ with self._rlock:
368
+ res = self._reader.recv_bytes()
369
+ # unserialize the data after having released the lock
370
+ return _ForkingPickler.loads(res)
371
+
372
+ def put(self, obj):
373
+ # serialize the data before acquiring the lock
374
+ obj = _ForkingPickler.dumps(obj)
375
+ if self._wlock is None:
376
+ # writes to a message oriented win32 pipe are atomic
377
+ self._writer.send_bytes(obj)
378
+ else:
379
+ with self._wlock:
380
+ self._writer.send_bytes(obj)
381
+
382
+ __class_getitem__ = classmethod(types.GenericAlias)
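
The queue variants above all move pickled bytes through a pipe, with a feeder thread on the sending side. A minimal usage sketch follows (not part of the uploaded file; it assumes the multiprocess package is importable — with the standard library, substitute multiprocessing):

import multiprocess as mp

def worker(q, out):
    # get() blocks until the parent's feeder thread has pushed data;
    # None is used here as a stop sentinel.
    for item in iter(q.get, None):
        out.put(item * 2)

if __name__ == '__main__':
    ctx = mp.get_context('spawn')
    q, out = ctx.Queue(), ctx.SimpleQueue()
    p = ctx.Process(target=worker, args=(q, out))
    p.start()
    for i in range(3):
        q.put(i)
    q.put(None)                           # tell the worker to exit
    p.join()
    print([out.get() for _ in range(3)])  # -> [0, 2, 4]
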
venv/lib/python3.10/site-packages/multiprocess/resource_tracker.py ADDED
@@ -0,0 +1,242 @@
1
+ ###############################################################################
2
+ # Server process to keep track of unlinked resources (like shared memory
3
+ # segments, semaphores etc.) and clean them.
4
+ #
5
+ # On Unix we run a server process which keeps track of unlinked
6
+ # resources. The server ignores SIGINT and SIGTERM and reads from a
7
+ # pipe. Every other process of the program has a copy of the writable
8
+ # end of the pipe, so we get EOF when all other processes have exited.
9
+ # Then the server process unlinks any remaining resource names.
10
+ #
11
+ # This is important because there may be system limits for such resources: for
12
+ # instance, the system only supports a limited number of named semaphores, and
13
+ # shared-memory segments live in the RAM. If a python process leaks such a
14
+ # resource, this resource will not be removed till the next reboot. Without
15
+ # this resource tracker process, "killall python" would probably leave unlinked
16
+ # resources.
17
+
18
+ import os
19
+ import signal
20
+ import sys
21
+ import threading
22
+ import warnings
23
+
24
+ from . import spawn
25
+ from . import util
26
+
27
+ __all__ = ['ensure_running', 'register', 'unregister']
28
+
29
+ _HAVE_SIGMASK = hasattr(signal, 'pthread_sigmask')
30
+ _IGNORED_SIGNALS = (signal.SIGINT, signal.SIGTERM)
31
+
32
+ _CLEANUP_FUNCS = {
33
+ 'noop': lambda: None,
34
+ }
35
+
36
+ if os.name == 'posix':
37
+ try:
38
+ import _multiprocess as _multiprocessing
39
+ except ImportError:
40
+ import _multiprocessing
41
+ import _posixshmem
42
+
43
+ # Use sem_unlink() to clean up named semaphores.
44
+ #
45
+ # sem_unlink() may be missing if the Python build process detected the
46
+ # absence of POSIX named semaphores. In that case, no named semaphores were
47
+ # ever opened, so no cleanup would be necessary.
48
+ if hasattr(_multiprocessing, 'sem_unlink'):
49
+ _CLEANUP_FUNCS.update({
50
+ 'semaphore': _multiprocessing.sem_unlink,
51
+ })
52
+ _CLEANUP_FUNCS.update({
53
+ 'shared_memory': _posixshmem.shm_unlink,
54
+ })
55
+
56
+
57
+ class ResourceTracker(object):
58
+
59
+ def __init__(self):
60
+ self._lock = threading.Lock()
61
+ self._fd = None
62
+ self._pid = None
63
+
64
+ def _stop(self):
65
+ with self._lock:
66
+ if self._fd is None:
67
+ # not running
68
+ return
69
+
70
+ # closing the "alive" file descriptor stops main()
71
+ os.close(self._fd)
72
+ self._fd = None
73
+
74
+ os.waitpid(self._pid, 0)
75
+ self._pid = None
76
+
77
+ def getfd(self):
78
+ self.ensure_running()
79
+ return self._fd
80
+
81
+ def ensure_running(self):
82
+ '''Make sure that resource tracker process is running.
83
+
84
+ This can be run from any process. Usually a child process will use
85
+ the resource created by its parent.'''
86
+ with self._lock:
87
+ if self._fd is not None:
88
+ # resource tracker was launched before, is it still running?
89
+ if self._check_alive():
90
+ # => still alive
91
+ return
92
+ # => dead, launch it again
93
+ os.close(self._fd)
94
+
95
+ # Clean-up to avoid dangling processes.
96
+ try:
97
+ # _pid can be None if this process is a child from another
98
+ # python process, which has started the resource_tracker.
99
+ if self._pid is not None:
100
+ os.waitpid(self._pid, 0)
101
+ except ChildProcessError:
102
+ # The resource_tracker has already been terminated.
103
+ pass
104
+ self._fd = None
105
+ self._pid = None
106
+
107
+ warnings.warn('resource_tracker: process died unexpectedly, '
108
+ 'relaunching. Some resources might leak.')
109
+
110
+ fds_to_pass = []
111
+ try:
112
+ fds_to_pass.append(sys.stderr.fileno())
113
+ except Exception:
114
+ pass
115
+ cmd = 'from multiprocess.resource_tracker import main;main(%d)'
116
+ r, w = os.pipe()
117
+ try:
118
+ fds_to_pass.append(r)
119
+ # process will outlive us, so no need to wait on pid
120
+ exe = spawn.get_executable()
121
+ args = [exe] + util._args_from_interpreter_flags()
122
+ args += ['-c', cmd % r]
123
+ # bpo-33613: Register a signal mask that will block the signals.
124
+ # This signal mask will be inherited by the child that is going
125
+ # to be spawned and will protect the child from a race condition
126
+ # that can make the child die before it registers signal handlers
127
+ # for SIGINT and SIGTERM. The mask is unregistered after spawning
128
+ # the child.
129
+ try:
130
+ if _HAVE_SIGMASK:
131
+ signal.pthread_sigmask(signal.SIG_BLOCK, _IGNORED_SIGNALS)
132
+ pid = util.spawnv_passfds(exe, args, fds_to_pass)
133
+ finally:
134
+ if _HAVE_SIGMASK:
135
+ signal.pthread_sigmask(signal.SIG_UNBLOCK, _IGNORED_SIGNALS)
136
+ except:
137
+ os.close(w)
138
+ raise
139
+ else:
140
+ self._fd = w
141
+ self._pid = pid
142
+ finally:
143
+ os.close(r)
144
+
145
+ def _check_alive(self):
146
+ '''Check that the pipe has not been closed by sending a probe.'''
147
+ try:
148
+ # We cannot use send here as it calls ensure_running, creating
149
+ # a cycle.
150
+ os.write(self._fd, b'PROBE:0:noop\n')
151
+ except OSError:
152
+ return False
153
+ else:
154
+ return True
155
+
156
+ def register(self, name, rtype):
157
+ '''Register name of resource with resource tracker.'''
158
+ self._send('REGISTER', name, rtype)
159
+
160
+ def unregister(self, name, rtype):
161
+ '''Unregister name of resource with resource tracker.'''
162
+ self._send('UNREGISTER', name, rtype)
163
+
164
+ def _send(self, cmd, name, rtype):
165
+ self.ensure_running()
166
+ msg = '{0}:{1}:{2}\n'.format(cmd, name, rtype).encode('ascii')
167
+ if len(msg) > 512:
168
+ # posix guarantees that writes to a pipe of less than PIPE_BUF
169
+ # bytes are atomic, and that PIPE_BUF >= 512
170
+ raise ValueError('msg too long')
171
+ nbytes = os.write(self._fd, msg)
172
+ assert nbytes == len(msg), "nbytes {0:n} but len(msg) {1:n}".format(
173
+ nbytes, len(msg))
174
+
175
+
176
+ _resource_tracker = ResourceTracker()
177
+ ensure_running = _resource_tracker.ensure_running
178
+ register = _resource_tracker.register
179
+ unregister = _resource_tracker.unregister
180
+ getfd = _resource_tracker.getfd
181
+
182
+ def main(fd):
183
+ '''Run resource tracker.'''
184
+ # protect the process from ^C and "killall python" etc
185
+ signal.signal(signal.SIGINT, signal.SIG_IGN)
186
+ signal.signal(signal.SIGTERM, signal.SIG_IGN)
187
+ if _HAVE_SIGMASK:
188
+ signal.pthread_sigmask(signal.SIG_UNBLOCK, _IGNORED_SIGNALS)
189
+
190
+ for f in (sys.stdin, sys.stdout):
191
+ try:
192
+ f.close()
193
+ except Exception:
194
+ pass
195
+
196
+ cache = {rtype: set() for rtype in _CLEANUP_FUNCS.keys()}
197
+ try:
198
+ # keep track of registered/unregistered resources
199
+ with open(fd, 'rb') as f:
200
+ for line in f:
201
+ try:
202
+ cmd, name, rtype = line.strip().decode('ascii').split(':')
203
+ cleanup_func = _CLEANUP_FUNCS.get(rtype, None)
204
+ if cleanup_func is None:
205
+ raise ValueError(
206
+ f'Cannot register {name} for automatic cleanup: '
207
+ f'unknown resource type {rtype}')
208
+
209
+ if cmd == 'REGISTER':
210
+ cache[rtype].add(name)
211
+ elif cmd == 'UNREGISTER':
212
+ cache[rtype].remove(name)
213
+ elif cmd == 'PROBE':
214
+ pass
215
+ else:
216
+ raise RuntimeError('unrecognized command %r' % cmd)
217
+ except Exception:
218
+ try:
219
+ sys.excepthook(*sys.exc_info())
220
+ except:
221
+ pass
222
+ finally:
223
+ # all processes have terminated; cleanup any remaining resources
224
+ for rtype, rtype_cache in cache.items():
225
+ if rtype_cache:
226
+ try:
227
+ warnings.warn('resource_tracker: There appear to be %d '
228
+ 'leaked %s objects to clean up at shutdown' %
229
+ (len(rtype_cache), rtype))
230
+ except Exception:
231
+ pass
232
+ for name in rtype_cache:
233
+ # For some reason the process which created and registered this
234
+ # resource has failed to unregister it. Presumably it has
235
+ # died. We therefore unlink it.
236
+ try:
237
+ try:
238
+ _CLEANUP_FUNCS[rtype](name)
239
+ except Exception as e:
240
+ warnings.warn('resource_tracker: %r: %s' % (name, e))
241
+ finally:
242
+ pass
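
The tracker's wire format is one ASCII record per line, 'CMD:name:rtype', which is exactly what main() above parses. The sketch below replays that parsing loop on canned records; it is illustrative only — real clients go through register()/unregister(), which write to the tracker's pipe:

# Hypothetical records; '/psm_ab12cd' is a made-up shared memory name.
records = [b'REGISTER:/psm_ab12cd:shared_memory\n',
           b'PROBE:0:noop\n',
           b'UNREGISTER:/psm_ab12cd:shared_memory\n']
cache = {'shared_memory': set(), 'semaphore': set(), 'noop': set()}
for line in records:
    cmd, name, rtype = line.strip().decode('ascii').split(':')
    if cmd == 'REGISTER':
        cache[rtype].add(name)
    elif cmd == 'UNREGISTER':
        cache[rtype].discard(name)
print(cache['shared_memory'])   # -> set(): nothing left to clean up at exit
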
venv/lib/python3.10/site-packages/multiprocess/shared_memory.py ADDED
@@ -0,0 +1,534 @@
1
+ """Provides shared memory for direct access across processes.
2
+
3
+ The API of this package is currently provisional. Refer to the
4
+ documentation for details.
5
+ """
6
+
7
+
8
+ __all__ = [ 'SharedMemory', 'ShareableList' ]
9
+
10
+
11
+ from functools import partial
12
+ import mmap
13
+ import os
14
+ import errno
15
+ import struct
16
+ import secrets
17
+ import types
18
+
19
+ if os.name == "nt":
20
+ import _winapi
21
+ _USE_POSIX = False
22
+ else:
23
+ import _posixshmem
24
+ _USE_POSIX = True
25
+
26
+ from . import resource_tracker
27
+
28
+ _O_CREX = os.O_CREAT | os.O_EXCL
29
+
30
+ # FreeBSD (and perhaps other BSDs) limit names to 14 characters.
31
+ _SHM_SAFE_NAME_LENGTH = 14
32
+
33
+ # Shared memory block name prefix
34
+ if _USE_POSIX:
35
+ _SHM_NAME_PREFIX = '/psm_'
36
+ else:
37
+ _SHM_NAME_PREFIX = 'wnsm_'
38
+
39
+
40
+ def _make_filename():
41
+ "Create a random filename for the shared memory object."
42
+ # number of random bytes to use for name
43
+ nbytes = (_SHM_SAFE_NAME_LENGTH - len(_SHM_NAME_PREFIX)) // 2
44
+ assert nbytes >= 2, '_SHM_NAME_PREFIX too long'
45
+ name = _SHM_NAME_PREFIX + secrets.token_hex(nbytes)
46
+ assert len(name) <= _SHM_SAFE_NAME_LENGTH
47
+ return name
48
+
49
+
50
+ class SharedMemory:
51
+ """Creates a new shared memory block or attaches to an existing
52
+ shared memory block.
53
+
54
+ Every shared memory block is assigned a unique name. This enables
55
+ one process to create a shared memory block with a particular name
56
+ so that a different process can attach to that same shared memory
57
+ block using that same name.
58
+
59
+ As a resource for sharing data across processes, shared memory blocks
60
+ may outlive the original process that created them. When one process
61
+ no longer needs access to a shared memory block that might still be
62
+ needed by other processes, the close() method should be called.
63
+ When a shared memory block is no longer needed by any process, the
64
+ unlink() method should be called to ensure proper cleanup."""
65
+
66
+ # Defaults; enables close() and unlink() to run without errors.
67
+ _name = None
68
+ _fd = -1
69
+ _mmap = None
70
+ _buf = None
71
+ _flags = os.O_RDWR
72
+ _mode = 0o600
73
+ _prepend_leading_slash = True if _USE_POSIX else False
74
+
75
+ def __init__(self, name=None, create=False, size=0):
76
+ if not size >= 0:
77
+ raise ValueError("'size' must be a positive integer")
78
+ if create:
79
+ self._flags = _O_CREX | os.O_RDWR
80
+ if size == 0:
81
+ raise ValueError("'size' must be a positive number different from zero")
82
+ if name is None and not self._flags & os.O_EXCL:
83
+ raise ValueError("'name' can only be None if create=True")
84
+
85
+ if _USE_POSIX:
86
+
87
+ # POSIX Shared Memory
88
+
89
+ if name is None:
90
+ while True:
91
+ name = _make_filename()
92
+ try:
93
+ self._fd = _posixshmem.shm_open(
94
+ name,
95
+ self._flags,
96
+ mode=self._mode
97
+ )
98
+ except FileExistsError:
99
+ continue
100
+ self._name = name
101
+ break
102
+ else:
103
+ name = "/" + name if self._prepend_leading_slash else name
104
+ self._fd = _posixshmem.shm_open(
105
+ name,
106
+ self._flags,
107
+ mode=self._mode
108
+ )
109
+ self._name = name
110
+ try:
111
+ if create and size:
112
+ os.ftruncate(self._fd, size)
113
+ stats = os.fstat(self._fd)
114
+ size = stats.st_size
115
+ self._mmap = mmap.mmap(self._fd, size)
116
+ except OSError:
117
+ self.unlink()
118
+ raise
119
+
120
+ resource_tracker.register(self._name, "shared_memory")
121
+
122
+ else:
123
+
124
+ # Windows Named Shared Memory
125
+
126
+ if create:
127
+ while True:
128
+ temp_name = _make_filename() if name is None else name
129
+ # Create and reserve shared memory block with this name
130
+ # until it can be attached to by mmap.
131
+ h_map = _winapi.CreateFileMapping(
132
+ _winapi.INVALID_HANDLE_VALUE,
133
+ _winapi.NULL,
134
+ _winapi.PAGE_READWRITE,
135
+ (size >> 32) & 0xFFFFFFFF,
136
+ size & 0xFFFFFFFF,
137
+ temp_name
138
+ )
139
+ try:
140
+ last_error_code = _winapi.GetLastError()
141
+ if last_error_code == _winapi.ERROR_ALREADY_EXISTS:
142
+ if name is not None:
143
+ raise FileExistsError(
144
+ errno.EEXIST,
145
+ os.strerror(errno.EEXIST),
146
+ name,
147
+ _winapi.ERROR_ALREADY_EXISTS
148
+ )
149
+ else:
150
+ continue
151
+ self._mmap = mmap.mmap(-1, size, tagname=temp_name)
152
+ finally:
153
+ _winapi.CloseHandle(h_map)
154
+ self._name = temp_name
155
+ break
156
+
157
+ else:
158
+ self._name = name
159
+ # Dynamically determine the existing named shared memory
160
+ # block's size which is likely a multiple of mmap.PAGESIZE.
161
+ h_map = _winapi.OpenFileMapping(
162
+ _winapi.FILE_MAP_READ,
163
+ False,
164
+ name
165
+ )
166
+ try:
167
+ p_buf = _winapi.MapViewOfFile(
168
+ h_map,
169
+ _winapi.FILE_MAP_READ,
170
+ 0,
171
+ 0,
172
+ 0
173
+ )
174
+ finally:
175
+ _winapi.CloseHandle(h_map)
176
+ try:
177
+ size = _winapi.VirtualQuerySize(p_buf)
178
+ finally:
179
+ _winapi.UnmapViewOfFile(p_buf)
180
+ self._mmap = mmap.mmap(-1, size, tagname=name)
181
+
182
+ self._size = size
183
+ self._buf = memoryview(self._mmap)
184
+
185
+ def __del__(self):
186
+ try:
187
+ self.close()
188
+ except OSError:
189
+ pass
190
+
191
+ def __reduce__(self):
192
+ return (
193
+ self.__class__,
194
+ (
195
+ self.name,
196
+ False,
197
+ self.size,
198
+ ),
199
+ )
200
+
201
+ def __repr__(self):
202
+ return f'{self.__class__.__name__}({self.name!r}, size={self.size})'
203
+
204
+ @property
205
+ def buf(self):
206
+ "A memoryview of contents of the shared memory block."
207
+ return self._buf
208
+
209
+ @property
210
+ def name(self):
211
+ "Unique name that identifies the shared memory block."
212
+ reported_name = self._name
213
+ if _USE_POSIX and self._prepend_leading_slash:
214
+ if self._name.startswith("/"):
215
+ reported_name = self._name[1:]
216
+ return reported_name
217
+
218
+ @property
219
+ def size(self):
220
+ "Size in bytes."
221
+ return self._size
222
+
223
+ def close(self):
224
+ """Closes access to the shared memory from this instance but does
225
+ not destroy the shared memory block."""
226
+ if self._buf is not None:
227
+ self._buf.release()
228
+ self._buf = None
229
+ if self._mmap is not None:
230
+ self._mmap.close()
231
+ self._mmap = None
232
+ if _USE_POSIX and self._fd >= 0:
233
+ os.close(self._fd)
234
+ self._fd = -1
235
+
236
+ def unlink(self):
237
+ """Requests that the underlying shared memory block be destroyed.
238
+
239
+ In order to ensure proper cleanup of resources, unlink should be
240
+ called once (and only once) across all processes which have access
241
+ to the shared memory block."""
242
+ if _USE_POSIX and self._name:
243
+ _posixshmem.shm_unlink(self._name)
244
+ resource_tracker.unregister(self._name, "shared_memory")
245
+
246
+
247
+ _encoding = "utf8"
248
+
249
+ class ShareableList:
250
+ """Pattern for a mutable list-like object shareable via a shared
251
+ memory block. It differs from the built-in list type in that these
252
+ lists can not change their overall length (i.e. no append, insert,
253
+ etc.)
254
+
255
+ Because values are packed into a memoryview as bytes, the struct
256
+ packing format for any storable value must require no more than 8
257
+ characters to describe its format."""
258
+
259
+ # The shared memory area is organized as follows:
260
+ # - 8 bytes: number of items (N) as a 64-bit integer
261
+ # - (N + 1) * 8 bytes: offsets of each element from the start of the
262
+ # data area
263
+ # - K bytes: the data area storing item values (with encoding and size
264
+ # depending on their respective types)
265
+ # - N * 8 bytes: `struct` format string for each element
266
+ # - N bytes: index into _back_transforms_mapping for each element
267
+ # (for reconstructing the corresponding Python value)
268
+ _types_mapping = {
269
+ int: "q",
270
+ float: "d",
271
+ bool: "xxxxxxx?",
272
+ str: "%ds",
273
+ bytes: "%ds",
274
+ None.__class__: "xxxxxx?x",
275
+ }
276
+ _alignment = 8
277
+ _back_transforms_mapping = {
278
+ 0: lambda value: value, # int, float, bool
279
+ 1: lambda value: value.rstrip(b'\x00').decode(_encoding), # str
280
+ 2: lambda value: value.rstrip(b'\x00'), # bytes
281
+ 3: lambda _value: None, # None
282
+ }
283
+
284
+ @staticmethod
285
+ def _extract_recreation_code(value):
286
+ """Used in concert with _back_transforms_mapping to convert values
287
+ into the appropriate Python objects when retrieving them from
288
+ the list as well as when storing them."""
289
+ if not isinstance(value, (str, bytes, None.__class__)):
290
+ return 0
291
+ elif isinstance(value, str):
292
+ return 1
293
+ elif isinstance(value, bytes):
294
+ return 2
295
+ else:
296
+ return 3 # NoneType
297
+
298
+ def __init__(self, sequence=None, *, name=None):
299
+ if name is None or sequence is not None:
300
+ sequence = sequence or ()
301
+ _formats = [
302
+ self._types_mapping[type(item)]
303
+ if not isinstance(item, (str, bytes))
304
+ else self._types_mapping[type(item)] % (
305
+ self._alignment * (len(item) // self._alignment + 1),
306
+ )
307
+ for item in sequence
308
+ ]
309
+ self._list_len = len(_formats)
310
+ assert sum(len(fmt) <= 8 for fmt in _formats) == self._list_len
311
+ offset = 0
312
+ # The offsets of each list element into the shared memory's
313
+ # data area (0 meaning the start of the data area, not the start
314
+ # of the shared memory area).
315
+ self._allocated_offsets = [0]
316
+ for fmt in _formats:
317
+ offset += self._alignment if fmt[-1] != "s" else int(fmt[:-1])
318
+ self._allocated_offsets.append(offset)
319
+ _recreation_codes = [
320
+ self._extract_recreation_code(item) for item in sequence
321
+ ]
322
+ requested_size = struct.calcsize(
323
+ "q" + self._format_size_metainfo +
324
+ "".join(_formats) +
325
+ self._format_packing_metainfo +
326
+ self._format_back_transform_codes
327
+ )
328
+
329
+ self.shm = SharedMemory(name, create=True, size=requested_size)
330
+ else:
331
+ self.shm = SharedMemory(name)
332
+
333
+ if sequence is not None:
334
+ _enc = _encoding
335
+ struct.pack_into(
336
+ "q" + self._format_size_metainfo,
337
+ self.shm.buf,
338
+ 0,
339
+ self._list_len,
340
+ *(self._allocated_offsets)
341
+ )
342
+ struct.pack_into(
343
+ "".join(_formats),
344
+ self.shm.buf,
345
+ self._offset_data_start,
346
+ *(v.encode(_enc) if isinstance(v, str) else v for v in sequence)
347
+ )
348
+ struct.pack_into(
349
+ self._format_packing_metainfo,
350
+ self.shm.buf,
351
+ self._offset_packing_formats,
352
+ *(v.encode(_enc) for v in _formats)
353
+ )
354
+ struct.pack_into(
355
+ self._format_back_transform_codes,
356
+ self.shm.buf,
357
+ self._offset_back_transform_codes,
358
+ *(_recreation_codes)
359
+ )
360
+
361
+ else:
362
+ self._list_len = len(self) # Obtains size from offset 0 in buffer.
363
+ self._allocated_offsets = list(
364
+ struct.unpack_from(
365
+ self._format_size_metainfo,
366
+ self.shm.buf,
367
+ 1 * 8
368
+ )
369
+ )
370
+
371
+ def _get_packing_format(self, position):
372
+ "Gets the packing format for a single value stored in the list."
373
+ position = position if position >= 0 else position + self._list_len
374
+ if (position >= self._list_len) or (self._list_len < 0):
375
+ raise IndexError("Requested position out of range.")
376
+
377
+ v = struct.unpack_from(
378
+ "8s",
379
+ self.shm.buf,
380
+ self._offset_packing_formats + position * 8
381
+ )[0]
382
+ fmt = v.rstrip(b'\x00')
383
+ fmt_as_str = fmt.decode(_encoding)
384
+
385
+ return fmt_as_str
386
+
387
+ def _get_back_transform(self, position):
388
+ "Gets the back transformation function for a single value."
389
+
390
+ if (position >= self._list_len) or (self._list_len < 0):
391
+ raise IndexError("Requested position out of range.")
392
+
393
+ transform_code = struct.unpack_from(
394
+ "b",
395
+ self.shm.buf,
396
+ self._offset_back_transform_codes + position
397
+ )[0]
398
+ transform_function = self._back_transforms_mapping[transform_code]
399
+
400
+ return transform_function
401
+
402
+ def _set_packing_format_and_transform(self, position, fmt_as_str, value):
403
+ """Sets the packing format and back transformation code for a
404
+ single value in the list at the specified position."""
405
+
406
+ if (position >= self._list_len) or (self._list_len < 0):
407
+ raise IndexError("Requested position out of range.")
408
+
409
+ struct.pack_into(
410
+ "8s",
411
+ self.shm.buf,
412
+ self._offset_packing_formats + position * 8,
413
+ fmt_as_str.encode(_encoding)
414
+ )
415
+
416
+ transform_code = self._extract_recreation_code(value)
417
+ struct.pack_into(
418
+ "b",
419
+ self.shm.buf,
420
+ self._offset_back_transform_codes + position,
421
+ transform_code
422
+ )
423
+
424
+ def __getitem__(self, position):
425
+ position = position if position >= 0 else position + self._list_len
426
+ try:
427
+ offset = self._offset_data_start + self._allocated_offsets[position]
428
+ (v,) = struct.unpack_from(
429
+ self._get_packing_format(position),
430
+ self.shm.buf,
431
+ offset
432
+ )
433
+ except IndexError:
434
+ raise IndexError("index out of range")
435
+
436
+ back_transform = self._get_back_transform(position)
437
+ v = back_transform(v)
438
+
439
+ return v
440
+
441
+ def __setitem__(self, position, value):
442
+ position = position if position >= 0 else position + self._list_len
443
+ try:
444
+ item_offset = self._allocated_offsets[position]
445
+ offset = self._offset_data_start + item_offset
446
+ current_format = self._get_packing_format(position)
447
+ except IndexError:
448
+ raise IndexError("assignment index out of range")
449
+
450
+ if not isinstance(value, (str, bytes)):
451
+ new_format = self._types_mapping[type(value)]
452
+ encoded_value = value
453
+ else:
454
+ allocated_length = self._allocated_offsets[position + 1] - item_offset
455
+
456
+ encoded_value = (value.encode(_encoding)
457
+ if isinstance(value, str) else value)
458
+ if len(encoded_value) > allocated_length:
459
+ raise ValueError("bytes/str item exceeds available storage")
460
+ if current_format[-1] == "s":
461
+ new_format = current_format
462
+ else:
463
+ new_format = self._types_mapping[str] % (
464
+ allocated_length,
465
+ )
466
+
467
+ self._set_packing_format_and_transform(
468
+ position,
469
+ new_format,
470
+ value
471
+ )
472
+ struct.pack_into(new_format, self.shm.buf, offset, encoded_value)
473
+
474
+ def __reduce__(self):
475
+ return partial(self.__class__, name=self.shm.name), ()
476
+
477
+ def __len__(self):
478
+ return struct.unpack_from("q", self.shm.buf, 0)[0]
479
+
480
+ def __repr__(self):
481
+ return f'{self.__class__.__name__}({list(self)}, name={self.shm.name!r})'
482
+
483
+ @property
484
+ def format(self):
485
+ "The struct packing format used by all currently stored items."
486
+ return "".join(
487
+ self._get_packing_format(i) for i in range(self._list_len)
488
+ )
489
+
490
+ @property
491
+ def _format_size_metainfo(self):
492
+ "The struct packing format used for the items' storage offsets."
493
+ return "q" * (self._list_len + 1)
494
+
495
+ @property
496
+ def _format_packing_metainfo(self):
497
+ "The struct packing format used for the items' packing formats."
498
+ return "8s" * self._list_len
499
+
500
+ @property
501
+ def _format_back_transform_codes(self):
502
+ "The struct packing format used for the items' back transforms."
503
+ return "b" * self._list_len
504
+
505
+ @property
506
+ def _offset_data_start(self):
507
+ # - 8 bytes for the list length
508
+ # - (N + 1) * 8 bytes for the element offsets
509
+ return (self._list_len + 2) * 8
510
+
511
+ @property
512
+ def _offset_packing_formats(self):
513
+ return self._offset_data_start + self._allocated_offsets[-1]
514
+
515
+ @property
516
+ def _offset_back_transform_codes(self):
517
+ return self._offset_packing_formats + self._list_len * 8
518
+
519
+ def count(self, value):
520
+ "L.count(value) -> integer -- return number of occurrences of value."
521
+
522
+ return sum(value == entry for entry in self)
523
+
524
+ def index(self, value):
525
+ """L.index(value) -> integer -- return first index of value.
526
+ Raises ValueError if the value is not present."""
527
+
528
+ for position, entry in enumerate(self):
529
+ if value == entry:
530
+ return position
531
+ else:
532
+ raise ValueError(f"{value!r} not in this container")
533
+
534
+ __class_getitem__ = classmethod(types.GenericAlias)
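
A short usage sketch for the two classes above (not part of the uploaded file; it assumes the multiprocess package — the standard-library module is multiprocessing.shared_memory):

from multiprocess.shared_memory import SharedMemory, ShareableList

shm = SharedMemory(create=True, size=16)
shm.buf[:5] = b'hello'                 # write through the memoryview
peer = SharedMemory(name=shm.name)     # attach again, as another process would
print(bytes(peer.buf[:5]))             # -> b'hello'
peer.close()

sl = ShareableList([1, 2.5, 'abc', None])
sl[0] = 42                             # fixed length, but items are mutable
print(sl[0], sl[2])                    # -> 42 abc
sl.shm.close()
sl.shm.unlink()                        # destroy the backing block
shm.close()
shm.unlink()
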
venv/lib/python3.10/site-packages/multiprocess/sharedctypes.py ADDED
@@ -0,0 +1,240 @@
1
+ #
2
+ # Module which supports allocation of ctypes objects from shared memory
3
+ #
4
+ # multiprocessing/sharedctypes.py
5
+ #
6
+ # Copyright (c) 2006-2008, R Oudkerk
7
+ # Licensed to PSF under a Contributor Agreement.
8
+ #
9
+
10
+ import ctypes
11
+ import weakref
12
+
13
+ from . import heap
14
+ from . import get_context
15
+
16
+ from .context import reduction, assert_spawning
17
+ _ForkingPickler = reduction.ForkingPickler
18
+
19
+ __all__ = ['RawValue', 'RawArray', 'Value', 'Array', 'copy', 'synchronized']
20
+
21
+ #
22
+ #
23
+ #
24
+
25
+ typecode_to_type = {
26
+ 'c': ctypes.c_char, 'u': ctypes.c_wchar,
27
+ 'b': ctypes.c_byte, 'B': ctypes.c_ubyte,
28
+ 'h': ctypes.c_short, 'H': ctypes.c_ushort,
29
+ 'i': ctypes.c_int, 'I': ctypes.c_uint,
30
+ 'l': ctypes.c_long, 'L': ctypes.c_ulong,
31
+ 'q': ctypes.c_longlong, 'Q': ctypes.c_ulonglong,
32
+ 'f': ctypes.c_float, 'd': ctypes.c_double
33
+ }
34
+
35
+ #
36
+ #
37
+ #
38
+
39
+ def _new_value(type_):
40
+ size = ctypes.sizeof(type_)
41
+ wrapper = heap.BufferWrapper(size)
42
+ return rebuild_ctype(type_, wrapper, None)
43
+
44
+ def RawValue(typecode_or_type, *args):
45
+ '''
46
+ Returns a ctypes object allocated from shared memory
47
+ '''
48
+ type_ = typecode_to_type.get(typecode_or_type, typecode_or_type)
49
+ obj = _new_value(type_)
50
+ ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj))
51
+ obj.__init__(*args)
52
+ return obj
53
+
54
+ def RawArray(typecode_or_type, size_or_initializer):
55
+ '''
56
+ Returns a ctypes array allocated from shared memory
57
+ '''
58
+ type_ = typecode_to_type.get(typecode_or_type, typecode_or_type)
59
+ if isinstance(size_or_initializer, int):
60
+ type_ = type_ * size_or_initializer
61
+ obj = _new_value(type_)
62
+ ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj))
63
+ return obj
64
+ else:
65
+ type_ = type_ * len(size_or_initializer)
66
+ result = _new_value(type_)
67
+ result.__init__(*size_or_initializer)
68
+ return result
69
+
70
+ def Value(typecode_or_type, *args, lock=True, ctx=None):
71
+ '''
72
+ Return a synchronization wrapper for a Value
73
+ '''
74
+ obj = RawValue(typecode_or_type, *args)
75
+ if lock is False:
76
+ return obj
77
+ if lock in (True, None):
78
+ ctx = ctx or get_context()
79
+ lock = ctx.RLock()
80
+ if not hasattr(lock, 'acquire'):
81
+ raise AttributeError("%r has no method 'acquire'" % lock)
82
+ return synchronized(obj, lock, ctx=ctx)
83
+
84
+ def Array(typecode_or_type, size_or_initializer, *, lock=True, ctx=None):
85
+ '''
86
+ Return a synchronization wrapper for a RawArray
87
+ '''
88
+ obj = RawArray(typecode_or_type, size_or_initializer)
89
+ if lock is False:
90
+ return obj
91
+ if lock in (True, None):
92
+ ctx = ctx or get_context()
93
+ lock = ctx.RLock()
94
+ if not hasattr(lock, 'acquire'):
95
+ raise AttributeError("%r has no method 'acquire'" % lock)
96
+ return synchronized(obj, lock, ctx=ctx)
97
+
98
+ def copy(obj):
99
+ new_obj = _new_value(type(obj))
100
+ ctypes.pointer(new_obj)[0] = obj
101
+ return new_obj
102
+
103
+ def synchronized(obj, lock=None, ctx=None):
104
+ assert not isinstance(obj, SynchronizedBase), 'object already synchronized'
105
+ ctx = ctx or get_context()
106
+
107
+ if isinstance(obj, ctypes._SimpleCData):
108
+ return Synchronized(obj, lock, ctx)
109
+ elif isinstance(obj, ctypes.Array):
110
+ if obj._type_ is ctypes.c_char:
111
+ return SynchronizedString(obj, lock, ctx)
112
+ return SynchronizedArray(obj, lock, ctx)
113
+ else:
114
+ cls = type(obj)
115
+ try:
116
+ scls = class_cache[cls]
117
+ except KeyError:
118
+ names = [field[0] for field in cls._fields_]
119
+ d = {name: make_property(name) for name in names}
120
+ classname = 'Synchronized' + cls.__name__
121
+ scls = class_cache[cls] = type(classname, (SynchronizedBase,), d)
122
+ return scls(obj, lock, ctx)
123
+
124
+ #
125
+ # Functions for pickling/unpickling
126
+ #
127
+
128
+ def reduce_ctype(obj):
129
+ assert_spawning(obj)
130
+ if isinstance(obj, ctypes.Array):
131
+ return rebuild_ctype, (obj._type_, obj._wrapper, obj._length_)
132
+ else:
133
+ return rebuild_ctype, (type(obj), obj._wrapper, None)
134
+
135
+ def rebuild_ctype(type_, wrapper, length):
136
+ if length is not None:
137
+ type_ = type_ * length
138
+ _ForkingPickler.register(type_, reduce_ctype)
139
+ buf = wrapper.create_memoryview()
140
+ obj = type_.from_buffer(buf)
141
+ obj._wrapper = wrapper
142
+ return obj
143
+
144
+ #
145
+ # Function to create properties
146
+ #
147
+
148
+ def make_property(name):
149
+ try:
150
+ return prop_cache[name]
151
+ except KeyError:
152
+ d = {}
153
+ exec(template % ((name,)*7), d)
154
+ prop_cache[name] = d[name]
155
+ return d[name]
156
+
157
+ template = '''
158
+ def get%s(self):
159
+ self.acquire()
160
+ try:
161
+ return self._obj.%s
162
+ finally:
163
+ self.release()
164
+ def set%s(self, value):
165
+ self.acquire()
166
+ try:
167
+ self._obj.%s = value
168
+ finally:
169
+ self.release()
170
+ %s = property(get%s, set%s)
171
+ '''
172
+
173
+ prop_cache = {}
174
+ class_cache = weakref.WeakKeyDictionary()
175
+
176
+ #
177
+ # Synchronized wrappers
178
+ #
179
+
180
+ class SynchronizedBase(object):
181
+
182
+ def __init__(self, obj, lock=None, ctx=None):
183
+ self._obj = obj
184
+ if lock:
185
+ self._lock = lock
186
+ else:
187
+ ctx = ctx or get_context(force=True)
188
+ self._lock = ctx.RLock()
189
+ self.acquire = self._lock.acquire
190
+ self.release = self._lock.release
191
+
192
+ def __enter__(self):
193
+ return self._lock.__enter__()
194
+
195
+ def __exit__(self, *args):
196
+ return self._lock.__exit__(*args)
197
+
198
+ def __reduce__(self):
199
+ assert_spawning(self)
200
+ return synchronized, (self._obj, self._lock)
201
+
202
+ def get_obj(self):
203
+ return self._obj
204
+
205
+ def get_lock(self):
206
+ return self._lock
207
+
208
+ def __repr__(self):
209
+ return '<%s wrapper for %s>' % (type(self).__name__, self._obj)
210
+
211
+
212
+ class Synchronized(SynchronizedBase):
213
+ value = make_property('value')
214
+
215
+
216
+ class SynchronizedArray(SynchronizedBase):
217
+
218
+ def __len__(self):
219
+ return len(self._obj)
220
+
221
+ def __getitem__(self, i):
222
+ with self:
223
+ return self._obj[i]
224
+
225
+ def __setitem__(self, i, value):
226
+ with self:
227
+ self._obj[i] = value
228
+
229
+ def __getslice__(self, start, stop):
230
+ with self:
231
+ return self._obj[start:stop]
232
+
233
+ def __setslice__(self, start, stop, values):
234
+ with self:
235
+ self._obj[start:stop] = values
236
+
237
+
238
+ class SynchronizedString(SynchronizedArray):
239
+ value = make_property('value')
240
+ raw = make_property('raw')
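
A minimal sketch of the wrappers defined above (not part of the uploaded file; it assumes the multiprocess package — the standard-library module is multiprocessing.sharedctypes):

from multiprocess.sharedctypes import Value, Array, RawArray

counter = Value('i', 0)             # c_int guarded by an RLock
data = Array('d', [0.0, 1.5, 3.0])  # synchronized array of doubles
raw = RawArray('b', 8)              # no lock, zero-initialized bytes

with counter.get_lock():            # explicit locking for compound updates
    counter.value += 1
data[1] = 2.5
print(counter.value, list(data), bytes(raw))
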
venv/lib/python3.10/site-packages/multiprocess/spawn.py ADDED
@@ -0,0 +1,297 @@
1
+ #
2
+ # Code used to start processes when using the spawn or forkserver
3
+ # start methods.
4
+ #
5
+ # multiprocessing/spawn.py
6
+ #
7
+ # Copyright (c) 2006-2008, R Oudkerk
8
+ # Licensed to PSF under a Contributor Agreement.
9
+ #
10
+
11
+ import os
12
+ import sys
13
+ import runpy
14
+ import types
15
+
16
+ from . import get_start_method, set_start_method
17
+ from . import process
18
+ from .context import reduction
19
+ from . import util
20
+
21
+ __all__ = ['_main', 'freeze_support', 'set_executable', 'get_executable',
22
+ 'get_preparation_data', 'get_command_line', 'import_main_path']
23
+
24
+ #
25
+ # _python_exe is the assumed path to the python executable.
26
+ # People embedding Python want to modify it.
27
+ #
28
+
29
+ if sys.platform != 'win32':
30
+ WINEXE = False
31
+ WINSERVICE = False
32
+ else:
33
+ WINEXE = getattr(sys, 'frozen', False)
34
+ WINSERVICE = sys.executable.lower().endswith("pythonservice.exe")
35
+
36
+ if WINSERVICE:
37
+ _python_exe = os.path.join(sys.exec_prefix, 'python.exe')
38
+ else:
39
+ _python_exe = sys.executable
40
+
41
+ def set_executable(exe):
42
+ global _python_exe
43
+ _python_exe = exe
44
+
45
+ def get_executable():
46
+ return _python_exe
47
+
48
+ #
49
+ #
50
+ #
51
+
52
+ def is_forking(argv):
53
+ '''
54
+ Return whether commandline indicates we are forking
55
+ '''
56
+ if len(argv) >= 2 and argv[1] == '--multiprocessing-fork':
57
+ return True
58
+ else:
59
+ return False
60
+
61
+
62
+ def freeze_support():
63
+ '''
64
+ Run code for process object if this is not the main process
65
+ '''
66
+ if is_forking(sys.argv):
67
+ kwds = {}
68
+ for arg in sys.argv[2:]:
69
+ name, value = arg.split('=')
70
+ if value == 'None':
71
+ kwds[name] = None
72
+ else:
73
+ kwds[name] = int(value)
74
+ spawn_main(**kwds)
75
+ sys.exit()
76
+
77
+
78
+ def get_command_line(**kwds):
79
+ '''
80
+ Returns prefix of command line used for spawning a child process
81
+ '''
82
+ if getattr(sys, 'frozen', False):
83
+ return ([sys.executable, '--multiprocessing-fork'] +
84
+ ['%s=%r' % item for item in kwds.items()])
85
+ else:
86
+ prog = 'from multiprocess.spawn import spawn_main; spawn_main(%s)'
87
+ prog %= ', '.join('%s=%r' % item for item in kwds.items())
88
+ opts = util._args_from_interpreter_flags()
89
+ return [_python_exe] + opts + ['-c', prog, '--multiprocessing-fork']
90
+
91
+
92
+ def spawn_main(pipe_handle, parent_pid=None, tracker_fd=None):
93
+ '''
94
+ Run code specified by data received over pipe
95
+ '''
96
+ assert is_forking(sys.argv), "Not forking"
97
+ if sys.platform == 'win32':
98
+ import msvcrt
99
+ import _winapi
100
+
101
+ if parent_pid is not None:
102
+ source_process = _winapi.OpenProcess(
103
+ _winapi.SYNCHRONIZE | _winapi.PROCESS_DUP_HANDLE,
104
+ False, parent_pid)
105
+ else:
106
+ source_process = None
107
+ new_handle = reduction.duplicate(pipe_handle,
108
+ source_process=source_process)
109
+ fd = msvcrt.open_osfhandle(new_handle, os.O_RDONLY)
110
+ parent_sentinel = source_process
111
+ else:
112
+ from . import resource_tracker
113
+ resource_tracker._resource_tracker._fd = tracker_fd
114
+ fd = pipe_handle
115
+ parent_sentinel = os.dup(pipe_handle)
116
+ exitcode = _main(fd, parent_sentinel)
117
+ sys.exit(exitcode)
118
+
119
+
120
+ def _main(fd, parent_sentinel):
121
+ with os.fdopen(fd, 'rb', closefd=True) as from_parent:
122
+ process.current_process()._inheriting = True
123
+ try:
124
+ preparation_data = reduction.pickle.load(from_parent)
125
+ prepare(preparation_data)
126
+ self = reduction.pickle.load(from_parent)
127
+ finally:
128
+ del process.current_process()._inheriting
129
+ return self._bootstrap(parent_sentinel)
130
+
131
+
132
+ def _check_not_importing_main():
133
+ if getattr(process.current_process(), '_inheriting', False):
134
+ raise RuntimeError('''
135
+ An attempt has been made to start a new process before the
136
+ current process has finished its bootstrapping phase.
137
+
138
+ This probably means that you are not using fork to start your
139
+ child processes and you have forgotten to use the proper idiom
140
+ in the main module:
141
+
142
+ if __name__ == '__main__':
143
+ freeze_support()
144
+ ...
145
+
146
+ The "freeze_support()" line can be omitted if the program
147
+ is not going to be frozen to produce an executable.''')
148
+
149
+
150
+ def get_preparation_data(name):
151
+ '''
152
+ Return info about parent needed by child to unpickle process object
153
+ '''
154
+ _check_not_importing_main()
155
+ d = dict(
156
+ log_to_stderr=util._log_to_stderr,
157
+ authkey=process.current_process().authkey,
158
+ )
159
+
160
+ if util._logger is not None:
161
+ d['log_level'] = util._logger.getEffectiveLevel()
162
+
163
+ sys_path=sys.path.copy()
164
+ try:
165
+ i = sys_path.index('')
166
+ except ValueError:
167
+ pass
168
+ else:
169
+ sys_path[i] = process.ORIGINAL_DIR
170
+
171
+ d.update(
172
+ name=name,
173
+ sys_path=sys_path,
174
+ sys_argv=sys.argv,
175
+ orig_dir=process.ORIGINAL_DIR,
176
+ dir=os.getcwd(),
177
+ start_method=get_start_method(),
178
+ )
179
+
180
+ # Figure out whether to initialise main in the subprocess as a module
181
+ # or through direct execution (or to leave it alone entirely)
182
+ main_module = sys.modules['__main__']
183
+ main_mod_name = getattr(main_module.__spec__, "name", None)
184
+ if main_mod_name is not None:
185
+ d['init_main_from_name'] = main_mod_name
186
+ elif sys.platform != 'win32' or (not WINEXE and not WINSERVICE):
187
+ main_path = getattr(main_module, '__file__', None)
188
+ if main_path is not None:
189
+ if (not os.path.isabs(main_path) and
190
+ process.ORIGINAL_DIR is not None):
191
+ main_path = os.path.join(process.ORIGINAL_DIR, main_path)
192
+ d['init_main_from_path'] = os.path.normpath(main_path)
193
+
194
+ return d
195
+
196
+ #
197
+ # Prepare current process
198
+ #
199
+
200
+ old_main_modules = []
201
+
202
+ def prepare(data):
203
+ '''
204
+ Try to get current process ready to unpickle process object
205
+ '''
206
+ if 'name' in data:
207
+ process.current_process().name = data['name']
208
+
209
+ if 'authkey' in data:
210
+ process.current_process().authkey = data['authkey']
211
+
212
+ if 'log_to_stderr' in data and data['log_to_stderr']:
213
+ util.log_to_stderr()
214
+
215
+ if 'log_level' in data:
216
+ util.get_logger().setLevel(data['log_level'])
217
+
218
+ if 'sys_path' in data:
219
+ sys.path = data['sys_path']
220
+
221
+ if 'sys_argv' in data:
222
+ sys.argv = data['sys_argv']
223
+
224
+ if 'dir' in data:
225
+ os.chdir(data['dir'])
226
+
227
+ if 'orig_dir' in data:
228
+ process.ORIGINAL_DIR = data['orig_dir']
229
+
230
+ if 'start_method' in data:
231
+ set_start_method(data['start_method'], force=True)
232
+
233
+ if 'init_main_from_name' in data:
234
+ _fixup_main_from_name(data['init_main_from_name'])
235
+ elif 'init_main_from_path' in data:
236
+ _fixup_main_from_path(data['init_main_from_path'])
237
+
238
+ # Multiprocessing module helpers to fix up the main module in
239
+ # spawned subprocesses
240
+ def _fixup_main_from_name(mod_name):
241
+ # __main__.py files for packages, directories, zip archives, etc, run
242
+ # their "main only" code unconditionally, so we don't even try to
243
+ # populate anything in __main__, nor do we make any changes to
244
+ # __main__ attributes
245
+ current_main = sys.modules['__main__']
246
+ if mod_name == "__main__" or mod_name.endswith(".__main__"):
247
+ return
248
+
249
+ # If this process was forked, __main__ may already be populated
250
+ if getattr(current_main.__spec__, "name", None) == mod_name:
251
+ return
252
+
253
+ # Otherwise, __main__ may contain some non-main code where we need to
254
+ # support unpickling it properly. We rerun it as __mp_main__ and make
255
+ # the normal __main__ an alias to that
256
+ old_main_modules.append(current_main)
257
+ main_module = types.ModuleType("__mp_main__")
258
+ main_content = runpy.run_module(mod_name,
259
+ run_name="__mp_main__",
260
+ alter_sys=True)
261
+ main_module.__dict__.update(main_content)
262
+ sys.modules['__main__'] = sys.modules['__mp_main__'] = main_module
263
+
264
+
265
+ def _fixup_main_from_path(main_path):
266
+ # If this process was forked, __main__ may already be populated
267
+ current_main = sys.modules['__main__']
268
+
269
+ # Unfortunately, the main ipython launch script historically had no
270
+ # "if __name__ == '__main__'" guard, so we work around that
271
+ # by treating it like a __main__.py file
272
+ # See https://github.com/ipython/ipython/issues/4698
273
+ main_name = os.path.splitext(os.path.basename(main_path))[0]
274
+ if main_name == 'ipython':
275
+ return
276
+
277
+ # Otherwise, if __file__ already has the setting we expect,
278
+ # there's nothing more to do
279
+ if getattr(current_main, '__file__', None) == main_path:
280
+ return
281
+
282
+ # If the parent process has sent a path through rather than a module
283
+ # name we assume it is an executable script that may contain
284
+ # non-main code that needs to be executed
285
+ old_main_modules.append(current_main)
286
+ main_module = types.ModuleType("__mp_main__")
287
+ main_content = runpy.run_path(main_path,
288
+ run_name="__mp_main__")
289
+ main_module.__dict__.update(main_content)
290
+ sys.modules['__main__'] = sys.modules['__mp_main__'] = main_module
291
+
292
+
293
+ def import_main_path(main_path):
294
+ '''
295
+ Set sys.modules['__main__'] to module at main_path
296
+ '''
297
+ _fixup_main_from_path(main_path)
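
The spawn machinery above is why scripts that start processes must guard their entry point; a minimal sketch (not part of the uploaded file, assuming the multiprocess package):

import multiprocess as mp

def task(x):
    return x * x

if __name__ == '__main__':
    mp.freeze_support()                  # no-op unless frozen into an executable
    mp.set_start_method('spawn', force=True)
    with mp.Pool(2) as pool:
        print(pool.map(task, range(5)))  # -> [0, 1, 4, 9, 16]

Without the __name__ guard, each spawned child would re-execute the process-creating code while importing the parent's main module, which is exactly the situation _check_not_importing_main() rejects.
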
venv/lib/python3.10/site-packages/multiprocess/synchronize.py ADDED
@@ -0,0 +1,400 @@
1
+ #
2
+ # Module implementing synchronization primitives
3
+ #
4
+ # multiprocessing/synchronize.py
5
+ #
6
+ # Copyright (c) 2006-2008, R Oudkerk
7
+ # Licensed to PSF under a Contributor Agreement.
8
+ #
9
+
10
+ __all__ = [
11
+ 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event'
12
+ ]
13
+
14
+ import threading
15
+ import sys
16
+ import tempfile
17
+ try:
18
+ import _multiprocess as _multiprocessing
19
+ except ImportError:
20
+ import _multiprocessing
21
+ import time
22
+
23
+ from . import context
24
+ from . import process
25
+ from . import util
26
+
27
+ # Try to import the mp.synchronize module cleanly, if it fails
28
+ # raise ImportError for platforms lacking a working sem_open implementation.
29
+ # See issue 3770
30
+ try:
31
+ from _multiprocess import SemLock, sem_unlink
32
+ except ImportError:
33
+ try:
34
+ from _multiprocessing import SemLock, sem_unlink
35
+ except (ImportError):
36
+ raise ImportError("This platform lacks a functioning sem_open" +
37
+ " implementation, therefore, the required" +
38
+ " synchronization primitives needed will not" +
39
+ " function, see issue 3770.")
40
+
41
+ #
42
+ # Constants
43
+ #
44
+
45
+ RECURSIVE_MUTEX, SEMAPHORE = list(range(2))
46
+ SEM_VALUE_MAX = _multiprocessing.SemLock.SEM_VALUE_MAX
47
+
48
+ #
49
+ # Base class for semaphores and mutexes; wraps `_multiprocessing.SemLock`
50
+ #
51
+
52
+ class SemLock(object):
53
+
54
+ _rand = tempfile._RandomNameSequence()
55
+
56
+ def __init__(self, kind, value, maxvalue, *, ctx):
57
+ if ctx is None:
58
+ ctx = context._default_context.get_context()
59
+ name = ctx.get_start_method()
60
+ unlink_now = sys.platform == 'win32' or name == 'fork'
61
+ for i in range(100):
62
+ try:
63
+ sl = self._semlock = _multiprocessing.SemLock(
64
+ kind, value, maxvalue, self._make_name(),
65
+ unlink_now)
66
+ except FileExistsError:
67
+ pass
68
+ else:
69
+ break
70
+ else:
71
+ raise FileExistsError('cannot find name for semaphore')
72
+
73
+ util.debug('created semlock with handle %s' % sl.handle)
74
+ self._make_methods()
75
+
76
+ if sys.platform != 'win32':
77
+ def _after_fork(obj):
78
+ obj._semlock._after_fork()
79
+ util.register_after_fork(self, _after_fork)
80
+
81
+ if self._semlock.name is not None:
82
+ # We only get here if we are on Unix with forking
83
+ # disabled. When the object is garbage collected or the
84
+ # process shuts down we unlink the semaphore name
85
+ from .resource_tracker import register
86
+ register(self._semlock.name, "semaphore")
87
+ util.Finalize(self, SemLock._cleanup, (self._semlock.name,),
88
+ exitpriority=0)
89
+
90
+ @staticmethod
91
+ def _cleanup(name):
92
+ from .resource_tracker import unregister
93
+ sem_unlink(name)
94
+ unregister(name, "semaphore")
95
+
96
+ def _make_methods(self):
97
+ self.acquire = self._semlock.acquire
98
+ self.release = self._semlock.release
99
+
100
+ def __enter__(self):
101
+ return self._semlock.__enter__()
102
+
103
+ def __exit__(self, *args):
104
+ return self._semlock.__exit__(*args)
105
+
106
+ def __getstate__(self):
107
+ context.assert_spawning(self)
108
+ sl = self._semlock
109
+ if sys.platform == 'win32':
110
+ h = context.get_spawning_popen().duplicate_for_child(sl.handle)
111
+ else:
112
+ h = sl.handle
113
+ return (h, sl.kind, sl.maxvalue, sl.name)
114
+
115
+ def __setstate__(self, state):
116
+ self._semlock = _multiprocessing.SemLock._rebuild(*state)
117
+ util.debug('recreated blocker with handle %r' % state[0])
118
+ self._make_methods()
119
+
120
+ @staticmethod
121
+ def _make_name():
122
+ return '%s-%s' % (process.current_process()._config['semprefix'],
123
+ next(SemLock._rand))
124
+
125
+ #
126
+ # Semaphore
127
+ #
128
+
129
+ class Semaphore(SemLock):
130
+
131
+ def __init__(self, value=1, *, ctx):
132
+ SemLock.__init__(self, SEMAPHORE, value, SEM_VALUE_MAX, ctx=ctx)
133
+
134
+ def get_value(self):
135
+ return self._semlock._get_value()
136
+
137
+ def __repr__(self):
138
+ try:
139
+ value = self._semlock._get_value()
140
+ except Exception:
141
+ value = 'unknown'
142
+ return '<%s(value=%s)>' % (self.__class__.__name__, value)
143
+
144
+ #
145
+ # Bounded semaphore
146
+ #
147
+
148
+ class BoundedSemaphore(Semaphore):
149
+
150
+ def __init__(self, value=1, *, ctx):
151
+ SemLock.__init__(self, SEMAPHORE, value, value, ctx=ctx)
152
+
153
+ def __repr__(self):
154
+ try:
155
+ value = self._semlock._get_value()
156
+ except Exception:
157
+ value = 'unknown'
158
+ return '<%s(value=%s, maxvalue=%s)>' % \
159
+ (self.__class__.__name__, value, self._semlock.maxvalue)
160
+
161
+ #
162
+ # Non-recursive lock
163
+ #
164
+
165
+ class Lock(SemLock):
166
+
167
+ def __init__(self, *, ctx):
168
+ SemLock.__init__(self, SEMAPHORE, 1, 1, ctx=ctx)
169
+
170
+ def __repr__(self):
171
+ try:
172
+ if self._semlock._is_mine():
173
+ name = process.current_process().name
174
+ if threading.current_thread().name != 'MainThread':
175
+ name += '|' + threading.current_thread().name
176
+ elif self._semlock._get_value() == 1:
177
+ name = 'None'
178
+ elif self._semlock._count() > 0:
179
+ name = 'SomeOtherThread'
180
+ else:
181
+ name = 'SomeOtherProcess'
182
+ except Exception:
183
+ name = 'unknown'
184
+ return '<%s(owner=%s)>' % (self.__class__.__name__, name)
185
+
186
+ #
187
+ # Recursive lock
188
+ #
189
+
190
+ class RLock(SemLock):
191
+
192
+ def __init__(self, *, ctx):
193
+ SemLock.__init__(self, RECURSIVE_MUTEX, 1, 1, ctx=ctx)
194
+
195
+ def __repr__(self):
196
+ try:
197
+ if self._semlock._is_mine():
198
+ name = process.current_process().name
199
+ if threading.current_thread().name != 'MainThread':
200
+ name += '|' + threading.current_thread().name
201
+ count = self._semlock._count()
202
+ elif self._semlock._get_value() == 1:
203
+ name, count = 'None', 0
204
+ elif self._semlock._count() > 0:
205
+ name, count = 'SomeOtherThread', 'nonzero'
206
+ else:
207
+ name, count = 'SomeOtherProcess', 'nonzero'
208
+ except Exception:
209
+ name, count = 'unknown', 'unknown'
210
+ return '<%s(%s, %s)>' % (self.__class__.__name__, name, count)
211
+
212
+ #
213
+ # Condition variable
214
+ #
215
+
216
+ class Condition(object):
217
+
218
+ def __init__(self, lock=None, *, ctx):
219
+ self._lock = lock or ctx.RLock()
220
+ self._sleeping_count = ctx.Semaphore(0)
221
+ self._woken_count = ctx.Semaphore(0)
222
+ self._wait_semaphore = ctx.Semaphore(0)
223
+ self._make_methods()
224
+
225
+ def __getstate__(self):
226
+ context.assert_spawning(self)
227
+ return (self._lock, self._sleeping_count,
228
+ self._woken_count, self._wait_semaphore)
229
+
230
+ def __setstate__(self, state):
231
+ (self._lock, self._sleeping_count,
232
+ self._woken_count, self._wait_semaphore) = state
233
+ self._make_methods()
234
+
235
+ def __enter__(self):
236
+ return self._lock.__enter__()
237
+
238
+ def __exit__(self, *args):
239
+ return self._lock.__exit__(*args)
240
+
241
+ def _make_methods(self):
242
+ self.acquire = self._lock.acquire
243
+ self.release = self._lock.release
244
+
245
+ def __repr__(self):
246
+ try:
247
+ num_waiters = (self._sleeping_count._semlock._get_value() -
248
+ self._woken_count._semlock._get_value())
249
+ except Exception:
250
+ num_waiters = 'unknown'
251
+ return '<%s(%s, %s)>' % (self.__class__.__name__, self._lock, num_waiters)
252
+
253
+ def wait(self, timeout=None):
254
+ assert self._lock._semlock._is_mine(), \
255
+ 'must acquire() condition before using wait()'
256
+
257
+ # indicate that this thread is going to sleep
258
+ self._sleeping_count.release()
259
+
260
+ # release lock
261
+ count = self._lock._semlock._count()
262
+ for i in range(count):
263
+ self._lock.release()
264
+
265
+ try:
266
+ # wait for notification or timeout
267
+ return self._wait_semaphore.acquire(True, timeout)
268
+ finally:
269
+ # indicate that this thread has woken
270
+ self._woken_count.release()
271
+
272
+ # reacquire lock
273
+ for i in range(count):
274
+ self._lock.acquire()
275
+
276
+ def notify(self, n=1):
277
+ assert self._lock._semlock._is_mine(), 'lock is not owned'
278
+ assert not self._wait_semaphore.acquire(
279
+ False), ('notify: Should not have been able to acquire '
280
+ + '_wait_semaphore')
281
+
282
+ # to take account of timeouts since last notify*() we subtract
283
+ # woken_count from sleeping_count and rezero woken_count
284
+ while self._woken_count.acquire(False):
285
+ res = self._sleeping_count.acquire(False)
286
+ assert res, ('notify: Bug in sleeping_count.acquire'
287
+ + '- res should not be False')
288
+
289
+ sleepers = 0
290
+ while sleepers < n and self._sleeping_count.acquire(False):
291
+ self._wait_semaphore.release() # wake up one sleeper
292
+ sleepers += 1
293
+
294
+ if sleepers:
295
+ for i in range(sleepers):
296
+ self._woken_count.acquire() # wait for a sleeper to wake
297
+
298
+ # rezero wait_semaphore in case some timeouts just happened
299
+ while self._wait_semaphore.acquire(False):
300
+ pass
301
+
302
+ def notify_all(self):
303
+ self.notify(n=sys.maxsize)
304
+
305
+ def wait_for(self, predicate, timeout=None):
306
+ result = predicate()
307
+ if result:
308
+ return result
309
+ if timeout is not None:
310
+ endtime = getattr(time,'monotonic',time.time)() + timeout
311
+ else:
312
+ endtime = None
313
+ waittime = None
314
+ while not result:
315
+ if endtime is not None:
316
+ waittime = endtime - getattr(time,'monotonic',time.time)()
317
+ if waittime <= 0:
318
+ break
319
+ self.wait(waittime)
320
+ result = predicate()
321
+ return result
322
+
323
+ #
324
+ # Event
325
+ #
326
+
327
+ class Event(object):
328
+
329
+ def __init__(self, *, ctx):
330
+ self._cond = ctx.Condition(ctx.Lock())
331
+ self._flag = ctx.Semaphore(0)
332
+
333
+ def is_set(self):
334
+ with self._cond:
335
+ if self._flag.acquire(False):
336
+ self._flag.release()
337
+ return True
338
+ return False
339
+
340
+ def set(self):
341
+ with self._cond:
342
+ self._flag.acquire(False)
343
+ self._flag.release()
344
+ self._cond.notify_all()
345
+
346
+ def clear(self):
347
+ with self._cond:
348
+ self._flag.acquire(False)
349
+
350
+ def wait(self, timeout=None):
351
+ with self._cond:
352
+ if self._flag.acquire(False):
353
+ self._flag.release()
354
+ else:
355
+ self._cond.wait(timeout)
356
+
357
+ if self._flag.acquire(False):
358
+ self._flag.release()
359
+ return True
360
+ return False
361
+
362
+ #
363
+ # Barrier
364
+ #
365
+
366
+ class Barrier(threading.Barrier):
367
+
368
+ def __init__(self, parties, action=None, timeout=None, *, ctx):
369
+ import struct
370
+ from .heap import BufferWrapper
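+ # _state and _count are kept in a two-int shared-memory buffer so that
+ # every process attached to the barrier observes the same values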
371
+ wrapper = BufferWrapper(struct.calcsize('i') * 2)
372
+ cond = ctx.Condition()
373
+ self.__setstate__((parties, action, timeout, cond, wrapper))
374
+ self._state = 0
375
+ self._count = 0
376
+
377
+ def __setstate__(self, state):
378
+ (self._parties, self._action, self._timeout,
379
+ self._cond, self._wrapper) = state
380
+ self._array = self._wrapper.create_memoryview().cast('i')
381
+
382
+ def __getstate__(self):
383
+ return (self._parties, self._action, self._timeout,
384
+ self._cond, self._wrapper)
385
+
386
+ @property
387
+ def _state(self):
388
+ return self._array[0]
389
+
390
+ @_state.setter
391
+ def _state(self, value):
392
+ self._array[0] = value
393
+
394
+ @property
395
+ def _count(self):
396
+ return self._array[1]
397
+
398
+ @_count.setter
399
+ def _count(self, value):
400
+ self._array[1] = value
venv/lib/python3.10/site-packages/multiprocess/util.py ADDED
@@ -0,0 +1,489 @@
1
+ #
2
+ # Module providing various facilities to other parts of the package
3
+ #
4
+ # multiprocessing/util.py
5
+ #
6
+ # Copyright (c) 2006-2008, R Oudkerk
7
+ # Licensed to PSF under a Contributor Agreement.
8
+ #
9
+
10
+ import os
11
+ import itertools
12
+ import sys
13
+ import weakref
14
+ import atexit
15
+ import threading # we want threading to install its
16
+ # cleanup function before multiprocessing does
17
+ from subprocess import _args_from_interpreter_flags
18
+
19
+ from . import process
20
+
21
+ __all__ = [
22
+ 'sub_debug', 'debug', 'info', 'sub_warning', 'get_logger',
23
+ 'log_to_stderr', 'get_temp_dir', 'register_after_fork',
24
+ 'is_exiting', 'Finalize', 'ForkAwareThreadLock', 'ForkAwareLocal',
25
+ 'close_all_fds_except', 'SUBDEBUG', 'SUBWARNING',
26
+ ]
27
+
28
+ #
29
+ # Logging
30
+ #
31
+
32
+ NOTSET = 0
33
+ SUBDEBUG = 5
34
+ DEBUG = 10
35
+ INFO = 20
36
+ SUBWARNING = 25
37
+
38
+ LOGGER_NAME = 'multiprocess'
39
+ DEFAULT_LOGGING_FORMAT = '[%(levelname)s/%(processName)s] %(message)s'
40
+
41
+ _logger = None
42
+ _log_to_stderr = False
43
+
44
+ def sub_debug(msg, *args):
45
+ if _logger:
46
+ _logger.log(SUBDEBUG, msg, *args)
47
+
48
+ def debug(msg, *args):
49
+ if _logger:
50
+ _logger.log(DEBUG, msg, *args)
51
+
52
+ def info(msg, *args):
53
+ if _logger:
54
+ _logger.log(INFO, msg, *args)
55
+
56
+ def sub_warning(msg, *args):
57
+ if _logger:
58
+ _logger.log(SUBWARNING, msg, *args)
59
+
60
+ def get_logger():
61
+ '''
62
+ Returns logger used by multiprocess
63
+ '''
64
+ global _logger
65
+ import logging
66
+
67
+ logging._acquireLock()
68
+ try:
69
+ if not _logger:
70
+
71
+ _logger = logging.getLogger(LOGGER_NAME)
72
+ _logger.propagate = 0
73
+
74
+ # XXX multiprocessing should cleanup before logging
75
+ if hasattr(atexit, 'unregister'):
76
+ atexit.unregister(_exit_function)
77
+ atexit.register(_exit_function)
78
+ else:
79
+ atexit._exithandlers.remove((_exit_function, (), {}))
80
+ atexit._exithandlers.append((_exit_function, (), {}))
81
+
82
+ finally:
83
+ logging._releaseLock()
84
+
85
+ return _logger
86
+
87
+ def log_to_stderr(level=None):
88
+ '''
89
+ Turn on logging and add a handler which prints to stderr
90
+ '''
91
+ global _log_to_stderr
92
+ import logging
93
+
94
+ logger = get_logger()
95
+ formatter = logging.Formatter(DEFAULT_LOGGING_FORMAT)
96
+ handler = logging.StreamHandler()
97
+ handler.setFormatter(formatter)
98
+ logger.addHandler(handler)
99
+
100
+ if level:
101
+ logger.setLevel(level)
102
+ _log_to_stderr = True
103
+ return _logger
104
+
105
+
106
+ # Abstract socket support
107
+
108
+ def _platform_supports_abstract_sockets():
109
+ if sys.platform == "linux":
110
+ return True
111
+ if hasattr(sys, 'getandroidapilevel'):
112
+ return True
113
+ return False
114
+
115
+
116
+ def is_abstract_socket_namespace(address):
117
+ if not address:
118
+ return False
119
+ if isinstance(address, bytes):
120
+ return address[0] == 0
121
+ elif isinstance(address, str):
122
+ return address[0] == "\0"
123
+ raise TypeError(f'address type of {address!r} unrecognized')
124
+
125
+
126
+ abstract_sockets_supported = _platform_supports_abstract_sockets()
127
+
128
+ #
129
+ # Function returning a temp directory which will be removed on exit
130
+ #
131
+
132
+ def _remove_temp_dir(rmtree, tempdir):
133
+ rmtree(tempdir)
134
+
135
+ current_process = process.current_process()
136
+ # current_process() can be None if the finalizer is called
137
+ # late during Python finalization
138
+ if current_process is not None:
139
+ current_process._config['tempdir'] = None
140
+
141
+ def get_temp_dir():
142
+ # get name of a temp directory which will be automatically cleaned up
143
+ tempdir = process.current_process()._config.get('tempdir')
144
+ if tempdir is None:
145
+ import shutil, tempfile
146
+ tempdir = tempfile.mkdtemp(prefix='pymp-')
147
+ info('created temp directory %s', tempdir)
148
+ # keep a strong reference to shutil.rmtree(), since the finalizer
149
+ # can be called late during Python shutdown
150
+ Finalize(None, _remove_temp_dir, args=(shutil.rmtree, tempdir),
151
+ exitpriority=-100)
152
+ process.current_process()._config['tempdir'] = tempdir
153
+ return tempdir
154
+
155
+ #
156
+ # Support for reinitialization of objects when bootstrapping a child process
157
+ #
158
+
159
+ _afterfork_registry = weakref.WeakValueDictionary()
160
+ _afterfork_counter = itertools.count()
161
+
162
+ def _run_after_forkers():
163
+ items = list(_afterfork_registry.items())
164
+ items.sort()
165
+ for (index, ident, func), obj in items:
166
+ try:
167
+ func(obj)
168
+ except Exception as e:
169
+ info('after forker raised exception %s', e)
170
+
171
+ def register_after_fork(obj, func):
172
+ _afterfork_registry[(next(_afterfork_counter), id(obj), func)] = obj
173
+
174
+ #
175
+ # Finalization using weakrefs
176
+ #
177
+
178
+ _finalizer_registry = {}
179
+ _finalizer_counter = itertools.count()
180
+
181
+
182
+ class Finalize(object):
183
+ '''
184
+ Class which supports object finalization using weakrefs
185
+ '''
186
+ def __init__(self, obj, callback, args=(), kwargs=None, exitpriority=None):
187
+ if (exitpriority is not None) and not isinstance(exitpriority,int):
188
+ raise TypeError(
189
+ "Exitpriority ({0!r}) must be None or int, not {1!s}".format(
190
+ exitpriority, type(exitpriority)))
191
+
192
+ if obj is not None:
193
+ self._weakref = weakref.ref(obj, self)
194
+ elif exitpriority is None:
195
+ raise ValueError("Without object, exitpriority cannot be None")
196
+
197
+ self._callback = callback
198
+ self._args = args
199
+ self._kwargs = kwargs or {}
200
+ self._key = (exitpriority, next(_finalizer_counter))
201
+ self._pid = os.getpid()
202
+
203
+ _finalizer_registry[self._key] = self
204
+
205
+ def __call__(self, wr=None,
206
+ # Need to bind these locally because the globals can have
207
+ # been cleared at shutdown
208
+ _finalizer_registry=_finalizer_registry,
209
+ sub_debug=sub_debug, getpid=os.getpid):
210
+ '''
211
+ Run the callback unless it has already been called or cancelled
212
+ '''
213
+ try:
214
+ del _finalizer_registry[self._key]
215
+ except KeyError:
216
+ sub_debug('finalizer no longer registered')
217
+ else:
218
+ if self._pid != getpid():
219
+ sub_debug('finalizer ignored because different process')
220
+ res = None
221
+ else:
222
+ sub_debug('finalizer calling %s with args %s and kwargs %s',
223
+ self._callback, self._args, self._kwargs)
224
+ res = self._callback(*self._args, **self._kwargs)
225
+ self._weakref = self._callback = self._args = \
226
+ self._kwargs = self._key = None
227
+ return res
228
+
229
+ def cancel(self):
230
+ '''
231
+ Cancel finalization of the object
232
+ '''
233
+ try:
234
+ del _finalizer_registry[self._key]
235
+ except KeyError:
236
+ pass
237
+ else:
238
+ self._weakref = self._callback = self._args = \
239
+ self._kwargs = self._key = None
240
+
241
+ def still_active(self):
242
+ '''
243
+ Return whether this finalizer is still waiting to invoke callback
244
+ '''
245
+ return self._key in _finalizer_registry
246
+
247
+ def __repr__(self):
248
+ try:
249
+ obj = self._weakref()
250
+ except (AttributeError, TypeError):
251
+ obj = None
252
+
253
+ if obj is None:
254
+ return '<%s object, dead>' % self.__class__.__name__
255
+
256
+ x = '<%s object, callback=%s' % (
257
+ self.__class__.__name__,
258
+ getattr(self._callback, '__name__', self._callback))
259
+ if self._args:
260
+ x += ', args=' + str(self._args)
261
+ if self._kwargs:
262
+ x += ', kwargs=' + str(self._kwargs)
263
+ if self._key[0] is not None:
264
+ x += ', exitpriority=' + str(self._key[0])
265
+ return x + '>'
266
+
267
+
268
+ def _run_finalizers(minpriority=None):
269
+ '''
270
+ Run all finalizers whose exit priority is not None and at least minpriority
271
+
272
+ Finalizers with highest priority are called first; finalizers with
273
+ the same priority will be called in reverse order of creation.
274
+ '''
275
+ if _finalizer_registry is None:
276
+ # This function may be called after this module's globals are
277
+ # destroyed. See the _exit_function function in this module for more
278
+ # notes.
279
+ return
280
+
281
+ if minpriority is None:
282
+ f = lambda p : p[0] is not None
283
+ else:
284
+ f = lambda p : p[0] is not None and p[0] >= minpriority
285
+
286
+ # Careful: _finalizer_registry may be mutated while this function
287
+ # is running (either by a GC run or by another thread).
288
+
289
+ # list(_finalizer_registry) should be atomic, while
290
+ # list(_finalizer_registry.items()) is not.
291
+ keys = [key for key in list(_finalizer_registry) if f(key)]
292
+ keys.sort(reverse=True)
293
+
294
+ for key in keys:
295
+ finalizer = _finalizer_registry.get(key)
296
+ # key may have been removed from the registry
297
+ if finalizer is not None:
298
+ sub_debug('calling %s', finalizer)
299
+ try:
300
+ finalizer()
301
+ except Exception:
302
+ import traceback
303
+ traceback.print_exc()
304
+
305
+ if minpriority is None:
306
+ _finalizer_registry.clear()
307
+
308
+ #
309
+ # Clean up on exit
310
+ #
311
+
312
+ def is_exiting():
313
+ '''
314
+ Returns true if the process is shutting down
315
+ '''
316
+ return _exiting or _exiting is None
317
+
318
+ _exiting = False
319
+
320
+ def _exit_function(info=info, debug=debug, _run_finalizers=_run_finalizers,
321
+ active_children=process.active_children,
322
+ current_process=process.current_process):
323
+ # We hold on to references to functions in the arglist due to the
324
+ # situation described below, where this function is called after this
325
+ # module's globals are destroyed.
326
+
327
+ global _exiting
328
+
329
+ if not _exiting:
330
+ _exiting = True
331
+
332
+ info('process shutting down')
333
+ debug('running all "atexit" finalizers with priority >= 0')
334
+ _run_finalizers(0)
335
+
336
+ if current_process() is not None:
337
+ # We check if the current process is None here because if
338
+ # it's None, any call to ``active_children()`` will raise
339
+ # an AttributeError (active_children winds up trying to
340
+ # get attributes from util._current_process). One
341
+ # situation where this can happen is if someone has
342
+ # manipulated sys.modules, causing this module to be
343
+ # garbage collected. The destructor for the module type
344
+ # then replaces all values in the module dict with None.
345
+ # For instance, after setuptools runs a test it replaces
346
+ # sys.modules with a copy created earlier. See issues
347
+ # #9775 and #15881. Also related: #4106, #9205, and
348
+ # #9207.
349
+
350
+ for p in active_children():
351
+ if p.daemon:
352
+ info('calling terminate() for daemon %s', p.name)
353
+ p._popen.terminate()
354
+
355
+ for p in active_children():
356
+ info('calling join() for process %s', p.name)
357
+ p.join()
358
+
359
+ debug('running the remaining "atexit" finalizers')
360
+ _run_finalizers()
361
+
362
+ atexit.register(_exit_function)
363
+
364
+ #
365
+ # Some fork aware types
366
+ #
367
+
368
+ class ForkAwareThreadLock(object):
369
+ def __init__(self):
370
+ self._lock = threading.Lock()
371
+ self.acquire = self._lock.acquire
372
+ self.release = self._lock.release
373
+ register_after_fork(self, ForkAwareThreadLock._at_fork_reinit)
374
+
375
+ def _at_fork_reinit(self):
376
+ self._lock._at_fork_reinit()
377
+
378
+ def __enter__(self):
379
+ return self._lock.__enter__()
380
+
381
+ def __exit__(self, *args):
382
+ return self._lock.__exit__(*args)
383
+
384
+
385
+ class ForkAwareLocal(threading.local):
386
+ def __init__(self):
387
+ register_after_fork(self, lambda obj : obj.__dict__.clear())
388
+ def __reduce__(self):
389
+ return type(self), ()
390
+
391
+ #
392
+ # Close fds except those specified
393
+ #
394
+
395
+ try:
396
+ MAXFD = os.sysconf("SC_OPEN_MAX")
397
+ except Exception:
398
+ MAXFD = 256
399
+
400
+ def close_all_fds_except(fds):
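+ # Close every descriptor in [0, MAXFD) that is not listed in fds.
+ # -1 and MAXFD are appended as sentinels so os.closerange() also covers
+ # the ranges below the smallest and above the largest listed fd.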
401
+ fds = list(fds) + [-1, MAXFD]
402
+ fds.sort()
403
+ assert fds[-1] == MAXFD, 'fd too large'
404
+ for i in range(len(fds) - 1):
405
+ os.closerange(fds[i]+1, fds[i+1])
406
+ #
407
+ # Close sys.stdin and replace stdin with os.devnull
408
+ #
409
+
410
+ def _close_stdin():
411
+ if sys.stdin is None:
412
+ return
413
+
414
+ try:
415
+ sys.stdin.close()
416
+ except (OSError, ValueError):
417
+ pass
418
+
419
+ try:
420
+ fd = os.open(os.devnull, os.O_RDONLY)
421
+ try:
422
+ sys.stdin = open(fd, encoding="utf-8", closefd=False)
423
+ except:
424
+ os.close(fd)
425
+ raise
426
+ except (OSError, ValueError):
427
+ pass
428
+
429
+ #
430
+ # Flush standard streams, if any
431
+ #
432
+
433
+ def _flush_std_streams():
434
+ try:
435
+ sys.stdout.flush()
436
+ except (AttributeError, ValueError):
437
+ pass
438
+ try:
439
+ sys.stderr.flush()
440
+ except (AttributeError, ValueError):
441
+ pass
442
+
443
+ #
444
+ # Start a program with only specified fds kept open
445
+ #
446
+
447
+ def spawnv_passfds(path, args, passfds):
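+ # Fork and exec 'path' with 'args', keeping only the descriptors in
+ # 'passfds' (plus the temporary error pipe) open in the child.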
448
+ import _posixsubprocess
449
+ passfds = tuple(sorted(map(int, passfds)))
450
+ errpipe_read, errpipe_write = os.pipe()
451
+ try:
452
+ return _posixsubprocess.fork_exec(
453
+ args, [os.fsencode(path)], True, passfds, None, None,
454
+ -1, -1, -1, -1, -1, -1, errpipe_read, errpipe_write,
455
+ False, False, None, None, None, -1, None)
456
+ finally:
457
+ os.close(errpipe_read)
458
+ os.close(errpipe_write)
459
+
460
+
461
+ def close_fds(*fds):
462
+ """Close each file descriptor given as an argument"""
463
+ for fd in fds:
464
+ os.close(fd)
465
+
466
+
467
+ def _cleanup_tests():
468
+ """Cleanup multiprocessing resources when multiprocessing tests
469
+ completed."""
470
+
471
+ from test import support
472
+
473
+ # cleanup multiprocessing
474
+ process._cleanup()
475
+
476
+ # Stop the ForkServer process if it's running
477
+ from multiprocess import forkserver
478
+ forkserver._forkserver._stop()
479
+
480
+ # Stop the ResourceTracker process if it's running
481
+ from multiprocess import resource_tracker
482
+ resource_tracker._resource_tracker._stop()
483
+
484
+ # bpo-37421: Explicitly call _run_finalizers() to remove immediately
485
+ # temporary directories created by multiprocessing.util.get_temp_dir().
486
+ _run_finalizers()
487
+ support.gc_collect()
488
+
489
+ support.reap_children()
venv/lib/python3.10/site-packages/pynvml-11.5.0.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
1
+ pip
venv/lib/python3.10/site-packages/pynvml-11.5.0.dist-info/LICENSE.txt ADDED
@@ -0,0 +1,27 @@
1
+ Copyright (c) 2011-2021, NVIDIA Corporation.
2
+ All rights reserved.
3
+
4
+ Redistribution and use in source and binary forms, with or without
5
+ modification, are permitted provided that the following conditions are met:
6
+
7
+ * Redistributions of source code must retain the above copyright notice, this
8
+ list of conditions and the following disclaimer.
9
+
10
+ * Redistributions in binary form must reproduce the above copyright notice,
11
+ this list of conditions and the following disclaimer in the documentation
12
+ and/or other materials provided with the distribution.
13
+
14
+ * Neither the name of staged-recipes nor the names of its
15
+ contributors may be used to endorse or promote products derived from
16
+ this software without specific prior written permission.
17
+
18
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
22
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
24
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
25
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
26
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
venv/lib/python3.10/site-packages/pynvml-11.5.0.dist-info/METADATA ADDED
@@ -0,0 +1,233 @@
1
+ Metadata-Version: 2.1
2
+ Name: pynvml
3
+ Version: 11.5.0
4
+ Summary: Python Bindings for the NVIDIA Management Library
5
+ Home-page: http://www.nvidia.com/
6
+ Author: NVIDIA Corporation
7
+ Author-email: [email protected]
8
+ License: BSD
9
+ Classifier: Development Status :: 5 - Production/Stable
10
+ Classifier: Intended Audience :: Developers
11
+ Classifier: Intended Audience :: System Administrators
12
+ Classifier: License :: OSI Approved :: BSD License
13
+ Classifier: Operating System :: Microsoft :: Windows
14
+ Classifier: Operating System :: POSIX :: Linux
15
+ Classifier: Programming Language :: Python
16
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
17
+ Classifier: Topic :: System :: Hardware
18
+ Classifier: Topic :: System :: Systems Administration
19
+ Requires-Python: >=3.6
20
+ Description-Content-Type: text/markdown
21
+ License-File: LICENSE.txt
22
+
23
+ Python bindings to the NVIDIA Management Library
24
+ ================================================
25
+
26
+ Provides a Python interface to GPU management and monitoring functions.
27
+
28
+ This is a wrapper around the NVML library.
29
+ For information about the NVML library, see the NVML developer page
30
+ http://developer.nvidia.com/nvidia-management-library-nvml
31
+
32
+ As of version 11.0.0, the NVML-wrappers used in pynvml are identical
33
+ to those published through [nvidia-ml-py](https://pypi.org/project/nvidia-ml-py/).
34
+
35
+ Note that this file can be run with 'python -m doctest -v README.txt'
36
+ although the results are system dependent.
37
+
38
+ Requires
39
+ --------
40
+ Python 3, or an earlier version with the ctypes module.
41
+
42
+ Installation
43
+ ------------
44
+
45
+ pip install .
46
+
47
+ Usage
48
+ -----
49
+
50
+ You can use the lower level nvml bindings
51
+
52
+ ```python
53
+ >>> from pynvml import *
54
+ >>> nvmlInit()
55
+ >>> print("Driver Version:", nvmlSystemGetDriverVersion())
56
+ Driver Version: 410.00
57
+ >>> deviceCount = nvmlDeviceGetCount()
58
+ >>> for i in range(deviceCount):
59
+ ... handle = nvmlDeviceGetHandleByIndex(i)
60
+ ... print("Device", i, ":", nvmlDeviceGetName(handle))
61
+ ...
62
+ Device 0 : Tesla V100
63
+
64
+ >>> nvmlShutdown()
65
+ ```
66
+
67
+ Or the higher level nvidia_smi API
68
+
69
+ ```python
70
+ from pynvml.smi import nvidia_smi
71
+ nvsmi = nvidia_smi.getInstance()
72
+ nvsmi.DeviceQuery('memory.free, memory.total')
73
+ ```
74
+
75
+ ```python
76
+ from pynvml.smi import nvidia_smi
77
+ nvsmi = nvidia_smi.getInstance()
78
+ print(nvsmi.DeviceQuery('--help-query-gpu'), end='\n')
79
+ ```
80
+
81
+ Functions
82
+ ---------
83
+ Python methods wrap NVML functions, implemented in a C shared library.
84
+ Each function's use is the same with the following exceptions:
85
+
86
+ - Instead of returning error codes, failing error codes are raised as
87
+ Python exceptions.
88
+
89
+ ```python
90
+ >>> try:
91
+ ... nvmlDeviceGetCount()
92
+ ... except NVMLError as error:
93
+ ... print(error)
94
+ ...
95
+ Uninitialized
96
+ ```
97
+
98
+ - C function output parameters are returned from the corresponding
99
+ Python function left to right.
100
+
101
+ ```c
102
+ nvmlReturn_t nvmlDeviceGetEccMode(nvmlDevice_t device,
103
+ nvmlEnableState_t *current,
104
+ nvmlEnableState_t *pending);
105
+ ```
106
+
107
+ ```python
108
+ >>> nvmlInit()
109
+ >>> handle = nvmlDeviceGetHandleByIndex(0)
110
+ >>> (current, pending) = nvmlDeviceGetEccMode(handle)
111
+ ```
112
+
113
+ - C structs are converted into Python classes.
114
+
115
+ ```c
116
+ nvmlReturn_t DECLDIR nvmlDeviceGetMemoryInfo(nvmlDevice_t device,
117
+ nvmlMemory_t *memory);
118
+ typedef struct nvmlMemory_st {
119
+ unsigned long long total;
120
+ unsigned long long free;
121
+ unsigned long long used;
122
+ } nvmlMemory_t;
123
+ ```
124
+
125
+ ```python
126
+ >>> info = nvmlDeviceGetMemoryInfo(handle)
127
+ >>> print("Total memory:", info.total)
128
+ Total memory: 5636292608
129
+ >>> print("Free memory:", info.free)
130
+ Free memory: 5578420224
131
+ >>> print("Used memory:", info.used)
132
+ Used memory: 57872384
133
+ ```
134
+
135
+ - Python handles string buffer creation.
136
+
137
+ ```c
138
+ nvmlReturn_t nvmlSystemGetDriverVersion(char* version,
139
+ unsigned int length);
140
+ ```
141
+
142
+ ```python
143
+ >>> version = nvmlSystemGetDriverVersion();
144
+ >>> nvmlShutdown()
145
+ ```
146
+
147
+ For usage information see the NVML documentation.
148
+
149
+ Variables
150
+ ---------
151
+
152
+ All meaningful NVML constants and enums are exposed in Python.
153
+
154
+ The NVML_VALUE_NOT_AVAILABLE constant is not used. Instead None is mapped to the field.
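+ As a minimal sketch (not part of the upstream README), the None mapping can be
+ handled with a plain None check; the device index and the queried field below
+ are purely illustrative:
+
+ ```python
+ from pynvml import nvmlInit, nvmlDeviceGetHandleByIndex, nvmlDeviceGetMemoryInfo, nvmlShutdown
+
+ nvmlInit()
+ handle = nvmlDeviceGetHandleByIndex(0)
+ info = nvmlDeviceGetMemoryInfo(handle)
+ # A field NVML cannot report arrives as None rather than the
+ # NVML_VALUE_NOT_AVAILABLE sentinel, so a plain None check suffices.
+ total = "unavailable" if info.total is None else info.total
+ print("Total memory:", total)
+ nvmlShutdown()
+ ```
+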
155
+
156
+ NVML Permissions
157
+ ----------------
158
+
159
+ Many of the `pynvml` wrappers assume that the underlying NVIDIA Management Library (NVML) API can be used without admin/root privileges. However, it is certainly possible for the system permissions to prevent pynvml from querying GPU performance counters. For example:
160
+
161
+ ```
162
+ $ nvidia-smi nvlink -g 0
163
+ GPU 0: Tesla V100-SXM2-32GB (UUID: GPU-96ab329d-7a1f-73a8-a9b7-18b4b2855f92)
164
+ NVML: Unable to get the NvLink link utilization counter control for link 0: Insufficient Permissions
165
+ ```
166
+
167
+ A simple way to check the permissions status is to look for `RmProfilingAdminOnly` in the driver `params` file (Note that `RmProfilingAdminOnly == 1` means that admin/sudo access is required):
168
+
169
+ ```
170
+ $ cat /proc/driver/nvidia/params | grep RmProfilingAdminOnly
171
+ RmProfilingAdminOnly: 1
172
+ ```
173
+
174
+ For more information on setting/unsetting the relevant admin privileges, see [these notes](https://developer.nvidia.com/nvidia-development-tools-solutions-ERR_NVGPUCTRPERM-permission-issue-performance-counters) on resolving `ERR_NVGPUCTRPERM` errors.
175
+
176
+
177
+ Release Notes
178
+ -------------
179
+
180
+ - Version 2.285.0
181
+ - Added new functions for NVML 2.285. See NVML documentation for more information.
182
+ - Ported to support Python 3.0 and Python 2.0 syntax.
183
+ - Added nvidia_smi.py tool as a sample app.
184
+ - Version 3.295.0
185
+ - Added new functions for NVML 3.295. See NVML documentation for more information.
186
+ - Updated nvidia_smi.py tool
187
+ - Includes additional error handling
188
+ - Version 4.304.0
189
+ - Added new functions for NVML 4.304. See NVML documentation for more information.
190
+ - Updated nvidia_smi.py tool
191
+ - Version 4.304.3
192
+ - Fixing nvmlUnitGetDeviceCount bug
193
+ - Version 5.319.0
194
+ - Added new functions for NVML 5.319. See NVML documentation for more information.
195
+ - Version 6.340.0
196
+ - Added new functions for NVML 6.340. See NVML documentation for more information.
197
+ - Version 7.346.0
198
+ - Added new functions for NVML 7.346. See NVML documentation for more information.
199
+ - Version 7.352.0
200
+ - Added new functions for NVML 7.352. See NVML documentation for more information.
201
+ - Version 8.0.0
202
+ - Refactor code to a nvidia_smi singleton class
203
+ - Added DeviceQuery that returns a dictionary of (name, value).
204
+ - Added filter parameters on DeviceQuery to match query api in nvidia-smi
205
+ - Added filter parameters on XmlDeviceQuery to match query api in nvidia-smi
206
+ - Added integer enumeration for filter strings to reduce overhead for performance monitoring.
207
+ - Added loop(filter) method with async and callback support
208
+ - Version 8.0.1
209
+ - Restructuring directories into two packages (pynvml and nvidia_smi)
210
+ - Adding initial tests for both packages
211
+ - Some name-convention cleanup in pynvml
212
+ - Version 8.0.2
213
+ - Added NVLink function wrappers for pynvml module
214
+ - Version 8.0.3
215
+ - Added versioneer
216
+ - Fixed nvmlDeviceGetNvLinkUtilizationCounter bug
217
+ - Version 8.0.4
218
+ - Added nvmlDeviceGetTotalEnergyConsumption
219
+ - Added notes about NVML permissions
220
+ - Fixed version-check testing
221
+ - Version 11.0.0
222
+ - Updated nvml.py to CUDA 11
223
+ - Updated smi.py DeviceQuery to R460
224
+ - Aligned nvml.py with latest nvidia-ml-py deployment
225
+ - Version 11.4.0
226
+ - Updated nvml.py to CUDA 11.4
227
+ - Updated smi.py NVML_BRAND_NAMES
228
+ - Aligned nvml.py with latest nvidia-ml-py deployment (11.495.46)
229
+ - Version 11.4.1
230
+ - Fix comma bugs in nvml.py
231
+ - Version 11.5.0
232
+ - Updated nvml.py to support CUDA 11.5 and CUDA 12
233
+ - Aligned with latest nvidia-ml-py deployment (11.525.84)
venv/lib/python3.10/site-packages/pynvml-11.5.0.dist-info/RECORD ADDED
@@ -0,0 +1,14 @@
1
+ pynvml-11.5.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
2
+ pynvml-11.5.0.dist-info/LICENSE.txt,sha256=5AzAoGtclmNYvZlq9qGHrXLKd6niBcA7rqxfb_JWFCo,1496
3
+ pynvml-11.5.0.dist-info/METADATA,sha256=1-8ZxWifKeVgXka8_aWMyThqkvfuiSY7C3Y-ApDMx58,7844
4
+ pynvml-11.5.0.dist-info/RECORD,,
5
+ pynvml-11.5.0.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
6
+ pynvml-11.5.0.dist-info/top_level.txt,sha256=-HOl6xCcXgtL7sbpp6RDht-w8fdNThSlG43OCijKlAA,7
7
+ pynvml/__init__.py,sha256=p_wxUcw6iBwOzRhcBinGAaRv9m5XsGinHdGvd-_81VA,113
8
+ pynvml/__pycache__/__init__.cpython-310.pyc,,
9
+ pynvml/__pycache__/_version.cpython-310.pyc,,
10
+ pynvml/__pycache__/nvml.cpython-310.pyc,,
11
+ pynvml/__pycache__/smi.cpython-310.pyc,,
12
+ pynvml/_version.py,sha256=vTstPSQBfWFx-4k3529Rx8pQrkjz4isaipGL8AI5178,498
13
+ pynvml/nvml.py,sha256=wuNMieeP59Ps38Wa2t0nWn2as3Ujm_-caYIef1_t2zU,170305
14
+ pynvml/smi.py,sha256=QI5ZokwNYj2YM5PlhkMkvUot0-TG_KhZgCgDUN3F-Qk,137572
venv/lib/python3.10/site-packages/pynvml-11.5.0.dist-info/WHEEL ADDED
@@ -0,0 +1,5 @@
1
+ Wheel-Version: 1.0
2
+ Generator: bdist_wheel (0.38.4)
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
5
+
venv/lib/python3.10/site-packages/pynvml-11.5.0.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
1
+ pynvml
venv/lib/python3.10/site-packages/tzdata/zoneinfo/Australia/ACT ADDED
Binary file (904 Bytes). View file
 
venv/lib/python3.10/site-packages/tzdata/zoneinfo/Australia/Adelaide ADDED
Binary file (921 Bytes). View file
 
venv/lib/python3.10/site-packages/tzdata/zoneinfo/Australia/Brisbane ADDED
Binary file (289 Bytes). View file
 
venv/lib/python3.10/site-packages/tzdata/zoneinfo/Australia/Broken_Hill ADDED
Binary file (941 Bytes). View file
 
venv/lib/python3.10/site-packages/tzdata/zoneinfo/Australia/Canberra ADDED
Binary file (904 Bytes). View file
 
venv/lib/python3.10/site-packages/tzdata/zoneinfo/Australia/Currie ADDED
Binary file (1 kB). View file
 
venv/lib/python3.10/site-packages/tzdata/zoneinfo/Australia/Darwin ADDED
Binary file (234 Bytes). View file
 
venv/lib/python3.10/site-packages/tzdata/zoneinfo/Australia/Eucla ADDED
Binary file (314 Bytes). View file
 
venv/lib/python3.10/site-packages/tzdata/zoneinfo/Australia/Hobart ADDED
Binary file (1 kB). View file
 
venv/lib/python3.10/site-packages/tzdata/zoneinfo/Australia/LHI ADDED
Binary file (692 Bytes). View file
 
venv/lib/python3.10/site-packages/tzdata/zoneinfo/Australia/Lindeman ADDED
Binary file (325 Bytes). View file
 
venv/lib/python3.10/site-packages/tzdata/zoneinfo/Australia/Lord_Howe ADDED
Binary file (692 Bytes). View file
 
venv/lib/python3.10/site-packages/tzdata/zoneinfo/Australia/Melbourne ADDED
Binary file (904 Bytes). View file
 
venv/lib/python3.10/site-packages/tzdata/zoneinfo/Australia/NSW ADDED
Binary file (904 Bytes). View file
 
venv/lib/python3.10/site-packages/tzdata/zoneinfo/Australia/North ADDED
Binary file (234 Bytes). View file
 
venv/lib/python3.10/site-packages/tzdata/zoneinfo/Australia/Perth ADDED
Binary file (306 Bytes). View file
 
venv/lib/python3.10/site-packages/tzdata/zoneinfo/Australia/Queensland ADDED
Binary file (289 Bytes). View file
 
venv/lib/python3.10/site-packages/tzdata/zoneinfo/Australia/South ADDED
Binary file (921 Bytes). View file
 
venv/lib/python3.10/site-packages/tzdata/zoneinfo/Australia/Sydney ADDED
Binary file (904 Bytes). View file
 
venv/lib/python3.10/site-packages/tzdata/zoneinfo/Australia/Tasmania ADDED
Binary file (1 kB). View file
 
venv/lib/python3.10/site-packages/tzdata/zoneinfo/Australia/Victoria ADDED
Binary file (904 Bytes). View file
 
venv/lib/python3.10/site-packages/tzdata/zoneinfo/Australia/West ADDED
Binary file (306 Bytes). View file