applied-ai-018 committed
Commit 1274b42 (verified) · Parent: 39b51e5

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. env-llmeval/lib/python3.10/site-packages/multiprocess/__info__.py +221 -0
  2. env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/__info__.cpython-310.pyc +0 -0
  3. env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/__init__.cpython-310.pyc +0 -0
  4. env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/connection.cpython-310.pyc +0 -0
  5. env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/context.cpython-310.pyc +0 -0
  6. env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/forkserver.cpython-310.pyc +0 -0
  7. env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/heap.cpython-310.pyc +0 -0
  8. env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/managers.cpython-310.pyc +0 -0
  9. env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/pool.cpython-310.pyc +0 -0
  10. env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/popen_fork.cpython-310.pyc +0 -0
  11. env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/popen_forkserver.cpython-310.pyc +0 -0
  12. env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/popen_spawn_posix.cpython-310.pyc +0 -0
  13. env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/popen_spawn_win32.cpython-310.pyc +0 -0
  14. env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/process.cpython-310.pyc +0 -0
  15. env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/queues.cpython-310.pyc +0 -0
  16. env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/reduction.cpython-310.pyc +0 -0
  17. env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/resource_sharer.cpython-310.pyc +0 -0
  18. env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/resource_tracker.cpython-310.pyc +0 -0
  19. env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/shared_memory.cpython-310.pyc +0 -0
  20. env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/sharedctypes.cpython-310.pyc +0 -0
  21. env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/spawn.cpython-310.pyc +0 -0
  22. env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/synchronize.cpython-310.pyc +0 -0
  23. env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/util.cpython-310.pyc +0 -0
  24. env-llmeval/lib/python3.10/site-packages/multiprocess/context.py +376 -0
  25. env-llmeval/lib/python3.10/site-packages/multiprocess/dummy/__init__.py +126 -0
  26. env-llmeval/lib/python3.10/site-packages/multiprocess/dummy/__pycache__/__init__.cpython-310.pyc +0 -0
  27. env-llmeval/lib/python3.10/site-packages/multiprocess/dummy/__pycache__/connection.cpython-310.pyc +0 -0
  28. env-llmeval/lib/python3.10/site-packages/multiprocess/dummy/connection.py +75 -0
  29. env-llmeval/lib/python3.10/site-packages/multiprocess/forkserver.py +347 -0
  30. env-llmeval/lib/python3.10/site-packages/multiprocess/managers.py +1378 -0
  31. env-llmeval/lib/python3.10/site-packages/multiprocess/pool.py +957 -0
  32. env-llmeval/lib/python3.10/site-packages/multiprocess/popen_fork.py +83 -0
  33. env-llmeval/lib/python3.10/site-packages/multiprocess/popen_forkserver.py +74 -0
  34. env-llmeval/lib/python3.10/site-packages/multiprocess/popen_spawn_win32.py +131 -0
  35. env-llmeval/lib/python3.10/site-packages/multiprocess/resource_sharer.py +154 -0
  36. env-llmeval/lib/python3.10/site-packages/multiprocess/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  37. env-llmeval/lib/python3.10/site-packages/multiprocess/tests/__pycache__/__main__.cpython-310.pyc +0 -0
  38. env-llmeval/lib/python3.10/site-packages/multiprocess/tests/__pycache__/mp_fork_bomb.cpython-310.pyc +0 -0
  39. env-llmeval/lib/python3.10/site-packages/multiprocess/tests/__pycache__/mp_preload.cpython-310.pyc +0 -0
  40. env-llmeval/lib/python3.10/site-packages/multiprocess/tests/__pycache__/test_multiprocessing_fork.cpython-310.pyc +0 -0
  41. env-llmeval/lib/python3.10/site-packages/multiprocess/tests/__pycache__/test_multiprocessing_forkserver.cpython-310.pyc +0 -0
  42. env-llmeval/lib/python3.10/site-packages/multiprocess/tests/__pycache__/test_multiprocessing_main_handling.cpython-310.pyc +0 -0
  43. env-llmeval/lib/python3.10/site-packages/multiprocess/tests/__pycache__/test_multiprocessing_spawn.cpython-310.pyc +0 -0
  44. env-llmeval/lib/python3.10/site-packages/multiprocess/tests/test_multiprocessing_fork.py +19 -0
  45. env-llmeval/lib/python3.10/site-packages/multiprocess/util.py +489 -0
  46. env-llmeval/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/INSTALLER +1 -0
  47. env-llmeval/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/License.txt +1568 -0
  48. env-llmeval/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/METADATA +38 -0
  49. env-llmeval/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/RECORD +22 -0
  50. env-llmeval/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/WHEEL +5 -0
env-llmeval/lib/python3.10/site-packages/multiprocess/__info__.py ADDED
@@ -0,0 +1,221 @@
#!/usr/bin/env python
#
# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
# Copyright (c) 2024 The Uncertainty Quantification Foundation.
# License: 3-clause BSD. The full license text is available at:
# - https://github.com/uqfoundation/multiprocess/blob/master/LICENSE
'''
-----------------------------------------------------------------
multiprocess: better multiprocessing and multithreading in Python
-----------------------------------------------------------------

About Multiprocess
==================

``multiprocess`` is a fork of ``multiprocessing``. ``multiprocess`` extends ``multiprocessing`` to provide enhanced serialization, using `dill`. ``multiprocess`` leverages ``multiprocessing`` to support the spawning of processes using the API of the Python standard library's ``threading`` module. ``multiprocessing`` has been distributed as part of the standard library since Python 2.6.

``multiprocess`` is part of ``pathos``, a Python framework for heterogeneous computing.
``multiprocess`` is in active development, so any user feedback, bug reports, comments,
or suggestions are highly appreciated. A list of issues is located at https://github.com/uqfoundation/multiprocess/issues, with a legacy list maintained at https://uqfoundation.github.io/project/pathos/query.


Major Features
==============

``multiprocess`` enables:

    - objects to be transferred between processes using pipes or multi-producer/multi-consumer queues
    - objects to be shared between processes using a server process or (for simple data) shared memory

``multiprocess`` provides:

    - equivalents of all the synchronization primitives in ``threading``
    - a ``Pool`` class to facilitate submitting tasks to worker processes
    - enhanced serialization, using ``dill``


Current Release
===============

The latest released version of ``multiprocess`` is available from:

    https://pypi.org/project/multiprocess

``multiprocess`` is distributed under a 3-clause BSD license, and is a fork of ``multiprocessing``.


Development Version
===================

You can get the latest development version with all the shiny new features at:

    https://github.com/uqfoundation

If you have a new contribution, please submit a pull request.


Installation
============

``multiprocess`` can be installed with ``pip``::

    $ pip install multiprocess

For Python 2, a C compiler is required to build the included extension module from source. Python 3 and binary installs do not require a C compiler.


Requirements
============

``multiprocess`` requires:

    - ``python`` (or ``pypy``), **>=3.8**
    - ``setuptools``, **>=42**
    - ``dill``, **>=0.3.8**


Basic Usage
===========

The ``multiprocess.Process`` class follows the API of ``threading.Thread``.
For example ::

    from multiprocess import Process, Queue

    def f(q):
        q.put('hello world')

    if __name__ == '__main__':
        q = Queue()
        p = Process(target=f, args=[q])
        p.start()
        print (q.get())
        p.join()

Synchronization primitives like locks, semaphores and conditions are
available, for example ::

    >>> from multiprocess import Condition
    >>> c = Condition()
    >>> print (c)
    <Condition(<RLock(None, 0)>), 0>
    >>> c.acquire()
    True
    >>> print (c)
    <Condition(<RLock(MainProcess, 1)>), 0>

One can also use a manager to create shared objects either in shared
memory or in a server process, for example ::

    >>> from multiprocess import Manager
    >>> manager = Manager()
    >>> l = manager.list(range(10))
    >>> l.reverse()
    >>> print (l)
    [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
    >>> print (repr(l))
    <Proxy[list] object at 0x00E1B3B0>

Tasks can be offloaded to a pool of worker processes in various ways,
for example ::

    >>> from multiprocess import Pool
    >>> def f(x): return x*x
    ...
    >>> p = Pool(4)
    >>> result = p.map_async(f, range(10))
    >>> print (result.get(timeout=1))
    [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]

When ``dill`` is installed, serialization is extended to most objects,
for example ::

    >>> from multiprocess import Pool
    >>> p = Pool(4)
    >>> print (p.map(lambda x: (lambda y:y**2)(x) + x, xrange(10)))
    [0, 2, 6, 12, 20, 30, 42, 56, 72, 90]


More Information
================

Probably the best way to get started is to look at the documentation at
http://multiprocess.rtfd.io. Also see ``multiprocess.tests`` for scripts that
demonstrate how ``multiprocess`` can be used to leverage multiple processes
to execute Python in parallel. You can run the test suite with
``python -m multiprocess.tests``. As ``multiprocess`` conforms to the
``multiprocessing`` interface, the examples and documentation found at
http://docs.python.org/library/multiprocessing.html also apply to
``multiprocess`` if one will ``import multiprocessing as multiprocess``.
See https://github.com/uqfoundation/multiprocess/tree/master/py3.12/examples
for a set of examples that demonstrate some basic use cases and benchmarking
for running Python code in parallel. Please feel free to submit a ticket on
github, or ask a question on stackoverflow (**@Mike McKerns**). If you would
like to share how you use ``multiprocess`` in your work, please send an email
(to **mmckerns at uqfoundation dot org**).


Citation
========

If you use ``multiprocess`` to do research that leads to publication, we ask that you
acknowledge use of ``multiprocess`` by citing the following in your publication::

    M.M. McKerns, L. Strand, T. Sullivan, A. Fang, M.A.G. Aivazis,
    "Building a framework for predictive science", Proceedings of
    the 10th Python in Science Conference, 2011;
    http://arxiv.org/pdf/1202.1056

    Michael McKerns and Michael Aivazis,
    "pathos: a framework for heterogeneous computing", 2010- ;
    https://uqfoundation.github.io/project/pathos

Please see https://uqfoundation.github.io/project/pathos or
http://arxiv.org/pdf/1202.1056 for further information.

'''

__all__ = []
__version__ = '0.70.16'
__author__ = 'Mike McKerns'

__license__ = '''
Copyright (c) 2008-2016 California Institute of Technology.
Copyright (c) 2016-2024 The Uncertainty Quantification Foundation.
All rights reserved.

This software forks the python package "multiprocessing". Licence and
copyright information for multiprocessing can be found in "COPYING".

This software is available subject to the conditions and terms laid
out below. By downloading and using this software you are agreeing
to the following conditions.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:

    - Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.

    - Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

    - Neither the names of the copyright holders nor the names of any of
      the contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

'''
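
The docstring above illustrates dill-backed serialization with a Python 2-era snippet (``xrange``, ``print (...)``). A minimal Python 3 sketch of the same idea, not part of the committed file and assuming only that ``multiprocess`` is installed as described under Installation::

    from multiprocess import Pool

    if __name__ == '__main__':
        with Pool(4) as p:
            # dill lets the pool pickle this lambda; the stdlib
            # multiprocessing pickler would reject it
            print(p.map(lambda x: x * x + x, range(10)))
            # -> [0, 2, 6, 12, 20, 30, 42, 56, 72, 90]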
env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/__info__.cpython-310.pyc ADDED
Binary file (7.91 kB).
env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.42 kB).
env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/connection.cpython-310.pyc ADDED
Binary file (25.7 kB).
env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/context.cpython-310.pyc ADDED
Binary file (12.9 kB).
env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/forkserver.cpython-310.pyc ADDED
Binary file (8.46 kB).
env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/heap.cpython-310.pyc ADDED
Binary file (7.71 kB).
env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/managers.cpython-310.pyc ADDED
Binary file (40.8 kB).
env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/pool.cpython-310.pyc ADDED
Binary file (25.2 kB).
env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/popen_fork.cpython-310.pyc ADDED
Binary file (2.56 kB).
env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/popen_forkserver.cpython-310.pyc ADDED
Binary file (2.5 kB).
env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/popen_spawn_posix.cpython-310.pyc ADDED
Binary file (2.39 kB).
env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/popen_spawn_win32.cpython-310.pyc ADDED
Binary file (3.51 kB).
env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/process.cpython-310.pyc ADDED
Binary file (11.3 kB).
env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/queues.cpython-310.pyc ADDED
Binary file (10.4 kB).
env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/reduction.cpython-310.pyc ADDED
Binary file (8.45 kB).
env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/resource_sharer.cpython-310.pyc ADDED
Binary file (5.34 kB).
env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/resource_tracker.cpython-310.pyc ADDED
Binary file (5.59 kB).
env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/shared_memory.cpython-310.pyc ADDED
Binary file (14.5 kB).
env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/sharedctypes.cpython-310.pyc ADDED
Binary file (7.14 kB).
env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/spawn.cpython-310.pyc ADDED
Binary file (6.77 kB).
env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/synchronize.cpython-310.pyc ADDED
Binary file (11.5 kB).
env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/util.cpython-310.pyc ADDED
Binary file (11.5 kB).
env-llmeval/lib/python3.10/site-packages/multiprocess/context.py ADDED
@@ -0,0 +1,376 @@
import os
import sys
import threading

from . import process
from . import reduction

__all__ = ()

#
# Exceptions
#

class ProcessError(Exception):
    pass

class BufferTooShort(ProcessError):
    pass

class TimeoutError(ProcessError):
    pass

class AuthenticationError(ProcessError):
    pass

#
# Base type for contexts. Bound methods of an instance of this type are included in __all__ of __init__.py
#

class BaseContext(object):

    ProcessError = ProcessError
    BufferTooShort = BufferTooShort
    TimeoutError = TimeoutError
    AuthenticationError = AuthenticationError

    current_process = staticmethod(process.current_process)
    parent_process = staticmethod(process.parent_process)
    active_children = staticmethod(process.active_children)

    def cpu_count(self):
        '''Returns the number of CPUs in the system'''
        num = os.cpu_count()
        if num is None:
            raise NotImplementedError('cannot determine number of cpus')
        else:
            return num

    def Manager(self):
        '''Returns a manager associated with a running server process

        The managers methods such as `Lock()`, `Condition()` and `Queue()`
        can be used to create shared objects.
        '''
        from .managers import SyncManager
        m = SyncManager(ctx=self.get_context())
        m.start()
        return m

    def Pipe(self, duplex=True):
        '''Returns two connection object connected by a pipe'''
        from .connection import Pipe
        return Pipe(duplex)

    def Lock(self):
        '''Returns a non-recursive lock object'''
        from .synchronize import Lock
        return Lock(ctx=self.get_context())

    def RLock(self):
        '''Returns a recursive lock object'''
        from .synchronize import RLock
        return RLock(ctx=self.get_context())

    def Condition(self, lock=None):
        '''Returns a condition object'''
        from .synchronize import Condition
        return Condition(lock, ctx=self.get_context())

    def Semaphore(self, value=1):
        '''Returns a semaphore object'''
        from .synchronize import Semaphore
        return Semaphore(value, ctx=self.get_context())

    def BoundedSemaphore(self, value=1):
        '''Returns a bounded semaphore object'''
        from .synchronize import BoundedSemaphore
        return BoundedSemaphore(value, ctx=self.get_context())

    def Event(self):
        '''Returns an event object'''
        from .synchronize import Event
        return Event(ctx=self.get_context())

    def Barrier(self, parties, action=None, timeout=None):
        '''Returns a barrier object'''
        from .synchronize import Barrier
        return Barrier(parties, action, timeout, ctx=self.get_context())

    def Queue(self, maxsize=0):
        '''Returns a queue object'''
        from .queues import Queue
        return Queue(maxsize, ctx=self.get_context())

    def JoinableQueue(self, maxsize=0):
        '''Returns a queue object'''
        from .queues import JoinableQueue
        return JoinableQueue(maxsize, ctx=self.get_context())

    def SimpleQueue(self):
        '''Returns a queue object'''
        from .queues import SimpleQueue
        return SimpleQueue(ctx=self.get_context())

    def Pool(self, processes=None, initializer=None, initargs=(),
             maxtasksperchild=None):
        '''Returns a process pool object'''
        from .pool import Pool
        return Pool(processes, initializer, initargs, maxtasksperchild,
                    context=self.get_context())

    def RawValue(self, typecode_or_type, *args):
        '''Returns a shared object'''
        from .sharedctypes import RawValue
        return RawValue(typecode_or_type, *args)

    def RawArray(self, typecode_or_type, size_or_initializer):
        '''Returns a shared array'''
        from .sharedctypes import RawArray
        return RawArray(typecode_or_type, size_or_initializer)

    def Value(self, typecode_or_type, *args, lock=True):
        '''Returns a synchronized shared object'''
        from .sharedctypes import Value
        return Value(typecode_or_type, *args, lock=lock,
                     ctx=self.get_context())

    def Array(self, typecode_or_type, size_or_initializer, *, lock=True):
        '''Returns a synchronized shared array'''
        from .sharedctypes import Array
        return Array(typecode_or_type, size_or_initializer, lock=lock,
                     ctx=self.get_context())

    def freeze_support(self):
        '''Check whether this is a fake forked process in a frozen executable.
        If so then run code specified by commandline and exit.
        '''
        if sys.platform == 'win32' and getattr(sys, 'frozen', False):
            from .spawn import freeze_support
            freeze_support()

    def get_logger(self):
        '''Return package logger -- if it does not already exist then
        it is created.
        '''
        from .util import get_logger
        return get_logger()

    def log_to_stderr(self, level=None):
        '''Turn on logging and add a handler which prints to stderr'''
        from .util import log_to_stderr
        return log_to_stderr(level)

    def allow_connection_pickling(self):
        '''Install support for sending connections and sockets
        between processes
        '''
        # This is undocumented. In previous versions of multiprocessing
        # its only effect was to make socket objects inheritable on Windows.
        from . import connection

    def set_executable(self, executable):
        '''Sets the path to a python.exe or pythonw.exe binary used to run
        child processes instead of sys.executable when using the 'spawn'
        start method. Useful for people embedding Python.
        '''
        from .spawn import set_executable
        set_executable(executable)

    def set_forkserver_preload(self, module_names):
        '''Set list of module names to try to load in forkserver process.
        This is really just a hint.
        '''
        from .forkserver import set_forkserver_preload
        set_forkserver_preload(module_names)

    def get_context(self, method=None):
        if method is None:
            return self
        try:
            ctx = _concrete_contexts[method]
        except KeyError:
            raise ValueError('cannot find context for %r' % method) from None
        ctx._check_available()
        return ctx

    def get_start_method(self, allow_none=False):
        return self._name

    def set_start_method(self, method, force=False):
        raise ValueError('cannot set start method of concrete context')

    @property
    def reducer(self):
        '''Controls how objects will be reduced to a form that can be
        shared with other processes.'''
        return globals().get('reduction')

    @reducer.setter
    def reducer(self, reduction):
        globals()['reduction'] = reduction

    def _check_available(self):
        pass

#
# Type of default context -- underlying context can be set at most once
#

class Process(process.BaseProcess):
    _start_method = None
    @staticmethod
    def _Popen(process_obj):
        return _default_context.get_context().Process._Popen(process_obj)

    @staticmethod
    def _after_fork():
        return _default_context.get_context().Process._after_fork()

class DefaultContext(BaseContext):
    Process = Process

    def __init__(self, context):
        self._default_context = context
        self._actual_context = None

    def get_context(self, method=None):
        if method is None:
            if self._actual_context is None:
                self._actual_context = self._default_context
            return self._actual_context
        else:
            return super().get_context(method)

    def set_start_method(self, method, force=False):
        if self._actual_context is not None and not force:
            raise RuntimeError('context has already been set')
        if method is None and force:
            self._actual_context = None
            return
        self._actual_context = self.get_context(method)

    def get_start_method(self, allow_none=False):
        if self._actual_context is None:
            if allow_none:
                return None
            self._actual_context = self._default_context
        return self._actual_context._name

    def get_all_start_methods(self):
        if sys.platform == 'win32':
            return ['spawn']
        else:
            methods = ['spawn', 'fork'] if sys.platform == 'darwin' else ['fork', 'spawn']
            if reduction.HAVE_SEND_HANDLE:
                methods.append('forkserver')
            return methods


#
# Context types for fixed start method
#

if sys.platform != 'win32':

    class ForkProcess(process.BaseProcess):
        _start_method = 'fork'
        @staticmethod
        def _Popen(process_obj):
            from .popen_fork import Popen
            return Popen(process_obj)

    class SpawnProcess(process.BaseProcess):
        _start_method = 'spawn'
        @staticmethod
        def _Popen(process_obj):
            from .popen_spawn_posix import Popen
            return Popen(process_obj)

        @staticmethod
        def _after_fork():
            # process is spawned, nothing to do
            pass

    class ForkServerProcess(process.BaseProcess):
        _start_method = 'forkserver'
        @staticmethod
        def _Popen(process_obj):
            from .popen_forkserver import Popen
            return Popen(process_obj)

    class ForkContext(BaseContext):
        _name = 'fork'
        Process = ForkProcess

    class SpawnContext(BaseContext):
        _name = 'spawn'
        Process = SpawnProcess

    class ForkServerContext(BaseContext):
        _name = 'forkserver'
        Process = ForkServerProcess
        def _check_available(self):
            if not reduction.HAVE_SEND_HANDLE:
                raise ValueError('forkserver start method not available')

    _concrete_contexts = {
        'fork': ForkContext(),
        'spawn': SpawnContext(),
        'forkserver': ForkServerContext(),
    }
    if sys.platform == 'darwin':
        # bpo-33725: running arbitrary code after fork() is no longer reliable
        # on macOS since macOS 10.14 (Mojave). Use spawn by default instead.
        _default_context = DefaultContext(_concrete_contexts['fork']) #FIXME: spawn
    else:
        _default_context = DefaultContext(_concrete_contexts['fork'])

else:

    class SpawnProcess(process.BaseProcess):
        _start_method = 'spawn'
        @staticmethod
        def _Popen(process_obj):
            from .popen_spawn_win32 import Popen
            return Popen(process_obj)

        @staticmethod
        def _after_fork():
            # process is spawned, nothing to do
            pass

    class SpawnContext(BaseContext):
        _name = 'spawn'
        Process = SpawnProcess

    _concrete_contexts = {
        'spawn': SpawnContext(),
    }
    _default_context = DefaultContext(_concrete_contexts['spawn'])

#
# Force the start method
#

def _force_start_method(method):
    _default_context._actual_context = _concrete_contexts[method]

#
# Check that the current thread is spawning a child process
#

_tls = threading.local()

def get_spawning_popen():
    return getattr(_tls, 'spawning_popen', None)

def set_spawning_popen(popen):
    _tls.spawning_popen = popen

def assert_spawning(obj):
    if get_spawning_popen() is None:
        raise RuntimeError(
            '%s objects should only be shared between processes'
            ' through inheritance' % type(obj).__name__
            )
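
``context.py`` above provides the start-method machinery: ``BaseContext.get_context``, the concrete ``fork``/``spawn``/``forkserver`` contexts, and ``DefaultContext``. A brief usage sketch, not part of the commit and assuming the installed package re-exports ``get_context`` at the top level the way ``multiprocessing`` does::

    import multiprocess as mp

    def work(x):
        return x + 1

    if __name__ == '__main__':
        ctx = mp.get_context('spawn')         # SpawnContext from _concrete_contexts
        print(ctx.get_start_method())         # 'spawn'
        with ctx.Pool(2) as pool:
            print(pool.map(work, [1, 2, 3]))  # [2, 3, 4]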
env-llmeval/lib/python3.10/site-packages/multiprocess/dummy/__init__.py ADDED
@@ -0,0 +1,126 @@
#
# Support for the API of the multiprocessing package using threads
#
# multiprocessing/dummy/__init__.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#

__all__ = [
    'Process', 'current_process', 'active_children', 'freeze_support',
    'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition',
    'Event', 'Barrier', 'Queue', 'Manager', 'Pipe', 'Pool', 'JoinableQueue'
    ]

#
# Imports
#

import threading
import sys
import weakref
import array

from .connection import Pipe
from threading import Lock, RLock, Semaphore, BoundedSemaphore
from threading import Event, Condition, Barrier
from queue import Queue

#
#
#

class DummyProcess(threading.Thread):

    def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):
        threading.Thread.__init__(self, group, target, name, args, kwargs)
        self._pid = None
        self._children = weakref.WeakKeyDictionary()
        self._start_called = False
        self._parent = current_process()

    def start(self):
        if self._parent is not current_process():
            raise RuntimeError(
                "Parent is {0!r} but current_process is {1!r}".format(
                    self._parent, current_process()))
        self._start_called = True
        if hasattr(self._parent, '_children'):
            self._parent._children[self] = None
        threading.Thread.start(self)

    @property
    def exitcode(self):
        if self._start_called and not self.is_alive():
            return 0
        else:
            return None

#
#
#

Process = DummyProcess
current_process = threading.current_thread
current_process()._children = weakref.WeakKeyDictionary()

def active_children():
    children = current_process()._children
    for p in list(children):
        if not p.is_alive():
            children.pop(p, None)
    return list(children)

def freeze_support():
    pass

#
#
#

class Namespace(object):
    def __init__(self, /, **kwds):
        self.__dict__.update(kwds)
    def __repr__(self):
        items = list(self.__dict__.items())
        temp = []
        for name, value in items:
            if not name.startswith('_'):
                temp.append('%s=%r' % (name, value))
        temp.sort()
        return '%s(%s)' % (self.__class__.__name__, ', '.join(temp))

dict = dict
list = list

def Array(typecode, sequence, lock=True):
    return array.array(typecode, sequence)

class Value(object):
    def __init__(self, typecode, value, lock=True):
        self._typecode = typecode
        self._value = value

    @property
    def value(self):
        return self._value

    @value.setter
    def value(self, value):
        self._value = value

    def __repr__(self):
        return '<%s(%r, %r)>'%(type(self).__name__,self._typecode,self._value)

def Manager():
    return sys.modules[__name__]

def shutdown():
    pass

def Pool(processes=None, initializer=None, initargs=()):
    from ..pool import ThreadPool
    return ThreadPool(processes, initializer, initargs)

JoinableQueue = Queue
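
``dummy/__init__.py`` above maps the ``multiprocessing`` API onto threads: ``Process`` is a ``threading.Thread`` subclass and ``Pool`` returns a ``ThreadPool``. A small sketch of that drop-in thread pool, not part of the commit and assuming the package is installed::

    from multiprocess.dummy import Pool as ThreadPool

    def double(x):
        return 2 * x

    if __name__ == '__main__':
        # threads, not processes, so the target needs no pickling
        with ThreadPool(4) as tp:
            print(tp.map(double, range(5)))  # [0, 2, 4, 6, 8]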
env-llmeval/lib/python3.10/site-packages/multiprocess/dummy/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (3.91 kB).
env-llmeval/lib/python3.10/site-packages/multiprocess/dummy/__pycache__/connection.cpython-310.pyc ADDED
Binary file (2.61 kB).
env-llmeval/lib/python3.10/site-packages/multiprocess/dummy/connection.py ADDED
@@ -0,0 +1,75 @@
#
# Analogue of `multiprocessing.connection` which uses queues instead of sockets
#
# multiprocessing/dummy/connection.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#

__all__ = [ 'Client', 'Listener', 'Pipe' ]

from queue import Queue


families = [None]


class Listener(object):

    def __init__(self, address=None, family=None, backlog=1):
        self._backlog_queue = Queue(backlog)

    def accept(self):
        return Connection(*self._backlog_queue.get())

    def close(self):
        self._backlog_queue = None

    @property
    def address(self):
        return self._backlog_queue

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.close()


def Client(address):
    _in, _out = Queue(), Queue()
    address.put((_out, _in))
    return Connection(_in, _out)


def Pipe(duplex=True):
    a, b = Queue(), Queue()
    return Connection(a, b), Connection(b, a)


class Connection(object):

    def __init__(self, _in, _out):
        self._out = _out
        self._in = _in
        self.send = self.send_bytes = _out.put
        self.recv = self.recv_bytes = _in.get

    def poll(self, timeout=0.0):
        if self._in.qsize() > 0:
            return True
        if timeout <= 0.0:
            return False
        with self._in.not_empty:
            self._in.not_empty.wait(timeout)
        return self._in.qsize() > 0

    def close(self):
        pass

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.close()
env-llmeval/lib/python3.10/site-packages/multiprocess/forkserver.py ADDED
@@ -0,0 +1,347 @@
import errno
import os
import selectors
import signal
import socket
import struct
import sys
import threading
import warnings

from . import connection
from . import process
from .context import reduction
from . import resource_tracker
from . import spawn
from . import util

__all__ = ['ensure_running', 'get_inherited_fds', 'connect_to_new_process',
           'set_forkserver_preload']

#
#
#

MAXFDS_TO_SEND = 256
SIGNED_STRUCT = struct.Struct('q')     # large enough for pid_t

#
# Forkserver class
#

class ForkServer(object):

    def __init__(self):
        self._forkserver_address = None
        self._forkserver_alive_fd = None
        self._forkserver_pid = None
        self._inherited_fds = None
        self._lock = threading.Lock()
        self._preload_modules = ['__main__']

    def _stop(self):
        # Method used by unit tests to stop the server
        with self._lock:
            self._stop_unlocked()

    def _stop_unlocked(self):
        if self._forkserver_pid is None:
            return

        # close the "alive" file descriptor asks the server to stop
        os.close(self._forkserver_alive_fd)
        self._forkserver_alive_fd = None

        os.waitpid(self._forkserver_pid, 0)
        self._forkserver_pid = None

        if not util.is_abstract_socket_namespace(self._forkserver_address):
            os.unlink(self._forkserver_address)
        self._forkserver_address = None

    def set_forkserver_preload(self, modules_names):
        '''Set list of module names to try to load in forkserver process.'''
        if not all(type(mod) is str for mod in self._preload_modules):
            raise TypeError('module_names must be a list of strings')
        self._preload_modules = modules_names

    def get_inherited_fds(self):
        '''Return list of fds inherited from parent process.

        This returns None if the current process was not started by fork
        server.
        '''
        return self._inherited_fds

    def connect_to_new_process(self, fds):
        '''Request forkserver to create a child process.

        Returns a pair of fds (status_r, data_w). The calling process can read
        the child process's pid and (eventually) its returncode from status_r.
        The calling process should write to data_w the pickled preparation and
        process data.
        '''
        self.ensure_running()
        if len(fds) + 4 >= MAXFDS_TO_SEND:
            raise ValueError('too many fds')
        with socket.socket(socket.AF_UNIX) as client:
            client.connect(self._forkserver_address)
            parent_r, child_w = os.pipe()
            child_r, parent_w = os.pipe()
            allfds = [child_r, child_w, self._forkserver_alive_fd,
                      resource_tracker.getfd()]
            allfds += fds
            try:
                reduction.sendfds(client, allfds)
                return parent_r, parent_w
            except:
                os.close(parent_r)
                os.close(parent_w)
                raise
            finally:
                os.close(child_r)
                os.close(child_w)

    def ensure_running(self):
        '''Make sure that a fork server is running.

        This can be called from any process. Note that usually a child
        process will just reuse the forkserver started by its parent, so
        ensure_running() will do nothing.
        '''
        with self._lock:
            resource_tracker.ensure_running()
            if self._forkserver_pid is not None:
                # forkserver was launched before, is it still running?
                pid, status = os.waitpid(self._forkserver_pid, os.WNOHANG)
                if not pid:
                    # still alive
                    return
                # dead, launch it again
                os.close(self._forkserver_alive_fd)
                self._forkserver_address = None
                self._forkserver_alive_fd = None
                self._forkserver_pid = None

            cmd = ('from multiprocess.forkserver import main; ' +
                   'main(%d, %d, %r, **%r)')

            if self._preload_modules:
                desired_keys = {'main_path', 'sys_path'}
                data = spawn.get_preparation_data('ignore')
                data = {x: y for x, y in data.items() if x in desired_keys}
            else:
                data = {}

            with socket.socket(socket.AF_UNIX) as listener:
                address = connection.arbitrary_address('AF_UNIX')
                listener.bind(address)
                if not util.is_abstract_socket_namespace(address):
                    os.chmod(address, 0o600)
                listener.listen()

                # all client processes own the write end of the "alive" pipe;
                # when they all terminate the read end becomes ready.
                alive_r, alive_w = os.pipe()
                try:
                    fds_to_pass = [listener.fileno(), alive_r]
                    cmd %= (listener.fileno(), alive_r, self._preload_modules,
                            data)
                    exe = spawn.get_executable()
                    args = [exe] + util._args_from_interpreter_flags()
                    args += ['-c', cmd]
                    pid = util.spawnv_passfds(exe, args, fds_to_pass)
                except:
                    os.close(alive_w)
                    raise
                finally:
                    os.close(alive_r)
                self._forkserver_address = address
                self._forkserver_alive_fd = alive_w
                self._forkserver_pid = pid

#
#
#

def main(listener_fd, alive_r, preload, main_path=None, sys_path=None):
    '''Run forkserver.'''
    if preload:
        if '__main__' in preload and main_path is not None:
            process.current_process()._inheriting = True
            try:
                spawn.import_main_path(main_path)
            finally:
                del process.current_process()._inheriting
        for modname in preload:
            try:
                __import__(modname)
            except ImportError:
                pass

    util._close_stdin()

    sig_r, sig_w = os.pipe()
    os.set_blocking(sig_r, False)
    os.set_blocking(sig_w, False)

    def sigchld_handler(*_unused):
        # Dummy signal handler, doesn't do anything
        pass

    handlers = {
        # unblocking SIGCHLD allows the wakeup fd to notify our event loop
        signal.SIGCHLD: sigchld_handler,
        # protect the process from ^C
        signal.SIGINT: signal.SIG_IGN,
        }
    old_handlers = {sig: signal.signal(sig, val)
                    for (sig, val) in handlers.items()}

    # calling os.write() in the Python signal handler is racy
    signal.set_wakeup_fd(sig_w)

    # map child pids to client fds
    pid_to_fd = {}

    with socket.socket(socket.AF_UNIX, fileno=listener_fd) as listener, \
         selectors.DefaultSelector() as selector:
        _forkserver._forkserver_address = listener.getsockname()

        selector.register(listener, selectors.EVENT_READ)
        selector.register(alive_r, selectors.EVENT_READ)
        selector.register(sig_r, selectors.EVENT_READ)

        while True:
            try:
                while True:
                    rfds = [key.fileobj for (key, events) in selector.select()]
                    if rfds:
                        break

                if alive_r in rfds:
                    # EOF because no more client processes left
                    assert os.read(alive_r, 1) == b'', "Not at EOF?"
                    raise SystemExit

                if sig_r in rfds:
                    # Got SIGCHLD
                    os.read(sig_r, 65536)  # exhaust
                    while True:
                        # Scan for child processes
                        try:
                            pid, sts = os.waitpid(-1, os.WNOHANG)
                        except ChildProcessError:
                            break
                        if pid == 0:
                            break
                        child_w = pid_to_fd.pop(pid, None)
                        if child_w is not None:
                            returncode = os.waitstatus_to_exitcode(sts)
                            # Send exit code to client process
                            try:
                                write_signed(child_w, returncode)
                            except BrokenPipeError:
                                # client vanished
                                pass
                            os.close(child_w)
                        else:
                            # This shouldn't happen really
                            warnings.warn('forkserver: waitpid returned '
                                          'unexpected pid %d' % pid)

                if listener in rfds:
                    # Incoming fork request
                    with listener.accept()[0] as s:
                        # Receive fds from client
                        fds = reduction.recvfds(s, MAXFDS_TO_SEND + 1)
                        if len(fds) > MAXFDS_TO_SEND:
                            raise RuntimeError(
                                "Too many ({0:n}) fds to send".format(
                                    len(fds)))
                        child_r, child_w, *fds = fds
                        s.close()
                        pid = os.fork()
                        if pid == 0:
                            # Child
                            code = 1
                            try:
                                listener.close()
                                selector.close()
                                unused_fds = [alive_r, child_w, sig_r, sig_w]
                                unused_fds.extend(pid_to_fd.values())
                                code = _serve_one(child_r, fds,
                                                  unused_fds,
                                                  old_handlers)
                            except Exception:
                                sys.excepthook(*sys.exc_info())
                                sys.stderr.flush()
                            finally:
                                os._exit(code)
                        else:
                            # Send pid to client process
                            try:
                                write_signed(child_w, pid)
                            except BrokenPipeError:
                                # client vanished
                                pass
                            pid_to_fd[pid] = child_w
                            os.close(child_r)
                            for fd in fds:
                                os.close(fd)

            except OSError as e:
                if e.errno != errno.ECONNABORTED:
                    raise


def _serve_one(child_r, fds, unused_fds, handlers):
    # close unnecessary stuff and reset signal handlers
    signal.set_wakeup_fd(-1)
    for sig, val in handlers.items():
        signal.signal(sig, val)
    for fd in unused_fds:
        os.close(fd)

    (_forkserver._forkserver_alive_fd,
     resource_tracker._resource_tracker._fd,
     *_forkserver._inherited_fds) = fds

    # Run process object received over pipe
    parent_sentinel = os.dup(child_r)
    code = spawn._main(child_r, parent_sentinel)

    return code


#
# Read and write signed numbers
#

def read_signed(fd):
    data = b''
    length = SIGNED_STRUCT.size
    while len(data) < length:
        s = os.read(fd, length - len(data))
        if not s:
            raise EOFError('unexpected EOF')
        data += s
    return SIGNED_STRUCT.unpack(data)[0]

def write_signed(fd, n):
    msg = SIGNED_STRUCT.pack(n)
    while msg:
        nbytes = os.write(fd, msg)
        if nbytes == 0:
            raise RuntimeError('should not get here')
        msg = msg[nbytes:]

#
#
#

_forkserver = ForkServer()
ensure_running = _forkserver.ensure_running
get_inherited_fds = _forkserver.get_inherited_fds
connect_to_new_process = _forkserver.connect_to_new_process
set_forkserver_preload = _forkserver.set_forkserver_preload
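
``forkserver.py`` above implements the server process behind the ``forkserver`` start method and ``set_forkserver_preload``. A short sketch of how those entry points are reached through a context, not part of the commit and assuming a POSIX platform where ``reduction.HAVE_SEND_HANDLE`` is true::

    import multiprocess as mp

    def task(n):
        return n * n

    if __name__ == '__main__':
        ctx = mp.get_context('forkserver')        # ForkServerContext (POSIX only)
        ctx.set_forkserver_preload(['__main__'])  # hint handled by ForkServer above
        with ctx.Pool(2) as pool:
            print(pool.map(task, range(4)))       # [0, 1, 4, 9]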
env-llmeval/lib/python3.10/site-packages/multiprocess/managers.py ADDED
@@ -0,0 +1,1378 @@
1
+ #
2
+ # Module providing manager classes for dealing
3
+ # with shared objects
4
+ #
5
+ # multiprocessing/managers.py
6
+ #
7
+ # Copyright (c) 2006-2008, R Oudkerk
8
+ # Licensed to PSF under a Contributor Agreement.
9
+ #
10
+
11
+ __all__ = [ 'BaseManager', 'SyncManager', 'BaseProxy', 'Token' ]
12
+
13
+ #
14
+ # Imports
15
+ #
16
+
17
+ import sys
18
+ import threading
19
+ import signal
20
+ import array
21
+ import queue
22
+ import time
23
+ import types
24
+ import os
25
+ from os import getpid
26
+
27
+ from traceback import format_exc
28
+
29
+ from . import connection
30
+ from .context import reduction, get_spawning_popen, ProcessError
31
+ from . import pool
32
+ from . import process
33
+ from . import util
34
+ from . import get_context
35
+ try:
36
+ from . import shared_memory
37
+ except ImportError:
38
+ HAS_SHMEM = False
39
+ else:
40
+ HAS_SHMEM = True
41
+ __all__.append('SharedMemoryManager')
42
+
43
+ #
44
+ # Register some things for pickling
45
+ #
46
+
47
+ def reduce_array(a):
48
+ return array.array, (a.typecode, a.tobytes())
49
+ reduction.register(array.array, reduce_array)
50
+
51
+ view_types = [type(getattr({}, name)()) for name in ('items','keys','values')]
52
+ if view_types[0] is not list: # only needed in Py3.0
53
+ def rebuild_as_list(obj):
54
+ return list, (list(obj),)
55
+ for view_type in view_types:
56
+ reduction.register(view_type, rebuild_as_list)
57
+
58
+ #
59
+ # Type for identifying shared objects
60
+ #
61
+
62
+ class Token(object):
63
+ '''
64
+ Type to uniquely identify a shared object
65
+ '''
66
+ __slots__ = ('typeid', 'address', 'id')
67
+
68
+ def __init__(self, typeid, address, id):
69
+ (self.typeid, self.address, self.id) = (typeid, address, id)
70
+
71
+ def __getstate__(self):
72
+ return (self.typeid, self.address, self.id)
73
+
74
+ def __setstate__(self, state):
75
+ (self.typeid, self.address, self.id) = state
76
+
77
+ def __repr__(self):
78
+ return '%s(typeid=%r, address=%r, id=%r)' % \
79
+ (self.__class__.__name__, self.typeid, self.address, self.id)
80
+
81
+ #
82
+ # Function for communication with a manager's server process
83
+ #
84
+
85
+ def dispatch(c, id, methodname, args=(), kwds={}):
86
+ '''
87
+ Send a message to manager using connection `c` and return response
88
+ '''
89
+ c.send((id, methodname, args, kwds))
90
+ kind, result = c.recv()
91
+ if kind == '#RETURN':
92
+ return result
93
+ raise convert_to_error(kind, result)
94
+
95
+ def convert_to_error(kind, result):
96
+ if kind == '#ERROR':
97
+ return result
98
+ elif kind in ('#TRACEBACK', '#UNSERIALIZABLE'):
99
+ if not isinstance(result, str):
100
+ raise TypeError(
101
+ "Result {0!r} (kind '{1}') type is {2}, not str".format(
102
+ result, kind, type(result)))
103
+ if kind == '#UNSERIALIZABLE':
104
+ return RemoteError('Unserializable message: %s\n' % result)
105
+ else:
106
+ return RemoteError(result)
107
+ else:
108
+ return ValueError('Unrecognized message type {!r}'.format(kind))
109
+
110
+ class RemoteError(Exception):
111
+ def __str__(self):
112
+ return ('\n' + '-'*75 + '\n' + str(self.args[0]) + '-'*75)
113
+
114
+ #
115
+ # Functions for finding the method names of an object
116
+ #
117
+
118
+ def all_methods(obj):
119
+ '''
120
+ Return a list of names of methods of `obj`
121
+ '''
122
+ temp = []
123
+ for name in dir(obj):
124
+ func = getattr(obj, name)
125
+ if callable(func):
126
+ temp.append(name)
127
+ return temp
128
+
129
+ def public_methods(obj):
130
+ '''
131
+ Return a list of names of methods of `obj` which do not start with '_'
132
+ '''
133
+ return [name for name in all_methods(obj) if name[0] != '_']
134
+
135
+ #
136
+ # Server which is run in a process controlled by a manager
137
+ #
138
+
139
+ class Server(object):
140
+ '''
141
+ Server class which runs in a process controlled by a manager object
142
+ '''
143
+ public = ['shutdown', 'create', 'accept_connection', 'get_methods',
144
+ 'debug_info', 'number_of_objects', 'dummy', 'incref', 'decref']
145
+
146
+ def __init__(self, registry, address, authkey, serializer):
147
+ if not isinstance(authkey, bytes):
148
+ raise TypeError(
149
+ "Authkey {0!r} is type {1!s}, not bytes".format(
150
+ authkey, type(authkey)))
151
+ self.registry = registry
152
+ self.authkey = process.AuthenticationString(authkey)
153
+ Listener, Client = listener_client[serializer]
154
+
155
+ # do authentication later
156
+ self.listener = Listener(address=address, backlog=16)
157
+ self.address = self.listener.address
158
+
159
+ self.id_to_obj = {'0': (None, ())}
160
+ self.id_to_refcount = {}
161
+ self.id_to_local_proxy_obj = {}
162
+ self.mutex = threading.Lock()
163
+
164
+ def serve_forever(self):
165
+ '''
166
+ Run the server forever
167
+ '''
168
+ self.stop_event = threading.Event()
169
+ process.current_process()._manager_server = self
170
+ try:
171
+ accepter = threading.Thread(target=self.accepter)
172
+ accepter.daemon = True
173
+ accepter.start()
174
+ try:
175
+ while not self.stop_event.is_set():
176
+ self.stop_event.wait(1)
177
+ except (KeyboardInterrupt, SystemExit):
178
+ pass
179
+ finally:
180
+ if sys.stdout != sys.__stdout__: # what about stderr?
181
+ util.debug('resetting stdout, stderr')
182
+ sys.stdout = sys.__stdout__
183
+ sys.stderr = sys.__stderr__
184
+ sys.exit(0)
185
+
186
+ def accepter(self):
187
+ while True:
188
+ try:
189
+ c = self.listener.accept()
190
+ except OSError:
191
+ continue
192
+ t = threading.Thread(target=self.handle_request, args=(c,))
193
+ t.daemon = True
194
+ t.start()
195
+
196
+ def _handle_request(self, c):
197
+ request = None
198
+ try:
199
+ connection.deliver_challenge(c, self.authkey)
200
+ connection.answer_challenge(c, self.authkey)
201
+ request = c.recv()
202
+ ignore, funcname, args, kwds = request
203
+ assert funcname in self.public, '%r unrecognized' % funcname
204
+ func = getattr(self, funcname)
205
+ except Exception:
206
+ msg = ('#TRACEBACK', format_exc())
207
+ else:
208
+ try:
209
+ result = func(c, *args, **kwds)
210
+ except Exception:
211
+ msg = ('#TRACEBACK', format_exc())
212
+ else:
213
+ msg = ('#RETURN', result)
214
+
215
+ try:
216
+ c.send(msg)
217
+ except Exception as e:
218
+ try:
219
+ c.send(('#TRACEBACK', format_exc()))
220
+ except Exception:
221
+ pass
222
+ util.info('Failure to send message: %r', msg)
223
+ util.info(' ... request was %r', request)
224
+ util.info(' ... exception was %r', e)
225
+
226
+ def handle_request(self, conn):
227
+ '''
228
+ Handle a new connection
229
+ '''
230
+ try:
231
+ self._handle_request(conn)
232
+ except SystemExit:
233
+ # Server.serve_client() calls sys.exit(0) on EOF
234
+ pass
235
+ finally:
236
+ conn.close()
237
+
238
+ def serve_client(self, conn):
239
+ '''
240
+ Handle requests from the proxies in a particular process/thread
241
+ '''
242
+ util.debug('starting server thread to service %r',
243
+ threading.current_thread().name)
244
+
245
+ recv = conn.recv
246
+ send = conn.send
247
+ id_to_obj = self.id_to_obj
248
+
249
+ while not self.stop_event.is_set():
250
+
251
+ try:
252
+ methodname = obj = None
253
+ request = recv()
254
+ ident, methodname, args, kwds = request
255
+ try:
256
+ obj, exposed, gettypeid = id_to_obj[ident]
257
+ except KeyError as ke:
258
+ try:
259
+ obj, exposed, gettypeid = \
260
+ self.id_to_local_proxy_obj[ident]
261
+ except KeyError:
262
+ raise ke
263
+
264
+ if methodname not in exposed:
265
+ raise AttributeError(
266
+ 'method %r of %r object is not in exposed=%r' %
267
+ (methodname, type(obj), exposed)
268
+ )
269
+
270
+ function = getattr(obj, methodname)
271
+
272
+ try:
273
+ res = function(*args, **kwds)
274
+ except Exception as e:
275
+ msg = ('#ERROR', e)
276
+ else:
277
+ typeid = gettypeid and gettypeid.get(methodname, None)
278
+ if typeid:
279
+ rident, rexposed = self.create(conn, typeid, res)
280
+ token = Token(typeid, self.address, rident)
281
+ msg = ('#PROXY', (rexposed, token))
282
+ else:
283
+ msg = ('#RETURN', res)
284
+
285
+ except AttributeError:
286
+ if methodname is None:
287
+ msg = ('#TRACEBACK', format_exc())
288
+ else:
289
+ try:
290
+ fallback_func = self.fallback_mapping[methodname]
291
+ result = fallback_func(
292
+ self, conn, ident, obj, *args, **kwds
293
+ )
294
+ msg = ('#RETURN', result)
295
+ except Exception:
296
+ msg = ('#TRACEBACK', format_exc())
297
+
298
+ except EOFError:
299
+ util.debug('got EOF -- exiting thread serving %r',
300
+ threading.current_thread().name)
301
+ sys.exit(0)
302
+
303
+ except Exception:
304
+ msg = ('#TRACEBACK', format_exc())
305
+
306
+ try:
307
+ try:
308
+ send(msg)
309
+ except Exception:
310
+ send(('#UNSERIALIZABLE', format_exc()))
311
+ except Exception as e:
312
+ util.info('exception in thread serving %r',
313
+ threading.current_thread().name)
314
+ util.info(' ... message was %r', msg)
315
+ util.info(' ... exception was %r', e)
316
+ conn.close()
317
+ sys.exit(1)
318
+
319
+ def fallback_getvalue(self, conn, ident, obj):
320
+ return obj
321
+
322
+ def fallback_str(self, conn, ident, obj):
323
+ return str(obj)
324
+
325
+ def fallback_repr(self, conn, ident, obj):
326
+ return repr(obj)
327
+
328
+ fallback_mapping = {
329
+ '__str__':fallback_str,
330
+ '__repr__':fallback_repr,
331
+ '#GETVALUE':fallback_getvalue
332
+ }
333
+
334
+ def dummy(self, c):
335
+ pass
336
+
337
+ def debug_info(self, c):
338
+ '''
339
+ Return some info --- useful to spot problems with refcounting
340
+ '''
341
+ # Perhaps include debug info about 'c'?
342
+ with self.mutex:
343
+ result = []
344
+ keys = list(self.id_to_refcount.keys())
345
+ keys.sort()
346
+ for ident in keys:
347
+ if ident != '0':
348
+ result.append(' %s: refcount=%s\n %s' %
349
+ (ident, self.id_to_refcount[ident],
350
+ str(self.id_to_obj[ident][0])[:75]))
351
+ return '\n'.join(result)
352
+
353
+ def number_of_objects(self, c):
354
+ '''
355
+ Number of shared objects
356
+ '''
357
+ # Doesn't use (len(self.id_to_obj) - 1) as we shouldn't count ident='0'
358
+ return len(self.id_to_refcount)
359
+
360
+ def shutdown(self, c):
361
+ '''
362
+ Shutdown this process
363
+ '''
364
+ try:
365
+ util.debug('manager received shutdown message')
366
+ c.send(('#RETURN', None))
367
+ except:
368
+ import traceback
369
+ traceback.print_exc()
370
+ finally:
371
+ self.stop_event.set()
372
+
373
+ def create(self, c, typeid, /, *args, **kwds):
374
+ '''
375
+ Create a new shared object and return its id
376
+ '''
377
+ with self.mutex:
378
+ callable, exposed, method_to_typeid, proxytype = \
379
+ self.registry[typeid]
380
+
381
+ if callable is None:
382
+ if kwds or (len(args) != 1):
383
+ raise ValueError(
384
+ "Without callable, must have one non-keyword argument")
385
+ obj = args[0]
386
+ else:
387
+ obj = callable(*args, **kwds)
388
+
389
+ if exposed is None:
390
+ exposed = public_methods(obj)
391
+ if method_to_typeid is not None:
392
+ if not isinstance(method_to_typeid, dict):
393
+ raise TypeError(
394
+ "Method_to_typeid {0!r}: type {1!s}, not dict".format(
395
+ method_to_typeid, type(method_to_typeid)))
396
+ exposed = list(exposed) + list(method_to_typeid)
397
+
398
+ ident = '%x' % id(obj) # convert to string because xmlrpclib
399
+ # only has 32 bit signed integers
400
+ util.debug('%r callable returned object with id %r', typeid, ident)
401
+
402
+ self.id_to_obj[ident] = (obj, set(exposed), method_to_typeid)
403
+ if ident not in self.id_to_refcount:
404
+ self.id_to_refcount[ident] = 0
405
+
406
+ self.incref(c, ident)
407
+ return ident, tuple(exposed)
408
+
409
+ def get_methods(self, c, token):
410
+ '''
411
+ Return the methods of the shared object indicated by token
412
+ '''
413
+ return tuple(self.id_to_obj[token.id][1])
414
+
415
+ def accept_connection(self, c, name):
416
+ '''
417
+ Spawn a new thread to serve this connection
418
+ '''
419
+ threading.current_thread().name = name
420
+ c.send(('#RETURN', None))
421
+ self.serve_client(c)
422
+
423
+ def incref(self, c, ident):
424
+ with self.mutex:
425
+ try:
426
+ self.id_to_refcount[ident] += 1
427
+ except KeyError as ke:
428
+ # If no external references exist but an internal (to the
429
+ # manager) still does and a new external reference is created
430
+ # from it, restore the manager's tracking of it from the
431
+ # previously stashed internal ref.
432
+ if ident in self.id_to_local_proxy_obj:
433
+ self.id_to_refcount[ident] = 1
434
+ self.id_to_obj[ident] = \
435
+ self.id_to_local_proxy_obj[ident]
436
+ obj, exposed, gettypeid = self.id_to_obj[ident]
437
+ util.debug('Server re-enabled tracking & INCREF %r', ident)
438
+ else:
439
+ raise ke
440
+
441
+ def decref(self, c, ident):
442
+ if ident not in self.id_to_refcount and \
443
+ ident in self.id_to_local_proxy_obj:
444
+ util.debug('Server DECREF skipping %r', ident)
445
+ return
446
+
447
+ with self.mutex:
448
+ if self.id_to_refcount[ident] <= 0:
449
+ raise AssertionError(
450
+ "Id {0!s} ({1!r}) has refcount {2:n}, not 1+".format(
451
+ ident, self.id_to_obj[ident],
452
+ self.id_to_refcount[ident]))
453
+ self.id_to_refcount[ident] -= 1
454
+ if self.id_to_refcount[ident] == 0:
455
+ del self.id_to_refcount[ident]
456
+
457
+ if ident not in self.id_to_refcount:
458
+ # Two-step process in case the object turns out to contain other
459
+ # proxy objects (e.g. a managed list of managed lists).
460
+ # Otherwise, deleting self.id_to_obj[ident] would trigger the
461
+ # deleting of the stored value (another managed object) which would
462
+ # in turn attempt to acquire the mutex that is already held here.
463
+ self.id_to_obj[ident] = (None, (), None) # thread-safe
464
+ util.debug('disposing of obj with id %r', ident)
465
+ with self.mutex:
466
+ del self.id_to_obj[ident]
467
+
468
+
469
+ #
470
+ # Class to represent state of a manager
471
+ #
472
+
473
+ class State(object):
474
+ __slots__ = ['value']
475
+ INITIAL = 0
476
+ STARTED = 1
477
+ SHUTDOWN = 2
478
+
479
+ #
480
+ # Mapping from serializer name to Listener and Client types
481
+ #
482
+
483
+ listener_client = { #XXX: register dill?
484
+ 'pickle' : (connection.Listener, connection.Client),
485
+ 'xmlrpclib' : (connection.XmlListener, connection.XmlClient)
486
+ }
487
+
488
+ #
489
+ # Definition of BaseManager
490
+ #
491
+
492
+ class BaseManager(object):
493
+ '''
494
+ Base class for managers
495
+ '''
496
+ _registry = {}
497
+ _Server = Server
498
+
499
+ def __init__(self, address=None, authkey=None, serializer='pickle',
500
+ ctx=None):
501
+ if authkey is None:
502
+ authkey = process.current_process().authkey
503
+ self._address = address # XXX not final address if eg ('', 0)
504
+ self._authkey = process.AuthenticationString(authkey)
505
+ self._state = State()
506
+ self._state.value = State.INITIAL
507
+ self._serializer = serializer
508
+ self._Listener, self._Client = listener_client[serializer]
509
+ self._ctx = ctx or get_context()
510
+
511
+ def get_server(self):
512
+ '''
513
+ Return server object with serve_forever() method and address attribute
514
+ '''
515
+ if self._state.value != State.INITIAL:
516
+ if self._state.value == State.STARTED:
517
+ raise ProcessError("Already started server")
518
+ elif self._state.value == State.SHUTDOWN:
519
+ raise ProcessError("Manager has shut down")
520
+ else:
521
+ raise ProcessError(
522
+ "Unknown state {!r}".format(self._state.value))
523
+ return Server(self._registry, self._address,
524
+ self._authkey, self._serializer)
525
+
526
+ def connect(self):
527
+ '''
528
+ Connect manager object to the server process
529
+ '''
530
+ Listener, Client = listener_client[self._serializer]
531
+ conn = Client(self._address, authkey=self._authkey)
532
+ dispatch(conn, None, 'dummy')
533
+ self._state.value = State.STARTED
534
+
535
+ def start(self, initializer=None, initargs=()):
536
+ '''
537
+ Spawn a server process for this manager object
538
+ '''
539
+ if self._state.value != State.INITIAL:
540
+ if self._state.value == State.STARTED:
541
+ raise ProcessError("Already started server")
542
+ elif self._state.value == State.SHUTDOWN:
543
+ raise ProcessError("Manager has shut down")
544
+ else:
545
+ raise ProcessError(
546
+ "Unknown state {!r}".format(self._state.value))
547
+
548
+ if initializer is not None and not callable(initializer):
549
+ raise TypeError('initializer must be a callable')
550
+
551
+ # pipe over which we will retrieve address of server
552
+ reader, writer = connection.Pipe(duplex=False)
553
+
554
+ # spawn process which runs a server
555
+ self._process = self._ctx.Process(
556
+ target=type(self)._run_server,
557
+ args=(self._registry, self._address, self._authkey,
558
+ self._serializer, writer, initializer, initargs),
559
+ )
560
+ ident = ':'.join(str(i) for i in self._process._identity)
561
+ self._process.name = type(self).__name__ + '-' + ident
562
+ self._process.start()
563
+
564
+ # get address of server
565
+ writer.close()
566
+ self._address = reader.recv()
567
+ reader.close()
568
+
569
+ # register a finalizer
570
+ self._state.value = State.STARTED
571
+ self.shutdown = util.Finalize(
572
+ self, type(self)._finalize_manager,
573
+ args=(self._process, self._address, self._authkey,
574
+ self._state, self._Client),
575
+ exitpriority=0
576
+ )
577
+
578
+ @classmethod
579
+ def _run_server(cls, registry, address, authkey, serializer, writer,
580
+ initializer=None, initargs=()):
581
+ '''
582
+ Create a server, report its address and run it
583
+ '''
584
+ # bpo-36368: protect server process from KeyboardInterrupt signals
585
+ signal.signal(signal.SIGINT, signal.SIG_IGN)
586
+
587
+ if initializer is not None:
588
+ initializer(*initargs)
589
+
590
+ # create server
591
+ server = cls._Server(registry, address, authkey, serializer)
592
+
593
+ # inform parent process of the server's address
594
+ writer.send(server.address)
595
+ writer.close()
596
+
597
+ # run the manager
598
+ util.info('manager serving at %r', server.address)
599
+ server.serve_forever()
600
+
601
+ def _create(self, typeid, /, *args, **kwds):
602
+ '''
603
+ Create a new shared object; return the token and exposed tuple
604
+ '''
605
+ assert self._state.value == State.STARTED, 'server not yet started'
606
+ conn = self._Client(self._address, authkey=self._authkey)
607
+ try:
608
+ id, exposed = dispatch(conn, None, 'create', (typeid,)+args, kwds)
609
+ finally:
610
+ conn.close()
611
+ return Token(typeid, self._address, id), exposed
612
+
613
+ def join(self, timeout=None):
614
+ '''
615
+ Join the manager process (if it has been spawned)
616
+ '''
617
+ if self._process is not None:
618
+ self._process.join(timeout)
619
+ if not self._process.is_alive():
620
+ self._process = None
621
+
622
+ def _debug_info(self):
623
+ '''
624
+ Return some info about the server's shared objects and connections
625
+ '''
626
+ conn = self._Client(self._address, authkey=self._authkey)
627
+ try:
628
+ return dispatch(conn, None, 'debug_info')
629
+ finally:
630
+ conn.close()
631
+
632
+ def _number_of_objects(self):
633
+ '''
634
+ Return the number of shared objects
635
+ '''
636
+ conn = self._Client(self._address, authkey=self._authkey)
637
+ try:
638
+ return dispatch(conn, None, 'number_of_objects')
639
+ finally:
640
+ conn.close()
641
+
642
+ def __enter__(self):
643
+ if self._state.value == State.INITIAL:
644
+ self.start()
645
+ if self._state.value != State.STARTED:
646
+ if self._state.value == State.INITIAL:
647
+ raise ProcessError("Unable to start server")
648
+ elif self._state.value == State.SHUTDOWN:
649
+ raise ProcessError("Manager has shut down")
650
+ else:
651
+ raise ProcessError(
652
+ "Unknown state {!r}".format(self._state.value))
653
+ return self
654
+
655
+ def __exit__(self, exc_type, exc_val, exc_tb):
656
+ self.shutdown()
657
+
658
+ @staticmethod
659
+ def _finalize_manager(process, address, authkey, state, _Client):
660
+ '''
661
+ Shutdown the manager process; will be registered as a finalizer
662
+ '''
663
+ if process.is_alive():
664
+ util.info('sending shutdown message to manager')
665
+ try:
666
+ conn = _Client(address, authkey=authkey)
667
+ try:
668
+ dispatch(conn, None, 'shutdown')
669
+ finally:
670
+ conn.close()
671
+ except Exception:
672
+ pass
673
+
674
+ process.join(timeout=1.0)
675
+ if process.is_alive():
676
+ util.info('manager still alive')
677
+ if hasattr(process, 'terminate'):
678
+ util.info('trying to `terminate()` manager process')
679
+ process.terminate()
680
+ process.join(timeout=1.0)
681
+ if process.is_alive():
682
+ util.info('manager still alive after terminate')
683
+
684
+ state.value = State.SHUTDOWN
685
+ try:
686
+ del BaseProxy._address_to_local[address]
687
+ except KeyError:
688
+ pass
689
+
690
+ @property
691
+ def address(self):
692
+ return self._address
693
+
694
+ @classmethod
695
+ def register(cls, typeid, callable=None, proxytype=None, exposed=None,
696
+ method_to_typeid=None, create_method=True):
697
+ '''
698
+ Register a typeid with the manager type
699
+ '''
700
+ if '_registry' not in cls.__dict__:
701
+ cls._registry = cls._registry.copy()
702
+
703
+ if proxytype is None:
704
+ proxytype = AutoProxy
705
+
706
+ exposed = exposed or getattr(proxytype, '_exposed_', None)
707
+
708
+ method_to_typeid = method_to_typeid or \
709
+ getattr(proxytype, '_method_to_typeid_', None)
710
+
711
+ if method_to_typeid:
712
+ for key, value in list(method_to_typeid.items()): # isinstance?
713
+ assert type(key) is str, '%r is not a string' % key
714
+ assert type(value) is str, '%r is not a string' % value
715
+
716
+ cls._registry[typeid] = (
717
+ callable, exposed, method_to_typeid, proxytype
718
+ )
719
+
720
+ if create_method:
721
+ def temp(self, /, *args, **kwds):
722
+ util.debug('requesting creation of a shared %r object', typeid)
723
+ token, exp = self._create(typeid, *args, **kwds)
724
+ proxy = proxytype(
725
+ token, self._serializer, manager=self,
726
+ authkey=self._authkey, exposed=exp
727
+ )
728
+ conn = self._Client(token.address, authkey=self._authkey)
729
+ dispatch(conn, None, 'decref', (token.id,))
730
+ return proxy
731
+ temp.__name__ = typeid
732
+ setattr(cls, typeid, temp)
733
+
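The register() classmethod above is the main extension point of BaseManager. A minimal sketch of typical use follows (the Calculator and MathManager names are illustrative, not part of this module):

from multiprocess.managers import BaseManager

class Calculator:                        # hypothetical class to be shared between processes
    def add(self, x, y):
        return x + y

class MathManager(BaseManager):          # hypothetical manager subclass
    pass

# register() stores the factory in MathManager._registry and, since
# create_method defaults to True, adds a MathManager.Calculator() method
# returning an AutoProxy for a server-side Calculator instance.
MathManager.register('Calculator', Calculator)

if __name__ == '__main__':
    with MathManager() as manager:       # __enter__ starts the server process
        calc = manager.Calculator()
        assert calc.add(2, 3) == 5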
734
+ #
735
+ # Subclass of set which gets cleared after a fork
736
+ #
737
+
738
+ class ProcessLocalSet(set):
739
+ def __init__(self):
740
+ util.register_after_fork(self, lambda obj: obj.clear())
741
+ def __reduce__(self):
742
+ return type(self), ()
743
+
744
+ #
745
+ # Definition of BaseProxy
746
+ #
747
+
748
+ class BaseProxy(object):
749
+ '''
750
+ A base for proxies of shared objects
751
+ '''
752
+ _address_to_local = {}
753
+ _mutex = util.ForkAwareThreadLock()
754
+
755
+ def __init__(self, token, serializer, manager=None,
756
+ authkey=None, exposed=None, incref=True, manager_owned=False):
757
+ with BaseProxy._mutex:
758
+ tls_idset = BaseProxy._address_to_local.get(token.address, None)
759
+ if tls_idset is None:
760
+ tls_idset = util.ForkAwareLocal(), ProcessLocalSet()
761
+ BaseProxy._address_to_local[token.address] = tls_idset
762
+
763
+ # self._tls is used to record the connection used by this
764
+ # thread to communicate with the manager at token.address
765
+ self._tls = tls_idset[0]
766
+
767
+ # self._idset is used to record the identities of all shared
768
+ # objects for which the current process owns references and
769
+ # which are in the manager at token.address
770
+ self._idset = tls_idset[1]
771
+
772
+ self._token = token
773
+ self._id = self._token.id
774
+ self._manager = manager
775
+ self._serializer = serializer
776
+ self._Client = listener_client[serializer][1]
777
+
778
+ # Should be set to True only when a proxy object is being created
779
+ # on the manager server; primary use case: nested proxy objects.
780
+ # RebuildProxy detects when a proxy is being created on the manager
781
+ # and sets this value appropriately.
782
+ self._owned_by_manager = manager_owned
783
+
784
+ if authkey is not None:
785
+ self._authkey = process.AuthenticationString(authkey)
786
+ elif self._manager is not None:
787
+ self._authkey = self._manager._authkey
788
+ else:
789
+ self._authkey = process.current_process().authkey
790
+
791
+ if incref:
792
+ self._incref()
793
+
794
+ util.register_after_fork(self, BaseProxy._after_fork)
795
+
796
+ def _connect(self):
797
+ util.debug('making connection to manager')
798
+ name = process.current_process().name
799
+ if threading.current_thread().name != 'MainThread':
800
+ name += '|' + threading.current_thread().name
801
+ conn = self._Client(self._token.address, authkey=self._authkey)
802
+ dispatch(conn, None, 'accept_connection', (name,))
803
+ self._tls.connection = conn
804
+
805
+ def _callmethod(self, methodname, args=(), kwds={}):
806
+ '''
807
+ Try to call a method of the referent and return a copy of the result
808
+ '''
809
+ try:
810
+ conn = self._tls.connection
811
+ except AttributeError:
812
+ util.debug('thread %r does not own a connection',
813
+ threading.current_thread().name)
814
+ self._connect()
815
+ conn = self._tls.connection
816
+
817
+ conn.send((self._id, methodname, args, kwds))
818
+ kind, result = conn.recv()
819
+
820
+ if kind == '#RETURN':
821
+ return result
822
+ elif kind == '#PROXY':
823
+ exposed, token = result
824
+ proxytype = self._manager._registry[token.typeid][-1]
825
+ token.address = self._token.address
826
+ proxy = proxytype(
827
+ token, self._serializer, manager=self._manager,
828
+ authkey=self._authkey, exposed=exposed
829
+ )
830
+ conn = self._Client(token.address, authkey=self._authkey)
831
+ dispatch(conn, None, 'decref', (token.id,))
832
+ return proxy
833
+ raise convert_to_error(kind, result)
834
+
835
+ def _getvalue(self):
836
+ '''
837
+ Get a copy of the value of the referent
838
+ '''
839
+ return self._callmethod('#GETVALUE')
840
+
841
+ def _incref(self):
842
+ if self._owned_by_manager:
843
+ util.debug('owned_by_manager skipped INCREF of %r', self._token.id)
844
+ return
845
+
846
+ conn = self._Client(self._token.address, authkey=self._authkey)
847
+ dispatch(conn, None, 'incref', (self._id,))
848
+ util.debug('INCREF %r', self._token.id)
849
+
850
+ self._idset.add(self._id)
851
+
852
+ state = self._manager and self._manager._state
853
+
854
+ self._close = util.Finalize(
855
+ self, BaseProxy._decref,
856
+ args=(self._token, self._authkey, state,
857
+ self._tls, self._idset, self._Client),
858
+ exitpriority=10
859
+ )
860
+
861
+ @staticmethod
862
+ def _decref(token, authkey, state, tls, idset, _Client):
863
+ idset.discard(token.id)
864
+
865
+ # check whether manager is still alive
866
+ if state is None or state.value == State.STARTED:
867
+ # tell manager this process no longer cares about referent
868
+ try:
869
+ util.debug('DECREF %r', token.id)
870
+ conn = _Client(token.address, authkey=authkey)
871
+ dispatch(conn, None, 'decref', (token.id,))
872
+ except Exception as e:
873
+ util.debug('... decref failed %s', e)
874
+
875
+ else:
876
+ util.debug('DECREF %r -- manager already shutdown', token.id)
877
+
878
+ # check whether we can close this thread's connection because
879
+ # the process owns no more references to objects for this manager
880
+ if not idset and hasattr(tls, 'connection'):
881
+ util.debug('thread %r has no more proxies so closing conn',
882
+ threading.current_thread().name)
883
+ tls.connection.close()
884
+ del tls.connection
885
+
886
+ def _after_fork(self):
887
+ self._manager = None
888
+ try:
889
+ self._incref()
890
+ except Exception as e:
891
+ # the proxy may just be for a manager which has shutdown
892
+ util.info('incref failed: %s' % e)
893
+
894
+ def __reduce__(self):
895
+ kwds = {}
896
+ if get_spawning_popen() is not None:
897
+ kwds['authkey'] = self._authkey
898
+
899
+ if getattr(self, '_isauto', False):
900
+ kwds['exposed'] = self._exposed_
901
+ return (RebuildProxy,
902
+ (AutoProxy, self._token, self._serializer, kwds))
903
+ else:
904
+ return (RebuildProxy,
905
+ (type(self), self._token, self._serializer, kwds))
906
+
907
+ def __deepcopy__(self, memo):
908
+ return self._getvalue()
909
+
910
+ def __repr__(self):
911
+ return '<%s object, typeid %r at %#x>' % \
912
+ (type(self).__name__, self._token.typeid, id(self))
913
+
914
+ def __str__(self):
915
+ '''
916
+ Return representation of the referent (or a fall-back if that fails)
917
+ '''
918
+ try:
919
+ return self._callmethod('__repr__')
920
+ except Exception:
921
+ return repr(self)[:-1] + "; '__str__()' failed>"
922
+
923
+ #
924
+ # Function used for unpickling
925
+ #
926
+
927
+ def RebuildProxy(func, token, serializer, kwds):
928
+ '''
929
+ Function used for unpickling proxy objects.
930
+ '''
931
+ server = getattr(process.current_process(), '_manager_server', None)
932
+ if server and server.address == token.address:
933
+ util.debug('Rebuild a proxy owned by manager, token=%r', token)
934
+ kwds['manager_owned'] = True
935
+ if token.id not in server.id_to_local_proxy_obj:
936
+ server.id_to_local_proxy_obj[token.id] = \
937
+ server.id_to_obj[token.id]
938
+ incref = (
939
+ kwds.pop('incref', True) and
940
+ not getattr(process.current_process(), '_inheriting', False)
941
+ )
942
+ return func(token, serializer, incref=incref, **kwds)
943
+
944
+ #
945
+ # Functions to create proxies and proxy types
946
+ #
947
+
948
+ def MakeProxyType(name, exposed, _cache={}):
949
+ '''
950
+ Return a proxy type whose methods are given by `exposed`
951
+ '''
952
+ exposed = tuple(exposed)
953
+ try:
954
+ return _cache[(name, exposed)]
955
+ except KeyError:
956
+ pass
957
+
958
+ dic = {}
959
+
960
+ for meth in exposed:
961
+ exec('''def %s(self, /, *args, **kwds):
962
+ return self._callmethod(%r, args, kwds)''' % (meth, meth), dic)
963
+
964
+ ProxyType = type(name, (BaseProxy,), dic)
965
+ ProxyType._exposed_ = exposed
966
+ _cache[(name, exposed)] = ProxyType
967
+ return ProxyType
968
+
969
+
970
+ def AutoProxy(token, serializer, manager=None, authkey=None,
971
+ exposed=None, incref=True, manager_owned=False):
972
+ '''
973
+ Return an auto-proxy for `token`
974
+ '''
975
+ _Client = listener_client[serializer][1]
976
+
977
+ if exposed is None:
978
+ conn = _Client(token.address, authkey=authkey)
979
+ try:
980
+ exposed = dispatch(conn, None, 'get_methods', (token,))
981
+ finally:
982
+ conn.close()
983
+
984
+ if authkey is None and manager is not None:
985
+ authkey = manager._authkey
986
+ if authkey is None:
987
+ authkey = process.current_process().authkey
988
+
989
+ ProxyType = MakeProxyType('AutoProxy[%s]' % token.typeid, exposed)
990
+ proxy = ProxyType(token, serializer, manager=manager, authkey=authkey,
991
+ incref=incref, manager_owned=manager_owned)
992
+ proxy._isauto = True
993
+ return proxy
994
+
995
+ #
996
+ # Types/callables which we will register with SyncManager
997
+ #
998
+
999
+ class Namespace(object):
1000
+ def __init__(self, /, **kwds):
1001
+ self.__dict__.update(kwds)
1002
+ def __repr__(self):
1003
+ items = list(self.__dict__.items())
1004
+ temp = []
1005
+ for name, value in items:
1006
+ if not name.startswith('_'):
1007
+ temp.append('%s=%r' % (name, value))
1008
+ temp.sort()
1009
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(temp))
1010
+
1011
+ class Value(object):
1012
+ def __init__(self, typecode, value, lock=True):
1013
+ self._typecode = typecode
1014
+ self._value = value
1015
+ def get(self):
1016
+ return self._value
1017
+ def set(self, value):
1018
+ self._value = value
1019
+ def __repr__(self):
1020
+ return '%s(%r, %r)'%(type(self).__name__, self._typecode, self._value)
1021
+ value = property(get, set)
1022
+
1023
+ def Array(typecode, sequence, lock=True):
1024
+ return array.array(typecode, sequence)
1025
+
1026
+ #
1027
+ # Proxy types used by SyncManager
1028
+ #
1029
+
1030
+ class IteratorProxy(BaseProxy):
1031
+ _exposed_ = ('__next__', 'send', 'throw', 'close')
1032
+ def __iter__(self):
1033
+ return self
1034
+ def __next__(self, *args):
1035
+ return self._callmethod('__next__', args)
1036
+ def send(self, *args):
1037
+ return self._callmethod('send', args)
1038
+ def throw(self, *args):
1039
+ return self._callmethod('throw', args)
1040
+ def close(self, *args):
1041
+ return self._callmethod('close', args)
1042
+
1043
+
1044
+ class AcquirerProxy(BaseProxy):
1045
+ _exposed_ = ('acquire', 'release')
1046
+ def acquire(self, blocking=True, timeout=None):
1047
+ args = (blocking,) if timeout is None else (blocking, timeout)
1048
+ return self._callmethod('acquire', args)
1049
+ def release(self):
1050
+ return self._callmethod('release')
1051
+ def __enter__(self):
1052
+ return self._callmethod('acquire')
1053
+ def __exit__(self, exc_type, exc_val, exc_tb):
1054
+ return self._callmethod('release')
1055
+
1056
+
1057
+ class ConditionProxy(AcquirerProxy):
1058
+ _exposed_ = ('acquire', 'release', 'wait', 'notify', 'notify_all')
1059
+ def wait(self, timeout=None):
1060
+ return self._callmethod('wait', (timeout,))
1061
+ def notify(self, n=1):
1062
+ return self._callmethod('notify', (n,))
1063
+ def notify_all(self):
1064
+ return self._callmethod('notify_all')
1065
+ def wait_for(self, predicate, timeout=None):
1066
+ result = predicate()
1067
+ if result:
1068
+ return result
1069
+ if timeout is not None:
1070
+ endtime = getattr(time,'monotonic',time.time)() + timeout
1071
+ else:
1072
+ endtime = None
1073
+ waittime = None
1074
+ while not result:
1075
+ if endtime is not None:
1076
+ waittime = endtime - getattr(time,'monotonic',time.time)()
1077
+ if waittime <= 0:
1078
+ break
1079
+ self.wait(waittime)
1080
+ result = predicate()
1081
+ return result
1082
+
1083
+
1084
+ class EventProxy(BaseProxy):
1085
+ _exposed_ = ('is_set', 'set', 'clear', 'wait')
1086
+ def is_set(self):
1087
+ return self._callmethod('is_set')
1088
+ def set(self):
1089
+ return self._callmethod('set')
1090
+ def clear(self):
1091
+ return self._callmethod('clear')
1092
+ def wait(self, timeout=None):
1093
+ return self._callmethod('wait', (timeout,))
1094
+
1095
+
1096
+ class BarrierProxy(BaseProxy):
1097
+ _exposed_ = ('__getattribute__', 'wait', 'abort', 'reset')
1098
+ def wait(self, timeout=None):
1099
+ return self._callmethod('wait', (timeout,))
1100
+ def abort(self):
1101
+ return self._callmethod('abort')
1102
+ def reset(self):
1103
+ return self._callmethod('reset')
1104
+ @property
1105
+ def parties(self):
1106
+ return self._callmethod('__getattribute__', ('parties',))
1107
+ @property
1108
+ def n_waiting(self):
1109
+ return self._callmethod('__getattribute__', ('n_waiting',))
1110
+ @property
1111
+ def broken(self):
1112
+ return self._callmethod('__getattribute__', ('broken',))
1113
+
1114
+
1115
+ class NamespaceProxy(BaseProxy):
1116
+ _exposed_ = ('__getattribute__', '__setattr__', '__delattr__')
1117
+ def __getattr__(self, key):
1118
+ if key[0] == '_':
1119
+ return object.__getattribute__(self, key)
1120
+ callmethod = object.__getattribute__(self, '_callmethod')
1121
+ return callmethod('__getattribute__', (key,))
1122
+ def __setattr__(self, key, value):
1123
+ if key[0] == '_':
1124
+ return object.__setattr__(self, key, value)
1125
+ callmethod = object.__getattribute__(self, '_callmethod')
1126
+ return callmethod('__setattr__', (key, value))
1127
+ def __delattr__(self, key):
1128
+ if key[0] == '_':
1129
+ return object.__delattr__(self, key)
1130
+ callmethod = object.__getattribute__(self, '_callmethod')
1131
+ return callmethod('__delattr__', (key,))
1132
+
1133
+
1134
+ class ValueProxy(BaseProxy):
1135
+ _exposed_ = ('get', 'set')
1136
+ def get(self):
1137
+ return self._callmethod('get')
1138
+ def set(self, value):
1139
+ return self._callmethod('set', (value,))
1140
+ value = property(get, set)
1141
+
1142
+ __class_getitem__ = classmethod(types.GenericAlias)
1143
+
1144
+
1145
+ BaseListProxy = MakeProxyType('BaseListProxy', (
1146
+ '__add__', '__contains__', '__delitem__', '__getitem__', '__len__',
1147
+ '__mul__', '__reversed__', '__rmul__', '__setitem__',
1148
+ 'append', 'count', 'extend', 'index', 'insert', 'pop', 'remove',
1149
+ 'reverse', 'sort', '__imul__'
1150
+ ))
1151
+ class ListProxy(BaseListProxy):
1152
+ def __iadd__(self, value):
1153
+ self._callmethod('extend', (value,))
1154
+ return self
1155
+ def __imul__(self, value):
1156
+ self._callmethod('__imul__', (value,))
1157
+ return self
1158
+
1159
+
1160
+ DictProxy = MakeProxyType('DictProxy', (
1161
+ '__contains__', '__delitem__', '__getitem__', '__iter__', '__len__',
1162
+ '__setitem__', 'clear', 'copy', 'get', 'items',
1163
+ 'keys', 'pop', 'popitem', 'setdefault', 'update', 'values'
1164
+ ))
1165
+ DictProxy._method_to_typeid_ = {
1166
+ '__iter__': 'Iterator',
1167
+ }
1168
+
1169
+
1170
+ ArrayProxy = MakeProxyType('ArrayProxy', (
1171
+ '__len__', '__getitem__', '__setitem__'
1172
+ ))
1173
+
1174
+
1175
+ BasePoolProxy = MakeProxyType('PoolProxy', (
1176
+ 'apply', 'apply_async', 'close', 'imap', 'imap_unordered', 'join',
1177
+ 'map', 'map_async', 'starmap', 'starmap_async', 'terminate',
1178
+ ))
1179
+ BasePoolProxy._method_to_typeid_ = {
1180
+ 'apply_async': 'AsyncResult',
1181
+ 'map_async': 'AsyncResult',
1182
+ 'starmap_async': 'AsyncResult',
1183
+ 'imap': 'Iterator',
1184
+ 'imap_unordered': 'Iterator'
1185
+ }
1186
+ class PoolProxy(BasePoolProxy):
1187
+ def __enter__(self):
1188
+ return self
1189
+ def __exit__(self, exc_type, exc_val, exc_tb):
1190
+ self.terminate()
1191
+
1192
+ #
1193
+ # Definition of SyncManager
1194
+ #
1195
+
1196
+ class SyncManager(BaseManager):
1197
+ '''
1198
+ Subclass of `BaseManager` which supports a number of shared object types.
1199
+
1200
+ The types registered are those intended for the synchronization
1201
+ of threads, plus `dict`, `list` and `Namespace`.
1202
+
1203
+ The `multiprocess.Manager()` function creates started instances of
1204
+ this class.
1205
+ '''
1206
+
1207
+ SyncManager.register('Queue', queue.Queue)
1208
+ SyncManager.register('JoinableQueue', queue.Queue)
1209
+ SyncManager.register('Event', threading.Event, EventProxy)
1210
+ SyncManager.register('Lock', threading.Lock, AcquirerProxy)
1211
+ SyncManager.register('RLock', threading.RLock, AcquirerProxy)
1212
+ SyncManager.register('Semaphore', threading.Semaphore, AcquirerProxy)
1213
+ SyncManager.register('BoundedSemaphore', threading.BoundedSemaphore,
1214
+ AcquirerProxy)
1215
+ SyncManager.register('Condition', threading.Condition, ConditionProxy)
1216
+ SyncManager.register('Barrier', threading.Barrier, BarrierProxy)
1217
+ SyncManager.register('Pool', pool.Pool, PoolProxy)
1218
+ SyncManager.register('list', list, ListProxy)
1219
+ SyncManager.register('dict', dict, DictProxy)
1220
+ SyncManager.register('Value', Value, ValueProxy)
1221
+ SyncManager.register('Array', Array, ArrayProxy)
1222
+ SyncManager.register('Namespace', Namespace, NamespaceProxy)
1223
+
1224
+ # types returned by methods of PoolProxy
1225
+ SyncManager.register('Iterator', proxytype=IteratorProxy, create_method=False)
1226
+ SyncManager.register('AsyncResult', create_method=False)
1227
+
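A short usage sketch for the registrations above: multiprocess.Manager() returns a started SyncManager, and each registered typeid becomes a factory method on it (the bump() worker below is illustrative):

import multiprocess as mp

def bump(shared, lock):
    with lock:                           # AcquirerProxy forwards acquire()/release()
        shared['count'] = shared.get('count', 0) + 1

if __name__ == '__main__':
    with mp.Manager() as manager:        # a started SyncManager in a child process
        shared = manager.dict()          # DictProxy backed by a dict on the server
        lock = manager.Lock()            # proxy for a threading.Lock on the server
        workers = [mp.Process(target=bump, args=(shared, lock)) for _ in range(4)]
        for p in workers:
            p.start()
        for p in workers:
            p.join()
        assert shared['count'] == 4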
1228
+ #
1229
+ # Definition of SharedMemoryManager and SharedMemoryServer
1230
+ #
1231
+
1232
+ if HAS_SHMEM:
1233
+ class _SharedMemoryTracker:
1234
+ "Manages one or more shared memory segments."
1235
+
1236
+ def __init__(self, name, segment_names=[]):
1237
+ self.shared_memory_context_name = name
1238
+ self.segment_names = segment_names
1239
+
1240
+ def register_segment(self, segment_name):
1241
+ "Adds the supplied shared memory block name to tracker."
1242
+ util.debug(f"Register segment {segment_name!r} in pid {getpid()}")
1243
+ self.segment_names.append(segment_name)
1244
+
1245
+ def destroy_segment(self, segment_name):
1246
+ """Calls unlink() on the shared memory block with the supplied name
1247
+ and removes it from the list of blocks being tracked."""
1248
+ util.debug(f"Destroy segment {segment_name!r} in pid {getpid()}")
1249
+ self.segment_names.remove(segment_name)
1250
+ segment = shared_memory.SharedMemory(segment_name)
1251
+ segment.close()
1252
+ segment.unlink()
1253
+
1254
+ def unlink(self):
1255
+ "Calls destroy_segment() on all tracked shared memory blocks."
1256
+ for segment_name in self.segment_names[:]:
1257
+ self.destroy_segment(segment_name)
1258
+
1259
+ def __del__(self):
1260
+ util.debug(f"Call {self.__class__.__name__}.__del__ in {getpid()}")
1261
+ self.unlink()
1262
+
1263
+ def __getstate__(self):
1264
+ return (self.shared_memory_context_name, self.segment_names)
1265
+
1266
+ def __setstate__(self, state):
1267
+ self.__init__(*state)
1268
+
1269
+
1270
+ class SharedMemoryServer(Server):
1271
+
1272
+ public = Server.public + \
1273
+ ['track_segment', 'release_segment', 'list_segments']
1274
+
1275
+ def __init__(self, *args, **kwargs):
1276
+ Server.__init__(self, *args, **kwargs)
1277
+ address = self.address
1278
+ # The address of Linux abstract namespaces can be bytes
1279
+ if isinstance(address, bytes):
1280
+ address = os.fsdecode(address)
1281
+ self.shared_memory_context = \
1282
+ _SharedMemoryTracker(f"shm_{address}_{getpid()}")
1283
+ util.debug(f"SharedMemoryServer started by pid {getpid()}")
1284
+
1285
+ def create(self, c, typeid, /, *args, **kwargs):
1286
+ """Create a new distributed-shared object (not backed by a shared
1287
+ memory block) and return its id to be used in a Proxy Object."""
1288
+ # Unless set up as a shared proxy, don't make shared_memory_context
1289
+ # a standard part of kwargs. This makes things easier for supplying
1290
+ # simple functions.
1291
+ if hasattr(self.registry[typeid][-1], "_shared_memory_proxy"):
1292
+ kwargs['shared_memory_context'] = self.shared_memory_context
1293
+ return Server.create(self, c, typeid, *args, **kwargs)
1294
+
1295
+ def shutdown(self, c):
1296
+ "Call unlink() on all tracked shared memory, terminate the Server."
1297
+ self.shared_memory_context.unlink()
1298
+ return Server.shutdown(self, c)
1299
+
1300
+ def track_segment(self, c, segment_name):
1301
+ "Adds the supplied shared memory block name to Server's tracker."
1302
+ self.shared_memory_context.register_segment(segment_name)
1303
+
1304
+ def release_segment(self, c, segment_name):
1305
+ """Calls unlink() on the shared memory block with the supplied name
1306
+ and removes it from the tracker instance inside the Server."""
1307
+ self.shared_memory_context.destroy_segment(segment_name)
1308
+
1309
+ def list_segments(self, c):
1310
+ """Returns a list of names of shared memory blocks that the Server
1311
+ is currently tracking."""
1312
+ return self.shared_memory_context.segment_names
1313
+
1314
+
1315
+ class SharedMemoryManager(BaseManager):
1316
+ """Like SyncManager but uses SharedMemoryServer instead of Server.
1317
+
1318
+ It provides methods for creating and returning SharedMemory instances
1319
+ and for creating a list-like object (ShareableList) backed by shared
1320
+ memory. It also provides methods that create and return Proxy Objects
1321
+ that support synchronization across processes (i.e. multi-process-safe
1322
+ locks and semaphores).
1323
+ """
1324
+
1325
+ _Server = SharedMemoryServer
1326
+
1327
+ def __init__(self, *args, **kwargs):
1328
+ if os.name == "posix":
1329
+ # bpo-36867: Ensure the resource_tracker is running before
1330
+ # launching the manager process, so that concurrent
1331
+ # shared_memory manipulation both in the manager and in the
1332
+ # current process does not create two resource_tracker
1333
+ # processes.
1334
+ from . import resource_tracker
1335
+ resource_tracker.ensure_running()
1336
+ BaseManager.__init__(self, *args, **kwargs)
1337
+ util.debug(f"{self.__class__.__name__} created by pid {getpid()}")
1338
+
1339
+ def __del__(self):
1340
+ util.debug(f"{self.__class__.__name__}.__del__ by pid {getpid()}")
1341
+ pass
1342
+
1343
+ def get_server(self):
1344
+ 'Better than monkeypatching for now; merge into Server ultimately'
1345
+ if self._state.value != State.INITIAL:
1346
+ if self._state.value == State.STARTED:
1347
+ raise ProcessError("Already started SharedMemoryServer")
1348
+ elif self._state.value == State.SHUTDOWN:
1349
+ raise ProcessError("SharedMemoryManager has shut down")
1350
+ else:
1351
+ raise ProcessError(
1352
+ "Unknown state {!r}".format(self._state.value))
1353
+ return self._Server(self._registry, self._address,
1354
+ self._authkey, self._serializer)
1355
+
1356
+ def SharedMemory(self, size):
1357
+ """Returns a new SharedMemory instance with the specified size in
1358
+ bytes, to be tracked by the manager."""
1359
+ with self._Client(self._address, authkey=self._authkey) as conn:
1360
+ sms = shared_memory.SharedMemory(None, create=True, size=size)
1361
+ try:
1362
+ dispatch(conn, None, 'track_segment', (sms.name,))
1363
+ except BaseException as e:
1364
+ sms.unlink()
1365
+ raise e
1366
+ return sms
1367
+
1368
+ def ShareableList(self, sequence):
1369
+ """Returns a new ShareableList instance populated with the values
1370
+ from the input sequence, to be tracked by the manager."""
1371
+ with self._Client(self._address, authkey=self._authkey) as conn:
1372
+ sl = shared_memory.ShareableList(sequence)
1373
+ try:
1374
+ dispatch(conn, None, 'track_segment', (sl.shm.name,))
1375
+ except BaseException as e:
1376
+ sl.shm.unlink()
1377
+ raise e
1378
+ return sl
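To round off the managers module, a hedged sketch of the SharedMemoryManager API defined above (only available when HAS_SHMEM is true, i.e. when multiprocess.shared_memory can be imported):

from multiprocess.managers import SharedMemoryManager

if __name__ == '__main__':
    with SharedMemoryManager() as smm:
        shm = smm.SharedMemory(size=128)      # raw 128-byte block, tracked by the server
        sl = smm.ShareableList([1, 2, 3])     # list-like object backed by shared memory
        shm.buf[:3] = bytes([9, 8, 7])
        sl[0] = 42
    # leaving the with-block calls shutdown(), which unlinks both tracked segments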
env-llmeval/lib/python3.10/site-packages/multiprocess/pool.py ADDED
@@ -0,0 +1,957 @@
1
+ #
2
+ # Module providing the `Pool` class for managing a process pool
3
+ #
4
+ # multiprocessing/pool.py
5
+ #
6
+ # Copyright (c) 2006-2008, R Oudkerk
7
+ # Licensed to PSF under a Contributor Agreement.
8
+ #
9
+
10
+ __all__ = ['Pool', 'ThreadPool']
11
+
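A minimal usage sketch of the Pool class defined later in this module (normally reached as multiprocess.Pool):

from multiprocess import Pool

def square(x):
    return x * x

if __name__ == '__main__':
    with Pool(processes=4) as pool:           # __exit__ calls terminate()
        print(pool.map(square, range(10)))    # blocks until every result is in
        res = pool.apply_async(square, (7,))  # non-blocking; returns an ApplyResult
        print(res.get(timeout=5))             # 49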
12
+ #
13
+ # Imports
14
+ #
15
+
16
+ import collections
17
+ import itertools
18
+ import os
19
+ import queue
20
+ import threading
21
+ import time
22
+ import traceback
23
+ import types
24
+ import warnings
25
+
26
+ # If threading is available then ThreadPool should be provided. Therefore
27
+ # we avoid top-level imports which are liable to fail on some systems.
28
+ from . import util
29
+ from . import get_context, TimeoutError
30
+ from .connection import wait
31
+
32
+ #
33
+ # Constants representing the state of a pool
34
+ #
35
+
36
+ INIT = "INIT"
37
+ RUN = "RUN"
38
+ CLOSE = "CLOSE"
39
+ TERMINATE = "TERMINATE"
40
+
41
+ #
42
+ # Miscellaneous
43
+ #
44
+
45
+ job_counter = itertools.count()
46
+
47
+ def mapstar(args):
48
+ return list(map(*args))
49
+
50
+ def starmapstar(args):
51
+ return list(itertools.starmap(args[0], args[1]))
52
+
53
+ #
54
+ # Hack to embed stringification of remote traceback in local traceback
55
+ #
56
+
57
+ class RemoteTraceback(Exception):
58
+ def __init__(self, tb):
59
+ self.tb = tb
60
+ def __str__(self):
61
+ return self.tb
62
+
63
+ class ExceptionWithTraceback:
64
+ def __init__(self, exc, tb):
65
+ tb = traceback.format_exception(type(exc), exc, tb)
66
+ tb = ''.join(tb)
67
+ self.exc = exc
68
+ self.tb = '\n"""\n%s"""' % tb
69
+ def __reduce__(self):
70
+ return rebuild_exc, (self.exc, self.tb)
71
+
72
+ def rebuild_exc(exc, tb):
73
+ exc.__cause__ = RemoteTraceback(tb)
74
+ return exc
75
+
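In practice this machinery means an exception raised inside a worker is re-raised in the parent with the remote traceback attached as its __cause__; a sketch (the fail() function is illustrative):

from multiprocess import Pool

def fail(x):
    raise ValueError(x)

if __name__ == '__main__':
    with Pool(2) as pool:
        try:
            pool.apply(fail, (1,))
        except ValueError as exc:
            # exc was rebuilt by rebuild_exc(); its cause carries the worker traceback
            print(type(exc.__cause__).__name__)   # RemoteTraceback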
76
+ #
77
+ # Code run by worker processes
78
+ #
79
+
80
+ class MaybeEncodingError(Exception):
81
+ """Wraps possible unpickleable errors, so they can be
82
+ safely sent through the socket."""
83
+
84
+ def __init__(self, exc, value):
85
+ self.exc = repr(exc)
86
+ self.value = repr(value)
87
+ super(MaybeEncodingError, self).__init__(self.exc, self.value)
88
+
89
+ def __str__(self):
90
+ return "Error sending result: '%s'. Reason: '%s'" % (self.value,
91
+ self.exc)
92
+
93
+ def __repr__(self):
94
+ return "<%s: %s>" % (self.__class__.__name__, self)
95
+
96
+
97
+ def worker(inqueue, outqueue, initializer=None, initargs=(), maxtasks=None,
98
+ wrap_exception=False):
99
+ if (maxtasks is not None) and not (isinstance(maxtasks, int)
100
+ and maxtasks >= 1):
101
+ raise AssertionError("Maxtasks {!r} is not valid".format(maxtasks))
102
+ put = outqueue.put
103
+ get = inqueue.get
104
+ if hasattr(inqueue, '_writer'):
105
+ inqueue._writer.close()
106
+ outqueue._reader.close()
107
+
108
+ if initializer is not None:
109
+ initializer(*initargs)
110
+
111
+ completed = 0
112
+ while maxtasks is None or (maxtasks and completed < maxtasks):
113
+ try:
114
+ task = get()
115
+ except (EOFError, OSError):
116
+ util.debug('worker got EOFError or OSError -- exiting')
117
+ break
118
+
119
+ if task is None:
120
+ util.debug('worker got sentinel -- exiting')
121
+ break
122
+
123
+ job, i, func, args, kwds = task
124
+ try:
125
+ result = (True, func(*args, **kwds))
126
+ except Exception as e:
127
+ if wrap_exception and func is not _helper_reraises_exception:
128
+ e = ExceptionWithTraceback(e, e.__traceback__)
129
+ result = (False, e)
130
+ try:
131
+ put((job, i, result))
132
+ except Exception as e:
133
+ wrapped = MaybeEncodingError(e, result[1])
134
+ util.debug("Possible encoding error while sending result: %s" % (
135
+ wrapped))
136
+ put((job, i, (False, wrapped)))
137
+
138
+ task = job = result = func = args = kwds = None
139
+ completed += 1
140
+ util.debug('worker exiting after %d tasks' % completed)
141
+
142
+ def _helper_reraises_exception(ex):
143
+ 'Pickle-able helper function for use by _guarded_task_generation.'
144
+ raise ex
145
+
146
+ #
147
+ # Class representing a process pool
148
+ #
149
+
150
+ class _PoolCache(dict):
151
+ """
152
+ Class that implements a cache for the Pool class that will notify
153
+ the pool management threads every time the cache is emptied. The
154
+ notification is done by the use of a queue that is provided when
155
+ instantiating the cache.
156
+ """
157
+ def __init__(self, /, *args, notifier=None, **kwds):
158
+ self.notifier = notifier
159
+ super().__init__(*args, **kwds)
160
+
161
+ def __delitem__(self, item):
162
+ super().__delitem__(item)
163
+
164
+ # Notify that the cache is empty. This is important because the
165
+ # pool keeps maintaining workers until the cache gets drained. This
166
+ # eliminates a race condition in which a task is finished after
167
+ # the pool's _handle_workers method has entered another iteration of the
168
+ # loop. In this situation, the only event that can wake up the pool
169
+ # is the cache to be emptied (no more tasks available).
170
+ if not self:
171
+ self.notifier.put(None)
172
+
173
+ class Pool(object):
174
+ '''
175
+ Class which supports an async version of applying functions to arguments.
176
+ '''
177
+ _wrap_exception = True
178
+
179
+ @staticmethod
180
+ def Process(ctx, *args, **kwds):
181
+ return ctx.Process(*args, **kwds)
182
+
183
+ def __init__(self, processes=None, initializer=None, initargs=(),
184
+ maxtasksperchild=None, context=None):
185
+ # Attributes initialized early to make sure that they exist in
186
+ # __del__() if __init__() raises an exception
187
+ self._pool = []
188
+ self._state = INIT
189
+
190
+ self._ctx = context or get_context()
191
+ self._setup_queues()
192
+ self._taskqueue = queue.SimpleQueue()
193
+ # The _change_notifier queue exist to wake up self._handle_workers()
194
+ # when the cache (self._cache) is empty or when there is a change in
195
+ # the _state variable of the thread that runs _handle_workers.
196
+ self._change_notifier = self._ctx.SimpleQueue()
197
+ self._cache = _PoolCache(notifier=self._change_notifier)
198
+ self._maxtasksperchild = maxtasksperchild
199
+ self._initializer = initializer
200
+ self._initargs = initargs
201
+
202
+ if processes is None:
203
+ processes = os.cpu_count() or 1
204
+ if processes < 1:
205
+ raise ValueError("Number of processes must be at least 1")
206
+ if maxtasksperchild is not None:
207
+ if not isinstance(maxtasksperchild, int) or maxtasksperchild <= 0:
208
+ raise ValueError("maxtasksperchild must be a positive int or None")
209
+
210
+ if initializer is not None and not callable(initializer):
211
+ raise TypeError('initializer must be a callable')
212
+
213
+ self._processes = processes
214
+ try:
215
+ self._repopulate_pool()
216
+ except Exception:
217
+ for p in self._pool:
218
+ if p.exitcode is None:
219
+ p.terminate()
220
+ for p in self._pool:
221
+ p.join()
222
+ raise
223
+
224
+ sentinels = self._get_sentinels()
225
+
226
+ self._worker_handler = threading.Thread(
227
+ target=Pool._handle_workers,
228
+ args=(self._cache, self._taskqueue, self._ctx, self.Process,
229
+ self._processes, self._pool, self._inqueue, self._outqueue,
230
+ self._initializer, self._initargs, self._maxtasksperchild,
231
+ self._wrap_exception, sentinels, self._change_notifier)
232
+ )
233
+ self._worker_handler.daemon = True
234
+ self._worker_handler._state = RUN
235
+ self._worker_handler.start()
236
+
237
+
238
+ self._task_handler = threading.Thread(
239
+ target=Pool._handle_tasks,
240
+ args=(self._taskqueue, self._quick_put, self._outqueue,
241
+ self._pool, self._cache)
242
+ )
243
+ self._task_handler.daemon = True
244
+ self._task_handler._state = RUN
245
+ self._task_handler.start()
246
+
247
+ self._result_handler = threading.Thread(
248
+ target=Pool._handle_results,
249
+ args=(self._outqueue, self._quick_get, self._cache)
250
+ )
251
+ self._result_handler.daemon = True
252
+ self._result_handler._state = RUN
253
+ self._result_handler.start()
254
+
255
+ self._terminate = util.Finalize(
256
+ self, self._terminate_pool,
257
+ args=(self._taskqueue, self._inqueue, self._outqueue, self._pool,
258
+ self._change_notifier, self._worker_handler, self._task_handler,
259
+ self._result_handler, self._cache),
260
+ exitpriority=15
261
+ )
262
+ self._state = RUN
263
+
264
+ # Copy globals as function locals to make sure that they are available
265
+ # during Python shutdown when the Pool is destroyed.
266
+ def __del__(self, _warn=warnings.warn, RUN=RUN):
267
+ if self._state == RUN:
268
+ _warn(f"unclosed running multiprocessing pool {self!r}",
269
+ ResourceWarning, source=self)
270
+ if getattr(self, '_change_notifier', None) is not None:
271
+ self._change_notifier.put(None)
272
+
273
+ def __repr__(self):
274
+ cls = self.__class__
275
+ return (f'<{cls.__module__}.{cls.__qualname__} '
276
+ f'state={self._state} '
277
+ f'pool_size={len(self._pool)}>')
278
+
279
+ def _get_sentinels(self):
280
+ task_queue_sentinels = [self._outqueue._reader]
281
+ self_notifier_sentinels = [self._change_notifier._reader]
282
+ return [*task_queue_sentinels, *self_notifier_sentinels]
283
+
284
+ @staticmethod
285
+ def _get_worker_sentinels(workers):
286
+ return [worker.sentinel for worker in
287
+ workers if hasattr(worker, "sentinel")]
288
+
289
+ @staticmethod
290
+ def _join_exited_workers(pool):
291
+ """Clean up after any worker processes which have exited due to reaching
292
+ their specified lifetime. Returns True if any workers were cleaned up.
293
+ """
294
+ cleaned = False
295
+ for i in reversed(range(len(pool))):
296
+ worker = pool[i]
297
+ if worker.exitcode is not None:
298
+ # worker exited
299
+ util.debug('cleaning up worker %d' % i)
300
+ worker.join()
301
+ cleaned = True
302
+ del pool[i]
303
+ return cleaned
304
+
305
+ def _repopulate_pool(self):
306
+ return self._repopulate_pool_static(self._ctx, self.Process,
307
+ self._processes,
308
+ self._pool, self._inqueue,
309
+ self._outqueue, self._initializer,
310
+ self._initargs,
311
+ self._maxtasksperchild,
312
+ self._wrap_exception)
313
+
314
+ @staticmethod
315
+ def _repopulate_pool_static(ctx, Process, processes, pool, inqueue,
316
+ outqueue, initializer, initargs,
317
+ maxtasksperchild, wrap_exception):
318
+ """Bring the number of pool processes up to the specified number,
319
+ for use after reaping workers which have exited.
320
+ """
321
+ for i in range(processes - len(pool)):
322
+ w = Process(ctx, target=worker,
323
+ args=(inqueue, outqueue,
324
+ initializer,
325
+ initargs, maxtasksperchild,
326
+ wrap_exception))
327
+ w.name = w.name.replace('Process', 'PoolWorker')
328
+ w.daemon = True
329
+ w.start()
330
+ pool.append(w)
331
+ util.debug('added worker')
332
+
333
+ @staticmethod
334
+ def _maintain_pool(ctx, Process, processes, pool, inqueue, outqueue,
335
+ initializer, initargs, maxtasksperchild,
336
+ wrap_exception):
337
+ """Clean up any exited workers and start replacements for them.
338
+ """
339
+ if Pool._join_exited_workers(pool):
340
+ Pool._repopulate_pool_static(ctx, Process, processes, pool,
341
+ inqueue, outqueue, initializer,
342
+ initargs, maxtasksperchild,
343
+ wrap_exception)
344
+
345
+ def _setup_queues(self):
346
+ self._inqueue = self._ctx.SimpleQueue()
347
+ self._outqueue = self._ctx.SimpleQueue()
348
+ self._quick_put = self._inqueue._writer.send
349
+ self._quick_get = self._outqueue._reader.recv
350
+
351
+ def _check_running(self):
352
+ if self._state != RUN:
353
+ raise ValueError("Pool not running")
354
+
355
+ def apply(self, func, args=(), kwds={}):
356
+ '''
357
+ Equivalent of `func(*args, **kwds)`.
358
+ Pool must be running.
359
+ '''
360
+ return self.apply_async(func, args, kwds).get()
361
+
362
+ def map(self, func, iterable, chunksize=None):
363
+ '''
364
+ Apply `func` to each element in `iterable`, collecting the results
365
+ in a list that is returned.
366
+ '''
367
+ return self._map_async(func, iterable, mapstar, chunksize).get()
368
+
369
+ def starmap(self, func, iterable, chunksize=None):
370
+ '''
371
+ Like `map()` method but the elements of the `iterable` are expected to
372
+ be iterables as well and will be unpacked as arguments. Hence
373
+ `func` and (a, b) becomes func(a, b).
374
+ '''
375
+ return self._map_async(func, iterable, starmapstar, chunksize).get()
376
+
377
+ def starmap_async(self, func, iterable, chunksize=None, callback=None,
378
+ error_callback=None):
379
+ '''
380
+ Asynchronous version of `starmap()` method.
381
+ '''
382
+ return self._map_async(func, iterable, starmapstar, chunksize,
383
+ callback, error_callback)
384
+
385
+ def _guarded_task_generation(self, result_job, func, iterable):
386
+ '''Provides a generator of tasks for imap and imap_unordered with
387
+ appropriate handling for iterables which throw exceptions during
388
+ iteration.'''
389
+ try:
390
+ i = -1
391
+ for i, x in enumerate(iterable):
392
+ yield (result_job, i, func, (x,), {})
393
+ except Exception as e:
394
+ yield (result_job, i+1, _helper_reraises_exception, (e,), {})
395
+
396
+ def imap(self, func, iterable, chunksize=1):
397
+ '''
398
+ Equivalent of `map()` -- can be MUCH slower than `Pool.map()`.
399
+ '''
400
+ self._check_running()
401
+ if chunksize == 1:
402
+ result = IMapIterator(self)
403
+ self._taskqueue.put(
404
+ (
405
+ self._guarded_task_generation(result._job, func, iterable),
406
+ result._set_length
407
+ ))
408
+ return result
409
+ else:
410
+ if chunksize < 1:
411
+ raise ValueError(
412
+ "Chunksize must be 1+, not {0:n}".format(
413
+ chunksize))
414
+ task_batches = Pool._get_tasks(func, iterable, chunksize)
415
+ result = IMapIterator(self)
416
+ self._taskqueue.put(
417
+ (
418
+ self._guarded_task_generation(result._job,
419
+ mapstar,
420
+ task_batches),
421
+ result._set_length
422
+ ))
423
+ return (item for chunk in result for item in chunk)
424
+
425
+ def imap_unordered(self, func, iterable, chunksize=1):
426
+ '''
427
+ Like `imap()` method but ordering of results is arbitrary.
428
+ '''
429
+ self._check_running()
430
+ if chunksize == 1:
431
+ result = IMapUnorderedIterator(self)
432
+ self._taskqueue.put(
433
+ (
434
+ self._guarded_task_generation(result._job, func, iterable),
435
+ result._set_length
436
+ ))
437
+ return result
438
+ else:
439
+ if chunksize < 1:
440
+ raise ValueError(
441
+ "Chunksize must be 1+, not {0!r}".format(chunksize))
442
+ task_batches = Pool._get_tasks(func, iterable, chunksize)
443
+ result = IMapUnorderedIterator(self)
444
+ self._taskqueue.put(
445
+ (
446
+ self._guarded_task_generation(result._job,
447
+ mapstar,
448
+ task_batches),
449
+ result._set_length
450
+ ))
451
+ return (item for chunk in result for item in chunk)
452
+
453
+ def apply_async(self, func, args=(), kwds={}, callback=None,
454
+ error_callback=None):
455
+ '''
456
+ Asynchronous version of `apply()` method.
457
+ '''
458
+ self._check_running()
459
+ result = ApplyResult(self, callback, error_callback)
460
+ self._taskqueue.put(([(result._job, 0, func, args, kwds)], None))
461
+ return result
462
+
463
+ def map_async(self, func, iterable, chunksize=None, callback=None,
464
+ error_callback=None):
465
+ '''
466
+ Asynchronous version of `map()` method.
467
+ '''
468
+ return self._map_async(func, iterable, mapstar, chunksize, callback,
469
+ error_callback)
470
+
471
+ def _map_async(self, func, iterable, mapper, chunksize=None, callback=None,
472
+ error_callback=None):
473
+ '''
474
+ Helper function to implement map, starmap and their async counterparts.
475
+ '''
476
+ self._check_running()
477
+ if not hasattr(iterable, '__len__'):
478
+ iterable = list(iterable)
479
+
480
+ if chunksize is None:
481
+ chunksize, extra = divmod(len(iterable), len(self._pool) * 4)
482
+ if extra:
483
+ chunksize += 1
484
+ if len(iterable) == 0:
485
+ chunksize = 0
486
+
487
+ task_batches = Pool._get_tasks(func, iterable, chunksize)
488
+ result = MapResult(self, chunksize, len(iterable), callback,
489
+ error_callback=error_callback)
490
+ self._taskqueue.put(
491
+ (
492
+ self._guarded_task_generation(result._job,
493
+ mapper,
494
+ task_batches),
495
+ None
496
+ )
497
+ )
498
+ return result
499
+
500
+ @staticmethod
501
+ def _wait_for_updates(sentinels, change_notifier, timeout=None):
502
+ wait(sentinels, timeout=timeout)
503
+ while not change_notifier.empty():
504
+ change_notifier.get()
505
+
506
+ @classmethod
507
+ def _handle_workers(cls, cache, taskqueue, ctx, Process, processes,
508
+ pool, inqueue, outqueue, initializer, initargs,
509
+ maxtasksperchild, wrap_exception, sentinels,
510
+ change_notifier):
511
+ thread = threading.current_thread()
512
+
513
+ # Keep maintaining workers until the cache gets drained, unless the pool
514
+ # is terminated.
515
+ while thread._state == RUN or (cache and thread._state != TERMINATE):
516
+ cls._maintain_pool(ctx, Process, processes, pool, inqueue,
517
+ outqueue, initializer, initargs,
518
+ maxtasksperchild, wrap_exception)
519
+
520
+ current_sentinels = [*cls._get_worker_sentinels(pool), *sentinels]
521
+
522
+ cls._wait_for_updates(current_sentinels, change_notifier)
523
+ # send sentinel to stop workers
524
+ taskqueue.put(None)
525
+ util.debug('worker handler exiting')
526
+
527
+ @staticmethod
528
+ def _handle_tasks(taskqueue, put, outqueue, pool, cache):
529
+ thread = threading.current_thread()
530
+
531
+ for taskseq, set_length in iter(taskqueue.get, None):
532
+ task = None
533
+ try:
534
+ # iterating taskseq cannot fail
535
+ for task in taskseq:
536
+ if thread._state != RUN:
537
+ util.debug('task handler found thread._state != RUN')
538
+ break
539
+ try:
540
+ put(task)
541
+ except Exception as e:
542
+ job, idx = task[:2]
543
+ try:
544
+ cache[job]._set(idx, (False, e))
545
+ except KeyError:
546
+ pass
547
+ else:
548
+ if set_length:
549
+ util.debug('doing set_length()')
550
+ idx = task[1] if task else -1
551
+ set_length(idx + 1)
552
+ continue
553
+ break
554
+ finally:
555
+ task = taskseq = job = None
556
+ else:
557
+ util.debug('task handler got sentinel')
558
+
559
+ try:
560
+ # tell result handler to finish when cache is empty
561
+ util.debug('task handler sending sentinel to result handler')
562
+ outqueue.put(None)
563
+
564
+ # tell workers there is no more work
565
+ util.debug('task handler sending sentinel to workers')
566
+ for p in pool:
567
+ put(None)
568
+ except OSError:
569
+ util.debug('task handler got OSError when sending sentinels')
570
+
571
+ util.debug('task handler exiting')
572
+
573
+ @staticmethod
574
+ def _handle_results(outqueue, get, cache):
575
+ thread = threading.current_thread()
576
+
577
+ while 1:
578
+ try:
579
+ task = get()
580
+ except (OSError, EOFError):
581
+ util.debug('result handler got EOFError/OSError -- exiting')
582
+ return
583
+
584
+ if thread._state != RUN:
585
+ assert thread._state == TERMINATE, "Thread not in TERMINATE"
586
+ util.debug('result handler found thread._state=TERMINATE')
587
+ break
588
+
589
+ if task is None:
590
+ util.debug('result handler got sentinel')
591
+ break
592
+
593
+ job, i, obj = task
594
+ try:
595
+ cache[job]._set(i, obj)
596
+ except KeyError:
597
+ pass
598
+ task = job = obj = None
599
+
600
+ while cache and thread._state != TERMINATE:
601
+ try:
602
+ task = get()
603
+ except (OSError, EOFError):
604
+ util.debug('result handler got EOFError/OSError -- exiting')
605
+ return
606
+
607
+ if task is None:
608
+ util.debug('result handler ignoring extra sentinel')
609
+ continue
610
+ job, i, obj = task
611
+ try:
612
+ cache[job]._set(i, obj)
613
+ except KeyError:
614
+ pass
615
+ task = job = obj = None
616
+
617
+ if hasattr(outqueue, '_reader'):
618
+ util.debug('ensuring that outqueue is not full')
619
+ # If we don't make room available in outqueue then
620
+ # attempts to add the sentinel (None) to outqueue may
621
+ # block. There is guaranteed to be no more than 2 sentinels.
622
+ try:
623
+ for i in range(10):
624
+ if not outqueue._reader.poll():
625
+ break
626
+ get()
627
+ except (OSError, EOFError):
628
+ pass
629
+
630
+ util.debug('result handler exiting: len(cache)=%s, thread._state=%s',
631
+ len(cache), thread._state)
632
+
633
+ @staticmethod
634
+ def _get_tasks(func, it, size):
635
+ it = iter(it)
636
+ while 1:
637
+ x = tuple(itertools.islice(it, size))
638
+ if not x:
639
+ return
640
+ yield (func, x)
641
+
642
+ def __reduce__(self):
643
+ raise NotImplementedError(
644
+ 'pool objects cannot be passed between processes or pickled'
645
+ )
646
+
647
+ def close(self):
648
+ util.debug('closing pool')
649
+ if self._state == RUN:
650
+ self._state = CLOSE
651
+ self._worker_handler._state = CLOSE
652
+ self._change_notifier.put(None)
653
+
654
+ def terminate(self):
655
+ util.debug('terminating pool')
656
+ self._state = TERMINATE
657
+ self._terminate()
658
+
659
+ def join(self):
660
+ util.debug('joining pool')
661
+ if self._state == RUN:
662
+ raise ValueError("Pool is still running")
663
+ elif self._state not in (CLOSE, TERMINATE):
664
+ raise ValueError("In unknown state")
665
+ self._worker_handler.join()
666
+ self._task_handler.join()
667
+ self._result_handler.join()
668
+ for p in self._pool:
669
+ p.join()
670
+
671
+ @staticmethod
672
+ def _help_stuff_finish(inqueue, task_handler, size):
673
+ # task_handler may be blocked trying to put items on inqueue
674
+ util.debug('removing tasks from inqueue until task handler finished')
675
+ inqueue._rlock.acquire()
676
+ while task_handler.is_alive() and inqueue._reader.poll():
677
+ inqueue._reader.recv()
678
+ time.sleep(0)
679
+
680
+ @classmethod
681
+ def _terminate_pool(cls, taskqueue, inqueue, outqueue, pool, change_notifier,
682
+ worker_handler, task_handler, result_handler, cache):
683
+ # this is guaranteed to only be called once
684
+ util.debug('finalizing pool')
685
+
686
+ # Notify that the worker_handler state has been changed so the
687
+ # _handle_workers loop can be unblocked (and exited) in order to
688
+ # send the finalization sentinel to all the workers.
689
+ worker_handler._state = TERMINATE
690
+ change_notifier.put(None)
691
+
692
+ task_handler._state = TERMINATE
693
+
694
+ util.debug('helping task handler/workers to finish')
695
+ cls._help_stuff_finish(inqueue, task_handler, len(pool))
696
+
697
+ if (not result_handler.is_alive()) and (len(cache) != 0):
698
+ raise AssertionError(
699
+ "Cannot have cache with result_hander not alive")
700
+
701
+ result_handler._state = TERMINATE
702
+ change_notifier.put(None)
703
+ outqueue.put(None) # sentinel
704
+
705
+ # We must wait for the worker handler to exit before terminating
706
+ # workers because we don't want workers to be restarted behind our back.
707
+ util.debug('joining worker handler')
708
+ if threading.current_thread() is not worker_handler:
709
+ worker_handler.join()
710
+
711
+ # Terminate workers which haven't already finished.
712
+ if pool and hasattr(pool[0], 'terminate'):
713
+ util.debug('terminating workers')
714
+ for p in pool:
715
+ if p.exitcode is None:
716
+ p.terminate()
717
+
718
+ util.debug('joining task handler')
719
+ if threading.current_thread() is not task_handler:
720
+ task_handler.join()
721
+
722
+ util.debug('joining result handler')
723
+ if threading.current_thread() is not result_handler:
724
+ result_handler.join()
725
+
726
+ if pool and hasattr(pool[0], 'terminate'):
727
+ util.debug('joining pool workers')
728
+ for p in pool:
729
+ if p.is_alive():
730
+ # worker has not yet exited
731
+ util.debug('cleaning up worker %d' % p.pid)
732
+ p.join()
733
+
734
+ def __enter__(self):
735
+ self._check_running()
736
+ return self
737
+
738
+ def __exit__(self, exc_type, exc_val, exc_tb):
739
+ self.terminate()
740
+
741
+ #
742
+ # Class whose instances are returned by `Pool.apply_async()`
743
+ #
744
+
745
+ class ApplyResult(object):
746
+
747
+ def __init__(self, pool, callback, error_callback):
748
+ self._pool = pool
749
+ self._event = threading.Event()
750
+ self._job = next(job_counter)
751
+ self._cache = pool._cache
752
+ self._callback = callback
753
+ self._error_callback = error_callback
754
+ self._cache[self._job] = self
755
+
756
+ def ready(self):
757
+ return self._event.is_set()
758
+
759
+ def successful(self):
760
+ if not self.ready():
761
+ raise ValueError("{0!r} not ready".format(self))
762
+ return self._success
763
+
764
+ def wait(self, timeout=None):
765
+ self._event.wait(timeout)
766
+
767
+ def get(self, timeout=None):
768
+ self.wait(timeout)
769
+ if not self.ready():
770
+ raise TimeoutError
771
+ if self._success:
772
+ return self._value
773
+ else:
774
+ raise self._value
775
+
776
+ def _set(self, i, obj):
777
+ self._success, self._value = obj
778
+ if self._callback and self._success:
779
+ self._callback(self._value)
780
+ if self._error_callback and not self._success:
781
+ self._error_callback(self._value)
782
+ self._event.set()
783
+ del self._cache[self._job]
784
+ self._pool = None
785
+
786
+ __class_getitem__ = classmethod(types.GenericAlias)
787
+
788
+ AsyncResult = ApplyResult # create alias -- see #17805
789
+
790
+ #
791
+ # Class whose instances are returned by `Pool.map_async()`
792
+ #
793
+
794
+ class MapResult(ApplyResult):
795
+
796
+ def __init__(self, pool, chunksize, length, callback, error_callback):
797
+ ApplyResult.__init__(self, pool, callback,
798
+ error_callback=error_callback)
799
+ self._success = True
800
+ self._value = [None] * length
801
+ self._chunksize = chunksize
802
+ if chunksize <= 0:
803
+ self._number_left = 0
804
+ self._event.set()
805
+ del self._cache[self._job]
806
+ else:
807
+ self._number_left = length//chunksize + bool(length % chunksize)
808
+
809
+ def _set(self, i, success_result):
810
+ self._number_left -= 1
811
+ success, result = success_result
812
+ if success and self._success:
813
+ self._value[i*self._chunksize:(i+1)*self._chunksize] = result
814
+ if self._number_left == 0:
815
+ if self._callback:
816
+ self._callback(self._value)
817
+ del self._cache[self._job]
818
+ self._event.set()
819
+ self._pool = None
820
+ else:
821
+ if not success and self._success:
822
+ # only store first exception
823
+ self._success = False
824
+ self._value = result
825
+ if self._number_left == 0:
826
+ # only consider the result ready once all jobs are done
827
+ if self._error_callback:
828
+ self._error_callback(self._value)
829
+ del self._cache[self._job]
830
+ self._event.set()
831
+ self._pool = None
832
+
833
+ #
834
+ # Class whose instances are returned by `Pool.imap()`
835
+ #
836
+
837
+ class IMapIterator(object):
838
+
839
+ def __init__(self, pool):
840
+ self._pool = pool
841
+ self._cond = threading.Condition(threading.Lock())
842
+ self._job = next(job_counter)
843
+ self._cache = pool._cache
844
+ self._items = collections.deque()
845
+ self._index = 0
846
+ self._length = None
847
+ self._unsorted = {}
848
+ self._cache[self._job] = self
849
+
850
+ def __iter__(self):
851
+ return self
852
+
853
+ def next(self, timeout=None):
854
+ with self._cond:
855
+ try:
856
+ item = self._items.popleft()
857
+ except IndexError:
858
+ if self._index == self._length:
859
+ self._pool = None
860
+ raise StopIteration from None
861
+ self._cond.wait(timeout)
862
+ try:
863
+ item = self._items.popleft()
864
+ except IndexError:
865
+ if self._index == self._length:
866
+ self._pool = None
867
+ raise StopIteration from None
868
+ raise TimeoutError from None
869
+
870
+ success, value = item
871
+ if success:
872
+ return value
873
+ raise value
874
+
875
+ __next__ = next # XXX
876
+
877
+ def _set(self, i, obj):
878
+ with self._cond:
879
+ if self._index == i:
880
+ self._items.append(obj)
881
+ self._index += 1
882
+ while self._index in self._unsorted:
883
+ obj = self._unsorted.pop(self._index)
884
+ self._items.append(obj)
885
+ self._index += 1
886
+ self._cond.notify()
887
+ else:
888
+ self._unsorted[i] = obj
889
+
890
+ if self._index == self._length:
891
+ del self._cache[self._job]
892
+ self._pool = None
893
+
894
+ def _set_length(self, length):
895
+ with self._cond:
896
+ self._length = length
897
+ if self._index == self._length:
898
+ self._cond.notify()
899
+ del self._cache[self._job]
900
+ self._pool = None
901
+
902
+ #
903
+ # Class whose instances are returned by `Pool.imap_unordered()`
904
+ #
905
+
906
+ class IMapUnorderedIterator(IMapIterator):
907
+
908
+ def _set(self, i, obj):
909
+ with self._cond:
910
+ self._items.append(obj)
911
+ self._index += 1
912
+ self._cond.notify()
913
+ if self._index == self._length:
914
+ del self._cache[self._job]
915
+ self._pool = None
916
+
917
+ #
918
+ #
919
+ #
920
+
921
+ class ThreadPool(Pool):
922
+ _wrap_exception = False
923
+
924
+ @staticmethod
925
+ def Process(ctx, *args, **kwds):
926
+ from .dummy import Process
927
+ return Process(*args, **kwds)
928
+
929
+ def __init__(self, processes=None, initializer=None, initargs=()):
930
+ Pool.__init__(self, processes, initializer, initargs)
931
+
932
+ def _setup_queues(self):
933
+ self._inqueue = queue.SimpleQueue()
934
+ self._outqueue = queue.SimpleQueue()
935
+ self._quick_put = self._inqueue.put
936
+ self._quick_get = self._outqueue.get
937
+
938
+ def _get_sentinels(self):
939
+ return [self._change_notifier._reader]
940
+
941
+ @staticmethod
942
+ def _get_worker_sentinels(workers):
943
+ return []
944
+
945
+ @staticmethod
946
+ def _help_stuff_finish(inqueue, task_handler, size):
947
+ # drain inqueue, and put sentinels at its head to make workers finish
948
+ try:
949
+ while True:
950
+ inqueue.get(block=False)
951
+ except queue.Empty:
952
+ pass
953
+ for i in range(size):
954
+ inqueue.put(None)
955
+
956
+ def _wait_for_updates(self, sentinels, change_notifier, timeout):
957
+ time.sleep(timeout)
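
To illustrate the Pool API implemented above, here is a minimal usage sketch (editorial example, not part of the packaged file; it assumes the multiprocess package is importable and uses a hypothetical square helper):

from multiprocess import Pool

def square(x):
    return x * x

if __name__ == '__main__':
    with Pool(processes=4) as pool:
        # apply_async returns an ApplyResult; get() re-raises worker errors.
        print(pool.apply_async(square, (3,)).get())            # 9
        # imap preserves input order, imap_unordered does not.
        print(list(pool.imap(square, range(5))))               # [0, 1, 4, 9, 16]
        print(sorted(pool.imap_unordered(square, range(5))))   # [0, 1, 4, 9, 16]
        pool.close()
        pool.join()
    # __exit__ calls terminate(), which is harmless after close()/join().
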
env-llmeval/lib/python3.10/site-packages/multiprocess/popen_fork.py ADDED
@@ -0,0 +1,83 @@
1
+ import os
2
+ import signal
3
+
4
+ from . import util
5
+
6
+ __all__ = ['Popen']
7
+
8
+ #
9
+ # Start child process using fork
10
+ #
11
+
12
+ class Popen(object):
13
+ method = 'fork'
14
+
15
+ def __init__(self, process_obj):
16
+ util._flush_std_streams()
17
+ self.returncode = None
18
+ self.finalizer = None
19
+ self._launch(process_obj)
20
+
21
+ def duplicate_for_child(self, fd):
22
+ return fd
23
+
24
+ def poll(self, flag=os.WNOHANG):
25
+ if self.returncode is None:
26
+ try:
27
+ pid, sts = os.waitpid(self.pid, flag)
28
+ except OSError:
29
+ # Child process not yet created. See #1731717
30
+ # e.errno == errno.ECHILD == 10
31
+ return None
32
+ if pid == self.pid:
33
+ self.returncode = os.waitstatus_to_exitcode(sts)
34
+ return self.returncode
35
+
36
+ def wait(self, timeout=None):
37
+ if self.returncode is None:
38
+ if timeout is not None:
39
+ from multiprocess.connection import wait
40
+ if not wait([self.sentinel], timeout):
41
+ return None
42
+ # This shouldn't block if wait() returned successfully.
43
+ return self.poll(os.WNOHANG if timeout == 0.0 else 0)
44
+ return self.returncode
45
+
46
+ def _send_signal(self, sig):
47
+ if self.returncode is None:
48
+ try:
49
+ os.kill(self.pid, sig)
50
+ except ProcessLookupError:
51
+ pass
52
+ except OSError:
53
+ if self.wait(timeout=0.1) is None:
54
+ raise
55
+
56
+ def terminate(self):
57
+ self._send_signal(signal.SIGTERM)
58
+
59
+ def kill(self):
60
+ self._send_signal(signal.SIGKILL)
61
+
62
+ def _launch(self, process_obj):
63
+ code = 1
64
+ parent_r, child_w = os.pipe()
65
+ child_r, parent_w = os.pipe()
66
+ self.pid = os.fork()
67
+ if self.pid == 0:
68
+ try:
69
+ os.close(parent_r)
70
+ os.close(parent_w)
71
+ code = process_obj._bootstrap(parent_sentinel=child_r)
72
+ finally:
73
+ os._exit(code)
74
+ else:
75
+ os.close(child_w)
76
+ os.close(child_r)
77
+ self.finalizer = util.Finalize(self, util.close_fds,
78
+ (parent_r, parent_w,))
79
+ self.sentinel = parent_r
80
+
81
+ def close(self):
82
+ if self.finalizer is not None:
83
+ self.finalizer()
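
The Popen class above is the machinery behind the 'fork' start method; user code reaches it through the context API. A small hedged sketch (POSIX only, since fork is unavailable on Windows; the report function is illustrative):

import multiprocess as mp

def report():
    print('child pid:', mp.current_process().pid)

if __name__ == '__main__':
    ctx = mp.get_context('fork')      # processes started via popen_fork.Popen
    p = ctx.Process(target=report)
    p.start()
    p.join()
    print('exit code:', p.exitcode)   # collected through os.waitpid() in poll()
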
env-llmeval/lib/python3.10/site-packages/multiprocess/popen_forkserver.py ADDED
@@ -0,0 +1,74 @@
1
+ import io
2
+ import os
3
+
4
+ from .context import reduction, set_spawning_popen
5
+ if not reduction.HAVE_SEND_HANDLE:
6
+ raise ImportError('No support for sending fds between processes')
7
+ from . import forkserver
8
+ from . import popen_fork
9
+ from . import spawn
10
+ from . import util
11
+
12
+
13
+ __all__ = ['Popen']
14
+
15
+ #
16
+ # Wrapper for an fd used while launching a process
17
+ #
18
+
19
+ class _DupFd(object):
20
+ def __init__(self, ind):
21
+ self.ind = ind
22
+ def detach(self):
23
+ return forkserver.get_inherited_fds()[self.ind]
24
+
25
+ #
26
+ # Start child process using a server process
27
+ #
28
+
29
+ class Popen(popen_fork.Popen):
30
+ method = 'forkserver'
31
+ DupFd = _DupFd
32
+
33
+ def __init__(self, process_obj):
34
+ self._fds = []
35
+ super().__init__(process_obj)
36
+
37
+ def duplicate_for_child(self, fd):
38
+ self._fds.append(fd)
39
+ return len(self._fds) - 1
40
+
41
+ def _launch(self, process_obj):
42
+ prep_data = spawn.get_preparation_data(process_obj._name)
43
+ buf = io.BytesIO()
44
+ set_spawning_popen(self)
45
+ try:
46
+ reduction.dump(prep_data, buf)
47
+ reduction.dump(process_obj, buf)
48
+ finally:
49
+ set_spawning_popen(None)
50
+
51
+ self.sentinel, w = forkserver.connect_to_new_process(self._fds)
52
+ # Keep a duplicate of the data pipe's write end as a sentinel of the
53
+ # parent process used by the child process.
54
+ _parent_w = os.dup(w)
55
+ self.finalizer = util.Finalize(self, util.close_fds,
56
+ (_parent_w, self.sentinel))
57
+ with open(w, 'wb', closefd=True) as f:
58
+ f.write(buf.getbuffer())
59
+ self.pid = forkserver.read_signed(self.sentinel)
60
+
61
+ def poll(self, flag=os.WNOHANG):
62
+ if self.returncode is None:
63
+ from multiprocess.connection import wait
64
+ timeout = 0 if flag == os.WNOHANG else None
65
+ if not wait([self.sentinel], timeout):
66
+ return None
67
+ try:
68
+ self.returncode = forkserver.read_signed(self.sentinel)
69
+ except (OSError, EOFError):
70
+ # This should not happen usually, but perhaps the forkserver
71
+ # process itself got killed
72
+ self.returncode = 255
73
+
74
+ return self.returncode
env-llmeval/lib/python3.10/site-packages/multiprocess/popen_spawn_win32.py ADDED
@@ -0,0 +1,131 @@
1
+ import os
2
+ import msvcrt
3
+ import signal
4
+ import sys
5
+ import _winapi
6
+
7
+ from .context import reduction, get_spawning_popen, set_spawning_popen
8
+ from . import spawn
9
+ from . import util
10
+
11
+ __all__ = ['Popen']
12
+
13
+ #
14
+ #
15
+ #
16
+
17
+ TERMINATE = 0x10000
18
+ WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False))
19
+ WINSERVICE = sys.executable.lower().endswith("pythonservice.exe")
20
+
21
+
22
+ def _path_eq(p1, p2):
23
+ return p1 == p2 or os.path.normcase(p1) == os.path.normcase(p2)
24
+
25
+ WINENV = not _path_eq(sys.executable, sys._base_executable)
26
+
27
+
28
+ def _close_handles(*handles):
29
+ for handle in handles:
30
+ _winapi.CloseHandle(handle)
31
+
32
+
33
+ #
34
+ # We define a Popen class similar to the one from subprocess, but
35
+ # whose constructor takes a process object as its argument.
36
+ #
37
+
38
+ class Popen(object):
39
+ '''
40
+ Start a subprocess to run the code of a process object
41
+ '''
42
+ method = 'spawn'
43
+
44
+ def __init__(self, process_obj):
45
+ prep_data = spawn.get_preparation_data(process_obj._name)
46
+
47
+ # read end of pipe will be duplicated by the child process
48
+ # -- see spawn_main() in spawn.py.
49
+ #
50
+ # bpo-33929: Previously, the read end of pipe was "stolen" by the child
51
+ # process, but it leaked a handle if the child process had been
52
+ # terminated before it could steal the handle from the parent process.
53
+ rhandle, whandle = _winapi.CreatePipe(None, 0)
54
+ wfd = msvcrt.open_osfhandle(whandle, 0)
55
+ cmd = spawn.get_command_line(parent_pid=os.getpid(),
56
+ pipe_handle=rhandle)
57
+ cmd = ' '.join('"%s"' % x for x in cmd)
58
+
59
+ python_exe = spawn.get_executable()
60
+
61
+ # bpo-35797: When running in a venv, we bypass the redirect
62
+ # executor and launch our base Python.
63
+ if WINENV and _path_eq(python_exe, sys.executable):
64
+ python_exe = sys._base_executable
65
+ env = os.environ.copy()
66
+ env["__PYVENV_LAUNCHER__"] = sys.executable
67
+ else:
68
+ env = None
69
+
70
+ with open(wfd, 'wb', closefd=True) as to_child:
71
+ # start process
72
+ try:
73
+ hp, ht, pid, tid = _winapi.CreateProcess(
74
+ python_exe, cmd,
75
+ None, None, False, 0, env, None, None)
76
+ _winapi.CloseHandle(ht)
77
+ except:
78
+ _winapi.CloseHandle(rhandle)
79
+ raise
80
+
81
+ # set attributes of self
82
+ self.pid = pid
83
+ self.returncode = None
84
+ self._handle = hp
85
+ self.sentinel = int(hp)
86
+ self.finalizer = util.Finalize(self, _close_handles,
87
+ (self.sentinel, int(rhandle)))
88
+
89
+ # send information to child
90
+ set_spawning_popen(self)
91
+ try:
92
+ reduction.dump(prep_data, to_child)
93
+ reduction.dump(process_obj, to_child)
94
+ finally:
95
+ set_spawning_popen(None)
96
+
97
+ def duplicate_for_child(self, handle):
98
+ assert self is get_spawning_popen()
99
+ return reduction.duplicate(handle, self.sentinel)
100
+
101
+ def wait(self, timeout=None):
102
+ if self.returncode is None:
103
+ if timeout is None:
104
+ msecs = _winapi.INFINITE
105
+ else:
106
+ msecs = max(0, int(timeout * 1000 + 0.5))
107
+
108
+ res = _winapi.WaitForSingleObject(int(self._handle), msecs)
109
+ if res == _winapi.WAIT_OBJECT_0:
110
+ code = _winapi.GetExitCodeProcess(self._handle)
111
+ if code == TERMINATE:
112
+ code = -signal.SIGTERM
113
+ self.returncode = code
114
+
115
+ return self.returncode
116
+
117
+ def poll(self):
118
+ return self.wait(timeout=0)
119
+
120
+ def terminate(self):
121
+ if self.returncode is None:
122
+ try:
123
+ _winapi.TerminateProcess(int(self._handle), TERMINATE)
124
+ except OSError:
125
+ if self.wait(timeout=1.0) is None:
126
+ raise
127
+
128
+ kill = terminate
129
+
130
+ def close(self):
131
+ self.finalizer()
env-llmeval/lib/python3.10/site-packages/multiprocess/resource_sharer.py ADDED
@@ -0,0 +1,154 @@
1
+ #
2
+ # We use a background thread for sharing fds on Unix, and for sharing sockets on
3
+ # Windows.
4
+ #
5
+ # A client which wants to pickle a resource registers it with the resource
6
+ # sharer and gets an identifier in return. The unpickling process will connect
7
+ # to the resource sharer, sends the identifier and its pid, and then receives
8
+ # the resource.
9
+ #
10
+
11
+ import os
12
+ import signal
13
+ import socket
14
+ import sys
15
+ import threading
16
+
17
+ from . import process
18
+ from .context import reduction
19
+ from . import util
20
+
21
+ __all__ = ['stop']
22
+
23
+
24
+ if sys.platform == 'win32':
25
+ __all__ += ['DupSocket']
26
+
27
+ class DupSocket(object):
28
+ '''Picklable wrapper for a socket.'''
29
+ def __init__(self, sock):
30
+ new_sock = sock.dup()
31
+ def send(conn, pid):
32
+ share = new_sock.share(pid)
33
+ conn.send_bytes(share)
34
+ self._id = _resource_sharer.register(send, new_sock.close)
35
+
36
+ def detach(self):
37
+ '''Get the socket. This should only be called once.'''
38
+ with _resource_sharer.get_connection(self._id) as conn:
39
+ share = conn.recv_bytes()
40
+ return socket.fromshare(share)
41
+
42
+ else:
43
+ __all__ += ['DupFd']
44
+
45
+ class DupFd(object):
46
+ '''Wrapper for fd which can be used at any time.'''
47
+ def __init__(self, fd):
48
+ new_fd = os.dup(fd)
49
+ def send(conn, pid):
50
+ reduction.send_handle(conn, new_fd, pid)
51
+ def close():
52
+ os.close(new_fd)
53
+ self._id = _resource_sharer.register(send, close)
54
+
55
+ def detach(self):
56
+ '''Get the fd. This should only be called once.'''
57
+ with _resource_sharer.get_connection(self._id) as conn:
58
+ return reduction.recv_handle(conn)
59
+
60
+
61
+ class _ResourceSharer(object):
62
+ '''Manager for resources using background thread.'''
63
+ def __init__(self):
64
+ self._key = 0
65
+ self._cache = {}
66
+ self._lock = threading.Lock()
67
+ self._listener = None
68
+ self._address = None
69
+ self._thread = None
70
+ util.register_after_fork(self, _ResourceSharer._afterfork)
71
+
72
+ def register(self, send, close):
73
+ '''Register resource, returning an identifier.'''
74
+ with self._lock:
75
+ if self._address is None:
76
+ self._start()
77
+ self._key += 1
78
+ self._cache[self._key] = (send, close)
79
+ return (self._address, self._key)
80
+
81
+ @staticmethod
82
+ def get_connection(ident):
83
+ '''Return connection from which to receive identified resource.'''
84
+ from .connection import Client
85
+ address, key = ident
86
+ c = Client(address, authkey=process.current_process().authkey)
87
+ c.send((key, os.getpid()))
88
+ return c
89
+
90
+ def stop(self, timeout=None):
91
+ '''Stop the background thread and clear registered resources.'''
92
+ from .connection import Client
93
+ with self._lock:
94
+ if self._address is not None:
95
+ c = Client(self._address,
96
+ authkey=process.current_process().authkey)
97
+ c.send(None)
98
+ c.close()
99
+ self._thread.join(timeout)
100
+ if self._thread.is_alive():
101
+ util.sub_warning('_ResourceSharer thread did '
102
+ 'not stop when asked')
103
+ self._listener.close()
104
+ self._thread = None
105
+ self._address = None
106
+ self._listener = None
107
+ for key, (send, close) in self._cache.items():
108
+ close()
109
+ self._cache.clear()
110
+
111
+ def _afterfork(self):
112
+ for key, (send, close) in self._cache.items():
113
+ close()
114
+ self._cache.clear()
115
+ self._lock._at_fork_reinit()
116
+ if self._listener is not None:
117
+ self._listener.close()
118
+ self._listener = None
119
+ self._address = None
120
+ self._thread = None
121
+
122
+ def _start(self):
123
+ from .connection import Listener
124
+ assert self._listener is None, "Already have Listener"
125
+ util.debug('starting listener and thread for sending handles')
126
+ self._listener = Listener(authkey=process.current_process().authkey)
127
+ self._address = self._listener.address
128
+ t = threading.Thread(target=self._serve)
129
+ t.daemon = True
130
+ t.start()
131
+ self._thread = t
132
+
133
+ def _serve(self):
134
+ if hasattr(signal, 'pthread_sigmask'):
135
+ signal.pthread_sigmask(signal.SIG_BLOCK, signal.valid_signals())
136
+ while 1:
137
+ try:
138
+ with self._listener.accept() as conn:
139
+ msg = conn.recv()
140
+ if msg is None:
141
+ break
142
+ key, destination_pid = msg
143
+ send, close = self._cache.pop(key)
144
+ try:
145
+ send(conn, destination_pid)
146
+ finally:
147
+ close()
148
+ except:
149
+ if not util.is_exiting():
150
+ sys.excepthook(*sys.exc_info())
151
+
152
+
153
+ _resource_sharer = _ResourceSharer()
154
+ stop = _resource_sharer.stop
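
A rough sketch of the register/detach round trip that the module comment describes, using the POSIX DupFd branch above (illustrative only; user code normally reaches this module indirectly when connections are pickled):

import os
from multiprocess.resource_sharer import DupFd, stop

r, w = os.pipe()
dup = DupFd(w)        # duplicates w and registers it with the background sharer
fd = dup.detach()     # connects to the listener and receives a copy of the fd
os.write(fd, b'hi')
print(os.read(r, 2))  # b'hi'
for f in (fd, r, w):
    os.close(f)
stop()                # shut the sharer thread down and close registered fds
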
env-llmeval/lib/python3.10/site-packages/multiprocess/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (172 kB).
 
env-llmeval/lib/python3.10/site-packages/multiprocess/tests/__pycache__/__main__.cpython-310.pyc ADDED
Binary file (885 Bytes).
 
env-llmeval/lib/python3.10/site-packages/multiprocess/tests/__pycache__/mp_fork_bomb.cpython-310.pyc ADDED
Binary file (530 Bytes).
 
env-llmeval/lib/python3.10/site-packages/multiprocess/tests/__pycache__/mp_preload.cpython-310.pyc ADDED
Binary file (596 Bytes).
 
env-llmeval/lib/python3.10/site-packages/multiprocess/tests/__pycache__/test_multiprocessing_fork.cpython-310.pyc ADDED
Binary file (631 Bytes).
 
env-llmeval/lib/python3.10/site-packages/multiprocess/tests/__pycache__/test_multiprocessing_forkserver.cpython-310.pyc ADDED
Binary file (580 Bytes).
 
env-llmeval/lib/python3.10/site-packages/multiprocess/tests/__pycache__/test_multiprocessing_main_handling.cpython-310.pyc ADDED
Binary file (10.7 kB).
 
env-llmeval/lib/python3.10/site-packages/multiprocess/tests/__pycache__/test_multiprocessing_spawn.cpython-310.pyc ADDED
Binary file (474 Bytes).
 
env-llmeval/lib/python3.10/site-packages/multiprocess/tests/test_multiprocessing_fork.py ADDED
@@ -0,0 +1,19 @@
1
+ import unittest
2
+ from multiprocess.tests import install_tests_in_module_dict
3
+
4
+ import sys
5
+ from test import support
6
+
7
+ if support.PGO:
8
+ raise unittest.SkipTest("test is not helpful for PGO")
9
+
10
+ if sys.platform == "win32":
11
+ raise unittest.SkipTest("fork is not available on Windows")
12
+
13
+ if sys.platform == 'darwin':
14
+ raise unittest.SkipTest("test may crash on macOS (bpo-33725)")
15
+
16
+ install_tests_in_module_dict(globals(), 'fork')
17
+
18
+ if __name__ == '__main__':
19
+ unittest.main()
env-llmeval/lib/python3.10/site-packages/multiprocess/util.py ADDED
@@ -0,0 +1,489 @@
1
+ #
2
+ # Module providing various facilities to other parts of the package
3
+ #
4
+ # multiprocessing/util.py
5
+ #
6
+ # Copyright (c) 2006-2008, R Oudkerk
7
+ # Licensed to PSF under a Contributor Agreement.
8
+ #
9
+
10
+ import os
11
+ import itertools
12
+ import sys
13
+ import weakref
14
+ import atexit
15
+ import threading # we want threading to install its
16
+ # cleanup function before multiprocessing does
17
+ from subprocess import _args_from_interpreter_flags
18
+
19
+ from . import process
20
+
21
+ __all__ = [
22
+ 'sub_debug', 'debug', 'info', 'sub_warning', 'get_logger',
23
+ 'log_to_stderr', 'get_temp_dir', 'register_after_fork',
24
+ 'is_exiting', 'Finalize', 'ForkAwareThreadLock', 'ForkAwareLocal',
25
+ 'close_all_fds_except', 'SUBDEBUG', 'SUBWARNING',
26
+ ]
27
+
28
+ #
29
+ # Logging
30
+ #
31
+
32
+ NOTSET = 0
33
+ SUBDEBUG = 5
34
+ DEBUG = 10
35
+ INFO = 20
36
+ SUBWARNING = 25
37
+
38
+ LOGGER_NAME = 'multiprocess'
39
+ DEFAULT_LOGGING_FORMAT = '[%(levelname)s/%(processName)s] %(message)s'
40
+
41
+ _logger = None
42
+ _log_to_stderr = False
43
+
44
+ def sub_debug(msg, *args):
45
+ if _logger:
46
+ _logger.log(SUBDEBUG, msg, *args)
47
+
48
+ def debug(msg, *args):
49
+ if _logger:
50
+ _logger.log(DEBUG, msg, *args)
51
+
52
+ def info(msg, *args):
53
+ if _logger:
54
+ _logger.log(INFO, msg, *args)
55
+
56
+ def sub_warning(msg, *args):
57
+ if _logger:
58
+ _logger.log(SUBWARNING, msg, *args)
59
+
60
+ def get_logger():
61
+ '''
62
+ Returns logger used by multiprocess
63
+ '''
64
+ global _logger
65
+ import logging
66
+
67
+ logging._acquireLock()
68
+ try:
69
+ if not _logger:
70
+
71
+ _logger = logging.getLogger(LOGGER_NAME)
72
+ _logger.propagate = 0
73
+
74
+ # XXX multiprocessing should cleanup before logging
75
+ if hasattr(atexit, 'unregister'):
76
+ atexit.unregister(_exit_function)
77
+ atexit.register(_exit_function)
78
+ else:
79
+ atexit._exithandlers.remove((_exit_function, (), {}))
80
+ atexit._exithandlers.append((_exit_function, (), {}))
81
+
82
+ finally:
83
+ logging._releaseLock()
84
+
85
+ return _logger
86
+
87
+ def log_to_stderr(level=None):
88
+ '''
89
+ Turn on logging and add a handler which prints to stderr
90
+ '''
91
+ global _log_to_stderr
92
+ import logging
93
+
94
+ logger = get_logger()
95
+ formatter = logging.Formatter(DEFAULT_LOGGING_FORMAT)
96
+ handler = logging.StreamHandler()
97
+ handler.setFormatter(formatter)
98
+ logger.addHandler(handler)
99
+
100
+ if level:
101
+ logger.setLevel(level)
102
+ _log_to_stderr = True
103
+ return _logger
104
+
105
+
106
+ # Abstract socket support
107
+
108
+ def _platform_supports_abstract_sockets():
109
+ if sys.platform == "linux":
110
+ return True
111
+ if hasattr(sys, 'getandroidapilevel'):
112
+ return True
113
+ return False
114
+
115
+
116
+ def is_abstract_socket_namespace(address):
117
+ if not address:
118
+ return False
119
+ if isinstance(address, bytes):
120
+ return address[0] == 0
121
+ elif isinstance(address, str):
122
+ return address[0] == "\0"
123
+ raise TypeError(f'address type of {address!r} unrecognized')
124
+
125
+
126
+ abstract_sockets_supported = _platform_supports_abstract_sockets()
127
+
128
+ #
129
+ # Function returning a temp directory which will be removed on exit
130
+ #
131
+
132
+ def _remove_temp_dir(rmtree, tempdir):
133
+ rmtree(tempdir)
134
+
135
+ current_process = process.current_process()
136
+ # current_process() can be None if the finalizer is called
137
+ # late during Python finalization
138
+ if current_process is not None:
139
+ current_process._config['tempdir'] = None
140
+
141
+ def get_temp_dir():
142
+ # get name of a temp directory which will be automatically cleaned up
143
+ tempdir = process.current_process()._config.get('tempdir')
144
+ if tempdir is None:
145
+ import shutil, tempfile
146
+ tempdir = tempfile.mkdtemp(prefix='pymp-')
147
+ info('created temp directory %s', tempdir)
148
+ # keep a strong reference to shutil.rmtree(), since the finalizer
149
+ # can be called late during Python shutdown
150
+ Finalize(None, _remove_temp_dir, args=(shutil.rmtree, tempdir),
151
+ exitpriority=-100)
152
+ process.current_process()._config['tempdir'] = tempdir
153
+ return tempdir
154
+
155
+ #
156
+ # Support for reinitialization of objects when bootstrapping a child process
157
+ #
158
+
159
+ _afterfork_registry = weakref.WeakValueDictionary()
160
+ _afterfork_counter = itertools.count()
161
+
162
+ def _run_after_forkers():
163
+ items = list(_afterfork_registry.items())
164
+ items.sort()
165
+ for (index, ident, func), obj in items:
166
+ try:
167
+ func(obj)
168
+ except Exception as e:
169
+ info('after forker raised exception %s', e)
170
+
171
+ def register_after_fork(obj, func):
172
+ _afterfork_registry[(next(_afterfork_counter), id(obj), func)] = obj
173
+
174
+ #
175
+ # Finalization using weakrefs
176
+ #
177
+
178
+ _finalizer_registry = {}
179
+ _finalizer_counter = itertools.count()
180
+
181
+
182
+ class Finalize(object):
183
+ '''
184
+ Class which supports object finalization using weakrefs
185
+ '''
186
+ def __init__(self, obj, callback, args=(), kwargs=None, exitpriority=None):
187
+ if (exitpriority is not None) and not isinstance(exitpriority,int):
188
+ raise TypeError(
189
+ "Exitpriority ({0!r}) must be None or int, not {1!s}".format(
190
+ exitpriority, type(exitpriority)))
191
+
192
+ if obj is not None:
193
+ self._weakref = weakref.ref(obj, self)
194
+ elif exitpriority is None:
195
+ raise ValueError("Without object, exitpriority cannot be None")
196
+
197
+ self._callback = callback
198
+ self._args = args
199
+ self._kwargs = kwargs or {}
200
+ self._key = (exitpriority, next(_finalizer_counter))
201
+ self._pid = os.getpid()
202
+
203
+ _finalizer_registry[self._key] = self
204
+
205
+ def __call__(self, wr=None,
206
+ # Need to bind these locally because the globals can have
207
+ # been cleared at shutdown
208
+ _finalizer_registry=_finalizer_registry,
209
+ sub_debug=sub_debug, getpid=os.getpid):
210
+ '''
211
+ Run the callback unless it has already been called or cancelled
212
+ '''
213
+ try:
214
+ del _finalizer_registry[self._key]
215
+ except KeyError:
216
+ sub_debug('finalizer no longer registered')
217
+ else:
218
+ if self._pid != getpid():
219
+ sub_debug('finalizer ignored because different process')
220
+ res = None
221
+ else:
222
+ sub_debug('finalizer calling %s with args %s and kwargs %s',
223
+ self._callback, self._args, self._kwargs)
224
+ res = self._callback(*self._args, **self._kwargs)
225
+ self._weakref = self._callback = self._args = \
226
+ self._kwargs = self._key = None
227
+ return res
228
+
229
+ def cancel(self):
230
+ '''
231
+ Cancel finalization of the object
232
+ '''
233
+ try:
234
+ del _finalizer_registry[self._key]
235
+ except KeyError:
236
+ pass
237
+ else:
238
+ self._weakref = self._callback = self._args = \
239
+ self._kwargs = self._key = None
240
+
241
+ def still_active(self):
242
+ '''
243
+ Return whether this finalizer is still waiting to invoke callback
244
+ '''
245
+ return self._key in _finalizer_registry
246
+
247
+ def __repr__(self):
248
+ try:
249
+ obj = self._weakref()
250
+ except (AttributeError, TypeError):
251
+ obj = None
252
+
253
+ if obj is None:
254
+ return '<%s object, dead>' % self.__class__.__name__
255
+
256
+ x = '<%s object, callback=%s' % (
257
+ self.__class__.__name__,
258
+ getattr(self._callback, '__name__', self._callback))
259
+ if self._args:
260
+ x += ', args=' + str(self._args)
261
+ if self._kwargs:
262
+ x += ', kwargs=' + str(self._kwargs)
263
+ if self._key[0] is not None:
264
+ x += ', exitpriority=' + str(self._key[0])
265
+ return x + '>'
266
+
267
+
268
+ def _run_finalizers(minpriority=None):
269
+ '''
270
+ Run all finalizers whose exit priority is not None and at least minpriority
271
+
272
+ Finalizers with highest priority are called first; finalizers with
273
+ the same priority will be called in reverse order of creation.
274
+ '''
275
+ if _finalizer_registry is None:
276
+ # This function may be called after this module's globals are
277
+ # destroyed. See the _exit_function function in this module for more
278
+ # notes.
279
+ return
280
+
281
+ if minpriority is None:
282
+ f = lambda p : p[0] is not None
283
+ else:
284
+ f = lambda p : p[0] is not None and p[0] >= minpriority
285
+
286
+ # Careful: _finalizer_registry may be mutated while this function
287
+ # is running (either by a GC run or by another thread).
288
+
289
+ # list(_finalizer_registry) should be atomic, while
290
+ # list(_finalizer_registry.items()) is not.
291
+ keys = [key for key in list(_finalizer_registry) if f(key)]
292
+ keys.sort(reverse=True)
293
+
294
+ for key in keys:
295
+ finalizer = _finalizer_registry.get(key)
296
+ # key may have been removed from the registry
297
+ if finalizer is not None:
298
+ sub_debug('calling %s', finalizer)
299
+ try:
300
+ finalizer()
301
+ except Exception:
302
+ import traceback
303
+ traceback.print_exc()
304
+
305
+ if minpriority is None:
306
+ _finalizer_registry.clear()
307
+
308
+ #
309
+ # Clean up on exit
310
+ #
311
+
312
+ def is_exiting():
313
+ '''
314
+ Returns true if the process is shutting down
315
+ '''
316
+ return _exiting or _exiting is None
317
+
318
+ _exiting = False
319
+
320
+ def _exit_function(info=info, debug=debug, _run_finalizers=_run_finalizers,
321
+ active_children=process.active_children,
322
+ current_process=process.current_process):
323
+ # We hold on to references to functions in the arglist due to the
324
+ # situation described below, where this function is called after this
325
+ # module's globals are destroyed.
326
+
327
+ global _exiting
328
+
329
+ if not _exiting:
330
+ _exiting = True
331
+
332
+ info('process shutting down')
333
+ debug('running all "atexit" finalizers with priority >= 0')
334
+ _run_finalizers(0)
335
+
336
+ if current_process() is not None:
337
+ # We check if the current process is None here because if
338
+ # it's None, any call to ``active_children()`` will raise
339
+ # an AttributeError (active_children winds up trying to
340
+ # get attributes from util._current_process). One
341
+ # situation where this can happen is if someone has
342
+ # manipulated sys.modules, causing this module to be
343
+ # garbage collected. The destructor for the module type
344
+ # then replaces all values in the module dict with None.
345
+ # For instance, after setuptools runs a test it replaces
346
+ # sys.modules with a copy created earlier. See issues
347
+ # #9775 and #15881. Also related: #4106, #9205, and
348
+ # #9207.
349
+
350
+ for p in active_children():
351
+ if p.daemon:
352
+ info('calling terminate() for daemon %s', p.name)
353
+ p._popen.terminate()
354
+
355
+ for p in active_children():
356
+ info('calling join() for process %s', p.name)
357
+ p.join()
358
+
359
+ debug('running the remaining "atexit" finalizers')
360
+ _run_finalizers()
361
+
362
+ atexit.register(_exit_function)
363
+
364
+ #
365
+ # Some fork aware types
366
+ #
367
+
368
+ class ForkAwareThreadLock(object):
369
+ def __init__(self):
370
+ self._lock = threading.Lock()
371
+ self.acquire = self._lock.acquire
372
+ self.release = self._lock.release
373
+ register_after_fork(self, ForkAwareThreadLock._at_fork_reinit)
374
+
375
+ def _at_fork_reinit(self):
376
+ self._lock._at_fork_reinit()
377
+
378
+ def __enter__(self):
379
+ return self._lock.__enter__()
380
+
381
+ def __exit__(self, *args):
382
+ return self._lock.__exit__(*args)
383
+
384
+
385
+ class ForkAwareLocal(threading.local):
386
+ def __init__(self):
387
+ register_after_fork(self, lambda obj : obj.__dict__.clear())
388
+ def __reduce__(self):
389
+ return type(self), ()
390
+
391
+ #
392
+ # Close fds except those specified
393
+ #
394
+
395
+ try:
396
+ MAXFD = os.sysconf("SC_OPEN_MAX")
397
+ except Exception:
398
+ MAXFD = 256
399
+
400
+ def close_all_fds_except(fds):
401
+ fds = list(fds) + [-1, MAXFD]
402
+ fds.sort()
403
+ assert fds[-1] == MAXFD, 'fd too large'
404
+ for i in range(len(fds) - 1):
405
+ os.closerange(fds[i]+1, fds[i+1])
406
+ #
407
+ # Close sys.stdin and replace stdin with os.devnull
408
+ #
409
+
410
+ def _close_stdin():
411
+ if sys.stdin is None:
412
+ return
413
+
414
+ try:
415
+ sys.stdin.close()
416
+ except (OSError, ValueError):
417
+ pass
418
+
419
+ try:
420
+ fd = os.open(os.devnull, os.O_RDONLY)
421
+ try:
422
+ sys.stdin = open(fd, encoding="utf-8", closefd=False)
423
+ except:
424
+ os.close(fd)
425
+ raise
426
+ except (OSError, ValueError):
427
+ pass
428
+
429
+ #
430
+ # Flush standard streams, if any
431
+ #
432
+
433
+ def _flush_std_streams():
434
+ try:
435
+ sys.stdout.flush()
436
+ except (AttributeError, ValueError):
437
+ pass
438
+ try:
439
+ sys.stderr.flush()
440
+ except (AttributeError, ValueError):
441
+ pass
442
+
443
+ #
444
+ # Start a program with only specified fds kept open
445
+ #
446
+
447
+ def spawnv_passfds(path, args, passfds):
448
+ import _posixsubprocess
449
+ passfds = tuple(sorted(map(int, passfds)))
450
+ errpipe_read, errpipe_write = os.pipe()
451
+ try:
452
+ return _posixsubprocess.fork_exec(
453
+ args, [os.fsencode(path)], True, passfds, None, None,
454
+ -1, -1, -1, -1, -1, -1, errpipe_read, errpipe_write,
455
+ False, False, None, None, None, -1, None)
456
+ finally:
457
+ os.close(errpipe_read)
458
+ os.close(errpipe_write)
459
+
460
+
461
+ def close_fds(*fds):
462
+ """Close each file descriptor given as an argument"""
463
+ for fd in fds:
464
+ os.close(fd)
465
+
466
+
467
+ def _cleanup_tests():
468
+ """Cleanup multiprocessing resources when multiprocessing tests
469
+ completed."""
470
+
471
+ from test import support
472
+
473
+ # cleanup multiprocessing
474
+ process._cleanup()
475
+
476
+ # Stop the ForkServer process if it's running
477
+ from multiprocess import forkserver
478
+ forkserver._forkserver._stop()
479
+
480
+ # Stop the ResourceTracker process if it's running
481
+ from multiprocess import resource_tracker
482
+ resource_tracker._resource_tracker._stop()
483
+
484
+ # bpo-37421: Explicitly call _run_finalizers() to remove immediately
485
+ # temporary directories created by multiprocessing.util.get_temp_dir().
486
+ _run_finalizers()
487
+ support.gc_collect()
488
+
489
+ support.reap_children()
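
Two of the helpers above are occasionally useful outside the package internals; a brief hedged example of log_to_stderr() and Finalize (the callback and priority here are arbitrary choices for illustration):

import multiprocess.util as util

util.log_to_stderr(util.DEBUG)    # attach a stderr handler to the package logger

class Resource:
    pass

res = Resource()
# The finalizer fires when `res` is garbage collected, at interpreter exit
# (because exitpriority >= 0), or when it is called directly.
fin = util.Finalize(res, print, args=('cleaning up',), exitpriority=10)
print(fin.still_active())   # True
fin()                       # prints 'cleaning up' and deregisters the finalizer
print(fin.still_active())   # False
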
env-llmeval/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
1
+ pip
env-llmeval/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/License.txt ADDED
@@ -0,0 +1,1568 @@
1
+ End User License Agreement
2
+ --------------------------
3
+
4
+
5
+ Preface
6
+ -------
7
+
8
+ The Software License Agreement in Chapter 1 and the Supplement
9
+ in Chapter 2 contain license terms and conditions that govern
10
+ the use of NVIDIA software. By accepting this agreement, you
11
+ agree to comply with all the terms and conditions applicable
12
+ to the product(s) included herein.
13
+
14
+
15
+ NVIDIA Driver
16
+
17
+
18
+ Description
19
+
20
+ This package contains the operating system driver and
21
+ fundamental system software components for NVIDIA GPUs.
22
+
23
+
24
+ NVIDIA CUDA Toolkit
25
+
26
+
27
+ Description
28
+
29
+ The NVIDIA CUDA Toolkit provides command-line and graphical
30
+ tools for building, debugging and optimizing the performance
31
+ of applications accelerated by NVIDIA GPUs, runtime and math
32
+ libraries, and documentation including programming guides,
33
+ user manuals, and API references.
34
+
35
+
36
+ Default Install Location of CUDA Toolkit
37
+
38
+ Windows platform:
39
+
40
+ %ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v#.#
41
+
42
+ Linux platform:
43
+
44
+ /usr/local/cuda-#.#
45
+
46
+ Mac platform:
47
+
48
+ /Developer/NVIDIA/CUDA-#.#
49
+
50
+
51
+ NVIDIA CUDA Samples
52
+
53
+
54
+ Description
55
+
56
+ This package includes over 100+ CUDA examples that demonstrate
57
+ various CUDA programming principles, and efficient CUDA
58
+ implementation of algorithms in specific application domains.
59
+
60
+
61
+ Default Install Location of CUDA Samples
62
+
63
+ Windows platform:
64
+
65
+ %ProgramData%\NVIDIA Corporation\CUDA Samples\v#.#
66
+
67
+ Linux platform:
68
+
69
+ /usr/local/cuda-#.#/samples
70
+
71
+ and
72
+
73
+ $HOME/NVIDIA_CUDA-#.#_Samples
74
+
75
+ Mac platform:
76
+
77
+ /Developer/NVIDIA/CUDA-#.#/samples
78
+
79
+
80
+ NVIDIA Nsight Visual Studio Edition (Windows only)
81
+
82
+
83
+ Description
84
+
85
+ NVIDIA Nsight Development Platform, Visual Studio Edition is a
86
+ development environment integrated into Microsoft Visual
87
+ Studio that provides tools for debugging, profiling, analyzing
88
+ and optimizing your GPU computing and graphics applications.
89
+
90
+
91
+ Default Install Location of Nsight Visual Studio Edition
92
+
93
+ Windows platform:
94
+
95
+ %ProgramFiles(x86)%\NVIDIA Corporation\Nsight Visual Studio Edition #.#
96
+
97
+
98
+ 1. License Agreement for NVIDIA Software Development Kits
99
+ ---------------------------------------------------------
100
+
101
+
102
+ Release Date: July 26, 2018
103
+ ---------------------------
104
+
105
+
106
+ Important NoticeRead before downloading, installing,
107
+ copying or using the licensed software:
108
+ -------------------------------------------------------
109
+
110
+ This license agreement, including exhibits attached
111
+ ("Agreement”) is a legal agreement between you and NVIDIA
112
+ Corporation ("NVIDIA") and governs your use of a NVIDIA
113
+ software development kit (“SDK”).
114
+
115
+ Each SDK has its own set of software and materials, but here
116
+ is a description of the types of items that may be included in
117
+ a SDK: source code, header files, APIs, data sets and assets
118
+ (examples include images, textures, models, scenes, videos,
119
+ native API input/output files), binary software, sample code,
120
+ libraries, utility programs, programming code and
121
+ documentation.
122
+
123
+ This Agreement can be accepted only by an adult of legal age
124
+ of majority in the country in which the SDK is used.
125
+
126
+ If you are entering into this Agreement on behalf of a company
127
+ or other legal entity, you represent that you have the legal
128
+ authority to bind the entity to this Agreement, in which case
129
+ “you” will mean the entity you represent.
130
+
131
+ If you don’t have the required age or authority to accept
132
+ this Agreement, or if you don’t accept all the terms and
133
+ conditions of this Agreement, do not download, install or use
134
+ the SDK.
135
+
136
+ You agree to use the SDK only for purposes that are permitted
137
+ by (a) this Agreement, and (b) any applicable law, regulation
138
+ or generally accepted practices or guidelines in the relevant
139
+ jurisdictions.
140
+
141
+
142
+ 1.1. License
143
+
144
+
145
+ 1.1.1. License Grant
146
+
147
+ Subject to the terms of this Agreement, NVIDIA hereby grants
148
+ you a non-exclusive, non-transferable license, without the
149
+ right to sublicense (except as expressly provided in this
150
+ Agreement) to:
151
+
152
+ 1. Install and use the SDK,
153
+
154
+ 2. Modify and create derivative works of sample source code
155
+ delivered in the SDK, and
156
+
157
+ 3. Distribute those portions of the SDK that are identified
158
+ in this Agreement as distributable, as incorporated in
159
+ object code format into a software application that meets
160
+ the distribution requirements indicated in this Agreement.
161
+
162
+
163
+ 1.1.2. Distribution Requirements
164
+
165
+ These are the distribution requirements for you to exercise
166
+ the distribution grant:
167
+
168
+ 1. Your application must have material additional
169
+ functionality, beyond the included portions of the SDK.
170
+
171
+ 2. The distributable portions of the SDK shall only be
172
+ accessed by your application.
173
+
174
+ 3. The following notice shall be included in modifications
175
+ and derivative works of sample source code distributed:
176
+ “This software contains source code provided by NVIDIA
177
+ Corporation.”
178
+
179
+ 4. Unless a developer tool is identified in this Agreement
180
+ as distributable, it is delivered for your internal use
181
+ only.
182
+
183
+ 5. The terms under which you distribute your application
184
+ must be consistent with the terms of this Agreement,
185
+ including (without limitation) terms relating to the
186
+ license grant and license restrictions and protection of
187
+ NVIDIA’s intellectual property rights. Additionally, you
188
+ agree that you will protect the privacy, security and
189
+ legal rights of your application users.
190
+
191
+ 6. You agree to notify NVIDIA in writing of any known or
192
+ suspected distribution or use of the SDK not in compliance
193
+ with the requirements of this Agreement, and to enforce
194
+ the terms of your agreements with respect to distributed
195
+ SDK.
196
+
197
+
198
+ 1.1.3. Authorized Users
199
+
200
+ You may allow employees and contractors of your entity or of
201
+ your subsidiary(ies) to access and use the SDK from your
202
+ secure network to perform work on your behalf.
203
+
204
+ If you are an academic institution you may allow users
205
+ enrolled or employed by the academic institution to access and
206
+ use the SDK from your secure network.
207
+
208
+ You are responsible for the compliance with the terms of this
209
+ Agreement by your authorized users. If you become aware that
210
+ your authorized users didn’t follow the terms of this
211
+ Agreement, you agree to take reasonable steps to resolve the
212
+ non-compliance and prevent new occurrences.
213
+
214
+
215
+ 1.1.4. Pre-Release SDK
216
+
217
+ The SDK versions identified as alpha, beta, preview or
218
+ otherwise as pre-release, may not be fully functional, may
219
+ contain errors or design flaws, and may have reduced or
220
+ different security, privacy, accessibility, availability, and
221
+ reliability standards relative to commercial versions of
222
+ NVIDIA software and materials. Use of a pre-release SDK may
223
+ result in unexpected results, loss of data, project delays or
224
+ other unpredictable damage or loss.
225
+
226
+ You may use a pre-release SDK at your own risk, understanding
227
+ that pre-release SDKs are not intended for use in production
228
+ or business-critical systems.
229
+
230
+ NVIDIA may choose not to make available a commercial version
231
+ of any pre-release SDK. NVIDIA may also choose to abandon
232
+ development and terminate the availability of a pre-release
233
+ SDK at any time without liability.
234
+
235
+
236
+ 1.1.5. Updates
237
+
238
+ NVIDIA may, at its option, make available patches, workarounds
239
+ or other updates to this SDK. Unless the updates are provided
240
+ with their separate governing terms, they are deemed part of
241
+ the SDK licensed to you as provided in this Agreement. You
242
+ agree that the form and content of the SDK that NVIDIA
243
+ provides may change without prior notice to you. While NVIDIA
244
+ generally maintains compatibility between versions, NVIDIA may
245
+ in some cases make changes that introduce incompatibilities in
246
+ future versions of the SDK.
247
+
248
+
249
+ 1.1.6. Third Party Licenses
250
+
251
+ The SDK may come bundled with, or otherwise include or be
252
+ distributed with, third party software licensed by a NVIDIA
253
+ supplier and/or open source software provided under an open
254
+ source license. Use of third party software is subject to the
255
+ third-party license terms, or in the absence of third party
256
+ terms, the terms of this Agreement. Copyright to third party
257
+ software is held by the copyright holders indicated in the
258
+ third-party software or license.
259
+
260
+
261
+ 1.1.7. Reservation of Rights
262
+
263
+ NVIDIA reserves all rights, title, and interest in and to the
264
+ SDK, not expressly granted to you under this Agreement.
265
+
266
+
267
+ 1.2. Limitations
268
+
269
+ The following license limitations apply to your use of the
270
+ SDK:
271
+
272
+ 1. You may not reverse engineer, decompile or disassemble,
273
+ or remove copyright or other proprietary notices from any
274
+ portion of the SDK or copies of the SDK.
275
+
276
+ 2. Except as expressly provided in this Agreement, you may
277
+ not copy, sell, rent, sublicense, transfer, distribute,
278
+ modify, or create derivative works of any portion of the
279
+ SDK. For clarity, you may not distribute or sublicense the
280
+ SDK as a stand-alone product.
281
+
282
+ 3. Unless you have an agreement with NVIDIA for this
283
+ purpose, you may not indicate that an application created
284
+ with the SDK is sponsored or endorsed by NVIDIA.
285
+
286
+ 4. You may not bypass, disable, or circumvent any
287
+ encryption, security, digital rights management or
288
+ authentication mechanism in the SDK.
289
+
290
+ 5. You may not use the SDK in any manner that would cause it
291
+ to become subject to an open source software license. As
292
+ examples, licenses that require as a condition of use,
293
+ modification, and/or distribution that the SDK be:
294
+
295
+ a. Disclosed or distributed in source code form;
296
+
297
+ b. Licensed for the purpose of making derivative works;
298
+ or
299
+
300
+ c. Redistributable at no charge.
301
+
302
+ 6. Unless you have an agreement with NVIDIA for this
303
+ purpose, you may not use the SDK with any system or
304
+ application where the use or failure of the system or
305
+ application can reasonably be expected to threaten or
306
+ result in personal injury, death, or catastrophic loss.
307
+ Examples include use in avionics, navigation, military,
308
+ medical, life support or other life critical applications.
309
+ NVIDIA does not design, test or manufacture the SDK for
310
+ these critical uses and NVIDIA shall not be liable to you
311
+ or any third party, in whole or in part, for any claims or
312
+ damages arising from such uses.
313
+
314
+ 7. You agree to defend, indemnify and hold harmless NVIDIA
315
+ and its affiliates, and their respective employees,
316
+ contractors, agents, officers and directors, from and
317
+ against any and all claims, damages, obligations, losses,
318
+ liabilities, costs or debt, fines, restitutions and
319
+ expenses (including but not limited to attorney’s fees
320
+ and costs incident to establishing the right of
321
+ indemnification) arising out of or related to your use of
322
+ the SDK outside of the scope of this Agreement, or not in
323
+ compliance with its terms.
324
+
325
+
326
+ 1.3. Ownership
327
+
328
+ 1. NVIDIA or its licensors hold all rights, title and
329
+ interest in and to the SDK and its modifications and
330
+ derivative works, including their respective intellectual
331
+ property rights, subject to your rights described in this
332
+ section. This SDK may include software and materials from
333
+ NVIDIA’s licensors, and these licensors are intended
334
+ third party beneficiaries that may enforce this Agreement
335
+ with respect to their intellectual property rights.
336
+
337
+ 2. You hold all rights, title and interest in and to your
338
+ applications and your derivative works of the sample
339
+ source code delivered in the SDK, including their
340
+ respective intellectual property rights, subject to
341
+ NVIDIA’s rights described in this section.
342
+
343
+ 3. You may, but don’t have to, provide to NVIDIA
344
+ suggestions, feature requests or other feedback regarding
345
+ the SDK, including possible enhancements or modifications
346
+ to the SDK. For any feedback that you voluntarily provide,
347
+ you hereby grant NVIDIA and its affiliates a perpetual,
348
+ non-exclusive, worldwide, irrevocable license to use,
349
+ reproduce, modify, license, sublicense (through multiple
350
+ tiers of sublicensees), and distribute (through multiple
351
+ tiers of distributors) it without the payment of any
352
+ royalties or fees to you. NVIDIA will use feedback at its
353
+ choice. NVIDIA is constantly looking for ways to improve
354
+ its products, so you may send feedback to NVIDIA through
355
+ the developer portal at https://developer.nvidia.com.
356
+
357
+
358
+ 1.4. No Warranties
359
+
360
+ THE SDK IS PROVIDED BY NVIDIA “AS IS” AND “WITH ALL
361
+ FAULTS.” TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND
362
+ ITS AFFILIATES EXPRESSLY DISCLAIM ALL WARRANTIES OF ANY KIND
363
+ OR NATURE, WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING,
364
+ BUT NOT LIMITED TO, ANY WARRANTIES OF MERCHANTABILITY, FITNESS
365
+ FOR A PARTICULAR PURPOSE, TITLE, NON-INFRINGEMENT, OR THE
366
+ ABSENCE OF ANY DEFECTS THEREIN, WHETHER LATENT OR PATENT. NO
367
+ WARRANTY IS MADE ON THE BASIS OF TRADE USAGE, COURSE OF
368
+ DEALING OR COURSE OF TRADE.
369
+
370
+
371
+ 1.5. Limitation of Liability
372
+
373
+ TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND ITS
374
+ AFFILIATES SHALL NOT BE LIABLE FOR ANY SPECIAL, INCIDENTAL,
375
+ PUNITIVE OR CONSEQUENTIAL DAMAGES, OR ANY LOST PROFITS, LOSS
376
+ OF USE, LOSS OF DATA OR LOSS OF GOODWILL, OR THE COSTS OF
377
+ PROCURING SUBSTITUTE PRODUCTS, ARISING OUT OF OR IN CONNECTION
378
+ WITH THIS AGREEMENT OR THE USE OR PERFORMANCE OF THE SDK,
379
+ WHETHER SUCH LIABILITY ARISES FROM ANY CLAIM BASED UPON BREACH
380
+ OF CONTRACT, BREACH OF WARRANTY, TORT (INCLUDING NEGLIGENCE),
381
+ PRODUCT LIABILITY OR ANY OTHER CAUSE OF ACTION OR THEORY OF
382
+ LIABILITY. IN NO EVENT WILL NVIDIA’S AND ITS AFFILIATES’
383
+ TOTAL CUMULATIVE LIABILITY UNDER OR ARISING OUT OF THIS
384
+ AGREEMENT EXCEED US$10.00. THE NATURE OF THE LIABILITY OR THE
385
+ NUMBER OF CLAIMS OR SUITS SHALL NOT ENLARGE OR EXTEND THIS
386
+ LIMIT.
387
+
388
+ These exclusions and limitations of liability shall apply
389
+ regardless if NVIDIA or its affiliates have been advised of
390
+ the possibility of such damages, and regardless of whether a
391
+ remedy fails its essential purpose. These exclusions and
392
+ limitations of liability form an essential basis of the
393
+ bargain between the parties, and, absent any of these
394
+ exclusions or limitations of liability, the provisions of this
395
+ Agreement, including, without limitation, the economic terms,
396
+ would be substantially different.
397
+
398
+
399
+ 1.6. Termination
400
+
401
+ 1. This Agreement will continue to apply until terminated by
402
+ either you or NVIDIA as described below.
403
+
404
+ 2. If you want to terminate this Agreement, you may do so by
405
+ stopping your use of the SDK.
406
+
407
+ 3. NVIDIA may, at any time, terminate this Agreement if:
408
+
409
+ a. (i) you fail to comply with any term of this
410
+ Agreement and the non-compliance is not fixed within
411
+ thirty (30) days following notice from NVIDIA (or
412
+ immediately if you violate NVIDIA’s intellectual
413
+ property rights);
414
+
415
+ b. (ii) you commence or participate in any legal
416
+ proceeding against NVIDIA with respect to the SDK; or
417
+
418
+ c. (iii) NVIDIA decides to no longer provide the SDK in
419
+ a country or, in NVIDIA’s sole discretion, the
420
+ continued use of it is no longer commercially viable.
421
+
422
+ 4. Upon any termination of this Agreement, you agree to
423
+ promptly discontinue use of the SDK and destroy all copies
424
+ in your possession or control. Your prior distributions in
425
+ accordance with this Agreement are not affected by the
426
+ termination of this Agreement. Upon written request, you
427
+ will certify in writing that you have complied with your
428
+ commitments under this section. Upon any termination of
429
+ this Agreement all provisions survive except for the
430
+ license grant provisions.
431
+
432
+
433
+ 1.7. General
434
+
435
+ If you wish to assign this Agreement or your rights and
436
+ obligations, including by merger, consolidation, dissolution
437
+ or operation of law, contact NVIDIA to ask for permission. Any
438
+ attempted assignment not approved by NVIDIA in writing shall
439
+ be void and of no effect. NVIDIA may assign, delegate or
440
+ transfer this Agreement and its rights and obligations, and if
441
+ to a non-affiliate you will be notified.
442
+
443
+ You agree to cooperate with NVIDIA and provide reasonably
444
+ requested information to verify your compliance with this
445
+ Agreement.
446
+
447
+ This Agreement will be governed in all respects by the laws of
448
+ the United States and of the State of Delaware as those laws
449
+ are applied to contracts entered into and performed entirely
450
+ within Delaware by Delaware residents, without regard to the
451
+ conflicts of laws principles. The United Nations Convention on
452
+ Contracts for the International Sale of Goods is specifically
453
+ disclaimed. You agree to all terms of this Agreement in the
454
+ English language.
455
+
456
+ The state or federal courts residing in Santa Clara County,
457
+ California shall have exclusive jurisdiction over any dispute
458
+ or claim arising out of this Agreement. Notwithstanding this,
459
+ you agree that NVIDIA shall still be allowed to apply for
460
+ injunctive remedies or an equivalent type of urgent legal
461
+ relief in any jurisdiction.
462
+
463
+ If any court of competent jurisdiction determines that any
464
+ provision of this Agreement is illegal, invalid or
465
+ unenforceable, such provision will be construed as limited to
466
+ the extent necessary to be consistent with and fully
467
+ enforceable under the law and the remaining provisions will
468
+ remain in full force and effect. Unless otherwise specified,
469
+ remedies are cumulative.
470
+
471
+ Each party acknowledges and agrees that the other is an
472
+ independent contractor in the performance of this Agreement.
473
+
474
+ The SDK has been developed entirely at private expense and is
475
+ “commercial items” consisting of “commercial computer
476
+ software” and “commercial computer software
477
+ documentation” provided with RESTRICTED RIGHTS. Use,
478
+ duplication or disclosure by the U.S. Government or a U.S.
479
+ Government subcontractor is subject to the restrictions in
480
+ this Agreement pursuant to DFARS 227.7202-3(a) or as set forth
481
+ in subparagraphs (c)(1) and (2) of the Commercial Computer
482
+ Software - Restricted Rights clause at FAR 52.227-19, as
483
+ applicable. Contractor/manufacturer is NVIDIA, 2788 San Tomas
484
+ Expressway, Santa Clara, CA 95051.
485
+
486
+ The SDK is subject to United States export laws and
487
+ regulations. You agree that you will not ship, transfer or
488
+ export the SDK into any country, or use the SDK in any manner,
489
+ prohibited by the United States Bureau of Industry and
490
+ Security or economic sanctions regulations administered by the
491
+ U.S. Department of Treasury’s Office of Foreign Assets
492
+ Control (OFAC), or any applicable export laws, restrictions or
493
+ regulations. These laws include restrictions on destinations,
494
+ end users and end use. By accepting this Agreement, you
495
+ confirm that you are not a resident or citizen of any country
496
+ currently embargoed by the U.S. and that you are not otherwise
497
+ prohibited from receiving the SDK.
498
+
499
+ Any notice delivered by NVIDIA to you under this Agreement
500
+ will be delivered via mail, email or fax. You agree that any
501
+ notices that NVIDIA sends you electronically will satisfy any
502
+ legal communication requirements. Please direct your legal
503
+ notices or other correspondence to NVIDIA Corporation, 2788
504
+ San Tomas Expressway, Santa Clara, California 95051, United
505
+ States of America, Attention: Legal Department.
506
+
507
+ This Agreement and any exhibits incorporated into this
508
+ Agreement constitute the entire agreement of the parties with
509
+ respect to the subject matter of this Agreement and supersede
510
+ all prior negotiations or documentation exchanged between the
511
+ parties relating to this SDK license. Any additional and/or
512
+ conflicting terms on documents issued by you are null, void,
513
+ and invalid. Any amendment or waiver under this Agreement
514
+ shall be in writing and signed by representatives of both
515
+ parties.
516
+
517
+
518
+ 2. CUDA Toolkit Supplement to Software License Agreement for
519
+ NVIDIA Software Development Kits
520
+ ------------------------------------------------------------
521
+
522
+
523
+ Release date: August 16, 2018
524
+ -----------------------------
525
+
526
+ The terms in this supplement govern your use of the NVIDIA
527
+ CUDA Toolkit SDK under the terms of your license agreement
528
+ (“Agreement”) as modified by this supplement. Capitalized
529
+ terms used but not defined below have the meaning assigned to
530
+ them in the Agreement.
531
+
532
+ This supplement is an exhibit to the Agreement and is
533
+ incorporated as an integral part of the Agreement. In the
534
+ event of conflict between the terms in this supplement and the
535
+ terms in the Agreement, the terms in this supplement govern.
536
+
537
+
538
+ 2.1. License Scope
539
+
540
+ The SDK is licensed for you to develop applications only for
541
+ use in systems with NVIDIA GPUs.
542
+
543
+
544
+ 2.2. Distribution
545
+
546
+ The portions of the SDK that are distributable under the
547
+ Agreement are listed in Attachment A.
548
+
549
+
550
+ 2.3. Operating Systems
551
+
552
+ Those portions of the SDK designed exclusively for use on the
553
+ Linux or FreeBSD operating systems, or other operating systems
554
+ derived from the source code to these operating systems, may
555
+ be copied and redistributed for use in accordance with this
556
+ Agreement, provided that the object code files are not
557
+ modified in any way (except for unzipping of compressed
558
+ files).
559
+
560
+
561
+ 2.4. Audio and Video Encoders and Decoders
562
+
563
+ You acknowledge and agree that it is your sole responsibility
564
+ to obtain any additional third-party licenses required to
565
+ make, have made, use, have used, sell, import, and offer for
566
+ sale your products or services that include or incorporate any
567
+ third-party software and content relating to audio and/or
568
+ video encoders and decoders from, including but not limited
569
+ to, Microsoft, Thomson, Fraunhofer IIS, Sisvel S.p.A.,
570
+ MPEG-LA, and Coding Technologies. NVIDIA does not grant to you
571
+ under this Agreement any necessary patent or other rights with
572
+ respect to any audio and/or video encoders and decoders.
573
+
574
+
575
+ 2.5. Licensing
576
+
577
+ If the distribution terms in this Agreement are not suitable
578
+ for your organization, or for any questions regarding this
579
+ Agreement, please contact NVIDIA at
580
581
+
582
+
583
+ 2.6. Attachment A
584
+
585
+ The following portions of the SDK are distributable under the
586
+ Agreement:
587
+
588
+ Component
589
+
590
+ CUDA Runtime
591
+
592
+ Windows
593
+
594
+ cudart.dll, cudart_static.lib, cudadevrt.lib
595
+
596
+ Mac OSX
597
+
598
+ libcudart.dylib, libcudart_static.a, libcudadevrt.a
599
+
600
+ Linux
601
+
602
+ libcudart.so, libcudart_static.a, libcudadevrt.a
603
+
604
+ Android
605
+
606
+ libcudart.so, libcudart_static.a, libcudadevrt.a
607
+
608
+ Component
609
+
610
+ CUDA FFT Library
611
+
612
+ Windows
613
+
614
+ cufft.dll, cufftw.dll, cufft.lib, cufftw.lib
615
+
616
+ Mac OSX
617
+
618
+ libcufft.dylib, libcufft_static.a, libcufftw.dylib,
619
+ libcufftw_static.a
620
+
621
+ Linux
622
+
623
+ libcufft.so, libcufft_static.a, libcufftw.so,
624
+ libcufftw_static.a
625
+
626
+ Android
627
+
628
+ libcufft.so, libcufft_static.a, libcufftw.so,
629
+ libcufftw_static.a
630
+
631
+ Component
632
+
633
+ CUDA BLAS Library
634
+
635
+ Windows
636
+
637
+ cublas.dll, cublasLt.dll
638
+
639
+ Mac OSX
640
+
641
+ libcublas.dylib, libcublasLt.dylib, libcublas_static.a,
642
+ libcublasLt_static.a
643
+
644
+ Linux
645
+
646
+ libcublas.so, libcublasLt.so, libcublas_static.a,
647
+ libcublasLt_static.a
648
+
649
+ Android
650
+
651
+ libcublas.so, libcublasLt.so, libcublas_static.a,
652
+ libcublasLt_static.a
653
+
654
+ Component
655
+
656
+ NVIDIA "Drop-in" BLAS Library
657
+
658
+ Windows
659
+
660
+ nvblas.dll
661
+
662
+ Mac OSX
663
+
664
+ libnvblas.dylib
665
+
666
+ Linux
667
+
668
+ libnvblas.so
669
+
670
+ Component
671
+
672
+ CUDA Sparse Matrix Library
673
+
674
+ Windows
675
+
676
+ cusparse.dll, cusparse.lib
677
+
678
+ Mac OSX
679
+
680
+ libcusparse.dylib, libcusparse_static.a
681
+
682
+ Linux
683
+
684
+ libcusparse.so, libcusparse_static.a
685
+
686
+ Android
687
+
688
+ libcusparse.so, libcusparse_static.a
689
+
690
+ Component
691
+
692
+ CUDA Linear Solver Library
693
+
694
+ Windows
695
+
696
+ cusolver.dll, cusolver.lib
697
+
698
+ Mac OSX
699
+
700
+ libcusolver.dylib, libcusolver_static.a
701
+
702
+ Linux
703
+
704
+ libcusolver.so, libcusolver_static.a
705
+
706
+ Android
707
+
708
+ libcusolver.so, libcusolver_static.a
709
+
710
+ Component
711
+
712
+ CUDA Random Number Generation Library
713
+
714
+ Windows
715
+
716
+ curand.dll, curand.lib
717
+
718
+ Mac OSX
719
+
720
+ libcurand.dylib, libcurand_static.a
721
+
722
+ Linux
723
+
724
+ libcurand.so, libcurand_static.a
725
+
726
+ Android
727
+
728
+ libcurand.so, libcurand_static.a
729
+
730
+ Component
731
+
732
+ CUDA Accelerated Graph Library
733
+
734
+ Component
735
+
736
+ NVIDIA Performance Primitives Library
737
+
738
+ Windows
739
+
740
+ nppc.dll, nppc.lib, nppial.dll, nppial.lib, nppicc.dll,
741
+ nppicc.lib, nppicom.dll, nppicom.lib, nppidei.dll,
742
+ nppidei.lib, nppif.dll, nppif.lib, nppig.dll, nppig.lib,
743
+ nppim.dll, nppim.lib, nppist.dll, nppist.lib, nppisu.dll,
744
+ nppisu.lib, nppitc.dll, nppitc.lib, npps.dll, npps.lib
745
+
746
+ Mac OSX
747
+
748
+ libnppc.dylib, libnppc_static.a, libnppial.dylib,
749
+ libnppial_static.a, libnppicc.dylib, libnppicc_static.a,
750
+ libnppicom.dylib, libnppicom_static.a, libnppidei.dylib,
751
+ libnppidei_static.a, libnppif.dylib, libnppif_static.a,
752
+ libnppig.dylib, libnppig_static.a, libnppim.dylib,
753
+ libnppisu_static.a, libnppitc.dylib, libnppitc_static.a,
754
+ libnpps.dylib, libnpps_static.a
755
+
756
+ Linux
757
+
758
+ libnppc.so, libnppc_static.a, libnppial.so,
759
+ libnppial_static.a, libnppicc.so, libnppicc_static.a,
760
+ libnppicom.so, libnppicom_static.a, libnppidei.so,
761
+ libnppidei_static.a, libnppif.so, libnppif_static.a,
762
+ libnppig.so, libnppig_static.a, libnppim.so,
763
+ libnppim_static.a, libnppist.so, libnppist_static.a,
764
+ libnppisu.so, libnppisu_static.a, libnppitc.so,
765
+ libnppitc_static.a, libnpps.so, libnpps_static.a
766
+
767
+ Android
768
+
769
+ libnppc.so, libnppc_static.a, libnppial.so,
770
+ libnppial_static.a, libnppicc.so, libnppicc_static.a,
771
+ libnppicom.so, libnppicom_static.a, libnppidei.so,
772
+ libnppidei_static.a, libnppif.so, libnppif_static.a,
773
+ libnppig.so, libnppig_static.a, libnppim.so,
774
+ libnppim_static.a, libnppist.so, libnppist_static.a,
775
+ libnppisu.so, libnppisu_static.a, libnppitc.so,
776
+ libnppitc_static.a, libnpps.so, libnpps_static.a
777
+
778
+ Component
779
+
780
+ NVIDIA JPEG Library
781
+
782
+ Linux
783
+
784
+ libnvjpeg.so, libnvjpeg_static.a
785
+
786
+ Component
787
+
788
+ Internal common library required for statically linking to
789
+ cuBLAS, cuSPARSE, cuFFT, cuRAND, nvJPEG and NPP
790
+
791
+ Mac OSX
792
+
793
+ libculibos.a
794
+
795
+ Linux
796
+
797
+ libculibos.a
798
+
799
+ Component
800
+
801
+ NVIDIA Runtime Compilation Library and Header
802
+
803
+ All
804
+
805
+ nvrtc.h
806
+
807
+ Windows
808
+
809
+ nvrtc.dll, nvrtc-builtins.dll
810
+
811
+ Mac OSX
812
+
813
+ libnvrtc.dylib, libnvrtc-builtins.dylib
814
+
815
+ Linux
816
+
817
+ libnvrtc.so, libnvrtc-builtins.so
818
+
819
+ Component
820
+
821
+ NVIDIA Optimizing Compiler Library
822
+
823
+ Windows
824
+
825
+ nvvm.dll
826
+
827
+ Mac OSX
828
+
829
+ libnvvm.dylib
830
+
831
+ Linux
832
+
833
+ libnvvm.so
834
+
835
+ Component
836
+
837
+ NVIDIA Common Device Math Functions Library
838
+
839
+ Windows
840
+
841
+ libdevice.10.bc
842
+
843
+ Mac OSX
844
+
845
+ libdevice.10.bc
846
+
847
+ Linux
848
+
849
+ libdevice.10.bc
850
+
851
+ Component
852
+
853
+ CUDA Occupancy Calculation Header Library
854
+
855
+ All
856
+
857
+ cuda_occupancy.h
858
+
859
+ Component
860
+
861
+ CUDA Half Precision Headers
862
+
863
+ All
864
+
865
+ cuda_fp16.h, cuda_fp16.hpp
866
+
867
+ Component
868
+
869
+ CUDA Profiling Tools Interface (CUPTI) Library
870
+
871
+ Windows
872
+
873
+ cupti.dll
874
+
875
+ Mac OSX
876
+
877
+ libcupti.dylib
878
+
879
+ Linux
880
+
881
+ libcupti.so
882
+
883
+ Component
884
+
885
+ NVIDIA Tools Extension Library
886
+
887
+ Windows
888
+
889
+ nvToolsExt.dll, nvToolsExt.lib
890
+
891
+ Mac OSX
892
+
893
+ libnvToolsExt.dylib
894
+
895
+ Linux
896
+
897
+ libnvToolsExt.so
898
+
899
+ Component
900
+
901
+ NVIDIA CUDA Driver Libraries
902
+
903
+ Linux
904
+
905
+ libcuda.so, libnvidia-fatbinaryloader.so,
906
+ libnvidia-ptxjitcompiler.so
907
+
908
+ The NVIDIA CUDA Driver Libraries are only distributable in
909
+ applications that meet these criteria:
910
+
911
+ 1. The application was developed starting from an NVIDIA CUDA
912
+ container obtained from Docker Hub or the NVIDIA GPU
913
+ Cloud, and
914
+
915
+ 2. The resulting application is packaged as a Docker
916
+ container and distributed to users on Docker Hub or the
917
+ NVIDIA GPU Cloud only.
918
+
919
+
920
+ 2.7. Attachment B
921
+
922
+
923
+ Additional Licensing Obligations
924
+
925
+ The following third party components included in the SOFTWARE
926
+ are licensed to Licensee pursuant to the following terms and
927
+ conditions:
928
+
929
+ 1. Licensee's use of the GDB third party component is
930
+ subject to the terms and conditions of GNU GPL v3:
931
+
932
+ This product includes copyrighted third-party software licensed
933
+ under the terms of the GNU General Public License v3 ("GPL v3").
934
+ All third-party software packages are copyright by their respective
935
+ authors. GPL v3 terms and conditions are hereby incorporated into
936
+ the Agreement by this reference: http://www.gnu.org/licenses/gpl.txt
937
+
938
+ Consistent with these licensing requirements, the software
939
+ listed below is provided under the terms of the specified
940
+ open source software licenses. To obtain source code for
941
+ software provided under licenses that require
942
+ redistribution of source code, including the GNU General
943
+ Public License (GPL) and GNU Lesser General Public License
944
+ (LGPL), contact [email protected]. This offer is
945
+ valid for a period of three (3) years from the date of the
946
+ distribution of this product by NVIDIA CORPORATION.
947
+
948
+ Component License
949
+ CUDA-GDB GPL v3
950
+
951
+ 2. Licensee represents and warrants that any and all third
952
+ party licensing and/or royalty payment obligations in
953
+ connection with Licensee's use of the H.264 video codecs
954
+ are solely the responsibility of Licensee.
955
+
956
+ 3. Licensee's use of the Thrust library is subject to the
957
+ terms and conditions of the Apache License Version 2.0.
958
+ All third-party software packages are copyright by their
959
+ respective authors. Apache License Version 2.0 terms and
960
+ conditions are hereby incorporated into the Agreement by
961
+ this reference.
962
+ http://www.apache.org/licenses/LICENSE-2.0.html
963
+
964
+ In addition, Licensee acknowledges the following notice:
965
+ Thrust includes source code from the Boost Iterator,
966
+ Tuple, System, and Random Number libraries.
967
+
968
+ Boost Software License - Version 1.0 - August 17th, 2003
969
+ . . . .
970
+
971
+ Permission is hereby granted, free of charge, to any person or
972
+ organization obtaining a copy of the software and accompanying
973
+ documentation covered by this license (the "Software") to use,
974
+ reproduce, display, distribute, execute, and transmit the Software,
975
+ and to prepare derivative works of the Software, and to permit
976
+ third-parties to whom the Software is furnished to do so, all
977
+ subject to the following:
978
+
979
+ The copyright notices in the Software and this entire statement,
980
+ including the above license grant, this restriction and the following
981
+ disclaimer, must be included in all copies of the Software, in whole
982
+ or in part, and all derivative works of the Software, unless such
983
+ copies or derivative works are solely in the form of machine-executable
984
+ object code generated by a source language processor.
985
+
986
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
987
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
988
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND
989
+ NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR
990
+ ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR
991
+ OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING
992
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
993
+ OTHER DEALINGS IN THE SOFTWARE.
994
+
995
+ 4. Licensee's use of the LLVM third party component is
996
+ subject to the following terms and conditions:
997
+
998
+ ======================================================
999
+ LLVM Release License
1000
+ ======================================================
1001
+ University of Illinois/NCSA
1002
+ Open Source License
1003
+
1004
+ Copyright (c) 2003-2010 University of Illinois at Urbana-Champaign.
1005
+ All rights reserved.
1006
+
1007
+ Developed by:
1008
+
1009
+ LLVM Team
1010
+
1011
+ University of Illinois at Urbana-Champaign
1012
+
1013
+ http://llvm.org
1014
+
1015
+ Permission is hereby granted, free of charge, to any person obtaining a copy
1016
+ of this software and associated documentation files (the "Software"), to
1017
+ deal with the Software without restriction, including without limitation the
1018
+ rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
1019
+ sell copies of the Software, and to permit persons to whom the Software is
1020
+ furnished to do so, subject to the following conditions:
1021
+
1022
+ * Redistributions of source code must retain the above copyright notice,
1023
+ this list of conditions and the following disclaimers.
1024
+
1025
+ * Redistributions in binary form must reproduce the above copyright
1026
+ notice, this list of conditions and the following disclaimers in the
1027
+ documentation and/or other materials provided with the distribution.
1028
+
1029
+ * Neither the names of the LLVM Team, University of Illinois at Urbana-
1030
+ Champaign, nor the names of its contributors may be used to endorse or
1031
+ promote products derived from this Software without specific prior
1032
+ written permission.
1033
+
1034
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
1035
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
1036
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
1037
+ THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
1038
+ OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
1039
+ ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
1040
+ DEALINGS WITH THE SOFTWARE.
1041
+
1042
+ 5. Licensee's use (e.g. nvprof) of the PCRE third party
1043
+ component is subject to the following terms and
1044
+ conditions:
1045
+
1046
+ ------------
1047
+ PCRE LICENCE
1048
+ ------------
1049
+ PCRE is a library of functions to support regular expressions whose syntax
1050
+ and semantics are as close as possible to those of the Perl 5 language.
1051
+ Release 8 of PCRE is distributed under the terms of the "BSD" licence, as
1052
+ specified below. The documentation for PCRE, supplied in the "doc"
1053
+ directory, is distributed under the same terms as the software itself. The
1054
+ basic library functions are written in C and are freestanding. Also
1055
+ included in the distribution is a set of C++ wrapper functions, and a just-
1056
+ in-time compiler that can be used to optimize pattern matching. These are
1057
+ both optional features that can be omitted when the library is built.
1058
+
1059
+ THE BASIC LIBRARY FUNCTIONS
1060
+ ---------------------------
1061
+ Written by: Philip Hazel
1062
+ Email local part: ph10
1063
+ Email domain: cam.ac.uk
1064
+ University of Cambridge Computing Service,
1065
+ Cambridge, England.
1066
+ Copyright (c) 1997-2012 University of Cambridge
1067
+ All rights reserved.
1068
+
1069
+ PCRE JUST-IN-TIME COMPILATION SUPPORT
1070
+ -------------------------------------
1071
+ Written by: Zoltan Herczeg
1072
+ Email local part: hzmester
1073
+ Emain domain: freemail.hu
1074
+ Copyright(c) 2010-2012 Zoltan Herczeg
1075
+ All rights reserved.
1076
+
1077
+ STACK-LESS JUST-IN-TIME COMPILER
1078
+ --------------------------------
1079
+ Written by: Zoltan Herczeg
1080
+ Email local part: hzmester
1081
+ Emain domain: freemail.hu
1082
+ Copyright(c) 2009-2012 Zoltan Herczeg
1083
+ All rights reserved.
1084
+
1085
+ THE C++ WRAPPER FUNCTIONS
1086
+ -------------------------
1087
+ Contributed by: Google Inc.
1088
+ Copyright (c) 2007-2012, Google Inc.
1089
+ All rights reserved.
1090
+
1091
+ THE "BSD" LICENCE
1092
+ -----------------
1093
+ Redistribution and use in source and binary forms, with or without
1094
+ modification, are permitted provided that the following conditions are met:
1095
+
1096
+ * Redistributions of source code must retain the above copyright notice,
1097
+ this list of conditions and the following disclaimer.
1098
+
1099
+ * Redistributions in binary form must reproduce the above copyright
1100
+ notice, this list of conditions and the following disclaimer in the
1101
+ documentation and/or other materials provided with the distribution.
1102
+
1103
+ * Neither the name of the University of Cambridge nor the name of Google
1104
+ Inc. nor the names of their contributors may be used to endorse or
1105
+ promote products derived from this software without specific prior
1106
+ written permission.
1107
+
1108
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
1109
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
1110
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
1111
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
1112
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
1113
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
1114
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
1115
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
1116
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
1117
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
1118
+ POSSIBILITY OF SUCH DAMAGE.
1119
+
1120
+ 6. Some of the cuBLAS library routines were written by or
1121
+ derived from code written by Vasily Volkov and are subject
1122
+ to the Modified Berkeley Software Distribution License as
1123
+ follows:
1124
+
1125
+ Copyright (c) 2007-2009, Regents of the University of California
1126
+
1127
+ All rights reserved.
1128
+
1129
+ Redistribution and use in source and binary forms, with or without
1130
+ modification, are permitted provided that the following conditions are
1131
+ met:
1132
+ * Redistributions of source code must retain the above copyright
1133
+ notice, this list of conditions and the following disclaimer.
1134
+ * Redistributions in binary form must reproduce the above
1135
+ copyright notice, this list of conditions and the following
1136
+ disclaimer in the documentation and/or other materials provided
1137
+ with the distribution.
1138
+ * Neither the name of the University of California, Berkeley nor
1139
+ the names of its contributors may be used to endorse or promote
1140
+ products derived from this software without specific prior
1141
+ written permission.
1142
+
1143
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR
1144
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
1145
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
1146
+ DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
1147
+ INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
1148
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
1149
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
1150
+ HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
1151
+ STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
1152
+ IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
1153
+ POSSIBILITY OF SUCH DAMAGE.
1154
+
1155
+ 7. Some of the cuBLAS library routines were written by or
1156
+ derived from code written by Davide Barbieri and are
1157
+ subject to the Modified Berkeley Software Distribution
1158
+ License as follows:
1159
+
1160
+ Copyright (c) 2008-2009 Davide Barbieri @ University of Rome Tor Vergata.
1161
+
1162
+ All rights reserved.
1163
+
1164
+ Redistribution and use in source and binary forms, with or without
1165
+ modification, are permitted provided that the following conditions are
1166
+ met:
1167
+ * Redistributions of source code must retain the above copyright
1168
+ notice, this list of conditions and the following disclaimer.
1169
+ * Redistributions in binary form must reproduce the above
1170
+ copyright notice, this list of conditions and the following
1171
+ disclaimer in the documentation and/or other materials provided
1172
+ with the distribution.
1173
+ * The name of the author may not be used to endorse or promote
1174
+ products derived from this software without specific prior
1175
+ written permission.
1176
+
1177
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR
1178
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
1179
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
1180
+ DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
1181
+ INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
1182
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
1183
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
1184
+ HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
1185
+ STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
1186
+ IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
1187
+ POSSIBILITY OF SUCH DAMAGE.
1188
+
1189
+ 8. Some of the cuBLAS library routines were derived from
1190
+ code developed by the University of Tennessee and are
1191
+ subject to the Modified Berkeley Software Distribution
1192
+ License as follows:
1193
+
1194
+ Copyright (c) 2010 The University of Tennessee.
1195
+
1196
+ All rights reserved.
1197
+
1198
+ Redistribution and use in source and binary forms, with or without
1199
+ modification, are permitted provided that the following conditions are
1200
+ met:
1201
+ * Redistributions of source code must retain the above copyright
1202
+ notice, this list of conditions and the following disclaimer.
1203
+ * Redistributions in binary form must reproduce the above
1204
+ copyright notice, this list of conditions and the following
1205
+ disclaimer listed in this license in the documentation and/or
1206
+ other materials provided with the distribution.
1207
+ * Neither the name of the copyright holders nor the names of its
1208
+ contributors may be used to endorse or promote products derived
1209
+ from this software without specific prior written permission.
1210
+
1211
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
1212
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
1213
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
1214
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
1215
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
1216
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
1217
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
1218
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
1219
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1220
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
1221
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1222
+
1223
+ 9. Some of the cuBLAS library routines were written by or
1224
+ derived from code written by Jonathan Hogg and are subject
1225
+ to the Modified Berkeley Software Distribution License as
1226
+ follows:
1227
+
1228
+ Copyright (c) 2012, The Science and Technology Facilities Council (STFC).
1229
+
1230
+ All rights reserved.
1231
+
1232
+ Redistribution and use in source and binary forms, with or without
1233
+ modification, are permitted provided that the following conditions are
1234
+ met:
1235
+ * Redistributions of source code must retain the above copyright
1236
+ notice, this list of conditions and the following disclaimer.
1237
+ * Redistributions in binary form must reproduce the above
1238
+ copyright notice, this list of conditions and the following
1239
+ disclaimer in the documentation and/or other materials provided
1240
+ with the distribution.
1241
+ * Neither the name of the STFC nor the names of its contributors
1242
+ may be used to endorse or promote products derived from this
1243
+ software without specific prior written permission.
1244
+
1245
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
1246
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
1247
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
1248
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE STFC BE
1249
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
1250
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
1251
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
1252
+ BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
1253
+ WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
1254
+ OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
1255
+ IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1256
+
1257
+ 10. Some of the cuBLAS library routines were written by or
1258
+ derived from code written by Ahmad M. Abdelfattah, David
1259
+ Keyes, and Hatem Ltaief, and are subject to the Apache
1260
+ License, Version 2.0, as follows:
1261
+
1262
+ -- (C) Copyright 2013 King Abdullah University of Science and Technology
1263
+ Authors:
1264
+ Ahmad Abdelfattah ([email protected])
1265
+ David Keyes ([email protected])
1266
+ Hatem Ltaief ([email protected])
1267
+
1268
+ Redistribution and use in source and binary forms, with or without
1269
+ modification, are permitted provided that the following conditions
1270
+ are met:
1271
+
1272
+ * Redistributions of source code must retain the above copyright
1273
+ notice, this list of conditions and the following disclaimer.
1274
+ * Redistributions in binary form must reproduce the above copyright
1275
+ notice, this list of conditions and the following disclaimer in the
1276
+ documentation and/or other materials provided with the distribution.
1277
+ * Neither the name of the King Abdullah University of Science and
1278
+ Technology nor the names of its contributors may be used to endorse
1279
+ or promote products derived from this software without specific prior
1280
+ written permission.
1281
+
1282
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
1283
+ ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
1284
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
1285
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
1286
+ HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
1287
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
1288
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
1289
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
1290
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1291
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
1292
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE
1293
+
1294
+ 11. Some of the cuSPARSE library routines were written by or
1295
+ derived from code written by Li-Wen Chang and are subject
1296
+ to the NCSA Open Source License as follows:
1297
+
1298
+ Copyright (c) 2012, University of Illinois.
1299
+
1300
+ All rights reserved.
1301
+
1302
+ Developed by: IMPACT Group, University of Illinois, http://impact.crhc.illinois.edu
1303
+
1304
+ Permission is hereby granted, free of charge, to any person obtaining
1305
+ a copy of this software and associated documentation files (the
1306
+ "Software"), to deal with the Software without restriction, including
1307
+ without limitation the rights to use, copy, modify, merge, publish,
1308
+ distribute, sublicense, and/or sell copies of the Software, and to
1309
+ permit persons to whom the Software is furnished to do so, subject to
1310
+ the following conditions:
1311
+ * Redistributions of source code must retain the above copyright
1312
+ notice, this list of conditions and the following disclaimer.
1313
+ * Redistributions in binary form must reproduce the above
1314
+ copyright notice, this list of conditions and the following
1315
+ disclaimers in the documentation and/or other materials provided
1316
+ with the distribution.
1317
+ * Neither the names of IMPACT Group, University of Illinois, nor
1318
+ the names of its contributors may be used to endorse or promote
1319
+ products derived from this Software without specific prior
1320
+ written permission.
1321
+
1322
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
1323
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
1324
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
1325
+ NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT
1326
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
1327
+ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
1328
+ IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
1329
+ SOFTWARE.
1330
+
1331
+ 12. Some of the cuRAND library routines were written by or
1332
+ derived from code written by Mutsuo Saito and Makoto
1333
+ Matsumoto and are subject to the following license:
1334
+
1335
+ Copyright (c) 2009, 2010 Mutsuo Saito, Makoto Matsumoto and Hiroshima
1336
+ University. All rights reserved.
1337
+
1338
+ Copyright (c) 2011 Mutsuo Saito, Makoto Matsumoto, Hiroshima
1339
+ University and University of Tokyo. All rights reserved.
1340
+
1341
+ Redistribution and use in source and binary forms, with or without
1342
+ modification, are permitted provided that the following conditions are
1343
+ met:
1344
+ * Redistributions of source code must retain the above copyright
1345
+ notice, this list of conditions and the following disclaimer.
1346
+ * Redistributions in binary form must reproduce the above
1347
+ copyright notice, this list of conditions and the following
1348
+ disclaimer in the documentation and/or other materials provided
1349
+ with the distribution.
1350
+ * Neither the name of the Hiroshima University nor the names of
1351
+ its contributors may be used to endorse or promote products
1352
+ derived from this software without specific prior written
1353
+ permission.
1354
+
1355
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
1356
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
1357
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
1358
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
1359
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
1360
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
1361
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
1362
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
1363
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1364
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
1365
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1366
+
1367
+ 13. Some of the cuRAND library routines were derived from
1368
+ code developed by D. E. Shaw Research and are subject to
1369
+ the following license:
1370
+
1371
+ Copyright 2010-2011, D. E. Shaw Research.
1372
+
1373
+ All rights reserved.
1374
+
1375
+ Redistribution and use in source and binary forms, with or without
1376
+ modification, are permitted provided that the following conditions are
1377
+ met:
1378
+ * Redistributions of source code must retain the above copyright
1379
+ notice, this list of conditions, and the following disclaimer.
1380
+ * Redistributions in binary form must reproduce the above
1381
+ copyright notice, this list of conditions, and the following
1382
+ disclaimer in the documentation and/or other materials provided
1383
+ with the distribution.
1384
+ * Neither the name of D. E. Shaw Research nor the names of its
1385
+ contributors may be used to endorse or promote products derived
1386
+ from this software without specific prior written permission.
1387
+
1388
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
1389
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
1390
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
1391
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
1392
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
1393
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
1394
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
1395
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
1396
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1397
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
1398
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1399
+
1400
+ 14. Some of the Math library routines were written by or
1401
+ derived from code developed by Norbert Juffa and are
1402
+ subject to the following license:
1403
+
1404
+ Copyright (c) 2015-2017, Norbert Juffa
1405
+ All rights reserved.
1406
+
1407
+ Redistribution and use in source and binary forms, with or without
1408
+ modification, are permitted provided that the following conditions
1409
+ are met:
1410
+
1411
+ 1. Redistributions of source code must retain the above copyright
1412
+ notice, this list of conditions and the following disclaimer.
1413
+
1414
+ 2. Redistributions in binary form must reproduce the above copyright
1415
+ notice, this list of conditions and the following disclaimer in the
1416
+ documentation and/or other materials provided with the distribution.
1417
+
1418
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
1419
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
1420
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
1421
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
1422
+ HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
1423
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
1424
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
1425
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
1426
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1427
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
1428
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1429
+
1430
+ 15. Licensee's use of the lz4 third party component is
1431
+ subject to the following terms and conditions:
1432
+
1433
+ Copyright (C) 2011-2013, Yann Collet.
1434
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
1435
+
1436
+ Redistribution and use in source and binary forms, with or without
1437
+ modification, are permitted provided that the following conditions are
1438
+ met:
1439
+
1440
+ * Redistributions of source code must retain the above copyright
1441
+ notice, this list of conditions and the following disclaimer.
1442
+ * Redistributions in binary form must reproduce the above
1443
+ copyright notice, this list of conditions and the following disclaimer
1444
+ in the documentation and/or other materials provided with the
1445
+ distribution.
1446
+
1447
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
1448
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
1449
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
1450
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
1451
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
1452
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
1453
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
1454
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
1455
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1456
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
1457
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1458
+
1459
+ 16. The NPP library uses code from the Boost Math Toolkit,
1460
+ and is subject to the following license:
1461
+
1462
+ Boost Software License - Version 1.0 - August 17th, 2003
1463
+ . . . .
1464
+
1465
+ Permission is hereby granted, free of charge, to any person or
1466
+ organization obtaining a copy of the software and accompanying
1467
+ documentation covered by this license (the "Software") to use,
1468
+ reproduce, display, distribute, execute, and transmit the Software,
1469
+ and to prepare derivative works of the Software, and to permit
1470
+ third-parties to whom the Software is furnished to do so, all
1471
+ subject to the following:
1472
+
1473
+ The copyright notices in the Software and this entire statement,
1474
+ including the above license grant, this restriction and the following
1475
+ disclaimer, must be included in all copies of the Software, in whole
1476
+ or in part, and all derivative works of the Software, unless such
1477
+ copies or derivative works are solely in the form of machine-executable
1478
+ object code generated by a source language processor.
1479
+
1480
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
1481
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
1482
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND
1483
+ NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR
1484
+ ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR
1485
+ OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING
1486
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
1487
+ OTHER DEALINGS IN THE SOFTWARE.
1488
+
1489
+ 17. Portions of the Nsight Eclipse Edition are subject to the
1490
+ following license:
1491
+
1492
+ The Eclipse Foundation makes available all content in this plug-in
1493
+ ("Content"). Unless otherwise indicated below, the Content is provided
1494
+ to you under the terms and conditions of the Eclipse Public License
1495
+ Version 1.0 ("EPL"). A copy of the EPL is available at http://
1496
+ www.eclipse.org/legal/epl-v10.html. For purposes of the EPL, "Program"
1497
+ will mean the Content.
1498
+
1499
+ If you did not receive this Content directly from the Eclipse
1500
+ Foundation, the Content is being redistributed by another party
1501
+ ("Redistributor") and different terms and conditions may apply to your
1502
+ use of any object code in the Content. Check the Redistributor's
1503
+ license that was provided with the Content. If no such license exists,
1504
+ contact the Redistributor. Unless otherwise indicated below, the terms
1505
+ and conditions of the EPL still apply to any source code in the
1506
+ Content and such source code may be obtained at http://www.eclipse.org.
1507
+
1508
+ 18. Some of the cuBLAS library routines use code from
1509
+ OpenAI, which is subject to the following license:
1510
+
1511
+ License URL
1512
+ https://github.com/openai/openai-gemm/blob/master/LICENSE
1513
+
1514
+ License Text
1515
+ The MIT License
1516
+
1517
+ Copyright (c) 2016 OpenAI (http://openai.com), 2016 Google Inc.
1518
+
1519
+ Permission is hereby granted, free of charge, to any person obtaining a copy
1520
+ of this software and associated documentation files (the "Software"), to deal
1521
+ in the Software without restriction, including without limitation the rights
1522
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
1523
+ copies of the Software, and to permit persons to whom the Software is
1524
+ furnished to do so, subject to the following conditions:
1525
+
1526
+ The above copyright notice and this permission notice shall be included in
1527
+ all copies or substantial portions of the Software.
1528
+
1529
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
1530
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
1531
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
1532
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
1533
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
1534
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
1535
+ THE SOFTWARE.
1536
+
1537
+ 19. Licensee's use of the Visual Studio Setup Configuration
1538
+ Samples is subject to the following license:
1539
+
1540
+ The MIT License (MIT)
1541
+ Copyright (C) Microsoft Corporation. All rights reserved.
1542
+
1543
+ Permission is hereby granted, free of charge, to any person
1544
+ obtaining a copy of this software and associated documentation
1545
+ files (the "Software"), to deal in the Software without restriction,
1546
+ including without limitation the rights to use, copy, modify, merge,
1547
+ publish, distribute, sublicense, and/or sell copies of the Software,
1548
+ and to permit persons to whom the Software is furnished to do so,
1549
+ subject to the following conditions:
1550
+
1551
+ The above copyright notice and this permission notice shall be included
1552
+ in all copies or substantial portions of the Software.
1553
+
1554
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
1555
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
1556
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
1557
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
1558
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
1559
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
1560
+
1561
+ 20. Licensee's use of linmath.h header for CPU functions for
1562
+ GL vector/matrix operations from LunarG is subject to the
1563
+ Apache License Version 2.0.
1564
+
1565
+ 21. The DX12-CUDA sample uses the d3dx12.h header, which is
1566
+ subject to the MIT license.
1567
+
1568
+ -----------------
env-llmeval/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/METADATA ADDED
@@ -0,0 +1,38 @@
1
+ Metadata-Version: 2.1
2
+ Name: nvidia-cusolver-cu12
3
+ Version: 11.4.5.107
4
+ Summary: CUDA solver native runtime libraries
5
+ Home-page: https://developer.nvidia.com/cuda-zone
6
+ Author: Nvidia CUDA Installer Team
7
+ Author-email: [email protected]
8
+ License: NVIDIA Proprietary Software
9
+ Keywords: cuda,nvidia,runtime,machine learning,deep learning
10
+ Classifier: Development Status :: 4 - Beta
11
+ Classifier: Intended Audience :: Developers
12
+ Classifier: Intended Audience :: Education
13
+ Classifier: Intended Audience :: Science/Research
14
+ Classifier: License :: Other/Proprietary License
15
+ Classifier: Natural Language :: English
16
+ Classifier: Programming Language :: Python :: 3
17
+ Classifier: Programming Language :: Python :: 3.5
18
+ Classifier: Programming Language :: Python :: 3.6
19
+ Classifier: Programming Language :: Python :: 3.7
20
+ Classifier: Programming Language :: Python :: 3.8
21
+ Classifier: Programming Language :: Python :: 3.9
22
+ Classifier: Programming Language :: Python :: 3.10
23
+ Classifier: Programming Language :: Python :: 3.11
24
+ Classifier: Programming Language :: Python :: 3 :: Only
25
+ Classifier: Topic :: Scientific/Engineering
26
+ Classifier: Topic :: Scientific/Engineering :: Mathematics
27
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
28
+ Classifier: Topic :: Software Development
29
+ Classifier: Topic :: Software Development :: Libraries
30
+ Classifier: Operating System :: Microsoft :: Windows
31
+ Classifier: Operating System :: POSIX :: Linux
32
+ Requires-Python: >=3
33
+ License-File: License.txt
34
+ Requires-Dist: nvidia-cublas-cu12
35
+ Requires-Dist: nvidia-nvjitlink-cu12
36
+ Requires-Dist: nvidia-cusparse-cu12
37
+
38
+ CUDA solver native runtime libraries
env-llmeval/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/RECORD ADDED
@@ -0,0 +1,22 @@
1
+ nvidia/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
2
+ nvidia/__pycache__/__init__.cpython-310.pyc,,
3
+ nvidia/cusolver/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
4
+ nvidia/cusolver/__pycache__/__init__.cpython-310.pyc,,
5
+ nvidia/cusolver/include/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
6
+ nvidia/cusolver/include/__pycache__/__init__.cpython-310.pyc,,
7
+ nvidia/cusolver/include/cusolverDn.h,sha256=8KUcqUxWPr8jpz3ZVpTB6I3IXMme1ok7E7vi9XXKRzk,147406
8
+ nvidia/cusolver/include/cusolverMg.h,sha256=N8989nnS2BleeMyuftbQgBDJ4sMAkLPSnmy_S_7fxng,11549
9
+ nvidia/cusolver/include/cusolverRf.h,sha256=7BZfWeuMJ8w1Pz4iZeGmwvDZbDNNq0ivG5MHtiATtls,14292
10
+ nvidia/cusolver/include/cusolverSp.h,sha256=8fev0XawDBd0xrOxUlQ3WhclKlUuVAT64zKxwnP8iT0,32561
11
+ nvidia/cusolver/include/cusolverSp_LOWLEVEL_PREVIEW.h,sha256=rTuS0rxwGV3bAz50ua59WVPQ9SvlijORj732oPejoCk,37495
12
+ nvidia/cusolver/include/cusolver_common.h,sha256=8SMCLEPkMN9Ni_KANkvPSHCieV1jrTARuS-Mhmuq5H8,8826
13
+ nvidia/cusolver/lib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
14
+ nvidia/cusolver/lib/__pycache__/__init__.cpython-310.pyc,,
15
+ nvidia/cusolver/lib/libcusolver.so.11,sha256=ECh6vHzpxfx-fBY3YVZrWZ6uGzYsR-EACRHRmEQ9bVI,114481816
16
+ nvidia/cusolver/lib/libcusolverMg.so.11,sha256=0f3uK8NQhMAFtQ5r76UCApP7coB7wWG2pQOMh1RMmwY,79763496
17
+ nvidia_cusolver_cu12-11.4.5.107.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
18
+ nvidia_cusolver_cu12-11.4.5.107.dist-info/License.txt,sha256=rW9YU_ugyg0VnQ9Y1JrkmDDC-Mk_epJki5zpCttMbM0,59262
19
+ nvidia_cusolver_cu12-11.4.5.107.dist-info/METADATA,sha256=b8Zxnx3ZVIwttTKBnzgVXjXu8-_pRL6wBkYMTV7i6gA,1626
20
+ nvidia_cusolver_cu12-11.4.5.107.dist-info/RECORD,,
21
+ nvidia_cusolver_cu12-11.4.5.107.dist-info/WHEEL,sha256=-kQi_VMfvRQozZJT7HUPMfY-5vLo0LVTmAylNJ3Ft98,106
22
+ nvidia_cusolver_cu12-11.4.5.107.dist-info/top_level.txt,sha256=fTkAtiFuL16nUrB9ytDDtpytz2t0B4NvYTnRzwAhO14,7
env-llmeval/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/WHEEL ADDED
@@ -0,0 +1,5 @@
1
+ Wheel-Version: 1.0
2
+ Generator: bdist_wheel (0.37.1)
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-manylinux1_x86_64
5
+