applied-ai-018 committed on
Commit
2ce45d5
·
verified ·
1 Parent(s): e7e4eb8

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. llmeval-env/lib/python3.10/site-packages/idna/__pycache__/__init__.cpython-310.pyc +0 -0
  2. llmeval-env/lib/python3.10/site-packages/idna/__pycache__/compat.cpython-310.pyc +0 -0
  3. llmeval-env/lib/python3.10/site-packages/idna/__pycache__/core.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/idna/__pycache__/idnadata.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/idna/__pycache__/intranges.cpython-310.pyc +0 -0
  6. llmeval-env/lib/python3.10/site-packages/idna/__pycache__/package_data.cpython-310.pyc +0 -0
  7. llmeval-env/lib/python3.10/site-packages/idna/__pycache__/uts46data.cpython-310.pyc +0 -0
  8. llmeval-env/lib/python3.10/site-packages/idna/codec.py +118 -0
  9. llmeval-env/lib/python3.10/site-packages/idna/py.typed +0 -0
  10. llmeval-env/lib/python3.10/site-packages/multiprocess/context.py +376 -0
  11. llmeval-env/lib/python3.10/site-packages/multiprocess/managers.py +1378 -0
  12. llmeval-env/lib/python3.10/site-packages/multiprocess/shared_memory.py +534 -0
  13. llmeval-env/lib/python3.10/site-packages/numpy-1.26.4.dist-info/INSTALLER +1 -0
  14. llmeval-env/lib/python3.10/site-packages/numpy-1.26.4.dist-info/LICENSE.txt +971 -0
  15. llmeval-env/lib/python3.10/site-packages/numpy-1.26.4.dist-info/METADATA +1092 -0
  16. llmeval-env/lib/python3.10/site-packages/numpy-1.26.4.dist-info/RECORD +0 -0
  17. llmeval-env/lib/python3.10/site-packages/numpy-1.26.4.dist-info/WHEEL +6 -0
  18. llmeval-env/lib/python3.10/site-packages/numpy-1.26.4.dist-info/entry_points.txt +9 -0
  19. llmeval-env/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/bleu.cpython-310.pyc +0 -0
  20. llmeval-env/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/chrf.cpython-310.pyc +0 -0
  21. llmeval-env/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/helpers.cpython-310.pyc +0 -0
  22. llmeval-env/lib/python3.10/site-packages/triton/compiler/__init__.py +7 -0
  23. llmeval-env/lib/python3.10/site-packages/triton/compiler/__pycache__/__init__.cpython-310.pyc +0 -0
  24. llmeval-env/lib/python3.10/site-packages/triton/compiler/__pycache__/code_generator.cpython-310.pyc +0 -0
  25. llmeval-env/lib/python3.10/site-packages/triton/compiler/__pycache__/compiler.cpython-310.pyc +0 -0
  26. llmeval-env/lib/python3.10/site-packages/triton/compiler/__pycache__/errors.cpython-310.pyc +0 -0
  27. llmeval-env/lib/python3.10/site-packages/triton/compiler/__pycache__/make_launcher.cpython-310.pyc +0 -0
  28. llmeval-env/lib/python3.10/site-packages/triton/compiler/__pycache__/utils.cpython-310.pyc +0 -0
  29. llmeval-env/lib/python3.10/site-packages/triton/compiler/backends/__init__.py +0 -0
  30. llmeval-env/lib/python3.10/site-packages/triton/compiler/backends/__pycache__/__init__.cpython-310.pyc +0 -0
  31. llmeval-env/lib/python3.10/site-packages/triton/compiler/backends/__pycache__/cuda.cpython-310.pyc +0 -0
  32. llmeval-env/lib/python3.10/site-packages/triton/compiler/backends/cuda.py +220 -0
  33. llmeval-env/lib/python3.10/site-packages/triton/compiler/code_generator.py +1235 -0
  34. llmeval-env/lib/python3.10/site-packages/triton/compiler/compiler.py +270 -0
  35. llmeval-env/lib/python3.10/site-packages/triton/compiler/errors.py +52 -0
  36. llmeval-env/lib/python3.10/site-packages/triton/compiler/make_launcher.py +297 -0
  37. llmeval-env/lib/python3.10/site-packages/triton/compiler/utils.py +282 -0
  38. llmeval-env/lib/python3.10/site-packages/triton/language/extra/__init__.py +3 -0
  39. llmeval-env/lib/python3.10/site-packages/triton/language/extra/__pycache__/__init__.cpython-310.pyc +0 -0
  40. llmeval-env/lib/python3.10/site-packages/triton/language/extra/__pycache__/cuda.cpython-310.pyc +0 -0
  41. llmeval-env/lib/python3.10/site-packages/triton/language/extra/cuda.py +18 -0
  42. llmeval-env/lib/python3.10/site-packages/triton/ops/__init__.py +14 -0
  43. llmeval-env/lib/python3.10/site-packages/triton/ops/__pycache__/__init__.cpython-310.pyc +0 -0
  44. llmeval-env/lib/python3.10/site-packages/triton/ops/__pycache__/cross_entropy.cpython-310.pyc +0 -0
  45. llmeval-env/lib/python3.10/site-packages/triton/ops/__pycache__/flash_attention.cpython-310.pyc +0 -0
  46. llmeval-env/lib/python3.10/site-packages/triton/ops/__pycache__/matmul.cpython-310.pyc +0 -0
  47. llmeval-env/lib/python3.10/site-packages/triton/ops/__pycache__/matmul_perf_model.cpython-310.pyc +0 -0
  48. llmeval-env/lib/python3.10/site-packages/triton/ops/blocksparse/__pycache__/__init__.cpython-310.pyc +0 -0
  49. llmeval-env/lib/python3.10/site-packages/triton/ops/blocksparse/__pycache__/matmul.cpython-310.pyc +0 -0
  50. llmeval-env/lib/python3.10/site-packages/triton/ops/blocksparse/matmul.py +432 -0
llmeval-env/lib/python3.10/site-packages/idna/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (836 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/idna/__pycache__/compat.cpython-310.pyc ADDED
Binary file (736 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/idna/__pycache__/core.cpython-310.pyc ADDED
Binary file (9.47 kB). View file
 
llmeval-env/lib/python3.10/site-packages/idna/__pycache__/idnadata.cpython-310.pyc ADDED
Binary file (194 kB). View file
 
llmeval-env/lib/python3.10/site-packages/idna/__pycache__/intranges.cpython-310.pyc ADDED
Binary file (1.97 kB). View file
 
llmeval-env/lib/python3.10/site-packages/idna/__pycache__/package_data.cpython-310.pyc ADDED
Binary file (200 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/idna/__pycache__/uts46data.cpython-310.pyc ADDED
Binary file (152 kB). View file
 
llmeval-env/lib/python3.10/site-packages/idna/codec.py ADDED
@@ -0,0 +1,118 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from .core import encode, decode, alabel, ulabel, IDNAError
2
+ import codecs
3
+ import re
4
+ from typing import Any, Tuple, Optional
5
+
6
+ _unicode_dots_re = re.compile('[\u002e\u3002\uff0e\uff61]')
7
+
8
class Codec(codecs.Codec):
    """Stateless codec translating str domain names to IDNA 2008 bytes and back.

    Delegates the actual conversion to idna.core's encode()/decode(); only the
    'strict' error-handling mode is supported.
    """

    def encode(self, data: str, errors: str = 'strict') -> Tuple[bytes, int]:
        """Encode *data* to IDNA bytes, returning (encoded, chars consumed)."""
        if errors != 'strict':
            raise IDNAError(f'Unsupported error handling "{errors}"')
        if not data:
            return b'', 0
        return encode(data), len(data)

    def decode(self, data: bytes, errors: str = 'strict') -> Tuple[str, int]:
        """Decode IDNA *data* to str, returning (decoded, bytes consumed)."""
        if errors != 'strict':
            raise IDNAError(f'Unsupported error handling "{errors}"')
        if not data:
            return '', 0
        return decode(data), len(data)
27
+
28
class IncrementalEncoder(codecs.BufferedIncrementalEncoder):
    """Incremental IDNA encoder: encodes complete labels as they arrive,
    holding back a possibly-unfinished trailing label until the next call."""

    def _buffer_encode(self, data: str, errors: str, final: bool) -> Tuple[bytes, int]:
        """Encode as much of *data* as is complete.

        Returns (encoded bytes, number of input characters consumed).  Only
        'strict' error handling is supported; anything else raises IDNAError.
        """
        if errors != 'strict':
            raise IDNAError('Unsupported error handling \"{}\"'.format(errors))

        if not data:
            return b'', 0

        # Split on any of the four Unicode label separators (full stop,
        # ideographic/fullwidth/halfwidth variants).
        labels = _unicode_dots_re.split(data)
        trailing_dot = b''
        if labels:
            if not labels[-1]:
                # Input ended on a separator: all labels are complete.
                trailing_dot = b'.'
                del labels[-1]
            elif not final:
                # Keep potentially unfinished label until the next call
                del labels[-1]
                if labels:
                    trailing_dot = b'.'

        result = []
        size = 0
        for label in labels:
            result.append(alabel(label))
            # Count one separator character before every label but the first.
            # NOTE(review): the separator is skipped while size == 0, so an
            # empty leading label does not count its following dot — mirrors
            # upstream idna behavior; confirm before changing.
            if size:
                size += 1
            size += len(label)

        # Join with U+002E
        result_bytes = b'.'.join(result) + trailing_dot
        size += len(trailing_dot)
        return result_bytes, size
60
+
61
class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
    """Incremental IDNA decoder: decodes complete labels as they arrive,
    holding back a possibly-unfinished trailing label until the next call."""

    def _buffer_decode(self, data: Any, errors: str, final: bool) -> Tuple[str, int]:
        """Decode as much of *data* as is complete.

        Returns (decoded str, number of input units consumed).  Only 'strict'
        error handling is supported; anything else raises IDNAError.
        """
        if errors != 'strict':
            raise IDNAError('Unsupported error handling \"{}\"'.format(errors))

        if not data:
            return ('', 0)

        # Bytes-like input is interpreted as ASCII before splitting.
        if not isinstance(data, str):
            data = str(data, 'ascii')

        labels = _unicode_dots_re.split(data)
        trailing_dot = ''
        if labels:
            if not labels[-1]:
                # Input ended on a separator: all labels are complete.
                trailing_dot = '.'
                del labels[-1]
            elif not final:
                # Keep potentially unfinished label until the next call
                del labels[-1]
                if labels:
                    trailing_dot = '.'

        result = []
        size = 0
        for label in labels:
            result.append(ulabel(label))
            # Count one separator character before every label but the first
            # (skipped while size == 0 — mirrors the encoder's accounting).
            if size:
                size += 1
            size += len(label)

        result_str = '.'.join(result) + trailing_dot
        size += len(trailing_dot)
        return (result_str, size)
95
+
96
+
97
class StreamWriter(Codec, codecs.StreamWriter):
    """Stream writer for the 'idna2008' codec: Codec.encode plus the
    standard codecs.StreamWriter plumbing."""
    pass
99
+
100
+
101
class StreamReader(Codec, codecs.StreamReader):
    """Stream reader for the 'idna2008' codec: Codec.decode plus the
    standard codecs.StreamReader plumbing."""
    pass
103
+
104
+
105
def search_function(name: str) -> Optional[codecs.CodecInfo]:
    """Codec search hook: resolve the name 'idna2008' to this module's codec.

    Any other name returns None so other registered codecs get a chance.
    """
    if name != 'idna2008':
        return None
    codec = Codec()
    return codecs.CodecInfo(
        name=name,
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamwriter=StreamWriter,
        streamreader=StreamReader,
    )


# Make str.encode('idna2008') / bytes.decode('idna2008') work on import.
codecs.register(search_function)
llmeval-env/lib/python3.10/site-packages/idna/py.typed ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/multiprocess/context.py ADDED
@@ -0,0 +1,376 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import sys
3
+ import threading
4
+
5
+ from . import process
6
+ from . import reduction
7
+
8
+ __all__ = ()
9
+
10
+ #
11
+ # Exceptions
12
+ #
13
+
14
class ProcessError(Exception):
    """Base class for the exception types defined in this module."""
    pass

class BufferTooShort(ProcessError):
    # NOTE(review): presumably raised when a caller-supplied receive buffer
    # is too small for an incoming message — confirm against the connection
    # module before relying on this.
    pass

class TimeoutError(ProcessError):
    # Deliberately shadows the builtin TimeoutError inside this package.
    pass

class AuthenticationError(ProcessError):
    # NOTE(review): presumably raised on connection authentication failure —
    # confirm against the connection module.
    pass
25
+
26
+ #
27
+ # Base type for contexts. Bound methods of an instance of this type are included in __all__ of __init__.py
28
+ #
29
+
30
class BaseContext(object):
    """Base type for contexts.

    A context couples a start method ('fork', 'spawn' or 'forkserver') with
    factory methods for processes, queues and synchronization primitives.
    Bound methods of an instance of this type are included in __all__ of
    __init__.py.
    """

    # Re-export the exception types so they are reachable from any context.
    ProcessError = ProcessError
    BufferTooShort = BufferTooShort
    TimeoutError = TimeoutError
    AuthenticationError = AuthenticationError

    # Plain module-level helpers exposed unchanged on the context.
    current_process = staticmethod(process.current_process)
    parent_process = staticmethod(process.parent_process)
    active_children = staticmethod(process.active_children)

    def cpu_count(self):
        '''Returns the number of CPUs in the system'''
        num = os.cpu_count()
        if num is None:
            raise NotImplementedError('cannot determine number of cpus')
        else:
            return num

    def Manager(self):
        '''Returns a manager associated with a running server process

        The manager's methods such as `Lock()`, `Condition()` and `Queue()`
        can be used to create shared objects.
        '''
        from .managers import SyncManager
        m = SyncManager(ctx=self.get_context())
        m.start()
        return m

    def Pipe(self, duplex=True):
        '''Returns two connection objects connected by a pipe'''
        from .connection import Pipe
        return Pipe(duplex)

    def Lock(self):
        '''Returns a non-recursive lock object'''
        from .synchronize import Lock
        return Lock(ctx=self.get_context())

    def RLock(self):
        '''Returns a recursive lock object'''
        from .synchronize import RLock
        return RLock(ctx=self.get_context())

    def Condition(self, lock=None):
        '''Returns a condition object'''
        from .synchronize import Condition
        return Condition(lock, ctx=self.get_context())

    def Semaphore(self, value=1):
        '''Returns a semaphore object'''
        from .synchronize import Semaphore
        return Semaphore(value, ctx=self.get_context())

    def BoundedSemaphore(self, value=1):
        '''Returns a bounded semaphore object'''
        from .synchronize import BoundedSemaphore
        return BoundedSemaphore(value, ctx=self.get_context())

    def Event(self):
        '''Returns an event object'''
        from .synchronize import Event
        return Event(ctx=self.get_context())

    def Barrier(self, parties, action=None, timeout=None):
        '''Returns a barrier object'''
        from .synchronize import Barrier
        return Barrier(parties, action, timeout, ctx=self.get_context())

    def Queue(self, maxsize=0):
        '''Returns a queue object'''
        from .queues import Queue
        return Queue(maxsize, ctx=self.get_context())

    def JoinableQueue(self, maxsize=0):
        '''Returns a joinable queue object'''
        from .queues import JoinableQueue
        return JoinableQueue(maxsize, ctx=self.get_context())

    def SimpleQueue(self):
        '''Returns a simplified queue object'''
        from .queues import SimpleQueue
        return SimpleQueue(ctx=self.get_context())

    def Pool(self, processes=None, initializer=None, initargs=(),
             maxtasksperchild=None):
        '''Returns a process pool object'''
        from .pool import Pool
        return Pool(processes, initializer, initargs, maxtasksperchild,
                    context=self.get_context())

    def RawValue(self, typecode_or_type, *args):
        '''Returns a shared object'''
        from .sharedctypes import RawValue
        return RawValue(typecode_or_type, *args)

    def RawArray(self, typecode_or_type, size_or_initializer):
        '''Returns a shared array'''
        from .sharedctypes import RawArray
        return RawArray(typecode_or_type, size_or_initializer)

    def Value(self, typecode_or_type, *args, lock=True):
        '''Returns a synchronized shared object'''
        from .sharedctypes import Value
        return Value(typecode_or_type, *args, lock=lock,
                     ctx=self.get_context())

    def Array(self, typecode_or_type, size_or_initializer, *, lock=True):
        '''Returns a synchronized shared array'''
        from .sharedctypes import Array
        return Array(typecode_or_type, size_or_initializer, lock=lock,
                     ctx=self.get_context())

    def freeze_support(self):
        '''Check whether this is a fake forked process in a frozen executable.
        If so then run code specified by commandline and exit.
        '''
        if sys.platform == 'win32' and getattr(sys, 'frozen', False):
            from .spawn import freeze_support
            freeze_support()

    def get_logger(self):
        '''Return package logger -- if it does not already exist then
        it is created.
        '''
        from .util import get_logger
        return get_logger()

    def log_to_stderr(self, level=None):
        '''Turn on logging and add a handler which prints to stderr'''
        from .util import log_to_stderr
        return log_to_stderr(level)

    def allow_connection_pickling(self):
        '''Install support for sending connections and sockets
        between processes
        '''
        # This is undocumented.  In previous versions of multiprocessing
        # its only effect was to make socket objects inheritable on Windows.
        from . import connection

    def set_executable(self, executable):
        '''Sets the path to a python.exe or pythonw.exe binary used to run
        child processes instead of sys.executable when using the 'spawn'
        start method.  Useful for people embedding Python.
        '''
        from .spawn import set_executable
        set_executable(executable)

    def set_forkserver_preload(self, module_names):
        '''Set list of module names to try to load in forkserver process.
        This is really just a hint.
        '''
        from .forkserver import set_forkserver_preload
        set_forkserver_preload(module_names)

    def get_context(self, method=None):
        '''Return self, or the concrete context registered for *method*.

        Raises ValueError for an unknown method, or whatever the concrete
        context's _check_available() raises if it cannot be used here.
        '''
        if method is None:
            return self
        try:
            ctx = _concrete_contexts[method]
        except KeyError:
            raise ValueError('cannot find context for %r' % method) from None
        ctx._check_available()
        return ctx

    def get_start_method(self, allow_none=False):
        # allow_none is accepted for interface compatibility with
        # DefaultContext.get_start_method(); ignored here.
        return self._name

    def set_start_method(self, method, force=False):
        # Concrete contexts are fixed to one start method; only the
        # DefaultContext subclass allows changing it.
        raise ValueError('cannot set start method of concrete context')

    @property
    def reducer(self):
        '''Controls how objects will be reduced to a form that can be
        shared with other processes.'''
        return globals().get('reduction')

    @reducer.setter
    def reducer(self, reduction):
        # Replaces the module-global `reduction` for the whole package.
        globals()['reduction'] = reduction

    def _check_available(self):
        # Overridden by contexts whose start method may be unavailable on
        # this platform (see ForkServerContext).
        pass
215
+
216
+ #
217
+ # Type of default context -- underlying context can be set at most once
218
+ #
219
+
220
class Process(process.BaseProcess):
    """Default process type: delegates to whatever concrete context the
    module-level _default_context currently resolves to."""
    # None marks "use the default context's start method".
    _start_method = None

    @staticmethod
    def _Popen(process_obj):
        # Resolved at call time so a set_start_method() made before the
        # first start() takes effect.
        return _default_context.get_context().Process._Popen(process_obj)

    @staticmethod
    def _after_fork():
        return _default_context.get_context().Process._after_fork()
229
+
230
class DefaultContext(BaseContext):
    """Context whose underlying concrete context can be set at most once
    (unless forced); until then it lazily falls back to a platform default."""
    Process = Process

    def __init__(self, context):
        # The platform default, used when no start method is chosen.
        self._default_context = context
        # The chosen concrete context; None until first use or set_start_method().
        self._actual_context = None

    def get_context(self, method=None):
        if method is None:
            # First access without an explicit method locks in the default.
            if self._actual_context is None:
                self._actual_context = self._default_context
            return self._actual_context
        else:
            return super().get_context(method)

    def set_start_method(self, method, force=False):
        '''Choose the start method; may only be called once unless force=True.'''
        if self._actual_context is not None and not force:
            raise RuntimeError('context has already been set')
        if method is None and force:
            # Forced reset back to the lazy/default state.
            self._actual_context = None
            return
        self._actual_context = self.get_context(method)

    def get_start_method(self, allow_none=False):
        '''Return the chosen start method's name; None if unset and allowed.'''
        if self._actual_context is None:
            if allow_none:
                return None
            # Asking for the name also locks in the default context.
            self._actual_context = self._default_context
        return self._actual_context._name

    def get_all_start_methods(self):
        '''List the start methods supported on this platform.'''
        if sys.platform == 'win32':
            return ['spawn']
        else:
            methods = ['spawn', 'fork'] if sys.platform == 'darwin' else ['fork', 'spawn']
            if reduction.HAVE_SEND_HANDLE:
                # forkserver needs fd passing over sockets.
                methods.append('forkserver')
            return methods
268
+
269
+
270
+ #
271
+ # Context types for fixed start method
272
+ #
273
+
274
#
# Context types for fixed start method.  POSIX gets fork/spawn/forkserver;
# Windows only spawn.
#

if sys.platform != 'win32':

    class ForkProcess(process.BaseProcess):
        # 'fork' start method; Popen implementation lives in popen_fork.
        _start_method = 'fork'
        @staticmethod
        def _Popen(process_obj):
            from .popen_fork import Popen
            return Popen(process_obj)

    class SpawnProcess(process.BaseProcess):
        # 'spawn' start method; Popen implementation lives in popen_spawn_posix.
        _start_method = 'spawn'
        @staticmethod
        def _Popen(process_obj):
            from .popen_spawn_posix import Popen
            return Popen(process_obj)

        @staticmethod
        def _after_fork():
            # process is spawned, nothing to do
            pass

    class ForkServerProcess(process.BaseProcess):
        # 'forkserver' start method; Popen implementation lives in popen_forkserver.
        _start_method = 'forkserver'
        @staticmethod
        def _Popen(process_obj):
            from .popen_forkserver import Popen
            return Popen(process_obj)

    class ForkContext(BaseContext):
        _name = 'fork'
        Process = ForkProcess

    class SpawnContext(BaseContext):
        _name = 'spawn'
        Process = SpawnProcess

    class ForkServerContext(BaseContext):
        _name = 'forkserver'
        Process = ForkServerProcess
        def _check_available(self):
            # forkserver requires the ability to send fds over sockets.
            if not reduction.HAVE_SEND_HANDLE:
                raise ValueError('forkserver start method not available')

    # Registry consulted by BaseContext.get_context(method).
    _concrete_contexts = {
        'fork': ForkContext(),
        'spawn': SpawnContext(),
        'forkserver': ForkServerContext(),
    }
    if sys.platform == 'darwin':
        # bpo-33725: running arbitrary code after fork() is no longer reliable
        # on macOS since macOS 10.14 (Mojave).  Use spawn by default instead.
        _default_context = DefaultContext(_concrete_contexts['fork']) #FIXME: spawn
    else:
        _default_context = DefaultContext(_concrete_contexts['fork'])

else:

    class SpawnProcess(process.BaseProcess):
        # Only start method available on Windows.
        _start_method = 'spawn'
        @staticmethod
        def _Popen(process_obj):
            from .popen_spawn_win32 import Popen
            return Popen(process_obj)

        @staticmethod
        def _after_fork():
            # process is spawned, nothing to do
            pass

    class SpawnContext(BaseContext):
        _name = 'spawn'
        Process = SpawnProcess

    _concrete_contexts = {
        'spawn': SpawnContext(),
    }
    _default_context = DefaultContext(_concrete_contexts['spawn'])
351
+
352
+ #
353
+ # Force the start method
354
+ #
355
+
356
def _force_start_method(method):
    # Internal: overwrite the chosen context directly, bypassing the
    # "context has already been set" check in DefaultContext.set_start_method().
    _default_context._actual_context = _concrete_contexts[method]
358
+
359
+ #
360
+ # Check that the current thread is spawning a child process
361
+ #
362
+
363
# Thread-local slot recording which Popen (if any) is spawning a child
# process on the current thread.
_tls = threading.local()


def get_spawning_popen():
    """Return the Popen spawning a child on this thread, or None."""
    return getattr(_tls, 'spawning_popen', None)


def set_spawning_popen(popen):
    """Record *popen* as the spawner active on the current thread."""
    _tls.spawning_popen = popen


def assert_spawning(obj):
    """Raise RuntimeError unless the current thread is spawning a child."""
    if get_spawning_popen() is not None:
        return
    message = ('%s objects should only be shared between processes'
               ' through inheritance' % type(obj).__name__)
    raise RuntimeError(message)
llmeval-env/lib/python3.10/site-packages/multiprocess/managers.py ADDED
@@ -0,0 +1,1378 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # Module providing manager classes for dealing
3
+ # with shared objects
4
+ #
5
+ # multiprocessing/managers.py
6
+ #
7
+ # Copyright (c) 2006-2008, R Oudkerk
8
+ # Licensed to PSF under a Contributor Agreement.
9
+ #
10
+
11
+ __all__ = [ 'BaseManager', 'SyncManager', 'BaseProxy', 'Token' ]
12
+
13
+ #
14
+ # Imports
15
+ #
16
+
17
+ import sys
18
+ import threading
19
+ import signal
20
+ import array
21
+ import queue
22
+ import time
23
+ import types
24
+ import os
25
+ from os import getpid
26
+
27
+ from traceback import format_exc
28
+
29
+ from . import connection
30
+ from .context import reduction, get_spawning_popen, ProcessError
31
+ from . import pool
32
+ from . import process
33
+ from . import util
34
+ from . import get_context
35
+ try:
36
+ from . import shared_memory
37
+ except ImportError:
38
+ HAS_SHMEM = False
39
+ else:
40
+ HAS_SHMEM = True
41
+ __all__.append('SharedMemoryManager')
42
+
43
+ #
44
+ # Register some things for pickling
45
+ #
46
+
47
+ def reduce_array(a):
48
+ return array.array, (a.typecode, a.tobytes())
49
+ reduction.register(array.array, reduce_array)
50
+
51
+ view_types = [type(getattr({}, name)()) for name in ('items','keys','values')]
52
+ if view_types[0] is not list: # only needed in Py3.0
53
+ def rebuild_as_list(obj):
54
+ return list, (list(obj),)
55
+ for view_type in view_types:
56
+ reduction.register(view_type, rebuild_as_list)
57
+
58
+ #
59
+ # Type for identifying shared objects
60
+ #
61
+
62
class Token(object):
    '''
    Type to uniquely identify a shared object
    '''
    # Slots: the registered type name, the server address, and the object id.
    __slots__ = ('typeid', 'address', 'id')

    def __init__(self, typeid, address, id):
        self.typeid = typeid
        self.address = address
        self.id = id

    def __getstate__(self):
        # Slotted class: pickle state is the plain attribute triple.
        return (self.typeid, self.address, self.id)

    def __setstate__(self, state):
        self.typeid, self.address, self.id = state

    def __repr__(self):
        return '%s(typeid=%r, address=%r, id=%r)' % (
            self.__class__.__name__, self.typeid, self.address, self.id)
80
+
81
+ #
82
+ # Function for communication with a manager's server process
83
+ #
84
+
85
def dispatch(c, id, methodname, args=(), kwds={}):
    '''
    Send a message to manager using connection `c` and return response
    '''
    # Request wire format: (object id, method name, positional args, kwargs).
    c.send((id, methodname, args, kwds))
    kind, result = c.recv()
    if kind != '#RETURN':
        # Any non-return reply maps to an exception instance.
        raise convert_to_error(kind, result)
    return result
94
+
95
def convert_to_error(kind, result):
    """Map a (kind, result) server reply to an exception instance to raise."""
    if kind == '#ERROR':
        # Server sent a real exception object; hand it back unchanged.
        return result
    if kind in ('#TRACEBACK', '#UNSERIALIZABLE'):
        # These kinds must carry a pre-formatted traceback string.
        if not isinstance(result, str):
            raise TypeError(
                "Result {0!r} (kind '{1}') type is {2}, not str".format(
                    result, kind, type(result)))
        if kind == '#UNSERIALIZABLE':
            return RemoteError('Unserializable message: %s\n' % result)
        return RemoteError(result)
    return ValueError('Unrecognized message type {!r}'.format(kind))
109
+
110
class RemoteError(Exception):
    """Wraps a traceback string received from a remote process
    (see convert_to_error)."""

    def __str__(self):
        banner = '-' * 75
        # Frame the remote traceback between two dashed rules.
        return '\n' + banner + '\n' + str(self.args[0]) + banner
113
+
114
+ #
115
+ # Functions for finding the method names of an object
116
+ #
117
+
118
def all_methods(obj):
    '''
    Return a list of names of methods of `obj`
    '''
    # Everything in dir() whose attribute value is callable, dunders included.
    return [name for name in dir(obj) if callable(getattr(obj, name))]
128
+
129
def public_methods(obj):
    '''
    Return a list of names of methods of `obj` which do not start with '_'
    '''
    # Same scan as all_methods(), inlined, with the underscore filter applied.
    return [name for name in dir(obj)
            if callable(getattr(obj, name)) and name[0] != '_']
134
+
135
+ #
136
+ # Server which is run in a process controlled by a manager
137
+ #
138
+
139
+ class Server(object):
140
+ '''
141
+ Server class which runs in a process controlled by a manager object
142
+ '''
143
+ public = ['shutdown', 'create', 'accept_connection', 'get_methods',
144
+ 'debug_info', 'number_of_objects', 'dummy', 'incref', 'decref']
145
+
146
+ def __init__(self, registry, address, authkey, serializer):
147
+ if not isinstance(authkey, bytes):
148
+ raise TypeError(
149
+ "Authkey {0!r} is type {1!s}, not bytes".format(
150
+ authkey, type(authkey)))
151
+ self.registry = registry
152
+ self.authkey = process.AuthenticationString(authkey)
153
+ Listener, Client = listener_client[serializer]
154
+
155
+ # do authentication later
156
+ self.listener = Listener(address=address, backlog=16)
157
+ self.address = self.listener.address
158
+
159
+ self.id_to_obj = {'0': (None, ())}
160
+ self.id_to_refcount = {}
161
+ self.id_to_local_proxy_obj = {}
162
+ self.mutex = threading.Lock()
163
+
164
+ def serve_forever(self):
165
+ '''
166
+ Run the server forever
167
+ '''
168
+ self.stop_event = threading.Event()
169
+ process.current_process()._manager_server = self
170
+ try:
171
+ accepter = threading.Thread(target=self.accepter)
172
+ accepter.daemon = True
173
+ accepter.start()
174
+ try:
175
+ while not self.stop_event.is_set():
176
+ self.stop_event.wait(1)
177
+ except (KeyboardInterrupt, SystemExit):
178
+ pass
179
+ finally:
180
+ if sys.stdout != sys.__stdout__: # what about stderr?
181
+ util.debug('resetting stdout, stderr')
182
+ sys.stdout = sys.__stdout__
183
+ sys.stderr = sys.__stderr__
184
+ sys.exit(0)
185
+
186
+ def accepter(self):
187
+ while True:
188
+ try:
189
+ c = self.listener.accept()
190
+ except OSError:
191
+ continue
192
+ t = threading.Thread(target=self.handle_request, args=(c,))
193
+ t.daemon = True
194
+ t.start()
195
+
196
+ def _handle_request(self, c):
197
+ request = None
198
+ try:
199
+ connection.deliver_challenge(c, self.authkey)
200
+ connection.answer_challenge(c, self.authkey)
201
+ request = c.recv()
202
+ ignore, funcname, args, kwds = request
203
+ assert funcname in self.public, '%r unrecognized' % funcname
204
+ func = getattr(self, funcname)
205
+ except Exception:
206
+ msg = ('#TRACEBACK', format_exc())
207
+ else:
208
+ try:
209
+ result = func(c, *args, **kwds)
210
+ except Exception:
211
+ msg = ('#TRACEBACK', format_exc())
212
+ else:
213
+ msg = ('#RETURN', result)
214
+
215
+ try:
216
+ c.send(msg)
217
+ except Exception as e:
218
+ try:
219
+ c.send(('#TRACEBACK', format_exc()))
220
+ except Exception:
221
+ pass
222
+ util.info('Failure to send message: %r', msg)
223
+ util.info(' ... request was %r', request)
224
+ util.info(' ... exception was %r', e)
225
+
226
+ def handle_request(self, conn):
227
+ '''
228
+ Handle a new connection
229
+ '''
230
+ try:
231
+ self._handle_request(conn)
232
+ except SystemExit:
233
+ # Server.serve_client() calls sys.exit(0) on EOF
234
+ pass
235
+ finally:
236
+ conn.close()
237
+
238
def serve_client(self, conn):
    """Handle requests from the proxies in a particular process/thread.

    Loops until the server's stop_event is set, reading
    (ident, methodname, args, kwds) requests and replying with one of
    '#RETURN', '#PROXY', '#ERROR', '#TRACEBACK' or '#UNSERIALIZABLE'.
    Exits the thread via sys.exit(0) on EOF.
    """
    util.debug('starting server thread to service %r',
               threading.current_thread().name)

    # Hoist hot lookups out of the dispatch loop.
    recv = conn.recv
    send = conn.send
    id_to_obj = self.id_to_obj

    while not self.stop_event.is_set():
        try:
            methodname = obj = None
            request = recv()
            ident, methodname, args, kwds = request
            # Resolve the referent; fall back to locally stashed
            # proxy referents kept alive only inside the manager.
            try:
                obj, exposed, gettypeid = id_to_obj[ident]
            except KeyError as ke:
                try:
                    obj, exposed, gettypeid = \
                        self.id_to_local_proxy_obj[ident]
                except KeyError:
                    raise ke

            if methodname not in exposed:
                raise AttributeError(
                    'method %r of %r object is not in exposed=%r' %
                    (methodname, type(obj), exposed)
                )

            function = getattr(obj, methodname)

            try:
                res = function(*args, **kwds)
            except Exception as e:
                msg = ('#ERROR', e)
            else:
                # If this method's result must itself be shared,
                # create it server-side and hand back a proxy token.
                typeid = gettypeid and gettypeid.get(methodname, None)
                if typeid:
                    rident, rexposed = self.create(conn, typeid, res)
                    token = Token(typeid, self.address, rident)
                    msg = ('#PROXY', (rexposed, token))
                else:
                    msg = ('#RETURN', res)

        except AttributeError:
            if methodname is None:
                msg = ('#TRACEBACK', format_exc())
            else:
                # Unexposed dunder methods may have server-side fallbacks.
                try:
                    fallback_func = self.fallback_mapping[methodname]
                    result = fallback_func(
                        self, conn, ident, obj, *args, **kwds
                    )
                    msg = ('#RETURN', result)
                except Exception:
                    msg = ('#TRACEBACK', format_exc())

        except EOFError:
            util.debug('got EOF -- exiting thread serving %r',
                       threading.current_thread().name)
            sys.exit(0)

        except Exception:
            msg = ('#TRACEBACK', format_exc())

        try:
            try:
                send(msg)
            except Exception:
                # Reply could not be pickled; report that instead.
                send(('#UNSERIALIZABLE', format_exc()))
        except Exception as e:
            util.info('exception in thread serving %r',
                      threading.current_thread().name)
            util.info(' ... message was %r', msg)
            util.info(' ... exception was %r', e)
            conn.close()
            sys.exit(1)
318
+
319
def fallback_getvalue(self, conn, ident, obj):
    """Fallback for '#GETVALUE': return the referent itself (copied on send)."""
    return obj
321
+
322
def fallback_str(self, conn, ident, obj):
    """Fallback for '__str__' when it is not exposed on the referent."""
    return str(obj)
324
+
325
def fallback_repr(self, conn, ident, obj):
    """Fallback for '__repr__' when it is not exposed on the referent."""
    return repr(obj)
327
+
328
# Dispatch table consulted by serve_client() when a requested method is
# not in the referent's exposed set: maps special names to server-side
# fallbacks that operate on the referent directly.
fallback_mapping = {
    '__str__': fallback_str,
    '__repr__': fallback_repr,
    '#GETVALUE': fallback_getvalue,
}
333
+
334
def dummy(self, c):
    """No-op request used by BaseManager.connect() to probe the server."""
    pass
336
+
337
def debug_info(self, c):
    """Return some info --- useful to spot problems with refcounting."""
    # Perhaps include debug info about 'c'?
    with self.mutex:
        lines = [
            ' %s: refcount=%s\n %s' %
            (ident, self.id_to_refcount[ident],
             str(self.id_to_obj[ident][0])[:75])
            for ident in sorted(self.id_to_refcount.keys())
            if ident != '0'
        ]
        return '\n'.join(lines)
352
+
353
def number_of_objects(self, c):
    """Number of shared objects."""
    # Doesn't use (len(self.id_to_obj) - 1) as we shouldn't count ident='0'
    return len(self.id_to_refcount)
359
+
360
def shutdown(self, c):
    """Shutdown this process.

    Acknowledges the request on `c`, then sets stop_event so that
    serve_forever()/serve_client() unwind. The stop_event is set even
    if the acknowledgement fails.
    """
    try:
        util.debug('manager received shutdown message')
        c.send(('#RETURN', None))
    except Exception:
        # Fix: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; narrow to Exception while still logging.
        import traceback
        traceback.print_exc()
    finally:
        self.stop_event.set()
372
+
373
def create(self, c, typeid, /, *args, **kwds):
    """Create a new shared object and return (ident, exposed-tuple)."""
    with self.mutex:
        # `factory` is the registered callable (may be None when the
        # caller supplies a ready-made object as the single argument).
        factory, exposed, method_to_typeid, proxytype = \
            self.registry[typeid]

        if factory is None:
            if kwds or (len(args) != 1):
                raise ValueError(
                    "Without callable, must have one non-keyword argument")
            obj = args[0]
        else:
            obj = factory(*args, **kwds)

        if exposed is None:
            exposed = public_methods(obj)
        if method_to_typeid is not None:
            if not isinstance(method_to_typeid, dict):
                raise TypeError(
                    "Method_to_typeid {0!r}: type {1!s}, not dict".format(
                        method_to_typeid, type(method_to_typeid)))
            exposed = list(exposed) + list(method_to_typeid)

        ident = '%x' % id(obj)   # convert to string because xmlrpclib
                                 # only has 32 bit signed integers
        util.debug('%r callable returned object with id %r', typeid, ident)

        self.id_to_obj[ident] = (obj, set(exposed), method_to_typeid)
        if ident not in self.id_to_refcount:
            self.id_to_refcount[ident] = 0

    # incref takes self.mutex itself, so it must run outside the lock.
    self.incref(c, ident)
    return ident, tuple(exposed)
408
+
409
def get_methods(self, c, token):
    """Return the exposed method names of the object indicated by `token`."""
    return tuple(self.id_to_obj[token.id][1])
414
+
415
def accept_connection(self, c, name):
    """Serve this connection on the current (freshly spawned) thread.

    Renames the thread to `name`, acknowledges, then blocks in
    serve_client() until the client disconnects.
    """
    threading.current_thread().name = name
    c.send(('#RETURN', None))
    self.serve_client(c)
422
+
423
def incref(self, c, ident):
    """Increment the server-side refcount for `ident`."""
    with self.mutex:
        try:
            self.id_to_refcount[ident] += 1
        except KeyError as ke:
            # If no external references exist but an internal (to the
            # manager) one still does, and a new external reference is
            # created from it, restore tracking from the stashed ref.
            if ident in self.id_to_local_proxy_obj:
                self.id_to_refcount[ident] = 1
                self.id_to_obj[ident] = \
                    self.id_to_local_proxy_obj[ident]
                obj, exposed, gettypeid = self.id_to_obj[ident]
                util.debug('Server re-enabled tracking & INCREF %r', ident)
            else:
                raise ke
440
+
441
def decref(self, c, ident):
    """Decrement the refcount for `ident`, disposing of it at zero."""
    if ident not in self.id_to_refcount and \
            ident in self.id_to_local_proxy_obj:
        # Only an internal (manager-held) reference remains; nothing to do.
        util.debug('Server DECREF skipping %r', ident)
        return

    with self.mutex:
        if self.id_to_refcount[ident] <= 0:
            raise AssertionError(
                "Id {0!s} ({1!r}) has refcount {2:n}, not 1+".format(
                    ident, self.id_to_obj[ident],
                    self.id_to_refcount[ident]))
        self.id_to_refcount[ident] -= 1
        if self.id_to_refcount[ident] == 0:
            del self.id_to_refcount[ident]

    if ident not in self.id_to_refcount:
        # Two-step process in case the object turns out to contain other
        # proxy objects (e.g. a managed list of managed lists).
        # Otherwise, deleting self.id_to_obj[ident] would trigger the
        # deleting of the stored value (another managed object) which
        # would in turn attempt to acquire the mutex already held here.
        self.id_to_obj[ident] = (None, (), None)  # thread-safe
        util.debug('disposing of obj with id %r', ident)
        with self.mutex:
            del self.id_to_obj[ident]
467
+
468
+
469
+ #
470
+ # Class to represent state of a manager
471
+ #
472
+
473
class State(object):
    """Mutable lifecycle flag for a manager: INITIAL -> STARTED -> SHUTDOWN."""
    __slots__ = ['value']
    INITIAL = 0
    STARTED = 1
    SHUTDOWN = 2
478
+
479
+ #
480
+ # Mapping from serializer name to Listener and Client types
481
+ #
482
+
483
# Mapping from serializer name to (Listener, Client) types.
listener_client = {  #XXX: register dill?
    'pickle': (connection.Listener, connection.Client),
    'xmlrpclib': (connection.XmlListener, connection.XmlClient),
}
487
+
488
+ #
489
+ # Definition of BaseManager
490
+ #
491
+
492
class BaseManager(object):
    '''
    Base class for managers
    '''
    _registry = {}
    _Server = Server

    def __init__(self, address=None, authkey=None, serializer='pickle',
                 ctx=None):
        if authkey is None:
            authkey = process.current_process().authkey
        self._address = address     # XXX not final address if eg ('', 0)
        self._authkey = process.AuthenticationString(authkey)
        self._state = State()
        self._state.value = State.INITIAL
        self._serializer = serializer
        self._Listener, self._Client = listener_client[serializer]
        self._ctx = ctx or get_context()

    def _assert_initial_state(self):
        # Shared guard for get_server()/start(): only legal from INITIAL.
        if self._state.value != State.INITIAL:
            if self._state.value == State.STARTED:
                raise ProcessError("Already started server")
            elif self._state.value == State.SHUTDOWN:
                raise ProcessError("Manager has shut down")
            else:
                raise ProcessError(
                    "Unknown state {!r}".format(self._state.value))

    def get_server(self):
        '''
        Return server object with serve_forever() method and address attribute
        '''
        self._assert_initial_state()
        return Server(self._registry, self._address,
                      self._authkey, self._serializer)

    def connect(self):
        '''
        Connect manager object to the server process
        '''
        Listener, Client = listener_client[self._serializer]
        conn = Client(self._address, authkey=self._authkey)
        dispatch(conn, None, 'dummy')
        self._state.value = State.STARTED

    def start(self, initializer=None, initargs=()):
        '''
        Spawn a server process for this manager object
        '''
        self._assert_initial_state()

        if initializer is not None and not callable(initializer):
            raise TypeError('initializer must be a callable')

        # pipe over which we will retrieve address of server
        reader, writer = connection.Pipe(duplex=False)

        # spawn process which runs a server
        self._process = self._ctx.Process(
            target=type(self)._run_server,
            args=(self._registry, self._address, self._authkey,
                  self._serializer, writer, initializer, initargs),
        )
        ident = ':'.join(str(i) for i in self._process._identity)
        self._process.name = type(self).__name__ + '-' + ident
        self._process.start()

        # get address of server
        writer.close()
        self._address = reader.recv()
        reader.close()

        # register a finalizer
        self._state.value = State.STARTED
        self.shutdown = util.Finalize(
            self, type(self)._finalize_manager,
            args=(self._process, self._address, self._authkey,
                  self._state, self._Client),
            exitpriority=0
        )

    @classmethod
    def _run_server(cls, registry, address, authkey, serializer, writer,
                    initializer=None, initargs=()):
        '''
        Create a server, report its address and run it
        '''
        # bpo-36368: protect server process from KeyboardInterrupt signals
        signal.signal(signal.SIGINT, signal.SIG_IGN)

        if initializer is not None:
            initializer(*initargs)

        # create server
        server = cls._Server(registry, address, authkey, serializer)

        # inform parent process of the server's address
        writer.send(server.address)
        writer.close()

        # run the manager
        util.info('manager serving at %r', server.address)
        server.serve_forever()

    def _create(self, typeid, /, *args, **kwds):
        '''
        Create a new shared object; return the token and exposed tuple
        '''
        assert self._state.value == State.STARTED, 'server not yet started'
        conn = self._Client(self._address, authkey=self._authkey)
        try:
            id, exposed = dispatch(conn, None, 'create', (typeid,)+args, kwds)
        finally:
            conn.close()
        return Token(typeid, self._address, id), exposed

    def join(self, timeout=None):
        '''
        Join the manager process (if it has been spawned)
        '''
        if self._process is not None:
            self._process.join(timeout)
            if not self._process.is_alive():
                self._process = None

    def _debug_info(self):
        '''
        Return some info about the servers shared objects and connections
        '''
        conn = self._Client(self._address, authkey=self._authkey)
        try:
            return dispatch(conn, None, 'debug_info')
        finally:
            conn.close()

    def _number_of_objects(self):
        '''
        Return the number of shared objects
        '''
        conn = self._Client(self._address, authkey=self._authkey)
        try:
            return dispatch(conn, None, 'number_of_objects')
        finally:
            conn.close()

    def __enter__(self):
        if self._state.value == State.INITIAL:
            self.start()
        if self._state.value != State.STARTED:
            if self._state.value == State.INITIAL:
                raise ProcessError("Unable to start server")
            elif self._state.value == State.SHUTDOWN:
                raise ProcessError("Manager has shut down")
            else:
                raise ProcessError(
                    "Unknown state {!r}".format(self._state.value))
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.shutdown()

    @staticmethod
    def _finalize_manager(proc, address, authkey, state, _Client):
        '''
        Shutdown the manager process; will be registered as a finalizer
        '''
        if proc.is_alive():
            util.info('sending shutdown message to manager')
            try:
                conn = _Client(address, authkey=authkey)
                try:
                    dispatch(conn, None, 'shutdown')
                finally:
                    conn.close()
            except Exception:
                pass

            proc.join(timeout=1.0)
            if proc.is_alive():
                util.info('manager still alive')
                if hasattr(proc, 'terminate'):
                    util.info('trying to `terminate()` manager process')
                    proc.terminate()
                    proc.join(timeout=1.0)
                    if proc.is_alive():
                        util.info('manager still alive after terminate')

        state.value = State.SHUTDOWN
        try:
            del BaseProxy._address_to_local[address]
        except KeyError:
            pass

    @property
    def address(self):
        return self._address

    @classmethod
    def register(cls, typeid, callable=None, proxytype=None, exposed=None,
                 method_to_typeid=None, create_method=True):
        '''
        Register a typeid with the manager type
        '''
        if '_registry' not in cls.__dict__:
            # Give each subclass its own registry on first registration.
            cls._registry = cls._registry.copy()

        if proxytype is None:
            proxytype = AutoProxy

        exposed = exposed or getattr(proxytype, '_exposed_', None)

        method_to_typeid = method_to_typeid or \
            getattr(proxytype, '_method_to_typeid_', None)

        if method_to_typeid:
            for key, value in list(method_to_typeid.items()):  # isinstance?
                assert type(key) is str, '%r is not a string' % key
                assert type(value) is str, '%r is not a string' % value

        cls._registry[typeid] = (
            callable, exposed, method_to_typeid, proxytype
        )

        if create_method:
            def temp(self, /, *args, **kwds):
                util.debug('requesting creation of a shared %r object', typeid)
                token, exp = self._create(typeid, *args, **kwds)
                proxy = proxytype(
                    token, self._serializer, manager=self,
                    authkey=self._authkey, exposed=exp
                )
                # Drop the extra server-side reference taken by create().
                conn = self._Client(token.address, authkey=self._authkey)
                dispatch(conn, None, 'decref', (token.id,))
                return proxy
            temp.__name__ = typeid
            setattr(cls, typeid, temp)
733
+
734
+ #
735
+ # Subclass of set which get cleared after a fork
736
+ #
737
+
738
class ProcessLocalSet(set):
    """A set which is emptied after a fork and pickles as an empty set."""
    def __init__(self):
        util.register_after_fork(self, lambda obj: obj.clear())

    def __reduce__(self):
        return type(self), ()
743
+
744
+ #
745
+ # Definition of BaseProxy
746
+ #
747
+
748
class BaseProxy(object):
    '''
    A base for proxies of shared objects
    '''
    _address_to_local = {}
    _mutex = util.ForkAwareThreadLock()

    def __init__(self, token, serializer, manager=None,
                 authkey=None, exposed=None, incref=True, manager_owned=False):
        with BaseProxy._mutex:
            tls_idset = BaseProxy._address_to_local.get(token.address, None)
            if tls_idset is None:
                tls_idset = util.ForkAwareLocal(), ProcessLocalSet()
                BaseProxy._address_to_local[token.address] = tls_idset

        # self._tls records the connection used by this thread to
        # communicate with the manager at token.address
        self._tls = tls_idset[0]

        # self._idset records the identities of all shared objects for
        # which the current process owns references and which live in
        # the manager at token.address
        self._idset = tls_idset[1]

        self._token = token
        self._id = self._token.id
        self._manager = manager
        self._serializer = serializer
        self._Client = listener_client[serializer][1]

        # True only for proxies created on the manager server itself
        # (nested proxy objects); set by RebuildProxy.
        self._owned_by_manager = manager_owned

        if authkey is not None:
            self._authkey = process.AuthenticationString(authkey)
        elif self._manager is not None:
            self._authkey = self._manager._authkey
        else:
            self._authkey = process.current_process().authkey

        if incref:
            self._incref()

        util.register_after_fork(self, BaseProxy._after_fork)

    def _connect(self):
        """Open this thread's connection to the manager server."""
        util.debug('making connection to manager')
        name = process.current_process().name
        if threading.current_thread().name != 'MainThread':
            name += '|' + threading.current_thread().name
        conn = self._Client(self._token.address, authkey=self._authkey)
        dispatch(conn, None, 'accept_connection', (name,))
        self._tls.connection = conn

    def _callmethod(self, methodname, args=(), kwds={}):
        '''
        Try to call a method of the referent and return a copy of the result
        '''
        try:
            conn = self._tls.connection
        except AttributeError:
            util.debug('thread %r does not own a connection',
                       threading.current_thread().name)
            self._connect()
            conn = self._tls.connection

        conn.send((self._id, methodname, args, kwds))
        kind, result = conn.recv()

        if kind == '#RETURN':
            return result
        elif kind == '#PROXY':
            # The server created a nested shared object; wrap its token.
            exposed, token = result
            proxytype = self._manager._registry[token.typeid][-1]
            token.address = self._token.address
            proxy = proxytype(
                token, self._serializer, manager=self._manager,
                authkey=self._authkey, exposed=exposed
            )
            conn = self._Client(token.address, authkey=self._authkey)
            dispatch(conn, None, 'decref', (token.id,))
            return proxy
        raise convert_to_error(kind, result)

    def _getvalue(self):
        '''
        Get a copy of the value of the referent
        '''
        return self._callmethod('#GETVALUE')

    def _incref(self):
        """Register one more local reference with the manager server."""
        if self._owned_by_manager:
            util.debug('owned_by_manager skipped INCREF of %r', self._token.id)
            return

        conn = self._Client(self._token.address, authkey=self._authkey)
        dispatch(conn, None, 'incref', (self._id,))
        util.debug('INCREF %r', self._token.id)

        self._idset.add(self._id)

        state = self._manager and self._manager._state

        self._close = util.Finalize(
            self, BaseProxy._decref,
            args=(self._token, self._authkey, state,
                  self._tls, self._idset, self._Client),
            exitpriority=10
        )

    @staticmethod
    def _decref(token, authkey, state, tls, idset, _Client):
        idset.discard(token.id)

        # check whether manager is still alive
        if state is None or state.value == State.STARTED:
            # tell manager this process no longer cares about referent
            try:
                util.debug('DECREF %r', token.id)
                conn = _Client(token.address, authkey=authkey)
                dispatch(conn, None, 'decref', (token.id,))
            except Exception as e:
                util.debug('... decref failed %s', e)
        else:
            util.debug('DECREF %r -- manager already shutdown', token.id)

        # close this thread's connection once the process owns no more
        # references to objects on this manager
        if not idset and hasattr(tls, 'connection'):
            util.debug('thread %r has no more proxies so closing conn',
                       threading.current_thread().name)
            tls.connection.close()
            del tls.connection

    def _after_fork(self):
        self._manager = None
        try:
            self._incref()
        except Exception as e:
            # the proxy may just be for a manager which has shutdown
            util.info('incref failed: %s' % e)

    def __reduce__(self):
        kwds = {}
        if get_spawning_popen() is not None:
            kwds['authkey'] = self._authkey

        if getattr(self, '_isauto', False):
            kwds['exposed'] = self._exposed_
            return (RebuildProxy,
                    (AutoProxy, self._token, self._serializer, kwds))
        else:
            return (RebuildProxy,
                    (type(self), self._token, self._serializer, kwds))

    def __deepcopy__(self, memo):
        return self._getvalue()

    def __repr__(self):
        return '<%s object, typeid %r at %#x>' % \
               (type(self).__name__, self._token.typeid, id(self))

    def __str__(self):
        '''
        Return representation of the referent (or a fall-back if that fails)
        '''
        try:
            return self._callmethod('__repr__')
        except Exception:
            return repr(self)[:-1] + "; '__str__()' failed>"
922
+
923
+ #
924
+ # Function used for unpickling
925
+ #
926
+
927
def RebuildProxy(func, token, serializer, kwds):
    '''
    Function used for unpickling proxy objects.
    '''
    server = getattr(process.current_process(), '_manager_server', None)
    if server and server.address == token.address:
        # Unpickling inside the owning manager: mark as manager-owned and
        # stash the referent so it survives with zero external refs.
        util.debug('Rebuild a proxy owned by manager, token=%r', token)
        kwds['manager_owned'] = True
        if token.id not in server.id_to_local_proxy_obj:
            server.id_to_local_proxy_obj[token.id] = \
                server.id_to_obj[token.id]
    # Skip the incref while a child is inheriting state from its parent.
    incref = (
        kwds.pop('incref', True) and
        not getattr(process.current_process(), '_inheriting', False)
    )
    return func(token, serializer, incref=incref, **kwds)
943
+
944
+ #
945
+ # Functions to create proxies and proxy types
946
+ #
947
+
948
def MakeProxyType(name, exposed, _cache={}):
    '''
    Return a proxy type whose methods are given by `exposed`
    '''
    # NOTE: `_cache={}` is the deliberate shared-default memoization idiom.
    exposed = tuple(exposed)
    try:
        return _cache[(name, exposed)]
    except KeyError:
        pass

    dic = {}

    # Generate one forwarding method per exposed name.
    for meth in exposed:
        exec('''def %s(self, /, *args, **kwds):
        return self._callmethod(%r, args, kwds)''' % (meth, meth), dic)

    ProxyType = type(name, (BaseProxy,), dic)
    ProxyType._exposed_ = exposed
    _cache[(name, exposed)] = ProxyType
    return ProxyType
968
+
969
+
970
def AutoProxy(token, serializer, manager=None, authkey=None,
              exposed=None, incref=True, manager_owned=False):
    '''
    Return an auto-proxy for `token`
    '''
    _Client = listener_client[serializer][1]

    if exposed is None:
        # Ask the server which methods the referent exposes.
        conn = _Client(token.address, authkey=authkey)
        try:
            exposed = dispatch(conn, None, 'get_methods', (token,))
        finally:
            conn.close()

    if authkey is None and manager is not None:
        authkey = manager._authkey
    if authkey is None:
        authkey = process.current_process().authkey

    ProxyType = MakeProxyType('AutoProxy[%s]' % token.typeid, exposed)
    proxy = ProxyType(token, serializer, manager=manager, authkey=authkey,
                      incref=incref, manager_owned=manager_owned)
    proxy._isauto = True
    return proxy
994
+
995
+ #
996
+ # Types/callables which we will register with SyncManager
997
+ #
998
+
999
class Namespace(object):
    """Simple attribute bag; repr lists non-underscore attributes, sorted."""
    def __init__(self, /, **kwds):
        self.__dict__.update(kwds)

    def __repr__(self):
        shown = sorted(
            '%s=%r' % (name, value)
            for name, value in self.__dict__.items()
            if not name.startswith('_')
        )
        return '%s(%s)' % (self.__class__.__name__, ', '.join(shown))
1010
+
1011
class Value(object):
    """Container pairing a typecode with a value.

    The `lock` argument is accepted for API compatibility with
    multiprocessing.Value but is not used by this referent class.
    """
    def __init__(self, typecode, value, lock=True):
        self._typecode = typecode
        self._value = value

    def get(self):
        return self._value

    def set(self, value):
        self._value = value

    def __repr__(self):
        return '%s(%r, %r)' % (type(self).__name__,
                               self._typecode, self._value)

    value = property(get, set)
1022
+
1023
def Array(typecode, sequence, lock=True):
    """Return a plain array.array; `lock` is accepted for API compatibility."""
    return array.array(typecode, sequence)
1025
+
1026
+ #
1027
+ # Proxy types used by SyncManager
1028
+ #
1029
+
1030
class IteratorProxy(BaseProxy):
    """Proxy for iterator/generator referents held by the manager."""
    _exposed_ = ('__next__', 'send', 'throw', 'close')

    def __iter__(self):
        return self

    def __next__(self, *args):
        return self._callmethod('__next__', args)

    def send(self, *args):
        return self._callmethod('send', args)

    def throw(self, *args):
        return self._callmethod('throw', args)

    def close(self, *args):
        return self._callmethod('close', args)
1042
+
1043
+
1044
class AcquirerProxy(BaseProxy):
    """Proxy for lock-like referents; usable as a context manager."""
    _exposed_ = ('acquire', 'release')

    def acquire(self, blocking=True, timeout=None):
        # Only forward `timeout` when given, matching referent signatures
        # that may not accept it.
        args = (blocking,) if timeout is None else (blocking, timeout)
        return self._callmethod('acquire', args)

    def release(self):
        return self._callmethod('release')

    def __enter__(self):
        return self._callmethod('acquire')

    def __exit__(self, exc_type, exc_val, exc_tb):
        return self._callmethod('release')
1055
+
1056
+
1057
class ConditionProxy(AcquirerProxy):
    """Proxy for threading.Condition referents; wait_for runs client-side."""
    _exposed_ = ('acquire', 'release', 'wait', 'notify', 'notify_all')

    def wait(self, timeout=None):
        return self._callmethod('wait', (timeout,))

    def notify(self, n=1):
        return self._callmethod('notify', (n,))

    def notify_all(self):
        return self._callmethod('notify_all')

    def wait_for(self, predicate, timeout=None):
        # The predicate runs locally, so this loop is implemented here
        # rather than forwarded to the server.
        result = predicate()
        if result:
            return result
        if timeout is not None:
            endtime = getattr(time, 'monotonic', time.time)() + timeout
        else:
            endtime = None
        waittime = None
        while not result:
            if endtime is not None:
                waittime = endtime - getattr(time, 'monotonic', time.time)()
                if waittime <= 0:
                    break
            self.wait(waittime)
            result = predicate()
        return result
1082
+
1083
+
1084
class EventProxy(BaseProxy):
    """Proxy for threading.Event referents."""
    _exposed_ = ('is_set', 'set', 'clear', 'wait')

    def is_set(self):
        return self._callmethod('is_set')

    def set(self):
        return self._callmethod('set')

    def clear(self):
        return self._callmethod('clear')

    def wait(self, timeout=None):
        return self._callmethod('wait', (timeout,))
1094
+
1095
+
1096
class BarrierProxy(BaseProxy):
    """Proxy for threading.Barrier referents; attributes fetched remotely."""
    _exposed_ = ('__getattribute__', 'wait', 'abort', 'reset')

    def wait(self, timeout=None):
        return self._callmethod('wait', (timeout,))

    def abort(self):
        return self._callmethod('abort')

    def reset(self):
        return self._callmethod('reset')

    @property
    def parties(self):
        return self._callmethod('__getattribute__', ('parties',))

    @property
    def n_waiting(self):
        return self._callmethod('__getattribute__', ('n_waiting',))

    @property
    def broken(self):
        return self._callmethod('__getattribute__', ('broken',))
1113
+
1114
+
1115
class NamespaceProxy(BaseProxy):
    """Proxy for Namespace referents; forwards attribute access remotely.

    Names starting with '_' are handled locally so the proxy's own
    bookkeeping attributes keep working.
    """
    _exposed_ = ('__getattribute__', '__setattr__', '__delattr__')

    def __getattr__(self, key):
        if key[0] == '_':
            return object.__getattribute__(self, key)
        callmethod = object.__getattribute__(self, '_callmethod')
        return callmethod('__getattribute__', (key,))

    def __setattr__(self, key, value):
        if key[0] == '_':
            return object.__setattr__(self, key, value)
        callmethod = object.__getattribute__(self, '_callmethod')
        return callmethod('__setattr__', (key, value))

    def __delattr__(self, key):
        if key[0] == '_':
            return object.__delattr__(self, key)
        callmethod = object.__getattribute__(self, '_callmethod')
        return callmethod('__delattr__', (key,))
1132
+
1133
+
1134
class ValueProxy(BaseProxy):
    """Proxy for Value referents; exposes a `value` property."""
    _exposed_ = ('get', 'set')

    def get(self):
        return self._callmethod('get')

    def set(self, value):
        return self._callmethod('set', (value,))

    value = property(get, set)

    __class_getitem__ = classmethod(types.GenericAlias)
1143
+
1144
+
1145
BaseListProxy = MakeProxyType('BaseListProxy', (
    '__add__', '__contains__', '__delitem__', '__getitem__', '__len__',
    '__mul__', '__reversed__', '__rmul__', '__setitem__',
    'append', 'count', 'extend', 'index', 'insert', 'pop', 'remove',
    'reverse', 'sort', '__imul__'
))


class ListProxy(BaseListProxy):
    """Adds in-place operators, which must return the proxy itself."""
    def __iadd__(self, value):
        self._callmethod('extend', (value,))
        return self

    def __imul__(self, value):
        self._callmethod('__imul__', (value,))
        return self
1158
+
1159
+
1160
DictProxy = MakeProxyType('DictProxy', (
    '__contains__', '__delitem__', '__getitem__', '__iter__', '__len__',
    '__setitem__', 'clear', 'copy', 'get', 'items',
    'keys', 'pop', 'popitem', 'setdefault', 'update', 'values'
))
# __iter__ must hand back a proxied iterator, not a pickled copy.
DictProxy._method_to_typeid_ = {
    '__iter__': 'Iterator',
}
1168
+
1169
+
1170
# Minimal sequence interface over a shared array.array referent.
ArrayProxy = MakeProxyType('ArrayProxy', (
    '__len__', '__getitem__', '__setitem__'
))
1173
+
1174
+
1175
BasePoolProxy = MakeProxyType('PoolProxy', (
    'apply', 'apply_async', 'close', 'imap', 'imap_unordered', 'join',
    'map', 'map_async', 'starmap', 'starmap_async', 'terminate',
))
# Async/iterator results come back as proxies of these typeids.
BasePoolProxy._method_to_typeid_ = {
    'apply_async': 'AsyncResult',
    'map_async': 'AsyncResult',
    'starmap_async': 'AsyncResult',
    'imap': 'Iterator',
    'imap_unordered': 'Iterator'
}


class PoolProxy(BasePoolProxy):
    """Adds context-manager support mirroring multiprocessing.Pool."""
    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.terminate()
1191
+
1192
+ #
1193
+ # Definition of SyncManager
1194
+ #
1195
+
1196
class SyncManager(BaseManager):
    '''
    Subclass of `BaseManager` which supports a number of shared object types.

    The types registered are those intended for the synchronization
    of threads, plus `dict`, `list` and `Namespace`.

    The `multiprocess.Manager()` function creates started instances of
    this class.
    '''
1206
+
1207
# Register the standard shared types on SyncManager.  The referents are
# ordinary threading/queue objects living in the server process; clients
# interact with them through the proxy types given here.
SyncManager.register('Queue', queue.Queue)
SyncManager.register('JoinableQueue', queue.Queue)
SyncManager.register('Event', threading.Event, EventProxy)
SyncManager.register('Lock', threading.Lock, AcquirerProxy)
SyncManager.register('RLock', threading.RLock, AcquirerProxy)
SyncManager.register('Semaphore', threading.Semaphore, AcquirerProxy)
SyncManager.register('BoundedSemaphore', threading.BoundedSemaphore,
                     AcquirerProxy)
SyncManager.register('Condition', threading.Condition, ConditionProxy)
SyncManager.register('Barrier', threading.Barrier, BarrierProxy)
SyncManager.register('Pool', pool.Pool, PoolProxy)
SyncManager.register('list', list, ListProxy)
SyncManager.register('dict', dict, DictProxy)
SyncManager.register('Value', Value, ValueProxy)
SyncManager.register('Array', Array, ArrayProxy)
SyncManager.register('Namespace', Namespace, NamespaceProxy)

# types returned by methods of PoolProxy
SyncManager.register('Iterator', proxytype=IteratorProxy, create_method=False)
SyncManager.register('AsyncResult', create_method=False)
1227
+
1228
+ #
1229
+ # Definition of SharedMemoryManager and SharedMemoryServer
1230
+ #
1231
+
1232
+ if HAS_SHMEM:
1233
+ class _SharedMemoryTracker:
1234
+ "Manages one or more shared memory segments."
1235
+
1236
+ def __init__(self, name, segment_names=[]):
1237
+ self.shared_memory_context_name = name
1238
+ self.segment_names = segment_names
1239
+
1240
+ def register_segment(self, segment_name):
1241
+ "Adds the supplied shared memory block name to tracker."
1242
+ util.debug(f"Register segment {segment_name!r} in pid {getpid()}")
1243
+ self.segment_names.append(segment_name)
1244
+
1245
+ def destroy_segment(self, segment_name):
1246
+ """Calls unlink() on the shared memory block with the supplied name
1247
+ and removes it from the list of blocks being tracked."""
1248
+ util.debug(f"Destroy segment {segment_name!r} in pid {getpid()}")
1249
+ self.segment_names.remove(segment_name)
1250
+ segment = shared_memory.SharedMemory(segment_name)
1251
+ segment.close()
1252
+ segment.unlink()
1253
+
1254
+ def unlink(self):
1255
+ "Calls destroy_segment() on all tracked shared memory blocks."
1256
+ for segment_name in self.segment_names[:]:
1257
+ self.destroy_segment(segment_name)
1258
+
1259
+ def __del__(self):
1260
+ util.debug(f"Call {self.__class__.__name__}.__del__ in {getpid()}")
1261
+ self.unlink()
1262
+
1263
+ def __getstate__(self):
1264
+ return (self.shared_memory_context_name, self.segment_names)
1265
+
1266
+ def __setstate__(self, state):
1267
+ self.__init__(*state)
1268
+
1269
+
1270
    class SharedMemoryServer(Server):
        """Server that additionally tracks shared memory segments.

        Segments created through a SharedMemoryManager are registered with
        this server so they can all be unlinked when the server shuts down.
        """

        # RPC-callable methods exposed beyond the base Server's set.
        public = Server.public + \
                 ['track_segment', 'release_segment', 'list_segments']

        def __init__(self, *args, **kwargs):
            Server.__init__(self, *args, **kwargs)
            address = self.address
            # The address of Linux abstract namespaces can be bytes
            if isinstance(address, bytes):
                address = os.fsdecode(address)
            # Tracker name embeds the address and pid to make it identifiable
            # in debug output.
            self.shared_memory_context = \
                _SharedMemoryTracker(f"shm_{address}_{getpid()}")
            util.debug(f"SharedMemoryServer started by pid {getpid()}")

        def create(self, c, typeid, /, *args, **kwargs):
            """Create a new distributed-shared object (not backed by a shared
            memory block) and return its id to be used in a Proxy Object."""
            # Unless set up as a shared proxy, don't make shared_memory_context
            # a standard part of kwargs. This makes things easier for supplying
            # simple functions.
            if hasattr(self.registry[typeid][-1], "_shared_memory_proxy"):
                kwargs['shared_memory_context'] = self.shared_memory_context
            return Server.create(self, c, typeid, *args, **kwargs)

        def shutdown(self, c):
            "Call unlink() on all tracked shared memory, terminate the Server."
            self.shared_memory_context.unlink()
            return Server.shutdown(self, c)

        def track_segment(self, c, segment_name):
            "Adds the supplied shared memory block name to Server's tracker."
            self.shared_memory_context.register_segment(segment_name)

        def release_segment(self, c, segment_name):
            """Calls unlink() on the shared memory block with the supplied name
            and removes it from the tracker instance inside the Server."""
            self.shared_memory_context.destroy_segment(segment_name)

        def list_segments(self, c):
            """Returns a list of names of shared memory blocks that the Server
            is currently tracking."""
            return self.shared_memory_context.segment_names
1313
+
1314
+
1315
    class SharedMemoryManager(BaseManager):
        """Like SyncManager but uses SharedMemoryServer instead of Server.

        It provides methods for creating and returning SharedMemory instances
        and for creating a list-like object (ShareableList) backed by shared
        memory.  It also provides methods that create and return Proxy Objects
        that support synchronization across processes (i.e. multi-process-safe
        locks and semaphores).
        """

        # Server class instantiated by get_server() below.
        _Server = SharedMemoryServer

        def __init__(self, *args, **kwargs):
            if os.name == "posix":
                # bpo-36867: Ensure the resource_tracker is running before
                # launching the manager process, so that concurrent
                # shared_memory manipulation both in the manager and in the
                # current process does not create two resource_tracker
                # processes.
                from . import resource_tracker
                resource_tracker.ensure_running()
            BaseManager.__init__(self, *args, **kwargs)
            util.debug(f"{self.__class__.__name__} created by pid {getpid()}")

        def __del__(self):
            # Debug logging only; actual cleanup is handled elsewhere
            # (server shutdown unlinks tracked segments).
            util.debug(f"{self.__class__.__name__}.__del__ by pid {getpid()}")
            pass

        def get_server(self):
            'Better than monkeypatching for now; merge into Server ultimately'
            # Guard against reuse: a manager serves at most once per lifetime.
            if self._state.value != State.INITIAL:
                if self._state.value == State.STARTED:
                    raise ProcessError("Already started SharedMemoryServer")
                elif self._state.value == State.SHUTDOWN:
                    raise ProcessError("SharedMemoryManager has shut down")
                else:
                    raise ProcessError(
                        "Unknown state {!r}".format(self._state.value))
            return self._Server(self._registry, self._address,
                                self._authkey, self._serializer)

        def SharedMemory(self, size):
            """Returns a new SharedMemory instance with the specified size in
            bytes, to be tracked by the manager."""
            with self._Client(self._address, authkey=self._authkey) as conn:
                sms = shared_memory.SharedMemory(None, create=True, size=size)
                try:
                    dispatch(conn, None, 'track_segment', (sms.name,))
                except BaseException as e:
                    # If the server never learned about the block, unlink it
                    # here so it is not leaked.
                    sms.unlink()
                    raise e
            return sms

        def ShareableList(self, sequence):
            """Returns a new ShareableList instance populated with the values
            from the input sequence, to be tracked by the manager."""
            with self._Client(self._address, authkey=self._authkey) as conn:
                sl = shared_memory.ShareableList(sequence)
                try:
                    dispatch(conn, None, 'track_segment', (sl.shm.name,))
                except BaseException as e:
                    # Avoid leaking the backing block if tracking failed.
                    sl.shm.unlink()
                    raise e
            return sl
llmeval-env/lib/python3.10/site-packages/multiprocess/shared_memory.py ADDED
@@ -0,0 +1,534 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Provides shared memory for direct access across processes.
2
+
3
+ The API of this package is currently provisional. Refer to the
4
+ documentation for details.
5
+ """
6
+
7
+
8
+ __all__ = [ 'SharedMemory', 'ShareableList' ]
9
+
10
+
11
+ from functools import partial
12
+ import mmap
13
+ import os
14
+ import errno
15
+ import struct
16
+ import secrets
17
+ import types
18
+
19
+ if os.name == "nt":
20
+ import _winapi
21
+ _USE_POSIX = False
22
+ else:
23
+ import _posixshmem
24
+ _USE_POSIX = True
25
+
26
+ from . import resource_tracker
27
+
28
# O_CREAT | O_EXCL: create the object, failing if it already exists.
_O_CREX = os.O_CREAT | os.O_EXCL

# FreeBSD (and perhaps other BSDs) limit names to 14 characters.
_SHM_SAFE_NAME_LENGTH = 14

# Shared memory block name prefix
if _USE_POSIX:
    _SHM_NAME_PREFIX = '/psm_'
else:
    _SHM_NAME_PREFIX = 'wnsm_'
38
+
39
+
40
def _make_filename():
    "Create a random filename for the shared memory object."
    # token_hex() yields two hex characters per byte, so halve the
    # remaining budget after the platform prefix.
    token_nbytes = (_SHM_SAFE_NAME_LENGTH - len(_SHM_NAME_PREFIX)) // 2
    assert token_nbytes >= 2, '_SHM_NAME_PREFIX too long'
    candidate = _SHM_NAME_PREFIX + secrets.token_hex(token_nbytes)
    assert len(candidate) <= _SHM_SAFE_NAME_LENGTH
    return candidate
48
+
49
+
50
class SharedMemory:
    """Creates a new shared memory block or attaches to an existing
    shared memory block.

    Every shared memory block is assigned a unique name.  This enables
    one process to create a shared memory block with a particular name
    so that a different process can attach to that same shared memory
    block using that same name.

    As a resource for sharing data across processes, shared memory blocks
    may outlive the original process that created them.  When one process
    no longer needs access to a shared memory block that might still be
    needed by other processes, the close() method should be called.
    When a shared memory block is no longer needed by any process, the
    unlink() method should be called to ensure proper cleanup."""

    # Defaults; enables close() and unlink() to run without errors.
    _name = None
    _fd = -1
    _mmap = None
    _buf = None
    _flags = os.O_RDWR
    _mode = 0o600
    # POSIX shm names carry a leading "/" internally; it is stripped again
    # in the public `name` property.
    _prepend_leading_slash = True if _USE_POSIX else False

    def __init__(self, name=None, create=False, size=0):
        if not size >= 0:
            raise ValueError("'size' must be a positive integer")
        if create:
            self._flags = _O_CREX | os.O_RDWR
            if size == 0:
                raise ValueError("'size' must be a positive number different from zero")
        if name is None and not self._flags & os.O_EXCL:
            raise ValueError("'name' can only be None if create=True")

        if _USE_POSIX:

            # POSIX Shared Memory

            if name is None:
                # Keep generating random names until one is free; O_EXCL in
                # self._flags makes shm_open fail on collision.
                while True:
                    name = _make_filename()
                    try:
                        self._fd = _posixshmem.shm_open(
                            name,
                            self._flags,
                            mode=self._mode
                        )
                    except FileExistsError:
                        continue
                    self._name = name
                    break
            else:
                name = "/" + name if self._prepend_leading_slash else name
                self._fd = _posixshmem.shm_open(
                    name,
                    self._flags,
                    mode=self._mode
                )
                self._name = name
            try:
                if create and size:
                    os.ftruncate(self._fd, size)
                # Attach using the actual on-disk size, which may exceed the
                # requested size when attaching to an existing block.
                stats = os.fstat(self._fd)
                size = stats.st_size
                self._mmap = mmap.mmap(self._fd, size)
            except OSError:
                self.unlink()
                raise

            resource_tracker.register(self._name, "shared_memory")

        else:

            # Windows Named Shared Memory

            if create:
                while True:
                    temp_name = _make_filename() if name is None else name
                    # Create and reserve shared memory block with this name
                    # until it can be attached to by mmap.
                    h_map = _winapi.CreateFileMapping(
                        _winapi.INVALID_HANDLE_VALUE,
                        _winapi.NULL,
                        _winapi.PAGE_READWRITE,
                        (size >> 32) & 0xFFFFFFFF,
                        size & 0xFFFFFFFF,
                        temp_name
                    )
                    try:
                        last_error_code = _winapi.GetLastError()
                        if last_error_code == _winapi.ERROR_ALREADY_EXISTS:
                            if name is not None:
                                # Explicitly-named block already exists.
                                raise FileExistsError(
                                    errno.EEXIST,
                                    os.strerror(errno.EEXIST),
                                    name,
                                    _winapi.ERROR_ALREADY_EXISTS
                                )
                            else:
                                # Random-name collision; retry with a new one.
                                continue
                        self._mmap = mmap.mmap(-1, size, tagname=temp_name)
                    finally:
                        _winapi.CloseHandle(h_map)
                    self._name = temp_name
                    break

            else:
                self._name = name
                # Dynamically determine the existing named shared memory
                # block's size which is likely a multiple of mmap.PAGESIZE.
                h_map = _winapi.OpenFileMapping(
                    _winapi.FILE_MAP_READ,
                    False,
                    name
                )
                try:
                    p_buf = _winapi.MapViewOfFile(
                        h_map,
                        _winapi.FILE_MAP_READ,
                        0,
                        0,
                        0
                    )
                finally:
                    _winapi.CloseHandle(h_map)
                try:
                    size = _winapi.VirtualQuerySize(p_buf)
                finally:
                    _winapi.UnmapViewOfFile(p_buf)
                self._mmap = mmap.mmap(-1, size, tagname=name)

        self._size = size
        self._buf = memoryview(self._mmap)

    def __del__(self):
        # Best-effort release of this process's handles at GC time; the
        # block itself is only destroyed by an explicit unlink().
        try:
            self.close()
        except OSError:
            pass

    def __reduce__(self):
        # Unpickling re-attaches to the existing block (create=False).
        return (
            self.__class__,
            (
                self.name,
                False,
                self.size,
            ),
        )

    def __repr__(self):
        return f'{self.__class__.__name__}({self.name!r}, size={self.size})'

    @property
    def buf(self):
        "A memoryview of contents of the shared memory block."
        return self._buf

    @property
    def name(self):
        "Unique name that identifies the shared memory block."
        reported_name = self._name
        # Hide the internal leading "/" used by POSIX shm_open().
        if _USE_POSIX and self._prepend_leading_slash:
            if self._name.startswith("/"):
                reported_name = self._name[1:]
        return reported_name

    @property
    def size(self):
        "Size in bytes."
        return self._size

    def close(self):
        """Closes access to the shared memory from this instance but does
        not destroy the shared memory block."""
        # Release in dependency order: the memoryview must go before the
        # mmap it views, and the fd can be closed last.
        if self._buf is not None:
            self._buf.release()
            self._buf = None
        if self._mmap is not None:
            self._mmap.close()
            self._mmap = None
        if _USE_POSIX and self._fd >= 0:
            os.close(self._fd)
            self._fd = -1

    def unlink(self):
        """Requests that the underlying shared memory block be destroyed.

        In order to ensure proper cleanup of resources, unlink should be
        called once (and only once) across all processes which have access
        to the shared memory block."""
        # No-op on Windows: blocks vanish when the last handle closes.
        if _USE_POSIX and self._name:
            _posixshmem.shm_unlink(self._name)
            resource_tracker.unregister(self._name, "shared_memory")
245
+
246
+
247
# Text encoding used by ShareableList for str items and for the stored
# per-item packing-format metadata.
_encoding = "utf8"
248
+
249
class ShareableList:
    """Pattern for a mutable list-like object shareable via a shared
    memory block.  It differs from the built-in list type in that these
    lists can not change their overall length (i.e. no append, insert,
    etc.)

    Because values are packed into a memoryview as bytes, the struct
    packing format for any storable value must require no more than 8
    characters to describe its format."""

    # The shared memory area is organized as follows:
    # - 8 bytes: number of items (N) as a 64-bit integer
    # - (N + 1) * 8 bytes: offsets of each element from the start of the
    #   data area
    # - K bytes: the data area storing item values (with encoding and size
    #   depending on their respective types)
    # - N * 8 bytes: `struct` format string for each element
    # - N bytes: index into _back_transforms_mapping for each element
    #   (for reconstructing the corresponding Python value)

    # struct format per storable type; "%ds" entries are filled in with an
    # 8-byte-aligned length at construction time.  The "xxxxxxx?" / "xxxxxx?x"
    # pad formats keep every fixed-size item exactly 8 bytes wide.
    _types_mapping = {
        int: "q",
        float: "d",
        bool: "xxxxxxx?",
        str: "%ds",
        bytes: "%ds",
        None.__class__: "xxxxxx?x",
    }
    _alignment = 8
    # Maps the stored recreation code back to a function that converts the
    # raw unpacked value into the original Python object.
    _back_transforms_mapping = {
        0: lambda value: value,                   # int, float, bool
        1: lambda value: value.rstrip(b'\x00').decode(_encoding),  # str
        2: lambda value: value.rstrip(b'\x00'),   # bytes
        3: lambda _value: None,                   # None
    }

    @staticmethod
    def _extract_recreation_code(value):
        """Used in concert with _back_transforms_mapping to convert values
        into the appropriate Python objects when retrieving them from
        the list as well as when storing them."""
        if not isinstance(value, (str, bytes, None.__class__)):
            return 0
        elif isinstance(value, str):
            return 1
        elif isinstance(value, bytes):
            return 2
        else:
            return 3  # NoneType

    def __init__(self, sequence=None, *, name=None):
        if name is None or sequence is not None:
            # Creating a new block (possibly with an explicit name).
            sequence = sequence or ()
            _formats = [
                self._types_mapping[type(item)]
                    if not isinstance(item, (str, bytes))
                    else self._types_mapping[type(item)] % (
                        self._alignment * (len(item) // self._alignment + 1),
                    )
                for item in sequence
            ]
            self._list_len = len(_formats)
            assert sum(len(fmt) <= 8 for fmt in _formats) == self._list_len
            offset = 0
            # The offsets of each list element into the shared memory's
            # data area (0 meaning the start of the data area, not the start
            # of the shared memory area).
            self._allocated_offsets = [0]
            for fmt in _formats:
                offset += self._alignment if fmt[-1] != "s" else int(fmt[:-1])
                self._allocated_offsets.append(offset)
            _recreation_codes = [
                self._extract_recreation_code(item) for item in sequence
            ]
            # Total size = length word + offsets + data + formats + codes.
            requested_size = struct.calcsize(
                "q" + self._format_size_metainfo +
                "".join(_formats) +
                self._format_packing_metainfo +
                self._format_back_transform_codes
            )

            self.shm = SharedMemory(name, create=True, size=requested_size)
        else:
            # Attaching to an existing block by name.
            self.shm = SharedMemory(name)

        if sequence is not None:
            _enc = _encoding
            # Write length + element offsets at the front of the block.
            struct.pack_into(
                "q" + self._format_size_metainfo,
                self.shm.buf,
                0,
                self._list_len,
                *(self._allocated_offsets)
            )
            # Write the item values into the data area.
            struct.pack_into(
                "".join(_formats),
                self.shm.buf,
                self._offset_data_start,
                *(v.encode(_enc) if isinstance(v, str) else v for v in sequence)
            )
            # Write per-item packing formats (8 bytes each).
            struct.pack_into(
                self._format_packing_metainfo,
                self.shm.buf,
                self._offset_packing_formats,
                *(v.encode(_enc) for v in _formats)
            )
            # Write per-item back-transform codes (1 byte each).
            struct.pack_into(
                self._format_back_transform_codes,
                self.shm.buf,
                self._offset_back_transform_codes,
                *(_recreation_codes)
            )

        else:
            self._list_len = len(self)  # Obtains size from offset 0 in buffer.
            self._allocated_offsets = list(
                struct.unpack_from(
                    self._format_size_metainfo,
                    self.shm.buf,
                    1 * 8
                )
            )

    def _get_packing_format(self, position):
        "Gets the packing format for a single value stored in the list."
        position = position if position >= 0 else position + self._list_len
        if (position >= self._list_len) or (self._list_len < 0):
            raise IndexError("Requested position out of range.")

        v = struct.unpack_from(
            "8s",
            self.shm.buf,
            self._offset_packing_formats + position * 8
        )[0]
        # Formats are NUL-padded to 8 bytes in storage.
        fmt = v.rstrip(b'\x00')
        fmt_as_str = fmt.decode(_encoding)

        return fmt_as_str

    def _get_back_transform(self, position):
        "Gets the back transformation function for a single value."

        if (position >= self._list_len) or (self._list_len < 0):
            raise IndexError("Requested position out of range.")

        transform_code = struct.unpack_from(
            "b",
            self.shm.buf,
            self._offset_back_transform_codes + position
        )[0]
        transform_function = self._back_transforms_mapping[transform_code]

        return transform_function

    def _set_packing_format_and_transform(self, position, fmt_as_str, value):
        """Sets the packing format and back transformation code for a
        single value in the list at the specified position."""

        if (position >= self._list_len) or (self._list_len < 0):
            raise IndexError("Requested position out of range.")

        struct.pack_into(
            "8s",
            self.shm.buf,
            self._offset_packing_formats + position * 8,
            fmt_as_str.encode(_encoding)
        )

        transform_code = self._extract_recreation_code(value)
        struct.pack_into(
            "b",
            self.shm.buf,
            self._offset_back_transform_codes + position,
            transform_code
        )

    def __getitem__(self, position):
        # Normalize negative indices; out-of-range access surfaces as
        # IndexError from the offsets lookup below.
        position = position if position >= 0 else position + self._list_len
        try:
            offset = self._offset_data_start + self._allocated_offsets[position]
            (v,) = struct.unpack_from(
                self._get_packing_format(position),
                self.shm.buf,
                offset
            )
        except IndexError:
            raise IndexError("index out of range")

        back_transform = self._get_back_transform(position)
        v = back_transform(v)

        return v

    def __setitem__(self, position, value):
        position = position if position >= 0 else position + self._list_len
        try:
            item_offset = self._allocated_offsets[position]
            offset = self._offset_data_start + item_offset
            current_format = self._get_packing_format(position)
        except IndexError:
            raise IndexError("assignment index out of range")

        if not isinstance(value, (str, bytes)):
            new_format = self._types_mapping[type(value)]
            encoded_value = value
        else:
            # str/bytes must fit in the space allocated at creation time.
            allocated_length = self._allocated_offsets[position + 1] - item_offset

            encoded_value = (value.encode(_encoding)
                             if isinstance(value, str) else value)
            if len(encoded_value) > allocated_length:
                raise ValueError("bytes/str item exceeds available storage")
            if current_format[-1] == "s":
                new_format = current_format
            else:
                # Slot previously held a fixed-size value; switch to a
                # string format spanning the whole allocation.
                new_format = self._types_mapping[str] % (
                    allocated_length,
                )

        self._set_packing_format_and_transform(
            position,
            new_format,
            value
        )
        struct.pack_into(new_format, self.shm.buf, offset, encoded_value)

    def __reduce__(self):
        # Unpickling re-attaches by name to the same backing block.
        return partial(self.__class__, name=self.shm.name), ()

    def __len__(self):
        return struct.unpack_from("q", self.shm.buf, 0)[0]

    def __repr__(self):
        return f'{self.__class__.__name__}({list(self)}, name={self.shm.name!r})'

    @property
    def format(self):
        "The struct packing format used by all currently stored items."
        return "".join(
            self._get_packing_format(i) for i in range(self._list_len)
        )

    @property
    def _format_size_metainfo(self):
        "The struct packing format used for the items' storage offsets."
        return "q" * (self._list_len + 1)

    @property
    def _format_packing_metainfo(self):
        "The struct packing format used for the items' packing formats."
        return "8s" * self._list_len

    @property
    def _format_back_transform_codes(self):
        "The struct packing format used for the items' back transforms."
        return "b" * self._list_len

    @property
    def _offset_data_start(self):
        # - 8 bytes for the list length
        # - (N + 1) * 8 bytes for the element offsets
        return (self._list_len + 2) * 8

    @property
    def _offset_packing_formats(self):
        # Formats start right after the data area ends.
        return self._offset_data_start + self._allocated_offsets[-1]

    @property
    def _offset_back_transform_codes(self):
        return self._offset_packing_formats + self._list_len * 8

    def count(self, value):
        "L.count(value) -> integer -- return number of occurrences of value."

        return sum(value == entry for entry in self)

    def index(self, value):
        """L.index(value) -> integer -- return first index of value.
        Raises ValueError if the value is not present."""

        for position, entry in enumerate(self):
            if value == entry:
                return position
        else:
            raise ValueError(f"{value!r} not in this container")

    # Support ShareableList[int]-style parameterized aliases.
    __class_getitem__ = classmethod(types.GenericAlias)
llmeval-env/lib/python3.10/site-packages/numpy-1.26.4.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
 
 
1
+ pip
llmeval-env/lib/python3.10/site-packages/numpy-1.26.4.dist-info/LICENSE.txt ADDED
@@ -0,0 +1,971 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Copyright (c) 2005-2023, NumPy Developers.
2
+ All rights reserved.
3
+
4
+ Redistribution and use in source and binary forms, with or without
5
+ modification, are permitted provided that the following conditions are
6
+ met:
7
+
8
+ * Redistributions of source code must retain the above copyright
9
+ notice, this list of conditions and the following disclaimer.
10
+
11
+ * Redistributions in binary form must reproduce the above
12
+ copyright notice, this list of conditions and the following
13
+ disclaimer in the documentation and/or other materials provided
14
+ with the distribution.
15
+
16
+ * Neither the name of the NumPy Developers nor the names of any
17
+ contributors may be used to endorse or promote products derived
18
+ from this software without specific prior written permission.
19
+
20
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31
+
32
+ ----
33
+
34
+ The NumPy repository and source distributions bundle several libraries that are
35
+ compatibly licensed. We list these here.
36
+
37
+ Name: lapack-lite
38
+ Files: numpy/linalg/lapack_lite/*
39
+ License: BSD-3-Clause
40
+ For details, see numpy/linalg/lapack_lite/LICENSE.txt
41
+
42
+ Name: tempita
43
+ Files: tools/npy_tempita/*
44
+ License: MIT
45
+ For details, see tools/npy_tempita/license.txt
46
+
47
+ Name: dragon4
48
+ Files: numpy/core/src/multiarray/dragon4.c
49
+ License: MIT
50
+ For license text, see numpy/core/src/multiarray/dragon4.c
51
+
52
+ Name: libdivide
53
+ Files: numpy/core/include/numpy/libdivide/*
54
+ License: Zlib
55
+ For license text, see numpy/core/include/numpy/libdivide/LICENSE.txt
56
+
57
+
58
+ Note that the following files are vendored in the repository and sdist but not
59
+ installed in built numpy packages:
60
+
61
+ Name: Meson
62
+ Files: vendored-meson/meson/*
63
+ License: Apache 2.0
64
+ For license text, see vendored-meson/meson/COPYING
65
+
66
+ Name: spin
67
+ Files: .spin/cmds.py
68
+ License: BSD-3
69
+ For license text, see .spin/LICENSE
70
+
71
+ ----
72
+
73
+ This binary distribution of NumPy also bundles the following software:
74
+
75
+
76
+ Name: OpenBLAS
77
+ Files: numpy.libs/libopenblas*.so
78
+ Description: bundled as a dynamically linked library
79
+ Availability: https://github.com/OpenMathLib/OpenBLAS/
80
+ License: BSD-3-Clause
81
+ Copyright (c) 2011-2014, The OpenBLAS Project
82
+ All rights reserved.
83
+
84
+ Redistribution and use in source and binary forms, with or without
85
+ modification, are permitted provided that the following conditions are
86
+ met:
87
+
88
+ 1. Redistributions of source code must retain the above copyright
89
+ notice, this list of conditions and the following disclaimer.
90
+
91
+ 2. Redistributions in binary form must reproduce the above copyright
92
+ notice, this list of conditions and the following disclaimer in
93
+ the documentation and/or other materials provided with the
94
+ distribution.
95
+ 3. Neither the name of the OpenBLAS project nor the names of
96
+ its contributors may be used to endorse or promote products
97
+ derived from this software without specific prior written
98
+ permission.
99
+
100
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
101
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
102
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
103
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
104
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
105
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
106
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
107
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
108
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
109
+ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
110
+
111
+
112
+ Name: LAPACK
113
+ Files: numpy.libs/libopenblas*.so
114
+ Description: bundled in OpenBLAS
115
+ Availability: https://github.com/OpenMathLib/OpenBLAS/
116
+ License: BSD-3-Clause-Attribution
117
+ Copyright (c) 1992-2013 The University of Tennessee and The University
118
+ of Tennessee Research Foundation. All rights
119
+ reserved.
120
+ Copyright (c) 2000-2013 The University of California Berkeley. All
121
+ rights reserved.
122
+ Copyright (c) 2006-2013 The University of Colorado Denver. All rights
123
+ reserved.
124
+
125
+ $COPYRIGHT$
126
+
127
+ Additional copyrights may follow
128
+
129
+ $HEADER$
130
+
131
+ Redistribution and use in source and binary forms, with or without
132
+ modification, are permitted provided that the following conditions are
133
+ met:
134
+
135
+ - Redistributions of source code must retain the above copyright
136
+ notice, this list of conditions and the following disclaimer.
137
+
138
+ - Redistributions in binary form must reproduce the above copyright
139
+ notice, this list of conditions and the following disclaimer listed
140
+ in this license in the documentation and/or other materials
141
+ provided with the distribution.
142
+
143
+ - Neither the name of the copyright holders nor the names of its
144
+ contributors may be used to endorse or promote products derived from
145
+ this software without specific prior written permission.
146
+
147
+ The copyright holders provide no reassurances that the source code
148
+ provided does not infringe any patent, copyright, or any other
149
+ intellectual property rights of third parties. The copyright holders
150
+ disclaim any liability to any recipient for claims brought against
151
+ recipient by any third party for infringement of that parties
152
+ intellectual property rights.
153
+
154
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
155
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
156
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
157
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
158
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
159
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
160
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
161
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
162
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
163
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
164
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
165
+
166
+
167
+ Name: GCC runtime library
168
+ Files: numpy.libs/libgfortran*.so
169
+ Description: dynamically linked to files compiled with gcc
170
+ Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libgfortran
171
+ License: GPL-3.0-with-GCC-exception
172
+ Copyright (C) 2002-2017 Free Software Foundation, Inc.
173
+
174
+ Libgfortran is free software; you can redistribute it and/or modify
175
+ it under the terms of the GNU General Public License as published by
176
+ the Free Software Foundation; either version 3, or (at your option)
177
+ any later version.
178
+
179
+ Libgfortran is distributed in the hope that it will be useful,
180
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
181
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
182
+ GNU General Public License for more details.
183
+
184
+ Under Section 7 of GPL version 3, you are granted additional
185
+ permissions described in the GCC Runtime Library Exception, version
186
+ 3.1, as published by the Free Software Foundation.
187
+
188
+ You should have received a copy of the GNU General Public License and
189
+ a copy of the GCC Runtime Library Exception along with this program;
190
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
191
+ <http://www.gnu.org/licenses/>.
192
+
193
+ ----
194
+
195
+ Full text of license texts referred to above follows (that they are
196
+ listed below does not necessarily imply the conditions apply to the
197
+ present binary release):
198
+
199
+ ----
200
+
201
+ GCC RUNTIME LIBRARY EXCEPTION
202
+
203
+ Version 3.1, 31 March 2009
204
+
205
+ Copyright (C) 2009 Free Software Foundation, Inc. <http://fsf.org/>
206
+
207
+ Everyone is permitted to copy and distribute verbatim copies of this
208
+ license document, but changing it is not allowed.
209
+
210
+ This GCC Runtime Library Exception ("Exception") is an additional
211
+ permission under section 7 of the GNU General Public License, version
212
+ 3 ("GPLv3"). It applies to a given file (the "Runtime Library") that
213
+ bears a notice placed by the copyright holder of the file stating that
214
+ the file is governed by GPLv3 along with this Exception.
215
+
216
+ When you use GCC to compile a program, GCC may combine portions of
217
+ certain GCC header files and runtime libraries with the compiled
218
+ program. The purpose of this Exception is to allow compilation of
219
+ non-GPL (including proprietary) programs to use, in this way, the
220
+ header files and runtime libraries covered by this Exception.
221
+
222
+ 0. Definitions.
223
+
224
+ A file is an "Independent Module" if it either requires the Runtime
225
+ Library for execution after a Compilation Process, or makes use of an
226
+ interface provided by the Runtime Library, but is not otherwise based
227
+ on the Runtime Library.
228
+
229
+ "GCC" means a version of the GNU Compiler Collection, with or without
230
+ modifications, governed by version 3 (or a specified later version) of
231
+ the GNU General Public License (GPL) with the option of using any
232
+ subsequent versions published by the FSF.
233
+
234
+ "GPL-compatible Software" is software whose conditions of propagation,
235
+ modification and use would permit combination with GCC in accord with
236
+ the license of GCC.
237
+
238
+ "Target Code" refers to output from any compiler for a real or virtual
239
+ target processor architecture, in executable form or suitable for
240
+ input to an assembler, loader, linker and/or execution
241
+ phase. Notwithstanding that, Target Code does not include data in any
242
+ format that is used as a compiler intermediate representation, or used
243
+ for producing a compiler intermediate representation.
244
+
245
+ The "Compilation Process" transforms code entirely represented in
246
+ non-intermediate languages designed for human-written code, and/or in
247
+ Java Virtual Machine byte code, into Target Code. Thus, for example,
248
+ use of source code generators and preprocessors need not be considered
249
+ part of the Compilation Process, since the Compilation Process can be
250
+ understood as starting with the output of the generators or
251
+ preprocessors.
252
+
253
+ A Compilation Process is "Eligible" if it is done using GCC, alone or
254
+ with other GPL-compatible software, or if it is done without using any
255
+ work based on GCC. For example, using non-GPL-compatible Software to
256
+ optimize any GCC intermediate representations would not qualify as an
257
+ Eligible Compilation Process.
258
+
259
+ 1. Grant of Additional Permission.
260
+
261
+ You have permission to propagate a work of Target Code formed by
262
+ combining the Runtime Library with Independent Modules, even if such
263
+ propagation would otherwise violate the terms of GPLv3, provided that
264
+ all Target Code was generated by Eligible Compilation Processes. You
265
+ may then convey such a combination under terms of your choice,
266
+ consistent with the licensing of the Independent Modules.
267
+
268
+ 2. No Weakening of GCC Copyleft.
269
+
270
+ The availability of this Exception does not imply any general
271
+ presumption that third-party software is unaffected by the copyleft
272
+ requirements of the license of GCC.
273
+
274
+ ----
275
+
276
+ GNU GENERAL PUBLIC LICENSE
277
+ Version 3, 29 June 2007
278
+
279
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
280
+ Everyone is permitted to copy and distribute verbatim copies
281
+ of this license document, but changing it is not allowed.
282
+
283
+ Preamble
284
+
285
+ The GNU General Public License is a free, copyleft license for
286
+ software and other kinds of works.
287
+
288
+ The licenses for most software and other practical works are designed
289
+ to take away your freedom to share and change the works. By contrast,
290
+ the GNU General Public License is intended to guarantee your freedom to
291
+ share and change all versions of a program--to make sure it remains free
292
+ software for all its users. We, the Free Software Foundation, use the
293
+ GNU General Public License for most of our software; it applies also to
294
+ any other work released this way by its authors. You can apply it to
295
+ your programs, too.
296
+
297
+ When we speak of free software, we are referring to freedom, not
298
+ price. Our General Public Licenses are designed to make sure that you
299
+ have the freedom to distribute copies of free software (and charge for
300
+ them if you wish), that you receive source code or can get it if you
301
+ want it, that you can change the software or use pieces of it in new
302
+ free programs, and that you know you can do these things.
303
+
304
+ To protect your rights, we need to prevent others from denying you
305
+ these rights or asking you to surrender the rights. Therefore, you have
306
+ certain responsibilities if you distribute copies of the software, or if
307
+ you modify it: responsibilities to respect the freedom of others.
308
+
309
+ For example, if you distribute copies of such a program, whether
310
+ gratis or for a fee, you must pass on to the recipients the same
311
+ freedoms that you received. You must make sure that they, too, receive
312
+ or can get the source code. And you must show them these terms so they
313
+ know their rights.
314
+
315
+ Developers that use the GNU GPL protect your rights with two steps:
316
+ (1) assert copyright on the software, and (2) offer you this License
317
+ giving you legal permission to copy, distribute and/or modify it.
318
+
319
+ For the developers' and authors' protection, the GPL clearly explains
320
+ that there is no warranty for this free software. For both users' and
321
+ authors' sake, the GPL requires that modified versions be marked as
322
+ changed, so that their problems will not be attributed erroneously to
323
+ authors of previous versions.
324
+
325
+ Some devices are designed to deny users access to install or run
326
+ modified versions of the software inside them, although the manufacturer
327
+ can do so. This is fundamentally incompatible with the aim of
328
+ protecting users' freedom to change the software. The systematic
329
+ pattern of such abuse occurs in the area of products for individuals to
330
+ use, which is precisely where it is most unacceptable. Therefore, we
331
+ have designed this version of the GPL to prohibit the practice for those
332
+ products. If such problems arise substantially in other domains, we
333
+ stand ready to extend this provision to those domains in future versions
334
+ of the GPL, as needed to protect the freedom of users.
335
+
336
+ Finally, every program is threatened constantly by software patents.
337
+ States should not allow patents to restrict development and use of
338
+ software on general-purpose computers, but in those that do, we wish to
339
+ avoid the special danger that patents applied to a free program could
340
+ make it effectively proprietary. To prevent this, the GPL assures that
341
+ patents cannot be used to render the program non-free.
342
+
343
+ The precise terms and conditions for copying, distribution and
344
+ modification follow.
345
+
346
+ TERMS AND CONDITIONS
347
+
348
+ 0. Definitions.
349
+
350
+ "This License" refers to version 3 of the GNU General Public License.
351
+
352
+ "Copyright" also means copyright-like laws that apply to other kinds of
353
+ works, such as semiconductor masks.
354
+
355
+ "The Program" refers to any copyrightable work licensed under this
356
+ License. Each licensee is addressed as "you". "Licensees" and
357
+ "recipients" may be individuals or organizations.
358
+
359
+ To "modify" a work means to copy from or adapt all or part of the work
360
+ in a fashion requiring copyright permission, other than the making of an
361
+ exact copy. The resulting work is called a "modified version" of the
362
+ earlier work or a work "based on" the earlier work.
363
+
364
+ A "covered work" means either the unmodified Program or a work based
365
+ on the Program.
366
+
367
+ To "propagate" a work means to do anything with it that, without
368
+ permission, would make you directly or secondarily liable for
369
+ infringement under applicable copyright law, except executing it on a
370
+ computer or modifying a private copy. Propagation includes copying,
371
+ distribution (with or without modification), making available to the
372
+ public, and in some countries other activities as well.
373
+
374
+ To "convey" a work means any kind of propagation that enables other
375
+ parties to make or receive copies. Mere interaction with a user through
376
+ a computer network, with no transfer of a copy, is not conveying.
377
+
378
+ An interactive user interface displays "Appropriate Legal Notices"
379
+ to the extent that it includes a convenient and prominently visible
380
+ feature that (1) displays an appropriate copyright notice, and (2)
381
+ tells the user that there is no warranty for the work (except to the
382
+ extent that warranties are provided), that licensees may convey the
383
+ work under this License, and how to view a copy of this License. If
384
+ the interface presents a list of user commands or options, such as a
385
+ menu, a prominent item in the list meets this criterion.
386
+
387
+ 1. Source Code.
388
+
389
+ The "source code" for a work means the preferred form of the work
390
+ for making modifications to it. "Object code" means any non-source
391
+ form of a work.
392
+
393
+ A "Standard Interface" means an interface that either is an official
394
+ standard defined by a recognized standards body, or, in the case of
395
+ interfaces specified for a particular programming language, one that
396
+ is widely used among developers working in that language.
397
+
398
+ The "System Libraries" of an executable work include anything, other
399
+ than the work as a whole, that (a) is included in the normal form of
400
+ packaging a Major Component, but which is not part of that Major
401
+ Component, and (b) serves only to enable use of the work with that
402
+ Major Component, or to implement a Standard Interface for which an
403
+ implementation is available to the public in source code form. A
404
+ "Major Component", in this context, means a major essential component
405
+ (kernel, window system, and so on) of the specific operating system
406
+ (if any) on which the executable work runs, or a compiler used to
407
+ produce the work, or an object code interpreter used to run it.
408
+
409
+ The "Corresponding Source" for a work in object code form means all
410
+ the source code needed to generate, install, and (for an executable
411
+ work) run the object code and to modify the work, including scripts to
412
+ control those activities. However, it does not include the work's
413
+ System Libraries, or general-purpose tools or generally available free
414
+ programs which are used unmodified in performing those activities but
415
+ which are not part of the work. For example, Corresponding Source
416
+ includes interface definition files associated with source files for
417
+ the work, and the source code for shared libraries and dynamically
418
+ linked subprograms that the work is specifically designed to require,
419
+ such as by intimate data communication or control flow between those
420
+ subprograms and other parts of the work.
421
+
422
+ The Corresponding Source need not include anything that users
423
+ can regenerate automatically from other parts of the Corresponding
424
+ Source.
425
+
426
+ The Corresponding Source for a work in source code form is that
427
+ same work.
428
+
429
+ 2. Basic Permissions.
430
+
431
+ All rights granted under this License are granted for the term of
432
+ copyright on the Program, and are irrevocable provided the stated
433
+ conditions are met. This License explicitly affirms your unlimited
434
+ permission to run the unmodified Program. The output from running a
435
+ covered work is covered by this License only if the output, given its
436
+ content, constitutes a covered work. This License acknowledges your
437
+ rights of fair use or other equivalent, as provided by copyright law.
438
+
439
+ You may make, run and propagate covered works that you do not
440
+ convey, without conditions so long as your license otherwise remains
441
+ in force. You may convey covered works to others for the sole purpose
442
+ of having them make modifications exclusively for you, or provide you
443
+ with facilities for running those works, provided that you comply with
444
+ the terms of this License in conveying all material for which you do
445
+ not control copyright. Those thus making or running the covered works
446
+ for you must do so exclusively on your behalf, under your direction
447
+ and control, on terms that prohibit them from making any copies of
448
+ your copyrighted material outside their relationship with you.
449
+
450
+ Conveying under any other circumstances is permitted solely under
451
+ the conditions stated below. Sublicensing is not allowed; section 10
452
+ makes it unnecessary.
453
+
454
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
455
+
456
+ No covered work shall be deemed part of an effective technological
457
+ measure under any applicable law fulfilling obligations under article
458
+ 11 of the WIPO copyright treaty adopted on 20 December 1996, or
459
+ similar laws prohibiting or restricting circumvention of such
460
+ measures.
461
+
462
+ When you convey a covered work, you waive any legal power to forbid
463
+ circumvention of technological measures to the extent such circumvention
464
+ is effected by exercising rights under this License with respect to
465
+ the covered work, and you disclaim any intention to limit operation or
466
+ modification of the work as a means of enforcing, against the work's
467
+ users, your or third parties' legal rights to forbid circumvention of
468
+ technological measures.
469
+
470
+ 4. Conveying Verbatim Copies.
471
+
472
+ You may convey verbatim copies of the Program's source code as you
473
+ receive it, in any medium, provided that you conspicuously and
474
+ appropriately publish on each copy an appropriate copyright notice;
475
+ keep intact all notices stating that this License and any
476
+ non-permissive terms added in accord with section 7 apply to the code;
477
+ keep intact all notices of the absence of any warranty; and give all
478
+ recipients a copy of this License along with the Program.
479
+
480
+ You may charge any price or no price for each copy that you convey,
481
+ and you may offer support or warranty protection for a fee.
482
+
483
+ 5. Conveying Modified Source Versions.
484
+
485
+ You may convey a work based on the Program, or the modifications to
486
+ produce it from the Program, in the form of source code under the
487
+ terms of section 4, provided that you also meet all of these conditions:
488
+
489
+ a) The work must carry prominent notices stating that you modified
490
+ it, and giving a relevant date.
491
+
492
+ b) The work must carry prominent notices stating that it is
493
+ released under this License and any conditions added under section
494
+ 7. This requirement modifies the requirement in section 4 to
495
+ "keep intact all notices".
496
+
497
+ c) You must license the entire work, as a whole, under this
498
+ License to anyone who comes into possession of a copy. This
499
+ License will therefore apply, along with any applicable section 7
500
+ additional terms, to the whole of the work, and all its parts,
501
+ regardless of how they are packaged. This License gives no
502
+ permission to license the work in any other way, but it does not
503
+ invalidate such permission if you have separately received it.
504
+
505
+ d) If the work has interactive user interfaces, each must display
506
+ Appropriate Legal Notices; however, if the Program has interactive
507
+ interfaces that do not display Appropriate Legal Notices, your
508
+ work need not make them do so.
509
+
510
+ A compilation of a covered work with other separate and independent
511
+ works, which are not by their nature extensions of the covered work,
512
+ and which are not combined with it such as to form a larger program,
513
+ in or on a volume of a storage or distribution medium, is called an
514
+ "aggregate" if the compilation and its resulting copyright are not
515
+ used to limit the access or legal rights of the compilation's users
516
+ beyond what the individual works permit. Inclusion of a covered work
517
+ in an aggregate does not cause this License to apply to the other
518
+ parts of the aggregate.
519
+
520
+ 6. Conveying Non-Source Forms.
521
+
522
+ You may convey a covered work in object code form under the terms
523
+ of sections 4 and 5, provided that you also convey the
524
+ machine-readable Corresponding Source under the terms of this License,
525
+ in one of these ways:
526
+
527
+ a) Convey the object code in, or embodied in, a physical product
528
+ (including a physical distribution medium), accompanied by the
529
+ Corresponding Source fixed on a durable physical medium
530
+ customarily used for software interchange.
531
+
532
+ b) Convey the object code in, or embodied in, a physical product
533
+ (including a physical distribution medium), accompanied by a
534
+ written offer, valid for at least three years and valid for as
535
+ long as you offer spare parts or customer support for that product
536
+ model, to give anyone who possesses the object code either (1) a
537
+ copy of the Corresponding Source for all the software in the
538
+ product that is covered by this License, on a durable physical
539
+ medium customarily used for software interchange, for a price no
540
+ more than your reasonable cost of physically performing this
541
+ conveying of source, or (2) access to copy the
542
+ Corresponding Source from a network server at no charge.
543
+
544
+ c) Convey individual copies of the object code with a copy of the
545
+ written offer to provide the Corresponding Source. This
546
+ alternative is allowed only occasionally and noncommercially, and
547
+ only if you received the object code with such an offer, in accord
548
+ with subsection 6b.
549
+
550
+ d) Convey the object code by offering access from a designated
551
+ place (gratis or for a charge), and offer equivalent access to the
552
+ Corresponding Source in the same way through the same place at no
553
+ further charge. You need not require recipients to copy the
554
+ Corresponding Source along with the object code. If the place to
555
+ copy the object code is a network server, the Corresponding Source
556
+ may be on a different server (operated by you or a third party)
557
+ that supports equivalent copying facilities, provided you maintain
558
+ clear directions next to the object code saying where to find the
559
+ Corresponding Source. Regardless of what server hosts the
560
+ Corresponding Source, you remain obligated to ensure that it is
561
+ available for as long as needed to satisfy these requirements.
562
+
563
+ e) Convey the object code using peer-to-peer transmission, provided
564
+ you inform other peers where the object code and Corresponding
565
+ Source of the work are being offered to the general public at no
566
+ charge under subsection 6d.
567
+
568
+ A separable portion of the object code, whose source code is excluded
569
+ from the Corresponding Source as a System Library, need not be
570
+ included in conveying the object code work.
571
+
572
+ A "User Product" is either (1) a "consumer product", which means any
573
+ tangible personal property which is normally used for personal, family,
574
+ or household purposes, or (2) anything designed or sold for incorporation
575
+ into a dwelling. In determining whether a product is a consumer product,
576
+ doubtful cases shall be resolved in favor of coverage. For a particular
577
+ product received by a particular user, "normally used" refers to a
578
+ typical or common use of that class of product, regardless of the status
579
+ of the particular user or of the way in which the particular user
580
+ actually uses, or expects or is expected to use, the product. A product
581
+ is a consumer product regardless of whether the product has substantial
582
+ commercial, industrial or non-consumer uses, unless such uses represent
583
+ the only significant mode of use of the product.
584
+
585
+ "Installation Information" for a User Product means any methods,
586
+ procedures, authorization keys, or other information required to install
587
+ and execute modified versions of a covered work in that User Product from
588
+ a modified version of its Corresponding Source. The information must
589
+ suffice to ensure that the continued functioning of the modified object
590
+ code is in no case prevented or interfered with solely because
591
+ modification has been made.
592
+
593
+ If you convey an object code work under this section in, or with, or
594
+ specifically for use in, a User Product, and the conveying occurs as
595
+ part of a transaction in which the right of possession and use of the
596
+ User Product is transferred to the recipient in perpetuity or for a
597
+ fixed term (regardless of how the transaction is characterized), the
598
+ Corresponding Source conveyed under this section must be accompanied
599
+ by the Installation Information. But this requirement does not apply
600
+ if neither you nor any third party retains the ability to install
601
+ modified object code on the User Product (for example, the work has
602
+ been installed in ROM).
603
+
604
+ The requirement to provide Installation Information does not include a
605
+ requirement to continue to provide support service, warranty, or updates
606
+ for a work that has been modified or installed by the recipient, or for
607
+ the User Product in which it has been modified or installed. Access to a
608
+ network may be denied when the modification itself materially and
609
+ adversely affects the operation of the network or violates the rules and
610
+ protocols for communication across the network.
611
+
612
+ Corresponding Source conveyed, and Installation Information provided,
613
+ in accord with this section must be in a format that is publicly
614
+ documented (and with an implementation available to the public in
615
+ source code form), and must require no special password or key for
616
+ unpacking, reading or copying.
617
+
618
+ 7. Additional Terms.
619
+
620
+ "Additional permissions" are terms that supplement the terms of this
621
+ License by making exceptions from one or more of its conditions.
622
+ Additional permissions that are applicable to the entire Program shall
623
+ be treated as though they were included in this License, to the extent
624
+ that they are valid under applicable law. If additional permissions
625
+ apply only to part of the Program, that part may be used separately
626
+ under those permissions, but the entire Program remains governed by
627
+ this License without regard to the additional permissions.
628
+
629
+ When you convey a copy of a covered work, you may at your option
630
+ remove any additional permissions from that copy, or from any part of
631
+ it. (Additional permissions may be written to require their own
632
+ removal in certain cases when you modify the work.) You may place
633
+ additional permissions on material, added by you to a covered work,
634
+ for which you have or can give appropriate copyright permission.
635
+
636
+ Notwithstanding any other provision of this License, for material you
637
+ add to a covered work, you may (if authorized by the copyright holders of
638
+ that material) supplement the terms of this License with terms:
639
+
640
+ a) Disclaiming warranty or limiting liability differently from the
641
+ terms of sections 15 and 16 of this License; or
642
+
643
+ b) Requiring preservation of specified reasonable legal notices or
644
+ author attributions in that material or in the Appropriate Legal
645
+ Notices displayed by works containing it; or
646
+
647
+ c) Prohibiting misrepresentation of the origin of that material, or
648
+ requiring that modified versions of such material be marked in
649
+ reasonable ways as different from the original version; or
650
+
651
+ d) Limiting the use for publicity purposes of names of licensors or
652
+ authors of the material; or
653
+
654
+ e) Declining to grant rights under trademark law for use of some
655
+ trade names, trademarks, or service marks; or
656
+
657
+ f) Requiring indemnification of licensors and authors of that
658
+ material by anyone who conveys the material (or modified versions of
659
+ it) with contractual assumptions of liability to the recipient, for
660
+ any liability that these contractual assumptions directly impose on
661
+ those licensors and authors.
662
+
663
+ All other non-permissive additional terms are considered "further
664
+ restrictions" within the meaning of section 10. If the Program as you
665
+ received it, or any part of it, contains a notice stating that it is
666
+ governed by this License along with a term that is a further
667
+ restriction, you may remove that term. If a license document contains
668
+ a further restriction but permits relicensing or conveying under this
669
+ License, you may add to a covered work material governed by the terms
670
+ of that license document, provided that the further restriction does
671
+ not survive such relicensing or conveying.
672
+
673
+ If you add terms to a covered work in accord with this section, you
674
+ must place, in the relevant source files, a statement of the
675
+ additional terms that apply to those files, or a notice indicating
676
+ where to find the applicable terms.
677
+
678
+ Additional terms, permissive or non-permissive, may be stated in the
679
+ form of a separately written license, or stated as exceptions;
680
+ the above requirements apply either way.
681
+
682
+ 8. Termination.
683
+
684
+ You may not propagate or modify a covered work except as expressly
685
+ provided under this License. Any attempt otherwise to propagate or
686
+ modify it is void, and will automatically terminate your rights under
687
+ this License (including any patent licenses granted under the third
688
+ paragraph of section 11).
689
+
690
+ However, if you cease all violation of this License, then your
691
+ license from a particular copyright holder is reinstated (a)
692
+ provisionally, unless and until the copyright holder explicitly and
693
+ finally terminates your license, and (b) permanently, if the copyright
694
+ holder fails to notify you of the violation by some reasonable means
695
+ prior to 60 days after the cessation.
696
+
697
+ Moreover, your license from a particular copyright holder is
698
+ reinstated permanently if the copyright holder notifies you of the
699
+ violation by some reasonable means, this is the first time you have
700
+ received notice of violation of this License (for any work) from that
701
+ copyright holder, and you cure the violation prior to 30 days after
702
+ your receipt of the notice.
703
+
704
+ Termination of your rights under this section does not terminate the
705
+ licenses of parties who have received copies or rights from you under
706
+ this License. If your rights have been terminated and not permanently
707
+ reinstated, you do not qualify to receive new licenses for the same
708
+ material under section 10.
709
+
710
+ 9. Acceptance Not Required for Having Copies.
711
+
712
+ You are not required to accept this License in order to receive or
713
+ run a copy of the Program. Ancillary propagation of a covered work
714
+ occurring solely as a consequence of using peer-to-peer transmission
715
+ to receive a copy likewise does not require acceptance. However,
716
+ nothing other than this License grants you permission to propagate or
717
+ modify any covered work. These actions infringe copyright if you do
718
+ not accept this License. Therefore, by modifying or propagating a
719
+ covered work, you indicate your acceptance of this License to do so.
720
+
721
+ 10. Automatic Licensing of Downstream Recipients.
722
+
723
+ Each time you convey a covered work, the recipient automatically
724
+ receives a license from the original licensors, to run, modify and
725
+ propagate that work, subject to this License. You are not responsible
726
+ for enforcing compliance by third parties with this License.
727
+
728
+ An "entity transaction" is a transaction transferring control of an
729
+ organization, or substantially all assets of one, or subdividing an
730
+ organization, or merging organizations. If propagation of a covered
731
+ work results from an entity transaction, each party to that
732
+ transaction who receives a copy of the work also receives whatever
733
+ licenses to the work the party's predecessor in interest had or could
734
+ give under the previous paragraph, plus a right to possession of the
735
+ Corresponding Source of the work from the predecessor in interest, if
736
+ the predecessor has it or can get it with reasonable efforts.
737
+
738
+ You may not impose any further restrictions on the exercise of the
739
+ rights granted or affirmed under this License. For example, you may
740
+ not impose a license fee, royalty, or other charge for exercise of
741
+ rights granted under this License, and you may not initiate litigation
742
+ (including a cross-claim or counterclaim in a lawsuit) alleging that
743
+ any patent claim is infringed by making, using, selling, offering for
744
+ sale, or importing the Program or any portion of it.
745
+
746
+ 11. Patents.
747
+
748
+ A "contributor" is a copyright holder who authorizes use under this
749
+ License of the Program or a work on which the Program is based. The
750
+ work thus licensed is called the contributor's "contributor version".
751
+
752
+ A contributor's "essential patent claims" are all patent claims
753
+ owned or controlled by the contributor, whether already acquired or
754
+ hereafter acquired, that would be infringed by some manner, permitted
755
+ by this License, of making, using, or selling its contributor version,
756
+ but do not include claims that would be infringed only as a
757
+ consequence of further modification of the contributor version. For
758
+ purposes of this definition, "control" includes the right to grant
759
+ patent sublicenses in a manner consistent with the requirements of
760
+ this License.
761
+
762
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
763
+ patent license under the contributor's essential patent claims, to
764
+ make, use, sell, offer for sale, import and otherwise run, modify and
765
+ propagate the contents of its contributor version.
766
+
767
+ In the following three paragraphs, a "patent license" is any express
768
+ agreement or commitment, however denominated, not to enforce a patent
769
+ (such as an express permission to practice a patent or covenant not to
770
+ sue for patent infringement). To "grant" such a patent license to a
771
+ party means to make such an agreement or commitment not to enforce a
772
+ patent against the party.
773
+
774
+ If you convey a covered work, knowingly relying on a patent license,
775
+ and the Corresponding Source of the work is not available for anyone
776
+ to copy, free of charge and under the terms of this License, through a
777
+ publicly available network server or other readily accessible means,
778
+ then you must either (1) cause the Corresponding Source to be so
779
+ available, or (2) arrange to deprive yourself of the benefit of the
780
+ patent license for this particular work, or (3) arrange, in a manner
781
+ consistent with the requirements of this License, to extend the patent
782
+ license to downstream recipients. "Knowingly relying" means you have
783
+ actual knowledge that, but for the patent license, your conveying the
784
+ covered work in a country, or your recipient's use of the covered work
785
+ in a country, would infringe one or more identifiable patents in that
786
+ country that you have reason to believe are valid.
787
+
788
+ If, pursuant to or in connection with a single transaction or
789
+ arrangement, you convey, or propagate by procuring conveyance of, a
790
+ covered work, and grant a patent license to some of the parties
791
+ receiving the covered work authorizing them to use, propagate, modify
792
+ or convey a specific copy of the covered work, then the patent license
793
+ you grant is automatically extended to all recipients of the covered
794
+ work and works based on it.
795
+
796
+ A patent license is "discriminatory" if it does not include within
797
+ the scope of its coverage, prohibits the exercise of, or is
798
+ conditioned on the non-exercise of one or more of the rights that are
799
+ specifically granted under this License. You may not convey a covered
800
+ work if you are a party to an arrangement with a third party that is
801
+ in the business of distributing software, under which you make payment
802
+ to the third party based on the extent of your activity of conveying
803
+ the work, and under which the third party grants, to any of the
804
+ parties who would receive the covered work from you, a discriminatory
805
+ patent license (a) in connection with copies of the covered work
806
+ conveyed by you (or copies made from those copies), or (b) primarily
807
+ for and in connection with specific products or compilations that
808
+ contain the covered work, unless you entered into that arrangement,
809
+ or that patent license was granted, prior to 28 March 2007.
810
+
811
+ Nothing in this License shall be construed as excluding or limiting
812
+ any implied license or other defenses to infringement that may
813
+ otherwise be available to you under applicable patent law.
814
+
815
+ 12. No Surrender of Others' Freedom.
816
+
817
+ If conditions are imposed on you (whether by court order, agreement or
818
+ otherwise) that contradict the conditions of this License, they do not
819
+ excuse you from the conditions of this License. If you cannot convey a
820
+ covered work so as to satisfy simultaneously your obligations under this
821
+ License and any other pertinent obligations, then as a consequence you may
822
+ not convey it at all. For example, if you agree to terms that obligate you
823
+ to collect a royalty for further conveying from those to whom you convey
824
+ the Program, the only way you could satisfy both those terms and this
825
+ License would be to refrain entirely from conveying the Program.
826
+
827
+ 13. Use with the GNU Affero General Public License.
828
+
829
+ Notwithstanding any other provision of this License, you have
830
+ permission to link or combine any covered work with a work licensed
831
+ under version 3 of the GNU Affero General Public License into a single
832
+ combined work, and to convey the resulting work. The terms of this
833
+ License will continue to apply to the part which is the covered work,
834
+ but the special requirements of the GNU Affero General Public License,
835
+ section 13, concerning interaction through a network will apply to the
836
+ combination as such.
837
+
838
+ 14. Revised Versions of this License.
839
+
840
+ The Free Software Foundation may publish revised and/or new versions of
841
+ the GNU General Public License from time to time. Such new versions will
842
+ be similar in spirit to the present version, but may differ in detail to
843
+ address new problems or concerns.
844
+
845
+ Each version is given a distinguishing version number. If the
846
+ Program specifies that a certain numbered version of the GNU General
847
+ Public License "or any later version" applies to it, you have the
848
+ option of following the terms and conditions either of that numbered
849
+ version or of any later version published by the Free Software
850
+ Foundation. If the Program does not specify a version number of the
851
+ GNU General Public License, you may choose any version ever published
852
+ by the Free Software Foundation.
853
+
854
+ If the Program specifies that a proxy can decide which future
855
+ versions of the GNU General Public License can be used, that proxy's
856
+ public statement of acceptance of a version permanently authorizes you
857
+ to choose that version for the Program.
858
+
859
+ Later license versions may give you additional or different
860
+ permissions. However, no additional obligations are imposed on any
861
+ author or copyright holder as a result of your choosing to follow a
862
+ later version.
863
+
864
+ 15. Disclaimer of Warranty.
865
+
866
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
867
+ APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
868
+ HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
869
+ OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
870
+ THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
871
+ PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
872
+ IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
873
+ ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
874
+
875
+ 16. Limitation of Liability.
876
+
877
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
878
+ WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
879
+ THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
880
+ GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
881
+ USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
882
+ DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
883
+ PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
884
+ EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
885
+ SUCH DAMAGES.
886
+
887
+ 17. Interpretation of Sections 15 and 16.
888
+
889
+ If the disclaimer of warranty and limitation of liability provided
890
+ above cannot be given local legal effect according to their terms,
891
+ reviewing courts shall apply local law that most closely approximates
892
+ an absolute waiver of all civil liability in connection with the
893
+ Program, unless a warranty or assumption of liability accompanies a
894
+ copy of the Program in return for a fee.
895
+
896
+ END OF TERMS AND CONDITIONS
897
+
898
+ How to Apply These Terms to Your New Programs
899
+
900
+ If you develop a new program, and you want it to be of the greatest
901
+ possible use to the public, the best way to achieve this is to make it
902
+ free software which everyone can redistribute and change under these terms.
903
+
904
+ To do so, attach the following notices to the program. It is safest
905
+ to attach them to the start of each source file to most effectively
906
+ state the exclusion of warranty; and each file should have at least
907
+ the "copyright" line and a pointer to where the full notice is found.
908
+
909
+ <one line to give the program's name and a brief idea of what it does.>
910
+ Copyright (C) <year> <name of author>
911
+
912
+ This program is free software: you can redistribute it and/or modify
913
+ it under the terms of the GNU General Public License as published by
914
+ the Free Software Foundation, either version 3 of the License, or
915
+ (at your option) any later version.
916
+
917
+ This program is distributed in the hope that it will be useful,
918
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
919
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
920
+ GNU General Public License for more details.
921
+
922
+ You should have received a copy of the GNU General Public License
923
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
924
+
925
+ Also add information on how to contact you by electronic and paper mail.
926
+
927
+ If the program does terminal interaction, make it output a short
928
+ notice like this when it starts in an interactive mode:
929
+
930
+ <program> Copyright (C) <year> <name of author>
931
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
932
+ This is free software, and you are welcome to redistribute it
933
+ under certain conditions; type `show c' for details.
934
+
935
+ The hypothetical commands `show w' and `show c' should show the appropriate
936
+ parts of the General Public License. Of course, your program's commands
937
+ might be different; for a GUI interface, you would use an "about box".
938
+
939
+ You should also get your employer (if you work as a programmer) or school,
940
+ if any, to sign a "copyright disclaimer" for the program, if necessary.
941
+ For more information on this, and how to apply and follow the GNU GPL, see
942
+ <http://www.gnu.org/licenses/>.
943
+
944
+ The GNU General Public License does not permit incorporating your program
945
+ into proprietary programs. If your program is a subroutine library, you
946
+ may consider it more useful to permit linking proprietary applications with
947
+ the library. If this is what you want to do, use the GNU Lesser General
948
+ Public License instead of this License. But first, please read
949
+ <http://www.gnu.org/philosophy/why-not-lgpl.html>.
950
+
951
+ Name: libquadmath
952
+ Files: numpy.libs/libquadmath*.so
953
+ Description: dynamically linked to files compiled with gcc
954
+ Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libquadmath
955
+ License: LGPL-2.1-or-later
956
+
957
+ GCC Quad-Precision Math Library
958
+ Copyright (C) 2010-2019 Free Software Foundation, Inc.
959
+ Written by Francois-Xavier Coudert <[email protected]>
960
+
961
+ This file is part of the libquadmath library.
962
+ Libquadmath is free software; you can redistribute it and/or
963
+ modify it under the terms of the GNU Library General Public
964
+ License as published by the Free Software Foundation; either
965
+ version 2.1 of the License, or (at your option) any later version.
966
+
967
+ Libquadmath is distributed in the hope that it will be useful,
968
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
969
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
970
+ Lesser General Public License for more details.
971
+ https://www.gnu.org/licenses/old-licenses/lgpl-2.1.html
llmeval-env/lib/python3.10/site-packages/numpy-1.26.4.dist-info/METADATA ADDED
@@ -0,0 +1,1092 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Metadata-Version: 2.1
2
+ Name: numpy
3
+ Version: 1.26.4
4
+ Summary: Fundamental package for array computing in Python
5
+ Home-page: https://numpy.org
6
+ Author: Travis E. Oliphant et al.
7
+ Maintainer-Email: NumPy Developers <[email protected]>
8
+ License: Copyright (c) 2005-2023, NumPy Developers.
9
+ All rights reserved.
10
+
11
+ Redistribution and use in source and binary forms, with or without
12
+ modification, are permitted provided that the following conditions are
13
+ met:
14
+
15
+ * Redistributions of source code must retain the above copyright
16
+ notice, this list of conditions and the following disclaimer.
17
+
18
+ * Redistributions in binary form must reproduce the above
19
+ copyright notice, this list of conditions and the following
20
+ disclaimer in the documentation and/or other materials provided
21
+ with the distribution.
22
+
23
+ * Neither the name of the NumPy Developers nor the names of any
24
+ contributors may be used to endorse or promote products derived
25
+ from this software without specific prior written permission.
26
+
27
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
30
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
31
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
32
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
33
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
34
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
35
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
36
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
37
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
38
+
39
+ ----
40
+
41
+ The NumPy repository and source distributions bundle several libraries that are
42
+ compatibly licensed. We list these here.
43
+
44
+ Name: lapack-lite
45
+ Files: numpy/linalg/lapack_lite/*
46
+ License: BSD-3-Clause
47
+ For details, see numpy/linalg/lapack_lite/LICENSE.txt
48
+
49
+ Name: tempita
50
+ Files: tools/npy_tempita/*
51
+ License: MIT
52
+ For details, see tools/npy_tempita/license.txt
53
+
54
+ Name: dragon4
55
+ Files: numpy/core/src/multiarray/dragon4.c
56
+ License: MIT
57
+ For license text, see numpy/core/src/multiarray/dragon4.c
58
+
59
+ Name: libdivide
60
+ Files: numpy/core/include/numpy/libdivide/*
61
+ License: Zlib
62
+ For license text, see numpy/core/include/numpy/libdivide/LICENSE.txt
63
+
64
+
65
+ Note that the following files are vendored in the repository and sdist but not
66
+ installed in built numpy packages:
67
+
68
+ Name: Meson
69
+ Files: vendored-meson/meson/*
70
+ License: Apache 2.0
71
+ For license text, see vendored-meson/meson/COPYING
72
+
73
+ Name: spin
74
+ Files: .spin/cmds.py
75
+ License: BSD-3
76
+ For license text, see .spin/LICENSE
77
+
78
+ ----
79
+
80
+ This binary distribution of NumPy also bundles the following software:
81
+
82
+
83
+ Name: OpenBLAS
84
+ Files: numpy.libs/libopenblas*.so
85
+ Description: bundled as a dynamically linked library
86
+ Availability: https://github.com/OpenMathLib/OpenBLAS/
87
+ License: BSD-3-Clause
88
+ Copyright (c) 2011-2014, The OpenBLAS Project
89
+ All rights reserved.
90
+
91
+ Redistribution and use in source and binary forms, with or without
92
+ modification, are permitted provided that the following conditions are
93
+ met:
94
+
95
+ 1. Redistributions of source code must retain the above copyright
96
+ notice, this list of conditions and the following disclaimer.
97
+
98
+ 2. Redistributions in binary form must reproduce the above copyright
99
+ notice, this list of conditions and the following disclaimer in
100
+ the documentation and/or other materials provided with the
101
+ distribution.
102
+ 3. Neither the name of the OpenBLAS project nor the names of
103
+ its contributors may be used to endorse or promote products
104
+ derived from this software without specific prior written
105
+ permission.
106
+
107
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
108
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
109
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
110
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
111
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
112
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
113
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
114
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
115
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
116
+ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
117
+
118
+
119
+ Name: LAPACK
120
+ Files: numpy.libs/libopenblas*.so
121
+ Description: bundled in OpenBLAS
122
+ Availability: https://github.com/OpenMathLib/OpenBLAS/
123
+ License: BSD-3-Clause-Attribution
124
+ Copyright (c) 1992-2013 The University of Tennessee and The University
125
+ of Tennessee Research Foundation. All rights
126
+ reserved.
127
+ Copyright (c) 2000-2013 The University of California Berkeley. All
128
+ rights reserved.
129
+ Copyright (c) 2006-2013 The University of Colorado Denver. All rights
130
+ reserved.
131
+
132
+ $COPYRIGHT$
133
+
134
+ Additional copyrights may follow
135
+
136
+ $HEADER$
137
+
138
+ Redistribution and use in source and binary forms, with or without
139
+ modification, are permitted provided that the following conditions are
140
+ met:
141
+
142
+ - Redistributions of source code must retain the above copyright
143
+ notice, this list of conditions and the following disclaimer.
144
+
145
+ - Redistributions in binary form must reproduce the above copyright
146
+ notice, this list of conditions and the following disclaimer listed
147
+ in this license in the documentation and/or other materials
148
+ provided with the distribution.
149
+
150
+ - Neither the name of the copyright holders nor the names of its
151
+ contributors may be used to endorse or promote products derived from
152
+ this software without specific prior written permission.
153
+
154
+ The copyright holders provide no reassurances that the source code
155
+ provided does not infringe any patent, copyright, or any other
156
+ intellectual property rights of third parties. The copyright holders
157
+ disclaim any liability to any recipient for claims brought against
158
+ recipient by any third party for infringement of that parties
159
+ intellectual property rights.
160
+
161
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
162
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
163
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
164
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
165
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
166
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
167
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
168
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
169
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
170
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
171
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
172
+
173
+
174
+ Name: GCC runtime library
175
+ Files: numpy.libs/libgfortran*.so
176
+ Description: dynamically linked to files compiled with gcc
177
+ Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libgfortran
178
+ License: GPL-3.0-with-GCC-exception
179
+ Copyright (C) 2002-2017 Free Software Foundation, Inc.
180
+
181
+ Libgfortran is free software; you can redistribute it and/or modify
182
+ it under the terms of the GNU General Public License as published by
183
+ the Free Software Foundation; either version 3, or (at your option)
184
+ any later version.
185
+
186
+ Libgfortran is distributed in the hope that it will be useful,
187
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
188
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
189
+ GNU General Public License for more details.
190
+
191
+ Under Section 7 of GPL version 3, you are granted additional
192
+ permissions described in the GCC Runtime Library Exception, version
193
+ 3.1, as published by the Free Software Foundation.
194
+
195
+ You should have received a copy of the GNU General Public License and
196
+ a copy of the GCC Runtime Library Exception along with this program;
197
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
198
+ <http://www.gnu.org/licenses/>.
199
+
200
+ ----
201
+
202
+ Full text of license texts referred to above follows (that they are
203
+ listed below does not necessarily imply the conditions apply to the
204
+ present binary release):
205
+
206
+ ----
207
+
208
+ GCC RUNTIME LIBRARY EXCEPTION
209
+
210
+ Version 3.1, 31 March 2009
211
+
212
+ Copyright (C) 2009 Free Software Foundation, Inc. <http://fsf.org/>
213
+
214
+ Everyone is permitted to copy and distribute verbatim copies of this
215
+ license document, but changing it is not allowed.
216
+
217
+ This GCC Runtime Library Exception ("Exception") is an additional
218
+ permission under section 7 of the GNU General Public License, version
219
+ 3 ("GPLv3"). It applies to a given file (the "Runtime Library") that
220
+ bears a notice placed by the copyright holder of the file stating that
221
+ the file is governed by GPLv3 along with this Exception.
222
+
223
+ When you use GCC to compile a program, GCC may combine portions of
224
+ certain GCC header files and runtime libraries with the compiled
225
+ program. The purpose of this Exception is to allow compilation of
226
+ non-GPL (including proprietary) programs to use, in this way, the
227
+ header files and runtime libraries covered by this Exception.
228
+
229
+ 0. Definitions.
230
+
231
+ A file is an "Independent Module" if it either requires the Runtime
232
+ Library for execution after a Compilation Process, or makes use of an
233
+ interface provided by the Runtime Library, but is not otherwise based
234
+ on the Runtime Library.
235
+
236
+ "GCC" means a version of the GNU Compiler Collection, with or without
237
+ modifications, governed by version 3 (or a specified later version) of
238
+ the GNU General Public License (GPL) with the option of using any
239
+ subsequent versions published by the FSF.
240
+
241
+ "GPL-compatible Software" is software whose conditions of propagation,
242
+ modification and use would permit combination with GCC in accord with
243
+ the license of GCC.
244
+
245
+ "Target Code" refers to output from any compiler for a real or virtual
246
+ target processor architecture, in executable form or suitable for
247
+ input to an assembler, loader, linker and/or execution
248
+ phase. Notwithstanding that, Target Code does not include data in any
249
+ format that is used as a compiler intermediate representation, or used
250
+ for producing a compiler intermediate representation.
251
+
252
+ The "Compilation Process" transforms code entirely represented in
253
+ non-intermediate languages designed for human-written code, and/or in
254
+ Java Virtual Machine byte code, into Target Code. Thus, for example,
255
+ use of source code generators and preprocessors need not be considered
256
+ part of the Compilation Process, since the Compilation Process can be
257
+ understood as starting with the output of the generators or
258
+ preprocessors.
259
+
260
+ A Compilation Process is "Eligible" if it is done using GCC, alone or
261
+ with other GPL-compatible software, or if it is done without using any
262
+ work based on GCC. For example, using non-GPL-compatible Software to
263
+ optimize any GCC intermediate representations would not qualify as an
264
+ Eligible Compilation Process.
265
+
266
+ 1. Grant of Additional Permission.
267
+
268
+ You have permission to propagate a work of Target Code formed by
269
+ combining the Runtime Library with Independent Modules, even if such
270
+ propagation would otherwise violate the terms of GPLv3, provided that
271
+ all Target Code was generated by Eligible Compilation Processes. You
272
+ may then convey such a combination under terms of your choice,
273
+ consistent with the licensing of the Independent Modules.
274
+
275
+ 2. No Weakening of GCC Copyleft.
276
+
277
+ The availability of this Exception does not imply any general
278
+ presumption that third-party software is unaffected by the copyleft
279
+ requirements of the license of GCC.
280
+
281
+ ----
282
+
283
+ GNU GENERAL PUBLIC LICENSE
284
+ Version 3, 29 June 2007
285
+
286
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
287
+ Everyone is permitted to copy and distribute verbatim copies
288
+ of this license document, but changing it is not allowed.
289
+
290
+ Preamble
291
+
292
+ The GNU General Public License is a free, copyleft license for
293
+ software and other kinds of works.
294
+
295
+ The licenses for most software and other practical works are designed
296
+ to take away your freedom to share and change the works. By contrast,
297
+ the GNU General Public License is intended to guarantee your freedom to
298
+ share and change all versions of a program--to make sure it remains free
299
+ software for all its users. We, the Free Software Foundation, use the
300
+ GNU General Public License for most of our software; it applies also to
301
+ any other work released this way by its authors. You can apply it to
302
+ your programs, too.
303
+
304
+ When we speak of free software, we are referring to freedom, not
305
+ price. Our General Public Licenses are designed to make sure that you
306
+ have the freedom to distribute copies of free software (and charge for
307
+ them if you wish), that you receive source code or can get it if you
308
+ want it, that you can change the software or use pieces of it in new
309
+ free programs, and that you know you can do these things.
310
+
311
+ To protect your rights, we need to prevent others from denying you
312
+ these rights or asking you to surrender the rights. Therefore, you have
313
+ certain responsibilities if you distribute copies of the software, or if
314
+ you modify it: responsibilities to respect the freedom of others.
315
+
316
+ For example, if you distribute copies of such a program, whether
317
+ gratis or for a fee, you must pass on to the recipients the same
318
+ freedoms that you received. You must make sure that they, too, receive
319
+ or can get the source code. And you must show them these terms so they
320
+ know their rights.
321
+
322
+ Developers that use the GNU GPL protect your rights with two steps:
323
+ (1) assert copyright on the software, and (2) offer you this License
324
+ giving you legal permission to copy, distribute and/or modify it.
325
+
326
+ For the developers' and authors' protection, the GPL clearly explains
327
+ that there is no warranty for this free software. For both users' and
328
+ authors' sake, the GPL requires that modified versions be marked as
329
+ changed, so that their problems will not be attributed erroneously to
330
+ authors of previous versions.
331
+
332
+ Some devices are designed to deny users access to install or run
333
+ modified versions of the software inside them, although the manufacturer
334
+ can do so. This is fundamentally incompatible with the aim of
335
+ protecting users' freedom to change the software. The systematic
336
+ pattern of such abuse occurs in the area of products for individuals to
337
+ use, which is precisely where it is most unacceptable. Therefore, we
338
+ have designed this version of the GPL to prohibit the practice for those
339
+ products. If such problems arise substantially in other domains, we
340
+ stand ready to extend this provision to those domains in future versions
341
+ of the GPL, as needed to protect the freedom of users.
342
+
343
+ Finally, every program is threatened constantly by software patents.
344
+ States should not allow patents to restrict development and use of
345
+ software on general-purpose computers, but in those that do, we wish to
346
+ avoid the special danger that patents applied to a free program could
347
+ make it effectively proprietary. To prevent this, the GPL assures that
348
+ patents cannot be used to render the program non-free.
349
+
350
+ The precise terms and conditions for copying, distribution and
351
+ modification follow.
352
+
353
+ TERMS AND CONDITIONS
354
+
355
+ 0. Definitions.
356
+
357
+ "This License" refers to version 3 of the GNU General Public License.
358
+
359
+ "Copyright" also means copyright-like laws that apply to other kinds of
360
+ works, such as semiconductor masks.
361
+
362
+ "The Program" refers to any copyrightable work licensed under this
363
+ License. Each licensee is addressed as "you". "Licensees" and
364
+ "recipients" may be individuals or organizations.
365
+
366
+ To "modify" a work means to copy from or adapt all or part of the work
367
+ in a fashion requiring copyright permission, other than the making of an
368
+ exact copy. The resulting work is called a "modified version" of the
369
+ earlier work or a work "based on" the earlier work.
370
+
371
+ A "covered work" means either the unmodified Program or a work based
372
+ on the Program.
373
+
374
+ To "propagate" a work means to do anything with it that, without
375
+ permission, would make you directly or secondarily liable for
376
+ infringement under applicable copyright law, except executing it on a
377
+ computer or modifying a private copy. Propagation includes copying,
378
+ distribution (with or without modification), making available to the
379
+ public, and in some countries other activities as well.
380
+
381
+ To "convey" a work means any kind of propagation that enables other
382
+ parties to make or receive copies. Mere interaction with a user through
383
+ a computer network, with no transfer of a copy, is not conveying.
384
+
385
+ An interactive user interface displays "Appropriate Legal Notices"
386
+ to the extent that it includes a convenient and prominently visible
387
+ feature that (1) displays an appropriate copyright notice, and (2)
388
+ tells the user that there is no warranty for the work (except to the
389
+ extent that warranties are provided), that licensees may convey the
390
+ work under this License, and how to view a copy of this License. If
391
+ the interface presents a list of user commands or options, such as a
392
+ menu, a prominent item in the list meets this criterion.
393
+
394
+ 1. Source Code.
395
+
396
+ The "source code" for a work means the preferred form of the work
397
+ for making modifications to it. "Object code" means any non-source
398
+ form of a work.
399
+
400
+ A "Standard Interface" means an interface that either is an official
401
+ standard defined by a recognized standards body, or, in the case of
402
+ interfaces specified for a particular programming language, one that
403
+ is widely used among developers working in that language.
404
+
405
+ The "System Libraries" of an executable work include anything, other
406
+ than the work as a whole, that (a) is included in the normal form of
407
+ packaging a Major Component, but which is not part of that Major
408
+ Component, and (b) serves only to enable use of the work with that
409
+ Major Component, or to implement a Standard Interface for which an
410
+ implementation is available to the public in source code form. A
411
+ "Major Component", in this context, means a major essential component
412
+ (kernel, window system, and so on) of the specific operating system
413
+ (if any) on which the executable work runs, or a compiler used to
414
+ produce the work, or an object code interpreter used to run it.
415
+
416
+ The "Corresponding Source" for a work in object code form means all
417
+ the source code needed to generate, install, and (for an executable
418
+ work) run the object code and to modify the work, including scripts to
419
+ control those activities. However, it does not include the work's
420
+ System Libraries, or general-purpose tools or generally available free
421
+ programs which are used unmodified in performing those activities but
422
+ which are not part of the work. For example, Corresponding Source
423
+ includes interface definition files associated with source files for
424
+ the work, and the source code for shared libraries and dynamically
425
+ linked subprograms that the work is specifically designed to require,
426
+ such as by intimate data communication or control flow between those
427
+ subprograms and other parts of the work.
428
+
429
+ The Corresponding Source need not include anything that users
430
+ can regenerate automatically from other parts of the Corresponding
431
+ Source.
432
+
433
+ The Corresponding Source for a work in source code form is that
434
+ same work.
435
+
436
+ 2. Basic Permissions.
437
+
438
+ All rights granted under this License are granted for the term of
439
+ copyright on the Program, and are irrevocable provided the stated
440
+ conditions are met. This License explicitly affirms your unlimited
441
+ permission to run the unmodified Program. The output from running a
442
+ covered work is covered by this License only if the output, given its
443
+ content, constitutes a covered work. This License acknowledges your
444
+ rights of fair use or other equivalent, as provided by copyright law.
445
+
446
+ You may make, run and propagate covered works that you do not
447
+ convey, without conditions so long as your license otherwise remains
448
+ in force. You may convey covered works to others for the sole purpose
449
+ of having them make modifications exclusively for you, or provide you
450
+ with facilities for running those works, provided that you comply with
451
+ the terms of this License in conveying all material for which you do
452
+ not control copyright. Those thus making or running the covered works
453
+ for you must do so exclusively on your behalf, under your direction
454
+ and control, on terms that prohibit them from making any copies of
455
+ your copyrighted material outside their relationship with you.
456
+
457
+ Conveying under any other circumstances is permitted solely under
458
+ the conditions stated below. Sublicensing is not allowed; section 10
459
+ makes it unnecessary.
460
+
461
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
462
+
463
+ No covered work shall be deemed part of an effective technological
464
+ measure under any applicable law fulfilling obligations under article
465
+ 11 of the WIPO copyright treaty adopted on 20 December 1996, or
466
+ similar laws prohibiting or restricting circumvention of such
467
+ measures.
468
+
469
+ When you convey a covered work, you waive any legal power to forbid
470
+ circumvention of technological measures to the extent such circumvention
471
+ is effected by exercising rights under this License with respect to
472
+ the covered work, and you disclaim any intention to limit operation or
473
+ modification of the work as a means of enforcing, against the work's
474
+ users, your or third parties' legal rights to forbid circumvention of
475
+ technological measures.
476
+
477
+ 4. Conveying Verbatim Copies.
478
+
479
+ You may convey verbatim copies of the Program's source code as you
480
+ receive it, in any medium, provided that you conspicuously and
481
+ appropriately publish on each copy an appropriate copyright notice;
482
+ keep intact all notices stating that this License and any
483
+ non-permissive terms added in accord with section 7 apply to the code;
484
+ keep intact all notices of the absence of any warranty; and give all
485
+ recipients a copy of this License along with the Program.
486
+
487
+ You may charge any price or no price for each copy that you convey,
488
+ and you may offer support or warranty protection for a fee.
489
+
490
+ 5. Conveying Modified Source Versions.
491
+
492
+ You may convey a work based on the Program, or the modifications to
493
+ produce it from the Program, in the form of source code under the
494
+ terms of section 4, provided that you also meet all of these conditions:
495
+
496
+ a) The work must carry prominent notices stating that you modified
497
+ it, and giving a relevant date.
498
+
499
+ b) The work must carry prominent notices stating that it is
500
+ released under this License and any conditions added under section
501
+ 7. This requirement modifies the requirement in section 4 to
502
+ "keep intact all notices".
503
+
504
+ c) You must license the entire work, as a whole, under this
505
+ License to anyone who comes into possession of a copy. This
506
+ License will therefore apply, along with any applicable section 7
507
+ additional terms, to the whole of the work, and all its parts,
508
+ regardless of how they are packaged. This License gives no
509
+ permission to license the work in any other way, but it does not
510
+ invalidate such permission if you have separately received it.
511
+
512
+ d) If the work has interactive user interfaces, each must display
513
+ Appropriate Legal Notices; however, if the Program has interactive
514
+ interfaces that do not display Appropriate Legal Notices, your
515
+ work need not make them do so.
516
+
517
+ A compilation of a covered work with other separate and independent
518
+ works, which are not by their nature extensions of the covered work,
519
+ and which are not combined with it such as to form a larger program,
520
+ in or on a volume of a storage or distribution medium, is called an
521
+ "aggregate" if the compilation and its resulting copyright are not
522
+ used to limit the access or legal rights of the compilation's users
523
+ beyond what the individual works permit. Inclusion of a covered work
524
+ in an aggregate does not cause this License to apply to the other
525
+ parts of the aggregate.
526
+
527
+ 6. Conveying Non-Source Forms.
528
+
529
+ You may convey a covered work in object code form under the terms
530
+ of sections 4 and 5, provided that you also convey the
531
+ machine-readable Corresponding Source under the terms of this License,
532
+ in one of these ways:
533
+
534
+ a) Convey the object code in, or embodied in, a physical product
535
+ (including a physical distribution medium), accompanied by the
536
+ Corresponding Source fixed on a durable physical medium
537
+ customarily used for software interchange.
538
+
539
+ b) Convey the object code in, or embodied in, a physical product
540
+ (including a physical distribution medium), accompanied by a
541
+ written offer, valid for at least three years and valid for as
542
+ long as you offer spare parts or customer support for that product
543
+ model, to give anyone who possesses the object code either (1) a
544
+ copy of the Corresponding Source for all the software in the
545
+ product that is covered by this License, on a durable physical
546
+ medium customarily used for software interchange, for a price no
547
+ more than your reasonable cost of physically performing this
548
+ conveying of source, or (2) access to copy the
549
+ Corresponding Source from a network server at no charge.
550
+
551
+ c) Convey individual copies of the object code with a copy of the
552
+ written offer to provide the Corresponding Source. This
553
+ alternative is allowed only occasionally and noncommercially, and
554
+ only if you received the object code with such an offer, in accord
555
+ with subsection 6b.
556
+
557
+ d) Convey the object code by offering access from a designated
558
+ place (gratis or for a charge), and offer equivalent access to the
559
+ Corresponding Source in the same way through the same place at no
560
+ further charge. You need not require recipients to copy the
561
+ Corresponding Source along with the object code. If the place to
562
+ copy the object code is a network server, the Corresponding Source
563
+ may be on a different server (operated by you or a third party)
564
+ that supports equivalent copying facilities, provided you maintain
565
+ clear directions next to the object code saying where to find the
566
+ Corresponding Source. Regardless of what server hosts the
567
+ Corresponding Source, you remain obligated to ensure that it is
568
+ available for as long as needed to satisfy these requirements.
569
+
570
+ e) Convey the object code using peer-to-peer transmission, provided
571
+ you inform other peers where the object code and Corresponding
572
+ Source of the work are being offered to the general public at no
573
+ charge under subsection 6d.
574
+
575
+ A separable portion of the object code, whose source code is excluded
576
+ from the Corresponding Source as a System Library, need not be
577
+ included in conveying the object code work.
578
+
579
+ A "User Product" is either (1) a "consumer product", which means any
580
+ tangible personal property which is normally used for personal, family,
581
+ or household purposes, or (2) anything designed or sold for incorporation
582
+ into a dwelling. In determining whether a product is a consumer product,
583
+ doubtful cases shall be resolved in favor of coverage. For a particular
584
+ product received by a particular user, "normally used" refers to a
585
+ typical or common use of that class of product, regardless of the status
586
+ of the particular user or of the way in which the particular user
587
+ actually uses, or expects or is expected to use, the product. A product
588
+ is a consumer product regardless of whether the product has substantial
589
+ commercial, industrial or non-consumer uses, unless such uses represent
590
+ the only significant mode of use of the product.
591
+
592
+ "Installation Information" for a User Product means any methods,
593
+ procedures, authorization keys, or other information required to install
594
+ and execute modified versions of a covered work in that User Product from
595
+ a modified version of its Corresponding Source. The information must
596
+ suffice to ensure that the continued functioning of the modified object
597
+ code is in no case prevented or interfered with solely because
598
+ modification has been made.
599
+
600
+ If you convey an object code work under this section in, or with, or
601
+ specifically for use in, a User Product, and the conveying occurs as
602
+ part of a transaction in which the right of possession and use of the
603
+ User Product is transferred to the recipient in perpetuity or for a
604
+ fixed term (regardless of how the transaction is characterized), the
605
+ Corresponding Source conveyed under this section must be accompanied
606
+ by the Installation Information. But this requirement does not apply
607
+ if neither you nor any third party retains the ability to install
608
+ modified object code on the User Product (for example, the work has
609
+ been installed in ROM).
610
+
611
+ The requirement to provide Installation Information does not include a
612
+ requirement to continue to provide support service, warranty, or updates
613
+ for a work that has been modified or installed by the recipient, or for
614
+ the User Product in which it has been modified or installed. Access to a
615
+ network may be denied when the modification itself materially and
616
+ adversely affects the operation of the network or violates the rules and
617
+ protocols for communication across the network.
618
+
619
+ Corresponding Source conveyed, and Installation Information provided,
620
+ in accord with this section must be in a format that is publicly
621
+ documented (and with an implementation available to the public in
622
+ source code form), and must require no special password or key for
623
+ unpacking, reading or copying.
624
+
625
+ 7. Additional Terms.
626
+
627
+ "Additional permissions" are terms that supplement the terms of this
628
+ License by making exceptions from one or more of its conditions.
629
+ Additional permissions that are applicable to the entire Program shall
630
+ be treated as though they were included in this License, to the extent
631
+ that they are valid under applicable law. If additional permissions
632
+ apply only to part of the Program, that part may be used separately
633
+ under those permissions, but the entire Program remains governed by
634
+ this License without regard to the additional permissions.
635
+
636
+ When you convey a copy of a covered work, you may at your option
637
+ remove any additional permissions from that copy, or from any part of
638
+ it. (Additional permissions may be written to require their own
639
+ removal in certain cases when you modify the work.) You may place
640
+ additional permissions on material, added by you to a covered work,
641
+ for which you have or can give appropriate copyright permission.
642
+
643
+ Notwithstanding any other provision of this License, for material you
644
+ add to a covered work, you may (if authorized by the copyright holders of
645
+ that material) supplement the terms of this License with terms:
646
+
647
+ a) Disclaiming warranty or limiting liability differently from the
648
+ terms of sections 15 and 16 of this License; or
649
+
650
+ b) Requiring preservation of specified reasonable legal notices or
651
+ author attributions in that material or in the Appropriate Legal
652
+ Notices displayed by works containing it; or
653
+
654
+ c) Prohibiting misrepresentation of the origin of that material, or
655
+ requiring that modified versions of such material be marked in
656
+ reasonable ways as different from the original version; or
657
+
658
+ d) Limiting the use for publicity purposes of names of licensors or
659
+ authors of the material; or
660
+
661
+ e) Declining to grant rights under trademark law for use of some
662
+ trade names, trademarks, or service marks; or
663
+
664
+ f) Requiring indemnification of licensors and authors of that
665
+ material by anyone who conveys the material (or modified versions of
666
+ it) with contractual assumptions of liability to the recipient, for
667
+ any liability that these contractual assumptions directly impose on
668
+ those licensors and authors.
669
+
670
+ All other non-permissive additional terms are considered "further
671
+ restrictions" within the meaning of section 10. If the Program as you
672
+ received it, or any part of it, contains a notice stating that it is
673
+ governed by this License along with a term that is a further
674
+ restriction, you may remove that term. If a license document contains
675
+ a further restriction but permits relicensing or conveying under this
676
+ License, you may add to a covered work material governed by the terms
677
+ of that license document, provided that the further restriction does
678
+ not survive such relicensing or conveying.
679
+
680
+ If you add terms to a covered work in accord with this section, you
681
+ must place, in the relevant source files, a statement of the
682
+ additional terms that apply to those files, or a notice indicating
683
+ where to find the applicable terms.
684
+
685
+ Additional terms, permissive or non-permissive, may be stated in the
686
+ form of a separately written license, or stated as exceptions;
687
+ the above requirements apply either way.
688
+
689
+ 8. Termination.
690
+
691
+ You may not propagate or modify a covered work except as expressly
692
+ provided under this License. Any attempt otherwise to propagate or
693
+ modify it is void, and will automatically terminate your rights under
694
+ this License (including any patent licenses granted under the third
695
+ paragraph of section 11).
696
+
697
+ However, if you cease all violation of this License, then your
698
+ license from a particular copyright holder is reinstated (a)
699
+ provisionally, unless and until the copyright holder explicitly and
700
+ finally terminates your license, and (b) permanently, if the copyright
701
+ holder fails to notify you of the violation by some reasonable means
702
+ prior to 60 days after the cessation.
703
+
704
+ Moreover, your license from a particular copyright holder is
705
+ reinstated permanently if the copyright holder notifies you of the
706
+ violation by some reasonable means, this is the first time you have
707
+ received notice of violation of this License (for any work) from that
708
+ copyright holder, and you cure the violation prior to 30 days after
709
+ your receipt of the notice.
710
+
711
+ Termination of your rights under this section does not terminate the
712
+ licenses of parties who have received copies or rights from you under
713
+ this License. If your rights have been terminated and not permanently
714
+ reinstated, you do not qualify to receive new licenses for the same
715
+ material under section 10.
716
+
717
+ 9. Acceptance Not Required for Having Copies.
718
+
719
+ You are not required to accept this License in order to receive or
720
+ run a copy of the Program. Ancillary propagation of a covered work
721
+ occurring solely as a consequence of using peer-to-peer transmission
722
+ to receive a copy likewise does not require acceptance. However,
723
+ nothing other than this License grants you permission to propagate or
724
+ modify any covered work. These actions infringe copyright if you do
725
+ not accept this License. Therefore, by modifying or propagating a
726
+ covered work, you indicate your acceptance of this License to do so.
727
+
728
+ 10. Automatic Licensing of Downstream Recipients.
729
+
730
+ Each time you convey a covered work, the recipient automatically
731
+ receives a license from the original licensors, to run, modify and
732
+ propagate that work, subject to this License. You are not responsible
733
+ for enforcing compliance by third parties with this License.
734
+
735
+ An "entity transaction" is a transaction transferring control of an
736
+ organization, or substantially all assets of one, or subdividing an
737
+ organization, or merging organizations. If propagation of a covered
738
+ work results from an entity transaction, each party to that
739
+ transaction who receives a copy of the work also receives whatever
740
+ licenses to the work the party's predecessor in interest had or could
741
+ give under the previous paragraph, plus a right to possession of the
742
+ Corresponding Source of the work from the predecessor in interest, if
743
+ the predecessor has it or can get it with reasonable efforts.
744
+
745
+ You may not impose any further restrictions on the exercise of the
746
+ rights granted or affirmed under this License. For example, you may
747
+ not impose a license fee, royalty, or other charge for exercise of
748
+ rights granted under this License, and you may not initiate litigation
749
+ (including a cross-claim or counterclaim in a lawsuit) alleging that
750
+ any patent claim is infringed by making, using, selling, offering for
751
+ sale, or importing the Program or any portion of it.
752
+
753
+ 11. Patents.
754
+
755
+ A "contributor" is a copyright holder who authorizes use under this
756
+ License of the Program or a work on which the Program is based. The
757
+ work thus licensed is called the contributor's "contributor version".
758
+
759
+ A contributor's "essential patent claims" are all patent claims
760
+ owned or controlled by the contributor, whether already acquired or
761
+ hereafter acquired, that would be infringed by some manner, permitted
762
+ by this License, of making, using, or selling its contributor version,
763
+ but do not include claims that would be infringed only as a
764
+ consequence of further modification of the contributor version. For
765
+ purposes of this definition, "control" includes the right to grant
766
+ patent sublicenses in a manner consistent with the requirements of
767
+ this License.
768
+
769
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
770
+ patent license under the contributor's essential patent claims, to
771
+ make, use, sell, offer for sale, import and otherwise run, modify and
772
+ propagate the contents of its contributor version.
773
+
774
+ In the following three paragraphs, a "patent license" is any express
775
+ agreement or commitment, however denominated, not to enforce a patent
776
+ (such as an express permission to practice a patent or covenant not to
777
+ sue for patent infringement). To "grant" such a patent license to a
778
+ party means to make such an agreement or commitment not to enforce a
779
+ patent against the party.
780
+
781
+ If you convey a covered work, knowingly relying on a patent license,
782
+ and the Corresponding Source of the work is not available for anyone
783
+ to copy, free of charge and under the terms of this License, through a
784
+ publicly available network server or other readily accessible means,
785
+ then you must either (1) cause the Corresponding Source to be so
786
+ available, or (2) arrange to deprive yourself of the benefit of the
787
+ patent license for this particular work, or (3) arrange, in a manner
788
+ consistent with the requirements of this License, to extend the patent
789
+ license to downstream recipients. "Knowingly relying" means you have
790
+ actual knowledge that, but for the patent license, your conveying the
791
+ covered work in a country, or your recipient's use of the covered work
792
+ in a country, would infringe one or more identifiable patents in that
793
+ country that you have reason to believe are valid.
794
+
795
+ If, pursuant to or in connection with a single transaction or
796
+ arrangement, you convey, or propagate by procuring conveyance of, a
797
+ covered work, and grant a patent license to some of the parties
798
+ receiving the covered work authorizing them to use, propagate, modify
799
+ or convey a specific copy of the covered work, then the patent license
800
+ you grant is automatically extended to all recipients of the covered
801
+ work and works based on it.
802
+
803
+ A patent license is "discriminatory" if it does not include within
804
+ the scope of its coverage, prohibits the exercise of, or is
805
+ conditioned on the non-exercise of one or more of the rights that are
806
+ specifically granted under this License. You may not convey a covered
807
+ work if you are a party to an arrangement with a third party that is
808
+ in the business of distributing software, under which you make payment
809
+ to the third party based on the extent of your activity of conveying
810
+ the work, and under which the third party grants, to any of the
811
+ parties who would receive the covered work from you, a discriminatory
812
+ patent license (a) in connection with copies of the covered work
813
+ conveyed by you (or copies made from those copies), or (b) primarily
814
+ for and in connection with specific products or compilations that
815
+ contain the covered work, unless you entered into that arrangement,
816
+ or that patent license was granted, prior to 28 March 2007.
817
+
818
+ Nothing in this License shall be construed as excluding or limiting
819
+ any implied license or other defenses to infringement that may
820
+ otherwise be available to you under applicable patent law.
821
+
822
+ 12. No Surrender of Others' Freedom.
823
+
824
+ If conditions are imposed on you (whether by court order, agreement or
825
+ otherwise) that contradict the conditions of this License, they do not
826
+ excuse you from the conditions of this License. If you cannot convey a
827
+ covered work so as to satisfy simultaneously your obligations under this
828
+ License and any other pertinent obligations, then as a consequence you may
829
+ not convey it at all. For example, if you agree to terms that obligate you
830
+ to collect a royalty for further conveying from those to whom you convey
831
+ the Program, the only way you could satisfy both those terms and this
832
+ License would be to refrain entirely from conveying the Program.
833
+
834
+ 13. Use with the GNU Affero General Public License.
835
+
836
+ Notwithstanding any other provision of this License, you have
837
+ permission to link or combine any covered work with a work licensed
838
+ under version 3 of the GNU Affero General Public License into a single
839
+ combined work, and to convey the resulting work. The terms of this
840
+ License will continue to apply to the part which is the covered work,
841
+ but the special requirements of the GNU Affero General Public License,
842
+ section 13, concerning interaction through a network will apply to the
843
+ combination as such.
844
+
845
+ 14. Revised Versions of this License.
846
+
847
+ The Free Software Foundation may publish revised and/or new versions of
848
+ the GNU General Public License from time to time. Such new versions will
849
+ be similar in spirit to the present version, but may differ in detail to
850
+ address new problems or concerns.
851
+
852
+ Each version is given a distinguishing version number. If the
853
+ Program specifies that a certain numbered version of the GNU General
854
+ Public License "or any later version" applies to it, you have the
855
+ option of following the terms and conditions either of that numbered
856
+ version or of any later version published by the Free Software
857
+ Foundation. If the Program does not specify a version number of the
858
+ GNU General Public License, you may choose any version ever published
859
+ by the Free Software Foundation.
860
+
861
+ If the Program specifies that a proxy can decide which future
862
+ versions of the GNU General Public License can be used, that proxy's
863
+ public statement of acceptance of a version permanently authorizes you
864
+ to choose that version for the Program.
865
+
866
+ Later license versions may give you additional or different
867
+ permissions. However, no additional obligations are imposed on any
868
+ author or copyright holder as a result of your choosing to follow a
869
+ later version.
870
+
871
+ 15. Disclaimer of Warranty.
872
+
873
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
874
+ APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
875
+ HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
876
+ OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
877
+ THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
878
+ PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
879
+ IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
880
+ ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
881
+
882
+ 16. Limitation of Liability.
883
+
884
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
885
+ WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
886
+ THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
887
+ GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
888
+ USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
889
+ DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
890
+ PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
891
+ EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
892
+ SUCH DAMAGES.
893
+
894
+ 17. Interpretation of Sections 15 and 16.
895
+
896
+ If the disclaimer of warranty and limitation of liability provided
897
+ above cannot be given local legal effect according to their terms,
898
+ reviewing courts shall apply local law that most closely approximates
899
+ an absolute waiver of all civil liability in connection with the
900
+ Program, unless a warranty or assumption of liability accompanies a
901
+ copy of the Program in return for a fee.
902
+
903
+ END OF TERMS AND CONDITIONS
904
+
905
+ How to Apply These Terms to Your New Programs
906
+
907
+ If you develop a new program, and you want it to be of the greatest
908
+ possible use to the public, the best way to achieve this is to make it
909
+ free software which everyone can redistribute and change under these terms.
910
+
911
+ To do so, attach the following notices to the program. It is safest
912
+ to attach them to the start of each source file to most effectively
913
+ state the exclusion of warranty; and each file should have at least
914
+ the "copyright" line and a pointer to where the full notice is found.
915
+
916
+ <one line to give the program's name and a brief idea of what it does.>
917
+ Copyright (C) <year> <name of author>
918
+
919
+ This program is free software: you can redistribute it and/or modify
920
+ it under the terms of the GNU General Public License as published by
921
+ the Free Software Foundation, either version 3 of the License, or
922
+ (at your option) any later version.
923
+
924
+ This program is distributed in the hope that it will be useful,
925
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
926
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
927
+ GNU General Public License for more details.
928
+
929
+ You should have received a copy of the GNU General Public License
930
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
931
+
932
+ Also add information on how to contact you by electronic and paper mail.
933
+
934
+ If the program does terminal interaction, make it output a short
935
+ notice like this when it starts in an interactive mode:
936
+
937
+ <program> Copyright (C) <year> <name of author>
938
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
939
+ This is free software, and you are welcome to redistribute it
940
+ under certain conditions; type `show c' for details.
941
+
942
+ The hypothetical commands `show w' and `show c' should show the appropriate
943
+ parts of the General Public License. Of course, your program's commands
944
+ might be different; for a GUI interface, you would use an "about box".
945
+
946
+ You should also get your employer (if you work as a programmer) or school,
947
+ if any, to sign a "copyright disclaimer" for the program, if necessary.
948
+ For more information on this, and how to apply and follow the GNU GPL, see
949
+ <http://www.gnu.org/licenses/>.
950
+
951
+ The GNU General Public License does not permit incorporating your program
952
+ into proprietary programs. If your program is a subroutine library, you
953
+ may consider it more useful to permit linking proprietary applications with
954
+ the library. If this is what you want to do, use the GNU Lesser General
955
+ Public License instead of this License. But first, please read
956
+ <http://www.gnu.org/philosophy/why-not-lgpl.html>.
957
+
958
+ Name: libquadmath
959
+ Files: numpy.libs/libquadmath*.so
960
+ Description: dynamically linked to files compiled with gcc
961
+ Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libquadmath
962
+ License: LGPL-2.1-or-later
963
+
964
+ GCC Quad-Precision Math Library
965
+ Copyright (C) 2010-2019 Free Software Foundation, Inc.
966
+ Written by Francois-Xavier Coudert <[email protected]>
967
+
968
+ This file is part of the libquadmath library.
969
+ Libquadmath is free software; you can redistribute it and/or
970
+ modify it under the terms of the GNU Library General Public
971
+ License as published by the Free Software Foundation; either
972
+ version 2.1 of the License, or (at your option) any later version.
973
+
974
+ Libquadmath is distributed in the hope that it will be useful,
975
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
976
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
977
+ Lesser General Public License for more details.
978
+ https://www.gnu.org/licenses/old-licenses/lgpl-2.1.html
979
+ Classifier: Development Status :: 5 - Production/Stable
980
+ Classifier: Intended Audience :: Science/Research
981
+ Classifier: Intended Audience :: Developers
982
+ Classifier: License :: OSI Approved :: BSD License
983
+ Classifier: Programming Language :: C
984
+ Classifier: Programming Language :: Python
985
+ Classifier: Programming Language :: Python :: 3
986
+ Classifier: Programming Language :: Python :: 3.9
987
+ Classifier: Programming Language :: Python :: 3.10
988
+ Classifier: Programming Language :: Python :: 3.11
989
+ Classifier: Programming Language :: Python :: 3.12
990
+ Classifier: Programming Language :: Python :: 3 :: Only
991
+ Classifier: Programming Language :: Python :: Implementation :: CPython
992
+ Classifier: Topic :: Software Development
993
+ Classifier: Topic :: Scientific/Engineering
994
+ Classifier: Typing :: Typed
995
+ Classifier: Operating System :: Microsoft :: Windows
996
+ Classifier: Operating System :: POSIX
997
+ Classifier: Operating System :: Unix
998
+ Classifier: Operating System :: MacOS
999
+ Project-URL: Homepage, https://numpy.org
1000
+ Project-URL: Documentation, https://numpy.org/doc/
1001
+ Project-URL: Source, https://github.com/numpy/numpy
1002
+ Project-URL: Download, https://pypi.org/project/numpy/#files
1003
+ Project-URL: Tracker, https://github.com/numpy/numpy/issues
1004
+ Project-URL: Release notes, https://numpy.org/doc/stable/release
1005
+ Requires-Python: >=3.9
1006
+ Description-Content-Type: text/markdown
1007
+
1008
+ <h1 align="center">
1009
+ <img src="https://raw.githubusercontent.com/numpy/numpy/main/branding/logo/primary/numpylogo.svg" width="300">
1010
+ </h1><br>
1011
+
1012
+
1013
+ [![Powered by NumFOCUS](https://img.shields.io/badge/powered%20by-NumFOCUS-orange.svg?style=flat&colorA=E1523D&colorB=007D8A)](
1014
+ https://numfocus.org)
1015
+ [![PyPI Downloads](https://img.shields.io/pypi/dm/numpy.svg?label=PyPI%20downloads)](
1016
+ https://pypi.org/project/numpy/)
1017
+ [![Conda Downloads](https://img.shields.io/conda/dn/conda-forge/numpy.svg?label=Conda%20downloads)](
1018
+ https://anaconda.org/conda-forge/numpy)
1019
+ [![Stack Overflow](https://img.shields.io/badge/stackoverflow-Ask%20questions-blue.svg)](
1020
+ https://stackoverflow.com/questions/tagged/numpy)
1021
+ [![Nature Paper](https://img.shields.io/badge/DOI-10.1038%2Fs41592--019--0686--2-blue)](
1022
+ https://doi.org/10.1038/s41586-020-2649-2)
1023
+ [![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/numpy/numpy/badge)](https://api.securityscorecards.dev/projects/github.com/numpy/numpy)
1024
+
1025
+
1026
+ NumPy is the fundamental package for scientific computing with Python.
1027
+
1028
+ - **Website:** https://www.numpy.org
1029
+ - **Documentation:** https://numpy.org/doc
1030
+ - **Mailing list:** https://mail.python.org/mailman/listinfo/numpy-discussion
1031
+ - **Source code:** https://github.com/numpy/numpy
1032
+ - **Contributing:** https://www.numpy.org/devdocs/dev/index.html
1033
+ - **Bug reports:** https://github.com/numpy/numpy/issues
1034
+ - **Report a security vulnerability:** https://tidelift.com/docs/security
1035
+
1036
+ It provides:
1037
+
1038
+ - a powerful N-dimensional array object
1039
+ - sophisticated (broadcasting) functions
1040
+ - tools for integrating C/C++ and Fortran code
1041
+ - useful linear algebra, Fourier transform, and random number capabilities
1042
+
1043
+ Testing:
1044
+
1045
+ NumPy requires `pytest` and `hypothesis`. Tests can then be run after installation with:
1046
+
1047
+ python -c "import numpy, sys; sys.exit(numpy.test() is False)"
1048
+
1049
+ Code of Conduct
1050
+ ----------------------
1051
+
1052
+ NumPy is a community-driven open source project developed by a diverse group of
1053
+ [contributors](https://numpy.org/teams/). The NumPy leadership has made a strong
1054
+ commitment to creating an open, inclusive, and positive community. Please read the
1055
+ [NumPy Code of Conduct](https://numpy.org/code-of-conduct/) for guidance on how to interact
1056
+ with others in a way that makes our community thrive.
1057
+
1058
+ Call for Contributions
1059
+ ----------------------
1060
+
1061
+ The NumPy project welcomes your expertise and enthusiasm!
1062
+
1063
+ Small improvements or fixes are always appreciated. If you are considering larger contributions
1064
+ to the source code, please contact us through the [mailing
1065
+ list](https://mail.python.org/mailman/listinfo/numpy-discussion) first.
1066
+
1067
+ Writing code isn’t the only way to contribute to NumPy. You can also:
1068
+ - review pull requests
1069
+ - help us stay on top of new and old issues
1070
+ - develop tutorials, presentations, and other educational materials
1071
+ - maintain and improve [our website](https://github.com/numpy/numpy.org)
1072
+ - develop graphic design for our brand assets and promotional materials
1073
+ - translate website content
1074
+ - help with outreach and onboard new contributors
1075
+ - write grant proposals and help with other fundraising efforts
1076
+
1077
+ For more information about the ways you can contribute to NumPy, visit [our website](https://numpy.org/contribute/).
1078
+ If you’re unsure where to start or how your skills fit in, reach out! You can
1079
+ ask on the mailing list or here, on GitHub, by opening a new issue or leaving a
1080
+ comment on a relevant issue that is already open.
1081
+
1082
+ Our preferred channels of communication are all public, but if you’d like to
1083
+ speak to us in private first, contact our community coordinators at
1084
+ [email protected] or on Slack (write [email protected] for
1085
+ an invitation).
1086
+
1087
+ We also have a biweekly community call, details of which are announced on the
1088
+ mailing list. You are very welcome to join.
1089
+
1090
+ If you are new to contributing to open source, [this
1091
+ guide](https://opensource.guide/how-to-contribute/) helps explain why, what,
1092
+ and how to successfully get involved.
llmeval-env/lib/python3.10/site-packages/numpy-1.26.4.dist-info/RECORD ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/numpy-1.26.4.dist-info/WHEEL ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ Wheel-Version: 1.0
2
+ Generator: meson
3
+ Root-Is-Purelib: false
4
+ Tag: cp310-cp310-manylinux_2_17_x86_64
5
+ Tag: cp310-cp310-manylinux2014_x86_64
6
+
llmeval-env/lib/python3.10/site-packages/numpy-1.26.4.dist-info/entry_points.txt ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ [array_api]
2
+ numpy = numpy.array_api
3
+
4
+ [pyinstaller40]
5
+ hook-dirs = numpy:_pyinstaller_hooks_dir
6
+
7
+ [console_scripts]
8
+ f2py = numpy.f2py.f2py2e:main
9
+
llmeval-env/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/bleu.cpython-310.pyc ADDED
Binary file (13.5 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/chrf.cpython-310.pyc ADDED
Binary file (9.18 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/helpers.cpython-310.pyc ADDED
Binary file (3.08 kB). View file
 
llmeval-env/lib/python3.10/site-packages/triton/compiler/__init__.py ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ from .compiler import (CompiledKernel, ASTSource, compile, AttrsDescriptor)
2
+ from .errors import CompilationError
3
+
4
+ __all__ = [
5
+ "compile", "ASTSource", "AttrsDescriptor", "CompiledKernel", "CompilationError", "get_arch_default_num_warps",
6
+ "get_arch_default_num_stages"
7
+ ]
llmeval-env/lib/python3.10/site-packages/triton/compiler/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (449 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/triton/compiler/__pycache__/code_generator.cpython-310.pyc ADDED
Binary file (40.7 kB). View file
 
llmeval-env/lib/python3.10/site-packages/triton/compiler/__pycache__/compiler.cpython-310.pyc ADDED
Binary file (9.66 kB). View file
 
llmeval-env/lib/python3.10/site-packages/triton/compiler/__pycache__/errors.cpython-310.pyc ADDED
Binary file (2.28 kB). View file
 
llmeval-env/lib/python3.10/site-packages/triton/compiler/__pycache__/make_launcher.cpython-310.pyc ADDED
Binary file (10.7 kB). View file
 
llmeval-env/lib/python3.10/site-packages/triton/compiler/__pycache__/utils.cpython-310.pyc ADDED
Binary file (9.1 kB). View file
 
llmeval-env/lib/python3.10/site-packages/triton/compiler/backends/__init__.py ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/triton/compiler/backends/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (197 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/triton/compiler/backends/__pycache__/cuda.cpython-310.pyc ADDED
Binary file (9.17 kB). View file
 
llmeval-env/lib/python3.10/site-packages/triton/compiler/backends/cuda.py ADDED
@@ -0,0 +1,220 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from triton.common.backend import BaseBackend
2
+ from dataclasses import dataclass
3
+ from ..._C.libtriton.triton import ClusterInfo, get_num_warps, TMAInfos, translate_triton_gpu_to_llvmir, get_shared_memory_size, translate_llvmir_to_ptx, compile_ptx_to_cubin, add_external_libs
4
+ from ...common.backend import get_cuda_version_key, path_to_ptxas
5
+ from ..._C.libtriton.triton import ir, runtime
6
+ import functools
7
+ from typing import Any
8
+ from ..utils import get_ids_of_tensormaps, parse_tma_info
9
+ from ..make_launcher import make_stub
10
+ import hashlib
11
+
12
+
13
+ def get_kernel_name(src: str, pattern: str) -> str:
14
+ '''
15
+ Get kernel name from PTX code.
16
+ This Kernel name is required when launching the kernel.
17
+ '''
18
+ # There is a name mangling in PTX codegen, so the original kernel names in Triton IR are not available in PTX/cubin.
19
+ assert src
20
+ for line in src.split('\n'):
21
+ line = line.strip()
22
+ if line.startswith(pattern):
23
+ return line.split()[-1]
24
+
25
+
26
+ @functools.lru_cache()
27
+ def ptx_get_version(cuda_version) -> int:
28
+ '''
29
+ Get the highest PTX version supported by the current CUDA driver.
30
+ '''
31
+ assert isinstance(cuda_version, str)
32
+ major, minor = map(int, cuda_version.split('.'))
33
+ if major == 12:
34
+ return 80 + minor
35
+ if major == 11:
36
+ return 70 + minor
37
+ if major == 10:
38
+ return 63 + minor
39
+ raise RuntimeError("Triton only support CUDA 10.0 or higher")
40
+
41
+
42
+ @dataclass(frozen=True)
43
+ class CUDAOptions:
44
+ num_warps: int = 4
45
+ num_ctas: int = 1
46
+ num_stages: int = 3
47
+ cluster_dims: tuple = (1, 1, 1)
48
+ ptx_version: int = None
49
+ enable_warp_specialization: bool = False
50
+ enable_persistent: bool = False
51
+ optimize_epilogue: bool = False
52
+ enable_fp_fusion: bool = True
53
+ allow_fp8e4nv: bool = False
54
+ max_num_imprecise_acc_default: bool = None
55
+ extern_libs: dict = None
56
+ debug: bool = False
57
+
58
+ def __post_init__(self):
59
+ # TODO: change API
60
+ if isinstance(self.extern_libs, dict):
61
+ extern_libs = tuple([(k, v) for k, v in self.extern_libs.items() if v])
62
+ object.__setattr__(self, 'extern_libs', extern_libs)
63
+ assert self.num_warps > 0 and (self.num_warps & (self.num_warps - 1)) == 0, \
64
+ "num_warps must be a power of 2"
65
+
66
+ def hash(self):
67
+ key = '_'.join([f'{name}-{val}' for name, val in self.__dict__.items()])
68
+ return hashlib.md5(key.encode("utf-8")).hexdigest()
69
+
70
+
71
+ class CUDABackend(BaseBackend):
72
+
73
+ def __init__(self, device_type: tuple) -> None:
74
+ super().__init__(device_type)
75
+ self.capability = device_type[1]
76
+ assert isinstance(self.capability, int)
77
+
78
+ def parse_options(self, opts) -> Any:
79
+ args = {k: opts[k] for k in CUDAOptions.__dataclass_fields__.keys() if k in opts}
80
+ args["allow_fp8e4nv"] = self.capability >= 89
81
+ args["max_num_imprecise_acc_default"] = 0 if self.capability >= 89 else None
82
+ return CUDAOptions(**args)
83
+
84
+ @staticmethod
85
+ def make_ttir(mod, metadata, opt):
86
+ pm = ir.pass_manager(mod.context)
87
+ pm.enable_debug()
88
+ pm.add_inliner_pass()
89
+ pm.add_triton_combine_pass()
90
+ pm.add_canonicalizer_pass()
91
+ pm.add_reorder_broadcast_pass()
92
+ pm.add_cse_pass()
93
+ pm.add_licm_pass()
94
+ pm.add_symbol_dce_pass()
95
+ pm.run(mod)
96
+ return mod
97
+
98
+ @staticmethod
99
+ def make_ttgir(mod, metadata, opt, capability):
100
+ cluster_info = ClusterInfo()
101
+ if opt.cluster_dims is not None:
102
+ cluster_info.clusterDimX = opt.cluster_dims[0]
103
+ cluster_info.clusterDimY = opt.cluster_dims[1]
104
+ cluster_info.clusterDimZ = opt.cluster_dims[2]
105
+ # TTIR -> TTGIR
106
+ pm = ir.pass_manager(mod.context)
107
+ pm.enable_debug()
108
+ pm.add_convert_triton_to_tritongpu_pass(opt.num_warps, 32, opt.num_ctas, capability)
109
+ # optimize TTGIR
110
+ pm.add_tritongpu_coalesce_pass()
111
+ # TODO(Qingyi): Move PlanCTAPass to the front of CoalescePass
112
+ pm.add_plan_cta_pass(cluster_info)
113
+ pm.add_tritongpu_rewrite_tensor_pointer_pass(capability)
114
+ pm.add_plan_cta_pass(cluster_info)
115
+ pm.add_tritongpu_remove_layout_conversions_pass()
116
+ pm.add_tritongpu_accelerate_matmul_pass(capability)
117
+ pm.add_tritongpu_remove_layout_conversions_pass()
118
+ if opt.optimize_epilogue:
119
+ pm.add_tritongpu_optimize_epilogue_pass()
120
+ pm.add_tritongpu_optimize_dot_operands_pass()
121
+ pm.add_cse_pass()
122
+ ws_enabled = False
123
+ # `num_warps` does not mean the total number of warps of a CTA when
124
+ # warp specialization is enabled.
125
+ # it's the responsibility of the compiler to figure out the exact
126
+ # `num_warps` to use.
127
+ # TODO: support the case where `num_warps` from user is not 4.
128
+ if capability // 10 >= 9 and opt.enable_warp_specialization and opt.num_warps == 4:
129
+ pm.add_tritongpu_ws_feasibility_checking_pass(capability)
130
+ pm.run(mod)
131
+ ws_enabled = ir.is_ws_supported(mod)
132
+ pm = ir.pass_manager(mod.context)
133
+ pm.enable_debug()
134
+ if ws_enabled:
135
+ pm.add_tritongpu_wsdecomposing_pass(capability)
136
+ pm.add_tritongpu_wspipeline_pass(opt.num_stages, opt.num_warps, capability)
137
+ pm.add_tritongpu_wsmutex_pass(capability)
138
+ pm.add_tritongpu_wsmaterialization_pass(capability)
139
+ pm.add_licm_pass()
140
+ pm.add_cse_pass()
141
+ else:
142
+ pm.add_tritongpu_pipeline_pass(opt.num_stages, opt.num_warps, opt.num_ctas, capability)
143
+ pm.add_tritongpu_materialize_load_store_pass(opt.num_warps, capability)
144
+ if capability // 10 <= 8:
145
+ pm.add_tritongpu_prefetch_pass()
146
+ pm.add_tritongpu_optimize_dot_operands_pass()
147
+ pm.add_tritongpu_remove_layout_conversions_pass()
148
+ pm.add_tritongpu_decompose_conversions_pass()
149
+ pm.add_tritongpu_ws_fixup_missing_attrs_pass()
150
+ pm.add_tritongpu_reorder_instructions_pass()
151
+ pm.add_cse_pass()
152
+ pm.add_symbol_dce_pass()
153
+ if capability // 10 >= 9:
154
+ pm.add_tritongpu_fence_insertion_pass()
155
+ pm.add_tritongpu_ws_fixup_missing_attrs_pass()
156
+ pm.add_tritongpu_optimize_thread_locality_pass()
157
+ pm.add_canonicalizer_pass()
158
+ pm.run(mod)
159
+ metadata["cluster_dims"] = (cluster_info.clusterDimX, cluster_info.clusterDimY, cluster_info.clusterDimZ)
160
+ return mod
161
+
162
+ @staticmethod
163
+ def make_llir(src, metadata, options, capability):
164
+ metadata["enable_warp_specialization"] = ir.is_ws_supported(src)
165
+ metadata["num_warps"] = get_num_warps(src)
166
+ tma_infos = TMAInfos()
167
+ # link libraries
168
+ if options.extern_libs:
169
+ names = [lib[0] for lib in options.extern_libs]
170
+ paths = [lib[1] for lib in options.extern_libs]
171
+ add_external_libs(src, names, paths)
172
+ # TritonGPU -> LLVM-IR
173
+ ret = translate_triton_gpu_to_llvmir(src, capability, tma_infos, runtime.TARGET.NVVM)
174
+ if len(tma_infos) > 0:
175
+ metadata["tensormaps_info"] = parse_tma_info(tma_infos, metadata["ids_of_folded_args"])
176
+ for i, _ in enumerate(metadata["tensormaps_info"]):
177
+ metadata["tensormaps_info"][i].ids_of_folded_args = metadata["ids_of_folded_args"]
178
+ metadata["ids_of_tensormaps"] = get_ids_of_tensormaps(metadata.get("tensormaps_info", None))
179
+ metadata["shared"] = get_shared_memory_size(src)
180
+ return ret
181
+
182
+ @staticmethod
183
+ def make_ptx(src, metadata, opt, capability):
184
+ ptx_version = opt.ptx_version
185
+ if ptx_version is None:
186
+ _, cuda_version = path_to_ptxas()
187
+ ptx_version = ptx_get_version(cuda_version)
188
+ return translate_llvmir_to_ptx(src, capability, ptx_version, opt.enable_fp_fusion)
189
+
190
+ @staticmethod
191
+ def make_cubin(src, metadata, opt, capability):
192
+ metadata["name"] = get_kernel_name(src, pattern='// .globl')
193
+ ptxas, _ = path_to_ptxas()
194
+ return compile_ptx_to_cubin(src, ptxas, capability, opt.enable_fp_fusion)
195
+
196
+ def add_stages(self, stages, options):
197
+ stages["ttir"] = lambda src, metadata: self.make_ttir(src, metadata, options)
198
+ stages["ttgir"] = lambda src, metadata: self.make_ttgir(src, metadata, options, self.capability)
199
+ stages["llir"] = lambda src, metadata: self.make_llir(src, metadata, options, self.capability)
200
+ stages["ptx"] = lambda src, metadata: self.make_ptx(src, metadata, options, self.capability)
201
+ stages["cubin"] = lambda src, metadata: self.make_cubin(src, metadata, options, self.capability)
202
+
203
+ def hash(self):
204
+ return f'{get_cuda_version_key()}-{self.capability}'
205
+
206
+ def make_launcher_stub(self, src, metadata):
207
+ ids = {
208
+ "ids_of_tensormaps": metadata.get("ids_of_tensormaps", tuple()), "ids_of_folded_args":
209
+ metadata.get("ids_of_folded_args",
210
+ tuple()), "ids_of_const_exprs": src.fn.constexprs if hasattr(src, "fn") else tuple()
211
+ }
212
+ constants = src.constants if hasattr(src, "constants") else dict()
213
+ enable_warp_specialization = False
214
+
215
+ # set constant
216
+ return make_stub(src.name, src.signature, constants, ids, enable_warp_specialization=enable_warp_specialization)
217
+
218
+ @classmethod
219
+ def create_backend(cls, device_type: str):
220
+ return cls(device_type)
llmeval-env/lib/python3.10/site-packages/triton/compiler/code_generator.py ADDED
@@ -0,0 +1,1235 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import ast
2
+ import inspect
3
+ import re
4
+ import sys
5
+ import warnings
6
+ from typing import Any, Callable, Dict, Optional, Tuple, Type, Union
7
+
8
+ from .. import language
9
+ from .._C.libtriton.triton import ir
10
+ from ..language import constexpr, tensor
11
+ # ideally we wouldn't need any runtime component
12
+ from ..runtime import JITFunction
13
+ from .errors import (CompilationError, CompileTimeAssertionFailure, UnsupportedLanguageConstruct)
14
+
15
+
16
+ def mangle_ty(ty):
17
+ if ty.is_ptr():
18
+ return 'P' + mangle_ty(ty.element_ty)
19
+ if ty.is_int():
20
+ SIGNED = language.dtype.SIGNEDNESS.SIGNED
21
+ prefix = 'i' if ty.int_signedness == SIGNED else 'u'
22
+ return prefix + str(ty.int_bitwidth)
23
+ if ty.is_floating():
24
+ return str(ty)
25
+ if ty.is_block():
26
+ elt = mangle_ty(ty.scalar)
27
+ shape = '_'.join(map(str, ty.shape))
28
+ return f'{elt}S{shape}S'
29
+ if ty.is_void():
30
+ return 'V'
31
+ assert False, "Unsupported type"
32
+
33
+
34
+ def mangle_fn(name, arg_tys, constants):
35
+ # doesn't mangle ret type, which must be a function of arg tys
36
+ mangled_arg_names = '_'.join([mangle_ty(ty) for ty in arg_tys])
37
+ mangled_constants = '_'.join([f'{i}c{repr(constants[i])}' for i in sorted(constants)])
38
+ mangled_constants = mangled_constants.replace('.', '_d_')
39
+ mangled_constants = mangled_constants.replace("'", '_sq_')
40
+ # [ and ] are not allowed in LLVM identifiers
41
+ mangled_constants = mangled_constants.replace('[', '_').replace(']', '_')
42
+ ret = f'{name}__{mangled_arg_names}__{mangled_constants}'
43
+ return ret
44
+
45
+
46
+ def _is_triton_tensor(o: Any) -> bool:
47
+ return isinstance(o, tensor)
48
+
49
+
50
+ def _is_constexpr(o: Any) -> bool:
51
+ return isinstance(o, constexpr)
52
+
53
+
54
+ def _is_triton_scalar(o: Any) -> bool:
55
+ return _is_triton_tensor(o) and (not o.type.is_block() or o.type.numel == 1)
56
+
57
+
58
+ def _is_list_like(o: Any) -> bool:
59
+ return isinstance(o, (list, tuple))
60
+
61
+
62
+ def _unwrap_if_constexpr(o: Any):
63
+ return o.value if isinstance(o, constexpr) else o
64
+
65
+
66
+ def _check_fn_args(node, fn, args):
67
+ if fn.noinline:
68
+ for idx, arg in enumerate(args):
69
+ if not _is_constexpr(arg) and not _is_triton_scalar(arg):
70
+ raise UnsupportedLanguageConstruct(
71
+ fn.src, node,
72
+ f'Function {fn.__name__} is marked noinline, but was called with non-scalar argument {fn.arg_names[idx]}:{arg}'
73
+ )
74
+
75
+
76
+ def _get_fn_file_line(fn):
77
+ base_fn = fn
78
+ while not isinstance(base_fn, JITFunction):
79
+ base_fn = base_fn.fn
80
+ file_name = base_fn.fn.__code__.co_filename
81
+ lines, begin_line = inspect.getsourcelines(base_fn.fn)
82
+ # Match the following pattern:
83
+ # @triton.autotune(...) <- foo.__code__.co_firstlineno
84
+ # @triton.heuristics(...)
85
+ # @triton.jit
86
+ # def foo(...): <- this line is the first line
87
+ for idx, line in enumerate(lines):
88
+ if line.strip().startswith("def "):
89
+ begin_line += idx
90
+ break
91
+ return file_name, begin_line
92
+
93
+
94
+ _condition_types = {bool, int, type(None)} # Python types accepted for conditionals inside kernels
95
+
96
+
97
+ class enter_sub_region:
98
+
99
+ def __init__(self, generator):
100
+ self.generator = generator
101
+
102
+ def __enter__(self):
103
+ # record lscope & local_defs in the parent scope
104
+ self.liveins = self.generator.lscope.copy()
105
+ self.prev_defs = self.generator.local_defs.copy()
106
+ self.generator.local_defs = {}
107
+ self.insert_block = self.generator.builder.get_insertion_block()
108
+ self.insert_point = self.generator.builder.get_insertion_point()
109
+ return self.liveins, self.insert_block
110
+
111
+ def __exit__(self, *args, **kwargs):
112
+ self.generator.builder.restore_insertion_point(self.insert_point)
113
+ self.generator.lscope = self.liveins
114
+ self.generator.local_defs = self.prev_defs
115
+
116
+
117
+ # Check if the given syntax node has an "early" return
118
+ class ContainsReturnChecker(ast.NodeVisitor):
119
+
120
+ def __init__(self, gscope):
121
+ self.gscope = gscope
122
+
123
+ def _visit_stmts(self, body) -> bool:
124
+ for s in body:
125
+ if self.visit(s):
126
+ return True
127
+ return False
128
+
129
+ def _visit_function(self, fn) -> bool:
130
+ # Currently we only support JITFunctions defined in the global scope
131
+ if isinstance(fn, JITFunction) and not fn.noinline:
132
+ fn_node = fn.parse()
133
+ return ContainsReturnChecker(self.gscope).visit(fn_node)
134
+ return False
135
+
136
+ def generic_visit(self, node) -> bool:
137
+ ret = False
138
+ for _, value in ast.iter_fields(node):
139
+ if isinstance(value, list):
140
+ for item in value:
141
+ if isinstance(item, ast.AST):
142
+ ret = ret or self.visit(item)
143
+ elif isinstance(value, ast.AST):
144
+ ret = ret or self.visit(value)
145
+ return ret
146
+
147
+ def visit_Attribute(self, node: ast.Attribute) -> bool:
148
+ # If the left part is a name, it's possible that
149
+ # we call triton native function or a jit function from another module.
150
+ # If the left part is not a name, it must return a tensor or a constexpr
151
+ # whose methods do not contain return statements
152
+ # e.g., (tl.load(x)).to(y)
153
+ # So we only check if the expressions within value have return or not
154
+ if isinstance(node.value, ast.Name):
155
+ if node.value.id in self.gscope:
156
+ value = self.gscope[node.value.id]
157
+ fn = getattr(value, node.attr)
158
+ return self._visit_function(fn)
159
+ return False
160
+ return self.visit(node.value)
161
+
162
+ def visit_Name(self, node: ast.Name) -> bool:
163
+ if type(node.ctx) == ast.Store:
164
+ return False
165
+ if node.id in self.gscope:
166
+ fn = self.gscope[node.id]
167
+ return self._visit_function(fn)
168
+ return False
169
+
170
+ def visit_Return(self, node: ast.Return) -> bool:
171
+ return True
172
+
173
+ def visit_Assign(self, node: ast.Assign) -> bool:
174
+ # There couldn't be an early return
175
+ # x = ...
176
+ return False
177
+
178
+ def visit_AugAssign(self, node: ast.AugAssign) -> bool:
179
+ # There couldn't be an early return
180
+ # x += ...
181
+ return False
182
+
183
+ def visit_Module(self, node: ast.Module) -> bool:
184
+ return self._visit_stmts(node.body)
185
+
186
+ def visit_FunctionDef(self, node: ast.FunctionDef) -> bool:
187
+ return self._visit_stmts(node.body)
188
+
189
+ def visit_If(self, node: ast.If) -> bool:
190
+ # TODO: optimize the following case in which we actually don't have
191
+ # a return when static_cond is false:
192
+ # if dynamic_cond
193
+ # if static_cond
194
+ # func_with_return
195
+ # else
196
+ # func_without_return
197
+ ret = self._visit_stmts(node.body)
198
+ if node.orelse:
199
+ ret = ret or self._visit_stmts(node.orelse)
200
+ return ret
201
+
202
    def visit_IfExp(self, node: ast.IfExp) -> bool:
        # A ternary expression can reach a return through either branch.
        return self.visit(node.body) or self.visit(node.orelse)
205
    def visit_Call(self, node: ast.Call) -> bool:
        # Only the callee can introduce a return; argument expressions cannot.
        return self.visit(node.func)
207
+
208
+
209
+ class CodeGenerator(ast.NodeVisitor):
210
+
211
    def __init__(self, context, prototype, gscope, attributes, constants, function_name, options, debug=None,
                 module=None, is_kernel=False, function_types: Optional[Dict] = None, noinline=False,
                 file_name: Optional[str] = None, begin_line=0):
        """Set up IR-generation state for lowering one Python function to MLIR.

        A fresh `ir.builder` is created per generator; `module` and
        `function_types` may be shared with a caller so that nested
        `call_JitFunction` invocations emit into the same module.
        """
        self.context = context
        self.builder = ir.builder(context)
        self.file_name = file_name
        # node.lineno starts from 1, so we need to subtract 1
        self.begin_line = begin_line - 1
        self.builder.set_loc(file_name, begin_line, 0)
        self.builder.options = options
        # Reuse the caller's module (for callee codegen) or start a new one.
        self.module = self.builder.create_module() if module is None else module
        self.function_ret_types = {} if function_types is None else function_types
        self.prototype = prototype
        self.gscope = gscope           # caller's global scope (module __dict__)
        self.lscope = dict()           # local scope during lowering
        self.attributes = attributes
        self.constants = constants
        self.function_name = function_name
        self.is_kernel = is_kernel
        self.last_node = None
        # If the callee does not specify debug, inherit the option's setting.
        self.debug = options.debug if debug is None else debug
        self.noinline = noinline
        # Stack of enclosing `for`/`while` AST nodes (non-empty => inside SCF).
        self.scf_stack = []
        self.last_ret_type = None
        # SSA-construction
        # name => language.tensor
        self.local_defs: Dict[str, tensor] = {}
        self.global_uses: Dict[str, tensor] = {}
        self.dereference_name: Callable[[str], Any] = self._define_name_lookup()
        self.fn = None
242
    # Python builtins usable inside a kernel; `print`/`min` are remapped to
    # their triton device-side equivalents.
    builtin_namespace: Dict[str, Any] = {_.__name__: _ for _ in (range, float, int, isinstance, getattr)}
    builtin_namespace.update((
        ('print', language.core.device_print),
        ('min', language.minimum),
    ))
248
    def _define_name_lookup(self):
        """Build the name-resolution callable bound to `self.dereference_name`.

        Resolution order: local scope (`lscope`), then global scope
        (`gscope`), then `builtin_namespace`; raises NameError otherwise.
        """

        def local_lookup(name: str, absent):
            # this needs to be re-fetched from `self` every time, because it gets switched occasionally
            value = self.lscope.get(name, absent)
            if value is not absent and name not in self.local_defs:
                # A read of a name that was not defined in the current region
                # is recorded as a use of an outer (live-in) value.
                self.global_uses[name] = value
            return value

        # Unique sentinel so that a scope entry whose value is None (or any
        # other falsy value) is still treated as "found".
        absent_marker = object()

        def name_lookup(name: str) -> Any:
            absent = absent_marker
            for lookup_function in local_lookup, self.gscope.get, self.builtin_namespace.get:
                value = lookup_function(name, absent)
                if value is not absent:
                    return value
            raise NameError(f'{name} is not defined')

        return name_lookup
269
    def set_value(self, name: str, value: Union[tensor, constexpr]) -> None:
        ''' This function:
        called by visit_Assign() & visit_FunctionDef() to store left value (lvalue)
        1. record local defined name (FIXME: should consider control flow)
        2. store the value in self.lscope and self.local_defs
        '''
        self.lscope[name] = value
        self.local_defs[name] = value
278
    def _get_insertion_point_and_loc(self):
        # XXX: this is a hack to get the location of the insertion point.
        # The insertion point's location could be invalid sometimes,
        # so we need to explicitly set the location
        loc = self.builder.get_loc()
        ip = self.builder.get_insertion_point()
        return ip, loc
286
    def _set_insertion_point_and_loc(self, ip, loc):
        # Counterpart of _get_insertion_point_and_loc: restore both the
        # builder's insertion point and its source location.
        self.builder.restore_insertion_point(ip)
        self.builder.set_loc(loc)
290
+ #
291
+ # AST visitor
292
+ #
293
+ def visit_compound_statement(self, stmts):
294
+ # Ensure that stmts is iterable
295
+ if not _is_list_like(stmts):
296
+ stmts = [stmts]
297
+ for stmt in stmts:
298
+ ret_type = self.visit(stmt)
299
+ if ret_type is not None and isinstance(stmt, ast.Return):
300
+ self.last_ret_type = ret_type
301
+
302
    def visit_Module(self, node):
        # Delegate to the default traversal, which visits each child node
        # (in practice, the single FunctionDef being compiled).
        ast.NodeVisitor.generic_visit(self, node)
305
    def visit_List(self, node):
        # A list display in Load context: evaluate each element and return
        # a plain Python list of the results.
        ctx = self.visit(node.ctx)
        assert ctx is None
        elts = [self.visit(elt) for elt in node.elts]
        return elts
311
    # By design, only non-kernel functions can return
    def visit_Return(self, node):
        """Emit a function-return op and report the returned triton type.

        Returns None for a bare `return`, a tuple of types for a tuple
        return, or the single value's type otherwise; the caller
        (visit_compound_statement) stores it in `last_ret_type`.
        """
        ret_value = self.visit(node.value)
        # ret_block = self.builder.create_block()
        # post_ret_block = self.builder.create_block()
        # self.builder.create_branch(ret_block)
        # self.builder.set_insertion_point_to_end(ret_block)
        if ret_value is None:
            self.builder.ret([])
            ret_ty = None
        elif isinstance(ret_value, tuple):
            # Materialize every element (constexprs become tensors) before
            # emitting a multi-value return.
            ret_values = [language.core._to_tensor(v, self.builder) for v in ret_value]
            ret_types = [v.type for v in ret_values]
            self.builder.ret([v.handle for v in ret_values])
            ret_ty = tuple(ret_types)
        else:
            ret = language.core._to_tensor(ret_value, self.builder)
            self.builder.ret([ret.handle])
            ret_ty = ret.type
        # self.builder.create_branch(post_ret_block)
        # self.builder.set_insertion_point_to_end(post_ret_block)
        return ret_ty
334
    def visit_FunctionDef(self, node):
        """Lower the (single, top-level) function definition to an MLIR function.

        Steps: evaluate default arguments, create the IR function, bind
        arguments (constexprs are inlined, tensors become entry-block
        arguments), lower the body, then patch the return type and finalize.

        Raises:
            UnsupportedLanguageConstruct: on nested function definitions.
        """
        arg_names, kwarg_names = self.visit(node.args)
        if self.fn:
            raise UnsupportedLanguageConstruct(None, node, "nested function definition is not supported.")
        # initialize defaults: defaults align with the *last* len(defaults)
        # parameters, hence the reverse indexing below.
        for i, default_value in enumerate(node.args.defaults):
            arg_node = node.args.args[-i - 1]
            annotation = arg_node.annotation
            name = arg_node.arg
            st_target = ast.Name(id=name, ctx=ast.Store())
            if annotation is None:
                init_node = ast.Assign(targets=[st_target], value=default_value)
            else:
                init_node = ast.AnnAssign(target=st_target, value=default_value, annotation=annotation)
            self.visit(init_node)
        # initialize function
        visibility = "public" if self.is_kernel else "private"
        self.fn = self.builder.get_or_insert_function(self.module, self.function_name,
                                                      self.prototype.to_ir(self.builder), visibility, self.noinline)
        self.module.push_back(self.fn)
        entry = self.fn.add_entry_block()
        arg_values = []
        # `idx` counts only non-constexpr arguments: constexprs are folded
        # away and do not occupy an IR argument slot.
        idx = 0
        for i, arg_name in enumerate(arg_names):
            if i in self.constants:
                cst = self.constants[i]
                if not _is_constexpr(cst):
                    cst = constexpr(self.constants[i])
                arg_values.append(cst)
                continue
            else:
                if i in self.attributes:
                    for name, value in self.attributes[i]:
                        self.fn.set_arg_attr(idx, name, value)
                arg_values.append(tensor(self.fn.args(idx), self.prototype.param_types[idx]))
                idx += 1

        insert_pt = self.builder.get_insertion_block()
        for arg_name, arg_value in zip(arg_names, arg_values):
            self.set_value(arg_name, arg_value)
        self.builder.set_insertion_point_to_start(entry)
        # visit function body
        self.visit_compound_statement(node.body)
        # finalize function
        if self.last_ret_type is None:
            # No return statement was lowered: emit an implicit void return.
            self.builder.ret([])
        else:
            # update return type
            if isinstance(self.last_ret_type, tuple):
                self.prototype.ret_types = list(self.last_ret_type)
                self.fn.reset_type(self.prototype.to_ir(self.builder))
            else:
                self.prototype.ret_types = [self.last_ret_type]
                self.fn.reset_type(self.prototype.to_ir(self.builder))
        if insert_pt:
            self.builder.set_insertion_point_to_end(insert_pt)
        # Remove dead code
        self.fn.finalize()
393
+ def visit_arguments(self, node):
394
+ arg_names = []
395
+ for arg in node.args:
396
+ arg_names += [self.visit(arg)]
397
+ kwarg_names = self.visit(node.kwarg)
398
+ return arg_names, kwarg_names
399
+
400
    def visit_arg(self, node):
        # Visit children (e.g. the annotation), then yield the argument name.
        ast.NodeVisitor.generic_visit(self, node)
        return node.arg
404
    def visit_AnnAssign(self, node):
        """Handle `x: annotation = value`.

        An explicit `: constexpr` annotation binds a compile-time constant
        (which may not be re-bound); any other annotation is treated as a
        plain assignment.
        """
        # extract attributes
        annotation = self.visit(node.annotation)
        target = self.visit(node.target)
        value = self.visit(node.value)
        # constexpr
        if annotation == constexpr:
            if target in self.lscope:
                raise ValueError(f'{target} is already defined.'
                                 f' constexpr cannot be reassigned.')
            if not _is_constexpr(value):
                value = constexpr(value)
            # NOTE: bypasses set_value, so the name is not recorded in
            # local_defs — constexprs are not loop-carried/live values.
            self.lscope[target] = value
            return self.lscope[target]
        # default: call visit_Assign
        return self.visit_Assign(node)
421
    def visit_Assign(self, node):
        """Lower an assignment, binding each target name to its value.

        Tuple assignment (`a, b = ...`) is supported; chained assignment
        (`a = b = ...`) is not.
        """
        _names = []
        for target in node.targets:
            _names += [self.visit(target)]
        if len(_names) > 1:
            raise UnsupportedLanguageConstruct(None, node, "simultaneous multiple assignment is not supported.")
        names = _names[0]
        values = self.visit(node.value)
        # Normalize both sides to lists so single and tuple assignments share
        # the same code path below.
        if not _is_list_like(names):
            names = [names]
        if not _is_list_like(values):
            values = [values]
        # Types that are kept as-is rather than converted to tensors.
        native_nontensor_types = (language.dtype, )
        for name, value in zip(names, values):
            # by default, constexpr are assigned into python variable
            value = _unwrap_if_constexpr(value)
            if value is not None and \
               not _is_triton_tensor(value) and \
               not isinstance(value, native_nontensor_types):
                value = language.core._to_tensor(value, self.builder)
            self.set_value(name, value)
443
    def visit_AugAssign(self, node):
        # Desugar `x op= v` into `x = x op v` and lower that instead.
        # NOTE(review): assumes the target is a simple ast.Name — subscripted
        # or attribute targets (e.g. `x[i] += v`) would fail on `.id`.
        name = node.target.id
        lhs = ast.Name(id=name, ctx=ast.Load())
        rhs = ast.BinOp(lhs, node.op, node.value)
        assign = ast.Assign(targets=[node.target], value=rhs)
        self.visit(assign)
        return self.dereference_name(name)
451
    def visit_Name(self, node):
        # Store context: just report the name (visit_Assign binds it).
        if type(node.ctx) == ast.Store:
            return node.id
        # Load context: resolve via local -> global -> builtin scopes.
        return self.dereference_name(node.id)
456
    def visit_Store(self, node):
        # Context nodes carry no information; traverse (no-op) and return None.
        ast.NodeVisitor.generic_visit(self, node)
459
    def visit_Load(self, node):
        # Context nodes carry no information; traverse (no-op) and return None.
        ast.NodeVisitor.generic_visit(self, node)
462
+ def visit_Tuple(self, node):
463
+ args = [self.visit(x) for x in node.elts]
464
+ return tuple(args)
465
+
466
    def _apply_binary_method(self, method_name, lhs, rhs):
        """Dispatch a binary dunder (e.g. '__add__') on the operand pair.

        Triton tensors receive the builder; if only the right operand is a
        tensor, the reflected method ('__radd__', ...) is used instead.
        """
        # TODO: raise something meaningful if getattr fails below, esp for reverse method
        if _is_triton_tensor(lhs):
            return getattr(lhs, method_name)(rhs, _builder=self.builder)
        if _is_triton_tensor(rhs):
            # '__op__' -> '__rop__'
            reverse_method_name = re.sub(r"__(.*)__", r"__r\1__", method_name)
            return getattr(rhs, reverse_method_name)(lhs, _builder=self.builder)
        # Plain Python values: fall back to ordinary semantics.
        return getattr(lhs, method_name)(rhs)
475
+ def visit_BinOp(self, node):
476
+ lhs = self.visit(node.left)
477
+ rhs = self.visit(node.right)
478
+ method_name = self._method_name_for_bin_op.get(type(node.op))
479
+ if method_name is None:
480
+ raise UnsupportedLanguageConstruct(
481
+ None, node, "AST binary operator '{}' is not (currently) implemented.".format(node.op.__name__))
482
+ return self._apply_binary_method(method_name, lhs, rhs)
483
+
484
    # Maps AST binary-operator classes to the dunder used to lower them.
    # NOTE(review): ast.MatMult (`@`) is deliberately absent and falls through
    # to the "not implemented" diagnostic in visit_BinOp.
    _method_name_for_bin_op: Dict[Type[ast.operator], str] = {
        ast.Add: '__add__',
        ast.Sub: '__sub__',
        ast.Mult: '__mul__',
        ast.Div: '__truediv__',
        ast.FloorDiv: '__floordiv__',
        ast.Mod: '__mod__',
        ast.Pow: '__pow__',
        ast.LShift: '__lshift__',
        ast.RShift: '__rshift__',
        ast.BitAnd: '__and__',
        ast.BitOr: '__or__',
        ast.BitXor: '__xor__',
    }
499
    def visit_then_else_blocks(self, node, liveins, then_block, else_block):
        """Lower both branches of an `if` and compute its merged results.

        Returns (then_defs, else_defs, then_block, else_block, names,
        ret_types, ir_ret_types) where `names` are the values the `if` must
        yield: live-ins redefined by either branch, plus names newly defined
        in *both* branches. Types must agree between branches and with the
        initial value.
        """
        # then block
        self.builder.set_insertion_point_to_start(then_block)
        self.visit_compound_statement(node.body)
        then_block = self.builder.get_insertion_block()
        then_defs = self.local_defs.copy()
        # else block
        else_defs = {}
        if node.orelse:
            self.builder.set_insertion_point_to_start(else_block)
            # Reset scope so the else branch sees only the live-in values.
            self.lscope = liveins.copy()
            self.local_defs = {}
            self.visit_compound_statement(node.orelse)
            else_defs = self.local_defs.copy()
            else_block = self.builder.get_insertion_block()

        # update block arguments
        names = []
        ret_types = []
        ir_ret_types = []
        # variables in livein whose value is updated in `if`
        for name in liveins:
            # check type
            for defs, block_name in [(then_defs, 'then'), (else_defs, 'else')]:
                if name in defs:
                    assert defs[name].type == liveins[name].type, \
                        f'initial value for `{name}` is of type {liveins[name].type}, '\
                        f'but the {block_name} block redefines it as {defs[name].type}'
            if name in then_defs or name in else_defs:
                names.append(name)
                ret_types.append(then_defs[name].type if name in then_defs else else_defs[name].type)
                ir_ret_types.append(then_defs[name].handle.get_type() if name in
                                    then_defs else else_defs[name].handle.get_type())
            # variable defined in then but not in else
            if name in then_defs and name not in else_defs:
                else_defs[name] = liveins[name]
            # variable defined in else but not in then
            if name in else_defs and name not in then_defs:
                then_defs[name] = liveins[name]
        # variables that are both in then and else but not in liveins
        # TODO: could probably be cleaned up
        for name in then_defs.keys() & else_defs.keys():
            if name in names:
                continue
            then_ty = then_defs[name].type
            else_ty = else_defs[name].type
            assert then_ty == else_ty, \
                f'mismatched type for {name} between then block ({then_ty}) '\
                f'and else block ({else_ty})'
            names.append(name)
            ret_types.append(then_ty)
            ir_ret_types.append(then_defs[name].handle.get_type())

        return then_defs, else_defs, then_block, else_block, names, ret_types, ir_ret_types
554
    def visit_if_top_level(self, cond, node):
        """Lower a dynamic `if` (possibly containing `return`) using plain
        branches and basic blocks rather than `scf.if`.

        Values updated in either branch are threaded to an `endif` block as
        block arguments; the endif block is erased when both branches return.
        """
        has_endif_block = True
        with enter_sub_region(self) as sr:
            liveins, ip_block = sr
            then_block = self.builder.create_block()
            else_block = self.builder.create_block()
            # create basic-block after conditional
            endif_block = self.builder.create_block()
            # create branch
            self.builder.set_insertion_point_to_end(ip_block)
            self.builder.create_cond_branch(cond.handle, then_block, else_block)
            # visit then and else blocks
            then_defs, else_defs, then_block, else_block, names, ret_types, ir_ret_types = \
                self.visit_then_else_blocks(node, liveins, then_block, else_block)
            # then terminator
            self.builder.set_insertion_point_to_end(then_block)
            if then_block.has_return() and else_block.has_return():
                # Both branches return: nothing can reach the endif block.
                has_endif_block = False
                endif_block.erase()
            if not then_block.has_terminator() and has_endif_block:
                self.builder.create_branch(endif_block, [then_defs[n].handle for n in names])
            # else terminator
            self.builder.set_insertion_point_to_end(else_block)
            if not else_block.has_terminator() and has_endif_block:
                self.builder.create_branch(endif_block, [else_defs[n].handle for n in names])
            if has_endif_block:
                for ty in ir_ret_types:
                    endif_block.add_argument(ty)
        if has_endif_block:
            # change block
            self.builder.set_insertion_point_to_start(endif_block)
            # update value: merged values arrive as endif block arguments.
            for i, name in enumerate(names):
                new_tensor = language.core.tensor(endif_block.arg(i), ret_types[i])
                self.set_value(name, new_tensor)
590
    # TODO: refactor
    def visit_if_scf(self, cond, node):
        """Lower a dynamic `if` without `return` statements to an `scf.if` op.

        Both branches yield the values listed in `names`; the op's results
        then replace those names in the enclosing scope.
        """
        with enter_sub_region(self) as sr:
            liveins, _ = sr
            ip, last_loc = self._get_insertion_point_and_loc()
            then_block = self.builder.create_block()
            else_block = self.builder.create_block() if node.orelse else None
            then_defs, else_defs, then_block, else_block, names, ret_types, _ = \
                self.visit_then_else_blocks(node, liveins, then_block, else_block)
            # create if op
            self._set_insertion_point_and_loc(ip, last_loc)
            if_op = self.builder.create_if_op([ty.to_ir(self.builder) for ty in ret_types], cond.handle, True)
            then_block.merge_block_before(if_op.get_then_block())
            self.builder.set_insertion_point_to_end(if_op.get_then_block())
            if len(names) > 0:
                self.builder.create_yield_op([then_defs[n].handle for n in names])
            if not node.orelse:
                else_block = if_op.get_else_block()
            else:
                else_block.merge_block_before(if_op.get_else_block())
            self.builder.set_insertion_point_to_end(if_op.get_else_block())
            if len(names) > 0:
                self.builder.create_yield_op([else_defs[n].handle for n in names])
        # update values
        for i, name in enumerate(names):
            new_tensor = language.core.tensor(if_op.get_result(i), ret_types[i])
            self.set_value(name, new_tensor)
618
    def visit_If(self, node):
        """Lower an `if` statement.

        Tensor conditions produce IR (`scf.if`, or explicit branches at top
        level when the body may `return`); constexpr/Python conditions are
        resolved at compile time and only the taken branch is lowered.
        """
        cond = self.visit(node.test)
        if _is_triton_tensor(cond):
            cond = cond.to(language.int1, _builder=self.builder)
            contains_return = ContainsReturnChecker(self.gscope).visit(node)
            if self.scf_stack and contains_return:
                raise UnsupportedLanguageConstruct(
                    None, node, "Cannot have `return` statements inside `while` or `for` statements in triton "
                    "(note that this also applies to `return` statements that are inside functions "
                    "transitively called from within `while`/`for` statements)")
            elif self.scf_stack or not contains_return:
                self.visit_if_scf(cond, node)
            else:
                self.visit_if_top_level(cond, node)
        else:
            cond = _unwrap_if_constexpr(cond)
            # not isinstance - we insist the real thing, no subclasses and no ducks
            if type(cond) not in _condition_types:
                raise UnsupportedLanguageConstruct(
                    None, node,
                    "`if` conditionals can only accept values of type {{{}}}, not objects of type {}".format(
                        ', '.join(_.__name__ for _ in _condition_types),
                        type(cond).__name__))
            if cond:
                self.visit_compound_statement(node.body)
            else:
                self.visit_compound_statement(node.orelse)
646
    def visit_IfExp(self, node):
        """Lower a ternary expression `a if cond else b`.

        A tensor condition becomes an `scf.if` yielding one value (both arms
        must have the same type); a constexpr condition evaluates only the
        selected arm at compile time.
        """
        cond = self.visit(node.test)
        if _is_triton_tensor(cond):
            cond = cond.to(language.int1, _builder=self.builder)
            # TODO: Deal w/ more complicated return types (e.g tuple)
            with enter_sub_region(self):
                ip, last_loc = self._get_insertion_point_and_loc()

                then_block = self.builder.create_block()
                self.builder.set_insertion_point_to_start(then_block)
                then_val = language.core._to_tensor(self.visit(node.body), self.builder)
                then_block = self.builder.get_insertion_block()

                else_block = self.builder.create_block()
                self.builder.set_insertion_point_to_start(else_block)
                # do not need to reset lscope since
                # ternary expressions cannot define new variables
                else_val = language.core._to_tensor(self.visit(node.orelse), self.builder)
                else_block = self.builder.get_insertion_block()

                self._set_insertion_point_and_loc(ip, last_loc)

                assert then_val.type == else_val.type, \
                    f'ternary expression with dynamic condition has inconsistent types {then_val.type} and {else_val.type}'
                ret_type = then_val.type

                # Void-typed arms yield nothing and produce no result.
                ret_type_ir = [ret_type.to_ir(self.builder)] if ret_type != language.void else []
                if_op = self.builder.create_if_op(ret_type_ir, cond.handle, True)
                then_block.merge_block_before(if_op.get_then_block())
                if ret_type_ir:
                    self.builder.set_insertion_point_to_end(if_op.get_then_block())
                    self.builder.create_yield_op([then_val.handle])

                self.builder.set_insertion_point_to_end(if_op.get_then_block())
                else_block.merge_block_before(if_op.get_else_block())
                if ret_type_ir:
                    self.builder.set_insertion_point_to_end(if_op.get_else_block())
                    self.builder.create_yield_op([else_val.handle])
                return language.core.tensor(if_op.get_result(0), ret_type) if ret_type_ir else None
        else:
            cond = _unwrap_if_constexpr(cond)

            # not isinstance - we insist the real thing, no subclasses and no ducks
            if type(cond) not in _condition_types:
                raise UnsupportedLanguageConstruct(
                    None, node,
                    "`if` conditionals can only accept values of type {{{}}}, not objects of type {}".format(
                        ', '.join(_.__name__ for _ in _condition_types),
                        type(cond).__name__))
            if cond:
                return self.visit(node.body)
            else:
                return self.visit(node.orelse)
700
    def visit_Pass(self, node):
        # `pass` lowers to nothing.
        pass
703
+ def visit_Compare(self, node):
704
+ if not (len(node.comparators) == 1 and len(node.ops) == 1):
705
+ raise UnsupportedLanguageConstruct(None, node, "simultaneous multiple comparison is not supported")
706
+ lhs = self.visit(node.left)
707
+ rhs = self.visit(node.comparators[0])
708
+ lhs_value = _unwrap_if_constexpr(lhs)
709
+ rhs_value = _unwrap_if_constexpr(rhs)
710
+ if type(node.ops[0]) == ast.Is:
711
+ return constexpr(lhs_value is rhs_value)
712
+ if type(node.ops[0]) == ast.IsNot:
713
+ return constexpr(lhs_value is not rhs_value)
714
+ method_name = self._method_name_for_comp_op.get(type(node.ops[0]))
715
+ if method_name is None:
716
+ raise UnsupportedLanguageConstruct(
717
+ None, node, "AST comparison operator '{}' is not (currently) implemented.".format(node.ops[0].__name__))
718
+ return self._apply_binary_method(method_name, lhs, rhs)
719
+
720
    # Maps AST comparison-operator classes to rich-comparison dunders.
    # (`Is`/`IsNot` are handled separately in visit_Compare; `In`/`NotIn`
    # have no mapping and hit the "not implemented" diagnostic.)
    _method_name_for_comp_op: Dict[Type[ast.cmpop], str] = {
        ast.Eq: '__eq__', ast.NotEq: '__ne__', ast.Lt: '__lt__', ast.LtE: '__le__', ast.Gt: '__gt__', ast.GtE: '__ge__'
    }
724
+ def visit_UnaryOp(self, node):
725
+ op = self.visit(node.operand)
726
+ fn = self._method_name_for_unary_op.get(type(node.op))
727
+ if fn is None:
728
+ raise UnsupportedLanguageConstruct(
729
+ None, node, "AST unary operator '{}' is not (currently) implemented.".format(node.op.__name__))
730
+ if _is_triton_tensor(op):
731
+ return getattr(op, fn)(_builder=self.builder)
732
+ return getattr(op, fn)()
733
+
734
    # Maps AST unary-operator classes to the dunder used to lower them.
    _method_name_for_unary_op: Dict[Type[ast.unaryop], str] = {
        ast.USub: '__neg__', ast.UAdd: '__pos__', ast.Not: '__not__', ast.Invert: '__invert__'
    }
738
+ def visit_While(self, node):
739
+ with enter_sub_region(self) as sr:
740
+ liveins, insert_block = sr
741
+ ip, last_loc = self._get_insertion_point_and_loc()
742
+
743
+ # loop body (the after region)
744
+ # loop_block = self.builder.create_block()
745
+ dummy = self.builder.create_block()
746
+ self.builder.set_insertion_point_to_start(dummy)
747
+ self.scf_stack.append(node)
748
+ self.visit_compound_statement(node.body)
749
+ self.scf_stack.pop()
750
+ loop_defs = self.local_defs
751
+ dummy.erase()
752
+
753
+ # collect loop-carried values
754
+ names = []
755
+ ret_types = []
756
+ init_args = []
757
+ for name in loop_defs:
758
+ if name in liveins:
759
+ # We should not def new constexpr
760
+ assert _is_triton_tensor(loop_defs[name]), f'cannoe reassign constxpr {name} in the loop'
761
+ assert _is_triton_tensor(liveins[name]), f'cannot reasign constexpr {name} in the loop'
762
+ assert loop_defs[name].type == liveins[name].type, \
763
+ f'Loop-carried variable {name} has initial type {liveins[name].type} '\
764
+ f'but is re-assigned to {loop_defs[name].type} in loop! '\
765
+ f'Please make sure that the type stays consistent.'
766
+
767
+ # these are loop-carried values
768
+ names.append(name)
769
+ ret_types.append(loop_defs[name].type)
770
+ init_args.append(liveins[name])
771
+
772
+ self._set_insertion_point_and_loc(ip, last_loc)
773
+ while_op = self.builder.create_while_op([ty.to_ir(self.builder) for ty in ret_types],
774
+ [arg.handle for arg in init_args])
775
+ # merge the condition region
776
+ before_block = self.builder.create_block_with_parent(while_op.get_before(),
777
+ [ty.to_ir(self.builder) for ty in ret_types])
778
+ self.builder.set_insertion_point_to_start(before_block)
779
+ for i, name in enumerate(names):
780
+ self.lscope[name] = language.core.tensor(before_block.arg(i), ret_types[i])
781
+ self.local_defs[name] = self.lscope[name]
782
+ cond = self.visit(node.test)
783
+ self.builder.set_insertion_point_to_end(before_block)
784
+ # create ConditionOp: e.g., scf.condition(%cond) %arg0, %arg1, ...
785
+ self.builder.create_condition_op(cond.handle, [before_block.arg(i) for i in range(len(init_args))])
786
+ # merge the loop body
787
+ after_block = self.builder.create_block_with_parent(while_op.get_after(),
788
+ [ty.to_ir(self.builder) for ty in ret_types])
789
+
790
+ # generate loop body
791
+ self.builder.set_insertion_point_to_start(after_block)
792
+ for i, name in enumerate(names):
793
+ self.lscope[name] = language.core.tensor(after_block.arg(i), ret_types[i])
794
+ self.local_defs[name] = self.lscope[name]
795
+ self.scf_stack.append(node)
796
+ self.visit_compound_statement(node.body)
797
+ self.scf_stack.pop()
798
+ loop_defs = self.local_defs
799
+ yields = []
800
+ for name in loop_defs:
801
+ if name in liveins:
802
+ yields.append(loop_defs[name])
803
+ self.builder.create_yield_op([y.handle for y in yields])
804
+
805
+ # WhileOp defines new values, update the symbol table (lscope, local_defs)
806
+ for i, name in enumerate(names):
807
+ new_def = language.core.tensor(while_op.get_result(i), ret_types[i])
808
+ self.lscope[name] = new_def
809
+ self.local_defs[name] = new_def
810
+
811
+ for stmt in node.orelse:
812
+ assert False, "Not implemented"
813
+ ast.NodeVisitor.generic_visit(self, stmt)
814
+
815
    def visit_Subscript(self, node):
        # Only Load-context subscripts are supported (no item assignment).
        assert node.ctx.__class__.__name__ == "Load"
        lhs = self.visit(node.value)
        slices = self.visit(node.slice)
        if _is_triton_tensor(lhs):
            # Tensor indexing needs the builder to emit IR.
            return lhs.__getitem__(slices, _builder=self.builder)
        return lhs[slices]
823
    def visit_ExtSlice(self, node):
        # Legacy (pre-3.9) multi-dimensional slice node: visit each dimension.
        return [self.visit(dim) for dim in node.dims]
826
    def visit_For(self, node):
        """Lower a `for` loop.

        `tl.static_range` loops are fully unrolled at compile time; `range`
        loops become an `scf.for` op with loop-carried values (names defined
        both outside and inside the body). Other iterators are rejected.
        """
        IteratorClass = self.visit(node.iter.func)
        iter_args = [self.visit(arg) for arg in node.iter.args]
        if IteratorClass == language.static_range:
            # Compile-time unrolling: re-visit the body once per iteration
            # with the induction variable bound to a constexpr.
            iterator = IteratorClass(*iter_args)
            static_range = range(iterator.start.value, iterator.end.value, iterator.step.value)
            for i in static_range:
                self.lscope[node.target.id] = constexpr(i)
                self.visit_compound_statement(node.body)
                for stmt in node.orelse:
                    ast.NodeVisitor.generic_visit(self, stmt)
            return

        if IteratorClass is not range:
            raise RuntimeError('Only `range` and `static_range` iterators are currently supported')

        # visit iterator arguments
        # note: only `range` iterator is supported now
        # collect lower bound (lb), upper bound (ub), and step
        # NOTE(review): ast.Num is deprecated since Python 3.8 (ast.Constant).
        lb = iter_args[0] if len(iter_args) > 1 else self.visit(ast.Num(0))
        ub = iter_args[1] if len(iter_args) > 1 else self.visit(node.iter.args[0])
        step = iter_args[2] if len(iter_args) > 2 else self.visit(ast.Num(1))
        # handle negative constant step (not supported by scf.for in MLIR)
        negative_step = False
        if _is_constexpr(step) and step.value < 0:
            # Iterate the reversed range with a positive step; the induction
            # variable is mapped back (ub - iv + lb) after the op is built.
            step = constexpr(-step.value)
            negative_step = True
            lb, ub = ub, lb
        lb = language.core._to_tensor(lb, self.builder)
        ub = language.core._to_tensor(ub, self.builder)
        step = language.core._to_tensor(step, self.builder)
        # induction variable type
        if not lb.dtype.is_int() or not ub.dtype.is_int() or not step.dtype.is_int():
            raise TypeError(f"For loop bounds and step must all be ints, are ({lb.dtype}, {ub.dtype}, {step.dtype})")
        iv_type = language.semantic.integer_promote_impl(lb.dtype, ub.dtype)
        iv_type = language.semantic.integer_promote_impl(iv_type, step.dtype)
        iv_ir_type = iv_type.to_ir(self.builder)
        iv_is_signed = iv_type.int_signedness == language.core.dtype.SIGNEDNESS.SIGNED
        # lb/ub/step might be constexpr, we need to cast them to tensor
        lb = lb.handle
        ub = ub.handle
        step = step.handle
        # ForOp can only accept IndexType as lb/ub/step. Cast integer to Index
        lb = self.builder.create_int_cast(lb, iv_ir_type, iv_is_signed)
        ub = self.builder.create_int_cast(ub, iv_ir_type, iv_is_signed)
        step = self.builder.create_int_cast(step, iv_ir_type, iv_is_signed)
        # Create placeholder for the loop induction variable
        iv = self.builder.create_undef(iv_ir_type)
        self.set_value(node.target.id, language.core.tensor(iv, iv_type))

        with enter_sub_region(self) as sr:
            liveins, insert_block = sr
            ip, last_loc = self._get_insertion_point_and_loc()

            # create loop body block
            block = self.builder.create_block()
            self.builder.set_insertion_point_to_start(block)
            # dry visit loop body (only to discover loop-carried names);
            # the block is erased right after
            self.scf_stack.append(node)
            self.visit_compound_statement(node.body)
            self.scf_stack.pop()
            block.erase()

            # If a variable (name) is defined in both its parent & itself, then it's
            # a loop-carried variable. (They must be of the same type)
            init_args = []
            yields = []
            names = []
            for name in self.local_defs:
                if name in liveins:
                    assert _is_triton_tensor(self.local_defs[name]), f'{name} is not tensor'
                    assert _is_triton_tensor(liveins[name])
                    assert self.local_defs[name].type == liveins[name].type, \
                        f'Loop-carried variable {name} has initial type {liveins[name].type} '\
                        f'but is re-assigned to {self.local_defs[name].type} in loop! '\
                        f'Please make sure that the type stays consistent.'

                    names.append(name)
                    init_args.append(language.core._to_tensor(liveins[name], self.builder))
                    yields.append(language.core._to_tensor(self.local_defs[name], self.builder))

            # create ForOp
            self._set_insertion_point_and_loc(ip, last_loc)
            for_op = self.builder.create_for_op(lb, ub, step, [arg.handle for arg in init_args])

            self.scf_stack.append(node)
            self.builder.set_insertion_point_to_start(for_op.get_body(0))
            # arg(0) is the induction variable; carried values start at arg(1).
            for i, name in enumerate(names):
                self.set_value(name, language.core.tensor(for_op.get_body(0).arg(i + 1), yields[i].type))
            self.visit_compound_statement(node.body)
            self.scf_stack.pop()
            yields = []
            for name in self.local_defs:
                if name in liveins:
                    yields.append(language.core._to_tensor(self.local_defs[name], self.builder))

            # create YieldOp
            if len(yields) > 0:
                self.builder.create_yield_op([y.handle for y in yields])
            for_op_region = for_op.get_body(0).get_parent()
            assert for_op_region.size() == 1, "We use SCF, so the loop body should only have one block"

            # update induction variable with actual value, and replace all uses
            self.builder.set_insertion_point_to_start(for_op.get_body(0))
            iv = for_op.get_induction_var()
            if negative_step:
                iv = self.builder.create_sub(ub, iv)
                iv = self.builder.create_add(iv, lb)
            self.lscope[node.target.id].handle.replace_all_uses_with(iv)
            self.set_value(node.target.id, language.core.tensor(iv, iv_type))

        # update lscope & local_defs (ForOp defines new values)
        for i, name in enumerate(names):
            self.set_value(name, language.core.tensor(for_op.get_result(i), yields[i].type))

        for stmt in node.orelse:
            assert False, "Don't know what to do with else after for"
            ast.NodeVisitor.generic_visit(self, stmt)
945
    def visit_Slice(self, node):
        # Evaluate the three slice components (each may be None) and return
        # an ordinary Python `slice`.
        lower = self.visit(node.lower)
        upper = self.visit(node.upper)
        step = self.visit(node.step)
        return slice(lower, upper, step)
951
    def visit_Index(self, node):
        # Legacy (pre-3.9) index wrapper: unwrap and visit the actual value.
        return self.visit(node.value)
954
    def visit_keyword(self, node) -> Tuple[str, Any]:
        # A call keyword argument: (name, evaluated value).
        return node.arg, self.visit(node.value)
957
    def visit_Assert(self, node) -> Any:
        # Asserts are compiled only in debug mode; otherwise they are dropped.
        if not self.debug:
            return
        test = self.visit(node.test)
        msg = self.visit(node.msg)
        # Convert assert to triton's device_assert which happens on the device
        return language.core.device_assert(test, msg, _builder=self.builder)
965
    def call_JitFunction(self, fn: JITFunction, args, kwargs):
        """Generate (if needed) and call the MLIR function for a @jit callee.

        Constexpr arguments are folded into the mangled symbol name as a
        specialization; only the remaining runtime arguments become call
        operands. Returns None, a single tl.tensor, or a tuple of tl.tensor
        mirroring the callee's results.
        """
        # Bind positional/keyword arguments to the callee's parameter names.
        args = inspect.getcallargs(fn.fn, *args, **kwargs)
        args = [args[name] for name in fn.arg_names]
        args = [arg if _is_triton_tensor(arg) else constexpr(arg) for arg in args]
        # generate function def
        attributes = dict()
        constexprs = [i for i, arg in enumerate(args) if _is_constexpr(arg)]
        constants = {i: args[i] for i in constexprs}
        # generate call
        # Constexpr slots are blanked out: they are baked into the callee body.
        args = [None if i in constexprs else arg for i, arg in enumerate(args)]
        arg_vals = [arg.handle for arg in args if arg is not None]
        arg_types = [arg.type for arg in args if arg is not None]
        # The mangled name is unique per (arg types, constants) specialization.
        fn_name = mangle_fn(fn.__name__, arg_types, constants)
        # generate function def if necessary
        if not self.module.has_function(fn_name):
            prototype = language.function_type([], arg_types)
            gscope = sys.modules[fn.fn.__module__].__dict__
            # If the callee is not set, we use the same debug setting as the caller
            file_name, begin_line = _get_fn_file_line(fn)
            debug = self.debug if fn.debug is None else fn.debug
            generator = CodeGenerator(self.context, prototype, gscope, attributes, constants, module=self.module,
                                      function_name=fn_name, function_types=self.function_ret_types,
                                      noinline=fn.noinline, file_name=file_name, begin_line=begin_line,
                                      options=self.builder.options, debug=debug)
            generator.visit(fn.parse())
            callee_ret_type = generator.last_ret_type
            self.function_ret_types[fn_name] = callee_ret_type
        else:
            callee_ret_type = self.function_ret_types[fn_name]
        symbol = self.module.get_function(fn_name)
        call_op = self.builder.call(symbol, arg_vals)
        # Wrap the MLIR call results back into tl.tensor values.
        if call_op.get_num_results() == 0 or callee_ret_type is None:
            return None
        elif call_op.get_num_results() == 1:
            return tensor(call_op.get_result(0), callee_ret_type)
        else:
            # should return a tuple of tl.tensor
            results = []
            for i in range(call_op.get_num_results()):
                results.append(tensor(call_op.get_result(i), callee_ret_type[i]))
            return tuple(results)
1006
+
1007
    def visit_Call(self, node):
        """Dispatch a call node.

        Order of resolution: compile-time (host-side) implementations, then
        JIT functions, then tensor-method/builtin calls (which receive the
        builder), then plain Python callables from the builtin namespace.
        """
        fn = _unwrap_if_constexpr(self.visit(node.func))

        # static_print / static_assert execute on the host at compile time.
        static_implementation = self.statically_implemented_functions.get(fn)
        if static_implementation is not None:
            return static_implementation(self, node)

        kws = dict(self.visit(keyword) for keyword in node.keywords)
        args = [self.visit(arg) for arg in node.args]
        if fn is language.core.device_assert:  # TODO: this should not be so hardcoded
            if not self.debug:
                return
        if isinstance(fn, JITFunction):
            _check_fn_args(node, fn, args)
            return self.call_JitFunction(fn, args, kws)
        if (hasattr(fn, '__self__') and _is_triton_tensor(fn.__self__)) or language.core.is_builtin(fn):
            # Builtins are handed the MLIR builder (and, when requested via a
            # `_generator` parameter, this code generator).
            extra_kwargs = dict(_builder=self.builder)
            sig = inspect.signature(fn)
            if '_generator' in sig.parameters:
                extra_kwargs['_generator'] = self
            return fn(*args, **extra_kwargs, **kws)
        if fn in self.builtin_namespace.values():
            args = map(_unwrap_if_constexpr, args)
            return fn(*args, **kws)
        # NOTE(review): any other callable falls through and implicitly returns
        # None — confirm this is intended rather than an error case.
1031
+
1032
    def visit_Constant(self, node):
        # Literals become compile-time constexpr values.
        return constexpr(node.value)
1034
+
1035
+ def visit_BoolOp(self, node: ast.BoolOp):
1036
+ if len(node.values) != 2:
1037
+ raise UnsupportedLanguageConstruct(
1038
+ None, node,
1039
+ "chained boolean operators (A or B or C) are not supported; use parentheses to split the chain.")
1040
+ lhs = self.visit(node.values[0])
1041
+ rhs = self.visit(node.values[1])
1042
+ method_name = self._method_name_for_bool_op.get(type(node.op))
1043
+ if method_name is None:
1044
+ raise UnsupportedLanguageConstruct(
1045
+ None, node, "AST boolean operator '{}' is not (currently) implemented.".format(node.op.__name__))
1046
+ return self._apply_binary_method(method_name, lhs, rhs)
1047
+
1048
    # Maps AST boolean-operator types to the tl.tensor method implementing them.
    _method_name_for_bool_op: Dict[Type[ast.boolop], str] = {ast.And: 'logical_and', ast.Or: 'logical_or'}
1049
+
1050
    # Python < 3.8 parses literals into Num/Str/NameConstant nodes rather than
    # ast.Constant; provide equivalent handlers for those legacy node types.
    if sys.version_info < (3, 8):

        def visit_NameConstant(self, node):
            # True / False / None literals.
            return constexpr(node.value)

        def visit_Num(self, node):
            # Numeric literals.
            return constexpr(node.n)

        def visit_Str(self, node):
            # String literals (literal_eval handles quoting/escapes).
            return constexpr(ast.literal_eval(node))
1060
+
1061
+ def visit_Attribute(self, node):
1062
+ lhs = self.visit(node.value)
1063
+ if _is_triton_tensor(lhs):
1064
+ if node.attr == "T":
1065
+ return language.semantic.trans(lhs, builder=self.builder)
1066
+ return getattr(lhs, node.attr)
1067
+
1068
    def visit_Expr(self, node):
        # Expression statements are visited only for their side effects.
        ast.NodeVisitor.generic_visit(self, node)
1070
+
1071
    def visit_NoneType(self, node):
        # A missing optional AST field (e.g. an absent slice bound) maps to None.
        return None
1073
+
1074
    def visit_JoinedStr(self, node):
        """Evaluate an f-string at compile time.

        All interpolated pieces must be constexpr; the result is a plain
        Python string.
        """
        values = list(node.values)
        for i, value in enumerate(values):
            if isinstance(value, ast.Constant):
                values[i] = str(value.value)
            elif isinstance(value, ast.FormattedValue):
                # `conversion` is -1 for none, or ord('s'|'r'|'a') for !s/!r/!a.
                conversion_code = value.conversion
                evaluated = self.visit(value.value)
                if not _is_constexpr(evaluated):
                    raise UnsupportedLanguageConstruct(
                        None, node,
                        "Cannot evaluate f-string containing non-constexpr conversion values, found conversion of type "
                        + str(type(evaluated)))
                # Rebuild a "{}"/"{!s}"-style spec and format the constexpr value.
                values[i] = ("{}" if conversion_code < 0 else "{!" + chr(conversion_code) + "}").format(evaluated.value)
            else:
                raise AssertionError("encountered unexpected node of type {} in a JoinedStr node".format(type(value)))
        return ''.join(values)
1091
+
1092
    def visit(self, node):
        """Visit a node while keeping the builder's debug location in sync.

        The builder location is pointed at this node's source position for
        the duration of the visit, then restored, so emitted IR carries
        accurate file/line/column info.
        """
        if node is None:
            return
        with warnings.catch_warnings():
            # The ast library added visit_Constant and deprecated some other
            # methods but we can't move to that without breaking Python 3.6 and 3.7.
            warnings.simplefilter("ignore", DeprecationWarning)  # python 3.9
            warnings.simplefilter("ignore", PendingDeprecationWarning)  # python 3.8
            self.last_node = node
            last_loc = self.builder.get_loc()
            if hasattr(node, 'lineno') and hasattr(node, 'col_offset'):
                self.builder.set_loc(self.file_name, self.begin_line + node.lineno, node.col_offset)
                last_loc = self.builder.get_loc()
            ret = super().visit(node)
            # Reset the location to the last one before the visit
            if last_loc:
                self.builder.set_loc(last_loc)
            return ret
1110
+
1111
    def generic_visit(self, node):
        # Reaching here means no visit_<NodeType> handler exists for this node.
        raise UnsupportedLanguageConstruct(None, node, "unsupported AST node type: {}".format(type(node).__name__))
1113
+
1114
+ def execute_static_print(self, node: ast.Call) -> None:
1115
+ # TODO: too simplistic? Perhaps do something else with non-constexpr
1116
+
1117
+ kws = {name: _unwrap_if_constexpr(value) for name, value in (self.visit(keyword) for keyword in node.keywords)}
1118
+ args = [_unwrap_if_constexpr(self.visit(arg)) for arg in node.args]
1119
+ print(*args, **kws)
1120
+
1121
+ def execute_static_assert(self, node: ast.Call) -> None:
1122
+ arg_count = len(node.args)
1123
+ if not (0 < arg_count <= 2) or len(node.keywords):
1124
+ raise TypeError("`static_assert` requires one or two positional arguments only")
1125
+
1126
+ passed = _unwrap_if_constexpr(self.visit(node.args[0]))
1127
+ if not isinstance(passed, bool):
1128
+ raise NotImplementedError(
1129
+ "Assertion condition could not be determined at compile-time. Make sure that it depends only on `constexpr` values"
1130
+ )
1131
+ if not passed:
1132
+ if arg_count == 1:
1133
+ message = ""
1134
+ else:
1135
+ try:
1136
+ message = self.visit(node.args[1])
1137
+ except Exception as e:
1138
+ message = "<failed to evaluate assertion message: " + repr(e) + ">"
1139
+
1140
+ raise CompileTimeAssertionFailure(None, node, _unwrap_if_constexpr(message))
1141
+ return None
1142
+
1143
    # Calls resolved entirely at compile time: visit_Call dispatches these to
    # host-side implementations instead of generating device IR.
    statically_implemented_functions: Dict[object, Callable[[ast.Call], Any]] = {
        language.core.static_assert: execute_static_assert,
        language.core.static_print: execute_static_print,
    }
1147
+
1148
+
1149
def str_to_ty(name):
    """Translate a signature type string (e.g. "*fp32", "i1") to a tl dtype.

    A leading '*' denotes one level of pointer indirection, applied
    recursively.
    """
    if name[0] == "*":
        pointee = str_to_ty(name[1:])
        return language.pointer_type(pointee)
    scalar_types = {
        "fp8e4nv": language.float8e4nv,
        "fp8e5": language.float8e5,
        "fp8e4b15": language.float8e4b15,
        "fp8e4b15x4": language.float8e4b15x4,
        "fp16": language.float16,
        "bf16": language.bfloat16,
        "fp32": language.float32,
        "fp64": language.float64,
        "i1": language.int1,
        "i8": language.int8,
        "i16": language.int16,
        "i32": language.int32,
        "i64": language.int64,
        "u8": language.uint8,
        "u16": language.uint16,
        "u32": language.uint32,
        "u64": language.uint64,
        "B": language.int1,
    }
    return scalar_types[name]
1174
+
1175
+
1176
def kernel_suffix(signature, specialization):
    """Build the per-specialization suffix appended to a kernel name.

    Per argument index: <i><'c' if equal to 1><'d' if divisible by 16>
    <'e' if divisible by 8>.
    """
    pieces = []
    for index, _ in enumerate(signature):
        pieces.append(str(index))
        if index in specialization.equal_to_1:
            pieces.append('c')
        if index in specialization.divisible_by_16:
            pieces.append('d')
        if index in specialization.divisible_by_8:
            pieces.append('e')
    return ''.join(pieces)
1189
+
1190
+
1191
def ast_to_ttir(fn, specialization, options):
    """Lower a @jit function's AST to a Triton-IR (TTIR) module.

    `specialization` supplies the signature, constants and per-argument
    attributes (divisibility / equal-to-1) that shape the generated kernel.
    Raises CompilationError (with source attached) on frontend failures.
    """
    attrs = specialization.attrs
    context = ir.context()
    context.load_triton()
    # create kernel prototype
    cst_key = lambda i: fn.arg_names.index(i) if isinstance(i, str) else i
    constants = {cst_key(key): value for key, value in specialization.constants.items()}
    # visit kernel AST
    gscope = fn.__globals__.copy()
    function_name = '_'.join([fn.__name__, kernel_suffix(specialization.signature.values(), attrs)])
    tys = list(specialization.signature.values())
    # NOTE(review): `tys` is a list of type strings, so `k in tys` tests the
    # *index* for membership among the strings (always False here), making the
    # value always 1; looks like `tys[k] == "i1"` was intended — confirm upstream.
    new_constants = {k: True if k in tys and tys[k] == "i1" else 1 for k in attrs.equal_to_1}
    new_attrs = {k: [("tt.divisibility", 16)] for k in attrs.divisible_by_16}
    # max_divisibility is 16 when also divisible by 16, otherwise 8.
    for k in attrs.divisible_by_8:
        attr = new_attrs[k] if k in new_attrs else []
        if k in attrs.divisible_by_16:
            attr.append(("tt.max_divisibility", 16))
        else:
            attr.append(("tt.max_divisibility", 8))
        new_attrs[k] = attr

    all_constants = constants.copy()
    all_constants.update(new_constants)
    # Constant-folded arguments do not appear in the IR signature.
    arg_types = [str_to_ty(v) for k, v in specialization.signature.items() if k not in constants]
    file_name, begin_line = _get_fn_file_line(fn)

    prototype = language.function_type([], arg_types)
    generator = CodeGenerator(context, prototype, gscope=gscope, constants=all_constants, function_name=function_name,
                              attributes=new_attrs, is_kernel=True, file_name=file_name, begin_line=begin_line,
                              options=options)
    try:
        generator.visit(fn.parse())
    except CompilationError as e:
        # Frontend errors may lack source text; attach it for the message.
        if e.src is None:
            e.set_source_code(fn.src)
        raise
    except Exception as e:
        node = generator.last_node
        if node is None:
            raise
        # Wrap arbitrary failures with the offending AST node for context.
        raise CompilationError(fn.src, node, repr(e)) from e
    ret = generator.module
    # module takes ownership of the context
    ret.context = context
    return ret
llmeval-env/lib/python3.10/site-packages/triton/compiler/compiler.py ADDED
@@ -0,0 +1,270 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import hashlib
4
+ import json
5
+
6
+ from .._C.libtriton.triton import (get_env_vars, ir)
7
+ # from ..runtime import driver, jit, JITFunction
8
+ # TODO: runtime.errors
9
+ from ..runtime.autotuner import OutOfResources
10
+ from ..runtime.cache import get_cache_manager
11
+ from ..runtime.driver import driver
12
+ from .utils import InfoFromBackendForTensorMap
13
+ from .backends.cuda import CUDABackend
14
+ from dataclasses import dataclass
15
+ from .code_generator import ast_to_ttir
16
+ from pathlib import Path
17
+ import re
18
+
19
+
20
@dataclass
class AttrsDescriptor:
    """Per-argument specialization sets, keyed by argument index.

    ``None`` is used as the field sentinel (rather than a default_factory)
    so each instance gets its own fresh empty set.
    """
    divisible_by_16: set = None
    equal_to_1: set = None
    ids_of_folded_args: set = None
    divisible_by_8: set = None

    def __post_init__(self):
        # Normalize every unset field to an independent empty set.
        for field_name in ("divisible_by_16", "equal_to_1", "ids_of_folded_args", "divisible_by_8"):
            if getattr(self, field_name) is None:
                setattr(self, field_name, set())

    def hash(self):
        """Deterministic digest of all fields, independent of set ordering."""
        key = str([sorted(x) for x in self.__dict__.values()])
        return hashlib.md5(key.encode("utf-8")).hexdigest()
40
+
41
+
42
# Regexes for recovering a kernel's name and argument list from IR text.
# - ^\s*tt\.func\s+ : match the start of the string, any leading whitespace, the keyword func,
#   and any following whitespace
# - (public\s+)? : optionally match the keyword public and any following whitespace
# - (@\w+) : match an @ symbol followed by one or more word characters
#   (letters, digits, or underscores), and capture it as group 1 (the function name)
# - (\((?:%\w+: \S+(?: \{\S+ = \S+ : \S+\})?(?:, )?)*\)) : match a pair of parentheses enclosing
#   zero or more arguments separated by commas, and capture it as group 2 (the argument list)
# - (attributes \{[\S\s]+\})? : optionally match attributes enclosed in braces and capture it as group 3
mlir_prototype_pattern = r"^\s*tt\.func\s+(?:public\s+)?(@\w+)(\((?:%\w+: [\S\s]+(?: \{\S+ = \S+ : \S+\})?(?:, )?)*\))\s*(attributes \{[\S\s]+\})?\s+\{\s*$"
ptx_prototype_pattern = r"\.(?:visible|extern)\s+\.(?:entry|func)\s+(\w+)\s*\(([^)]*)\)"
# Prototype pattern per IR kind; ttir and ttgir share the MLIR form.
prototype_pattern = {
    "ttir": mlir_prototype_pattern,
    "ttgir": mlir_prototype_pattern,
    "ptx": ptx_prototype_pattern,
}

# Argument-type extraction patterns, applied to the captured argument list.
mlir_arg_type_pattern = r'%\w+: ((?:[^,\s<]+|<[^>]+>)+),?'
ptx_arg_type_pattern = r"\.param\s+\.(\w+)"
arg_type_pattern = {
    "ttir": mlir_arg_type_pattern,
    "ttgir": mlir_arg_type_pattern,
    "ptx": ptx_arg_type_pattern,
}
65
+
66
+
67
def convert_type_repr(x):
    """Convert an MLIR type string into the launcher's signature form.

    Pointer types (``!tt.ptr<T, ...>``) become ``*<T>``, recursively;
    anything else is returned unchanged.
    """
    # Currently we only capture the pointer type and assume the pointer is on global memory.
    # TODO: Capture and support shared memory space
    pointee = re.search(r'!tt\.ptr<([^,]+)', x)
    if pointee is None:
        return x
    return '*' + convert_type_repr(pointee.group(1))
74
+
75
+
76
+ def _get_num_warps_from_ir_str(src: str):
77
+ ttgir_num_warps_pattern = r'"triton_gpu.num-warps"\s?=\s?(\d+)\s?:'
78
+ # TODO(jlebar): Using a regex to get num-warps is a hack, and will break if
79
+ # e.g. someone has an instruction (not module) attribute named "num-warps".
80
+ num_warps_matches = re.findall(ttgir_num_warps_pattern, src)
81
+ assert len(num_warps_matches) == 1, "Expected exactly one match for num_warps"
82
+ num_warps = int(num_warps_matches[0])
83
+
84
+ # If warp specialization is enabled, the true number of warps from
85
+ # the perspective of e.g. CUDA is num-warps times the number of
86
+ # specialized groups.
87
+ num_warp_groups_matches = re.findall(r'"triton_gpu.num-warp-groups-per-cta"\s?=\s?(\d+)\s?:', src)
88
+ assert len(num_warp_groups_matches) == 0 or len(num_warp_groups_matches) == 1, \
89
+ "Expected triton_gpu.num-warp-groups-per-cta attribute to appear 0 or 1 times"
90
+ if num_warp_groups_matches:
91
+ num_warps *= int(num_warp_groups_matches[0])
92
+
93
+ return num_warps
94
+
95
+
96
class ASTSource:
    """Compilation source backed by a Python @jit function's AST."""

    def __init__(self, fn, signature, constants=None, attrs=None) -> None:
        self.fn = fn
        self.ext = "ttir"  # an AST source always lowers to TTIR first
        self.name = fn.__name__
        self.signature = signature
        self.constants = constants
        self.attrs = attrs
        # Accept "ty0, ty1, ..." shorthand and normalize to {index: type}.
        if isinstance(self.signature, str):
            parts = self.signature.split(",")
            self.signature = {i: ty.strip() for i, ty in enumerate(parts)}
        if self.constants is None:
            self.constants = dict()
        if self.attrs is None:
            self.attrs = AttrsDescriptor()

    def hash(self):
        """Cache key covering code, specialization, signature and constants."""
        key = f"{self.fn.cache_key}-{self.attrs.hash()}-{self.signature.values()}-{self.constants}"
        return hashlib.md5(key.encode("utf-8")).hexdigest()

    def make_ir(self, options):
        return ast_to_ttir(self.fn, self, options=options)

    def metadata(self):
        # TODO: remove once TMA support is cleaned up
        return {"ids_of_folded_args": tuple([int(k) for k in self.attrs.ids_of_folded_args])}

    def parse_options(self):
        # AST sources carry no embedded options.
        return dict()
126
+
127
class IRSource:
    """Compilation source backed by an existing IR file (.ttir/.ttgir/.ptx)."""

    def __init__(self, path):
        self.path = path
        path = Path(path)
        self.ext = path.suffix[1:]  # the extension names the starting pipeline stage
        self.src = path.read_text()
        # Recover the kernel name and argument types from the prototype line.
        match = re.search(prototype_pattern[self.ext], self.src, re.MULTILINE)
        self.name = match.group(1)
        arg_list = match.group(2)
        arg_types = re.findall(arg_type_pattern[self.ext], arg_list)
        self.signature = {i: convert_type_repr(ty) for i, ty in enumerate(arg_types)}

    def hash(self):
        # The raw IR text fully determines the build.
        return hashlib.md5(self.src.encode("utf-8")).hexdigest()

    def make_ir(self, options):
        context = ir.context()
        module = ir.parse_mlir_module(self.path, context)
        # The module must keep its context alive.
        module.context = context
        return module

    def metadata(self):
        return dict()

    def parse_options(self):
        # TTGIR already encodes num_warps; surface it as an option.
        if self.ext == "ttgir":
            return {'num_warps': _get_num_warps_from_ir_str(self.src)}
        return dict()
156
+
157
+
158
def compile(src, target=None, options=None):
    """Compile `src` (an ASTSource or a path to an IR file) to a CompiledKernel.

    Results are cached on disk keyed by source, backend, options and
    environment; a metadata-file cache hit short-circuits the pipeline and
    only rebuilds the launcher stub.
    """
    if target is None:
        target = driver.get_current_target()
    backend = CUDABackend(target)
    # create backend
    if not isinstance(src, ASTSource):
        assert isinstance(src, str), "source must be either AST or a filepath"
        src = IRSource(src)
    # Options embedded in the source (e.g. num_warps in TTGIR) are merged in.
    extra_options = src.parse_options()
    options = backend.parse_options(dict(options or dict(), **extra_options))
    # create cache manager
    key = f"{src.hash()}-{backend.hash()}-{options.hash()}-{frozenset(sorted(get_env_vars().items()))}"
    hash = hashlib.md5(key.encode("utf-8")).hexdigest()
    fn_cache_manager = get_cache_manager(hash)
    metadata_filename = f"{src.name}.json"
    metadata_group = fn_cache_manager.get_group(metadata_filename) or {}
    metadata_path = metadata_group.get(metadata_filename)
    if metadata_path is not None:
        # cache hit!
        metadata = json.loads(Path(metadata_path).read_text())
        so_path = backend.make_launcher_stub(src, metadata)
        return CompiledKernel(so_path, metadata_path)
    # initialize metadata
    metadata = {
        "target": target,
        **options.__dict__,
        **get_env_vars(),
        **src.metadata(),
    }
    # run compilation pipeline and populate metadata
    stages = dict()
    backend.add_stages(stages, options)
    # Start at the stage matching the source's extension (ttir/ttgir/ptx/...).
    first_stage = list(stages.keys()).index(src.ext)
    module = src.make_ir(options)
    for ext, compile_ir in list(stages.items())[first_stage:]:
        next_module = compile_ir(module, metadata)
        # Each intermediate representation is persisted alongside the metadata.
        metadata_group[f"{src.name}.{ext}"] = fn_cache_manager.put(next_module, f"{src.name}.{ext}")
        module = next_module
    # write-back metadata
    metadata_group[metadata_filename] = fn_cache_manager.put(json.dumps(metadata, default=vars), metadata_filename,
                                                            binary=False)
    fn_cache_manager.put_group(metadata_filename, metadata_group)
    so_path = backend.make_launcher_stub(src, metadata)
    # return handle to compiled kernel
    return CompiledKernel(so_path, metadata_group.get(metadata_filename))
203
+
204
+
205
class CompiledKernel:
    """Handle to a compiled kernel: launcher stub + metadata + lazy GPU binary."""

    # Hooks for external tools to monitor the execution of triton kernels
    # TODO: move out of this namespace since it's a runtime thing
    launch_enter_hook = None
    launch_exit_hook = None

    def __init__(self, so_path, metadata_path):
        metadata_path = Path(metadata_path)
        # initialize launcher
        import importlib.util
        spec = importlib.util.spec_from_file_location("__triton_launcher", so_path)
        mod = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(mod)
        self.run = getattr(mod, "launch")
        # initialize metadata
        self.metadata = json.loads(metadata_path.read_text())
        self.metadata['tensormaps_info'] = [InfoFromBackendForTensorMap(e) for e in self.metadata['tensormaps_info']
                                            ] if 'tensormaps_info' in self.metadata else []
        for i, _ in enumerate(self.metadata["tensormaps_info"]):
            self.metadata["tensormaps_info"][i].ids_of_folded_args = tuple(self.metadata["ids_of_folded_args"])
        # Expose every metadata entry as an attribute (e.g. self.num_warps).
        for key, val in self.metadata.items():
            setattr(self, key, val)
        # stores the text of each level of IR that was generated during compilation
        asm_files = [file for file in metadata_path.parent.glob(f'{metadata_path.stem}.*') if file.suffix != '.json']
        self.asm = {
            file.suffix[1:]: file.read_bytes() if file.suffix[1:] == driver.binary_ext else file.read_text()
            for file in asm_files
        }
        self.kernel = self.asm[driver.binary_ext]
        # binaries are lazily initialized
        # because it involves doing runtime things
        # (e.g., checking amount of shared memory on current device)
        self.module = None
        self.function = None

    def _init_handles(self):
        # Load the GPU binary onto the current device; idempotent.
        if self.module is not None:
            return
        device = driver.get_current_device()
        # not enough shared memory to run the kernel
        max_shared = driver.utils.get_device_properties(device)["max_shared_mem"]
        if self.shared > max_shared:
            raise OutOfResources(self.shared, max_shared, "shared memory")
        # TODO: n_regs, n_spills should be metadata generated when calling `ptxas`
        self.module, self.function, self.n_regs, self.n_spills = driver.utils.load_binary(
            self.name, self.kernel, self.shared, device)

    def __getattribute__(self, name):
        # Accessing `run` lazily loads the binary before the launcher is used.
        if name == 'run':
            self._init_handles()
        return super().__getattribute__(name)

    def __getitem__(self, grid):
        # `kernel[grid](...)` launch syntax: returns a runner bound to `grid`.
        self._init_handles()

        def runner(*args, stream=None):
            args_expand = driver.assemble_tensormap_to_arg(self.tensormaps_info, args)
            if stream is None:
                device = driver.get_current_device()
                stream = driver.get_current_stream(device)
            self.run(grid[0], grid[1], grid[2], self.num_warps, self.num_ctas, self.cluster_dims[0],
                     self.cluster_dims[1], self.cluster_dims[2], self.shared, stream, self.function,
                     CompiledKernel.launch_enter_hook, CompiledKernel.launch_exit_hook, self, *args_expand)

        return runner
llmeval-env/lib/python3.10/site-packages/triton/compiler/errors.py ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import ast
2
+ from typing import Optional, Union
3
+
4
+
5
class CompilationError(Exception):
    """Raised when the Triton frontend fails to process a kernel."""

    # Cap on how many source lines are echoed back in the message.
    source_line_count_max_in_message = 12

    def _format_message(self) -> str:
        node = self.node
        if self.src is None:
            source_excerpt = " <source unavailable>"
        else:
            # Show up to N source lines ending at the offending one, plus a
            # caret under the offending column.
            lines = self.src.split('\n')[:node.lineno]
            lines = lines[-self.source_line_count_max_in_message:]
            if lines:
                lines.append(' ' * node.col_offset + '^')
                source_excerpt = '\n'.join(lines)
            else:
                source_excerpt = " <source empty>"

        message = "at {}:{}:{}".format(node.lineno, node.col_offset, source_excerpt)
        if self.error_message:
            message += '\n' + self.error_message
        return message

    def __init__(self, src: Optional[str], node: ast.AST, error_message: Union[str, None]):
        self.src = src
        self.node = node
        self.error_message = error_message
        self.message = self._format_message()

    def set_source_code(self, src: Optional[str]):
        """Attach source text after the fact and rebuild the message."""
        self.src = src
        self.message = self._format_message()

    def __str__(self):
        return self.message

    def __repr__(self):
        return "{}({!r})".format(type(self).__name__, self.message)

    def __reduce__(self):
        # this is necessary to make CompilationError picklable
        return type(self), (self.src, self.node, self.error_message)
44
+
45
+
46
# Raised when a `tl.static_assert` condition evaluates to False at compile time.
class CompileTimeAssertionFailure(CompilationError):
    """Specific exception for failed tests in `static_assert` invocations"""
    pass
49
+
50
+
51
class UnsupportedLanguageConstruct(CompilationError):
    """Raised for Python syntax the Triton frontend cannot lower to IR."""
    pass
llmeval-env/lib/python3.10/site-packages/triton/compiler/make_launcher.py ADDED
@@ -0,0 +1,297 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import hashlib
2
+ import os
3
+ import tempfile
4
+
5
+ from ..common import _build
6
+ from ..common.backend import get_cuda_version_key
7
+ from ..common.build import is_hip
8
+ from ..runtime.cache import get_cache_manager
9
+ from .utils import generate_cu_signature
10
+
11
+ # ----- stub --------
12
+
13
+
14
def make_so_cache_key(version_hash, signature, constants, ids, **kwargs):
    """Digest identifying a launcher-stub build.

    All pointer types hash identically (as 'ptr'): the stub only cares
    about argument ABI, not pointee types.
    """
    sig = {k: 'ptr' if v[0] == '*' else v for k, v in signature.items()}
    parts = [f"{version_hash}", ''.join(sig.values()), f"{constants}", f"{ids}"]
    parts += [f"{kwargs.get(kw)}" for kw in kwargs]
    key = "-".join(parts)
    return hashlib.md5(key.encode("utf-8")).hexdigest()
22
+
23
+
24
def make_stub(name, signature, constants, ids, **kwargs):
    """Build (or fetch from cache) the compiled launcher-stub .so for a kernel.

    Returns the filesystem path of the shared object.
    """
    # name of files that are cached
    so_cache_key = make_so_cache_key(get_cuda_version_key(), signature, constants, ids, **kwargs)
    so_cache_manager = get_cache_manager(so_cache_key)
    so_name = f"{name}.so"
    # retrieve stub from cache if it exists
    cache_path = so_cache_manager.get_file(so_name)
    if cache_path is None:
        # Cache miss: generate the C launcher source, compile it in a temp
        # dir, and store the resulting binary in the cache.
        with tempfile.TemporaryDirectory() as tmpdir:
            src = generate_launcher(constants, signature, ids)
            src_path = os.path.join(tmpdir, "main.c")
            with open(src_path, "w") as f:
                f.write(src)
            so = _build(name, src_path, tmpdir)
            with open(so, "rb") as f:
                return so_cache_manager.put(f.read(), so_name, binary=True)
    else:
        return cache_path
42
+
43
+
44
+ # ----- source code generation --------
45
+
46
+
47
def ty_to_cpp(ty):
    """Map a signature type string to the C type used in the launcher stub."""
    # Pointers are opaque device addresses regardless of pointee type.
    if ty[0] == '*':
        return "hipDeviceptr_t" if is_hip() else "CUdeviceptr"
    cpp_types = {
        "i1": "int32_t",
        "i8": "int8_t",
        "i16": "int16_t",
        "i32": "int32_t",
        "i64": "int64_t",
        "u32": "uint32_t",
        "u64": "uint64_t",
        "fp16": "float",
        "bf16": "float",
        "fp32": "float",
        "f32": "float",
        "fp64": "double",
    }
    return cpp_types[ty]
64
+
65
+
66
+ def generate_launcher(constants, signature, ids):
67
+ # Record the end of regular arguments;
68
+ # subsequent arguments are architecture-specific descriptors, such as tensor descriptors for CUDA.
69
+ signature, desc_start_idx = generate_cu_signature(constants, signature, ids)
70
+ arg_decls = ', '.join(f"{ty_to_cpp(ty)} arg{i}" for i, ty in signature.items())
71
+
72
+ def _extracted_type(ty):
73
+ if ty[0] == '*':
74
+ return "PyObject*"
75
+ return {
76
+ 'i1': 'int32_t',
77
+ 'i32': 'int32_t',
78
+ 'i64': 'int64_t',
79
+ 'u32': 'uint32_t',
80
+ 'u64': 'uint64_t',
81
+ 'fp16': 'float',
82
+ 'bf16': 'float',
83
+ 'fp32': 'float',
84
+ 'f32': 'float',
85
+ 'fp64': 'double',
86
+ }[ty]
87
+
88
+ def format_of(ty):
89
+ return {
90
+ "PyObject*": "O",
91
+ "float": "f",
92
+ "double": "d",
93
+ "long": "l",
94
+ "uint32_t": "I",
95
+ "int32_t": "i",
96
+ "uint64_t": "K",
97
+ "int64_t": "L",
98
+ }[ty]
99
+
100
+ format = "iiiiiiiiiKKOOO" + ''.join([format_of(_extracted_type(ty)) for ty in signature.values()])
101
+
102
+ # generate glue code
103
+ folded_without_constexprs = [c for c in ids['ids_of_folded_args'] if c not in ids['ids_of_const_exprs']]
104
+ params = [
105
+ i for i in signature.keys()
106
+ if i >= desc_start_idx or (i not in constants and i not in folded_without_constexprs)
107
+ ]
108
+ src = f"""
109
+ #include \"cuda.h\"
110
+ #include <stdbool.h>
111
+ #include <Python.h>
112
+ #include <dlfcn.h>
113
+
114
+ static inline void gpuAssert(CUresult code, const char *file, int line)
115
+ {{
116
+ if (code != CUDA_SUCCESS)
117
+ {{
118
+ const char* prefix = "Triton Error [CUDA]: ";
119
+ const char* str;
120
+ cuGetErrorString(code, &str);
121
+ char err[1024] = {{0}};
122
+ strcat(err, prefix);
123
+ strcat(err, str);
124
+ PyGILState_STATE gil_state;
125
+ gil_state = PyGILState_Ensure();
126
+ PyErr_SetString(PyExc_RuntimeError, err);
127
+ PyGILState_Release(gil_state);
128
+ }}
129
+ }}
130
+
131
+ #define CUDA_CHECK(ans) {{ gpuAssert((ans), __FILE__, __LINE__); }}
132
+
133
+ typedef CUresult (*cuLaunchKernelEx_t)(const CUlaunchConfig* config, CUfunction f, void** kernelParams, void** extra);
134
+
135
+ static cuLaunchKernelEx_t getLaunchKernelExHandle() {{
136
+ // Open the shared library
137
+ void* handle = dlopen("libcuda.so", RTLD_LAZY);
138
+ if (!handle) {{
139
+ PyErr_SetString(PyExc_RuntimeError, "Failed to open libcuda.so");
140
+ return NULL;
141
+ }}
142
+ // Clear any existing error
143
+ dlerror();
144
+ cuLaunchKernelEx_t cuLaunchKernelExHandle = (cuLaunchKernelEx_t)dlsym(handle, "cuLaunchKernelEx");
145
+ // Check for errors
146
+ const char *dlsym_error = dlerror();
147
+ if (dlsym_error) {{
148
+ PyErr_SetString(PyExc_RuntimeError, "Failed to retrieve cuLaunchKernelEx from libcuda.so");
149
+ return NULL;
150
+ }}
151
+ return cuLaunchKernelExHandle;
152
+ }}
153
+
154
+ static void _launch(int gridX, int gridY, int gridZ, int num_warps, int num_ctas, int clusterDimX, int clusterDimY, int clusterDimZ, int shared_memory, CUstream stream, CUfunction function{', ' + arg_decls if len(arg_decls) > 0 else ''}) {{
155
+ void *params[] = {{ {', '.join(f"&arg{i}" for i in params)} }};
156
+ if (gridX*gridY*gridZ > 0) {{
157
+ if (num_ctas == 1) {{
158
+ CUDA_CHECK(cuLaunchKernel(function, gridX, gridY, gridZ, 32*num_warps, 1, 1, shared_memory, stream, params, 0));
159
+ }} else {{
160
+ CUlaunchAttribute launchAttr[2];
161
+ launchAttr[0].id = CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION;
162
+ launchAttr[0].value.clusterDim.x = clusterDimX;
163
+ launchAttr[0].value.clusterDim.y = clusterDimY;
164
+ launchAttr[0].value.clusterDim.z = clusterDimZ;
165
+ launchAttr[1].id = CU_LAUNCH_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE;
166
+ launchAttr[1].value.clusterSchedulingPolicyPreference = CU_CLUSTER_SCHEDULING_POLICY_SPREAD;
167
+ CUlaunchConfig config;
168
+ config.gridDimX = gridX * clusterDimX;
169
+ config.gridDimY = gridY * clusterDimY;
170
+ config.gridDimZ = gridZ * clusterDimZ;
171
+ config.blockDimX = 32 * num_warps;
172
+ config.blockDimY = 1;
173
+ config.blockDimZ = 1;
174
+ config.sharedMemBytes = shared_memory;
175
+ config.hStream = stream;
176
+ config.attrs = launchAttr;
177
+ config.numAttrs = 2;
178
+ static cuLaunchKernelEx_t cuLaunchKernelExHandle = NULL;
179
+ if (cuLaunchKernelExHandle == NULL) {{
180
+ cuLaunchKernelExHandle = getLaunchKernelExHandle();
181
+ }}
182
+ CUDA_CHECK(cuLaunchKernelExHandle(&config, function, params, 0));
183
+ }}
184
+ }}
185
+ }}
186
+
187
+ typedef struct _DevicePtrInfo {{
188
+ CUdeviceptr dev_ptr;
189
+ bool valid;
190
+ }} DevicePtrInfo;
191
+
192
+ static inline DevicePtrInfo getPointer(PyObject *obj, int idx) {{
193
+ DevicePtrInfo ptr_info;
194
+ ptr_info.dev_ptr = 0;
195
+ ptr_info.valid = true;
196
+ if (PyLong_Check(obj)) {{
197
+ ptr_info.dev_ptr = PyLong_AsUnsignedLongLong(obj);
198
+ return ptr_info;
199
+ }}
200
+ if (obj == Py_None) {{
201
+ // valid nullptr
202
+ return ptr_info;
203
+ }}
204
+ PyObject *ptr = PyObject_GetAttrString(obj, "data_ptr");
205
+ if(ptr){{
206
+ PyObject *empty_tuple = PyTuple_New(0);
207
+ PyObject *ret = PyObject_Call(ptr, empty_tuple, NULL);
208
+ Py_DECREF(empty_tuple);
209
+ Py_DECREF(ptr);
210
+ if (!PyLong_Check(ret)) {{
211
+ PyErr_SetString(PyExc_TypeError, "data_ptr method of Pointer object must return 64-bit int");
212
+ ptr_info.valid = false;
213
+ return ptr_info;
214
+ }}
215
+ ptr_info.dev_ptr = PyLong_AsUnsignedLongLong(ret);
216
+ if(!ptr_info.dev_ptr)
217
+ return ptr_info;
218
+ uint64_t dev_ptr;
219
+ int status = cuPointerGetAttribute(&dev_ptr, CU_POINTER_ATTRIBUTE_DEVICE_POINTER, ptr_info.dev_ptr);
220
+ if (status == CUDA_ERROR_INVALID_VALUE) {{
221
+ PyErr_Format(PyExc_ValueError,
222
+ "Pointer argument (at %d) cannot be accessed from Triton (cpu tensor?)", idx);
223
+ ptr_info.valid = false;
224
+ }}
225
+ ptr_info.dev_ptr = dev_ptr;
226
+ Py_DECREF(ret); // Thanks ChatGPT!
227
+ return ptr_info;
228
+ }}
229
+ PyErr_SetString(PyExc_TypeError, "Pointer argument must be either uint64 or have data_ptr method");
230
+ ptr_info.valid = false;
231
+ return ptr_info;
232
+ }}
233
+
234
+ static PyObject* launch(PyObject* self, PyObject* args) {{
235
+ int gridX, gridY, gridZ;
236
+ uint64_t _stream;
237
+ uint64_t _function;
238
+ int num_warps;
239
+ int num_ctas;
240
+ int clusterDimX;
241
+ int clusterDimY;
242
+ int clusterDimZ;
243
+ int shared_memory;
244
+ PyObject *launch_enter_hook = NULL;
245
+ PyObject *launch_exit_hook = NULL;
246
+ PyObject *compiled_kernel = NULL;
247
+ {' '.join([f"{_extracted_type(ty)} _arg{i}; " for i, ty in signature.items()])}
248
+ if(!PyArg_ParseTuple(args, \"{format}\", &gridX, &gridY, &gridZ, &num_warps, &num_ctas, &clusterDimX, &clusterDimY, &clusterDimZ, &shared_memory, &_stream, &_function, &launch_enter_hook, &launch_exit_hook, &compiled_kernel{', ' + ', '.join(f"&_arg{i}" for i, ty in signature.items()) if len(signature) > 0 else ''})) {{
249
+ return NULL;
250
+ }}
251
+
252
+ if (launch_enter_hook != Py_None && !PyObject_CallObject(launch_enter_hook, args)) {{
253
+ return NULL;
254
+ }}
255
+
256
+
257
+ // raise exception asap
258
+ {"; ".join([f"DevicePtrInfo ptr_info{i} = getPointer(_arg{i}, {i}); if (!ptr_info{i}.valid) return NULL;" if ty[0] == "*" else "" for i, ty in signature.items()])};
259
+ Py_BEGIN_ALLOW_THREADS;
260
+ _launch(gridX, gridY, gridZ, num_warps, num_ctas, clusterDimX, clusterDimY, clusterDimZ, shared_memory, (CUstream)_stream, (CUfunction)_function{', ' + ', '.join(f"ptr_info{i}.dev_ptr" if ty[0]=="*" else f"_arg{i}"for i, ty in signature.items()) if len(signature) > 0 else ''});
261
+ Py_END_ALLOW_THREADS;
262
+ if (PyErr_Occurred()) {{
263
+ return NULL;
264
+ }}
265
+
266
+ if (launch_exit_hook != Py_None && !PyObject_CallObject(launch_exit_hook, args)) {{
267
+ return NULL;
268
+ }}
269
+
270
+ // return None
271
+ Py_INCREF(Py_None);
272
+ return Py_None;
273
+ }}
274
+
275
+ static PyMethodDef ModuleMethods[] = {{
276
+ {{"launch", launch, METH_VARARGS, "Entry point for all kernels with this signature"}},
277
+ {{NULL, NULL, 0, NULL}} // sentinel
278
+ }};
279
+
280
+ static struct PyModuleDef ModuleDef = {{
281
+ PyModuleDef_HEAD_INIT,
282
+ \"__triton_launcher\",
283
+ NULL, //documentation
284
+ -1, //size
285
+ ModuleMethods
286
+ }};
287
+
288
+ PyMODINIT_FUNC PyInit___triton_launcher(void) {{
289
+ PyObject *m = PyModule_Create(&ModuleDef);
290
+ if(m == NULL) {{
291
+ return NULL;
292
+ }}
293
+ PyModule_AddFunctions(m, ModuleMethods);
294
+ return m;
295
+ }}
296
+ """
297
+ return src
llmeval-env/lib/python3.10/site-packages/triton/compiler/utils.py ADDED
@@ -0,0 +1,282 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) 2023 NVIDIA Corporation & Affiliates. All rights reserved.
2
+ #
3
+ # Permission is hereby granted, free of charge, to any person obtaining
4
+ # a copy of this software and associated documentation files
5
+ # (the "Software"), to deal in the Software without restriction,
6
+ # including without limitation the rights to use, copy, modify, merge,
7
+ # publish, distribute, sublicense, and/or sell copies of the Software,
8
+ # and to permit persons to whom the Software is furnished to do so,
9
+ # subject to the following conditions:
10
+ #
11
+ # The above copyright notice and this permission notice shall be
12
+ # included in all copies or substantial portions of the Software.
13
+ #
14
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
15
+ # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
16
+ # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
17
+ # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
18
+ # CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
19
+ # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
20
+ # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
21
+
22
+ from __future__ import annotations
23
+
24
+ from ..runtime import driver
25
+
26
+
27
+ def generate_cu_signature(constants, signature, ids):
28
+ # CUtensorMap*s are always the last arguments
29
+ num_regular_signatures = max(signature.keys()) + 1 if len(signature) > 0 else 0
30
+ if ids["ids_of_tensormaps"] is not None:
31
+ for i, _ in enumerate(ids["ids_of_tensormaps"]):
32
+ signature[num_regular_signatures + i] = '*CUtensorMap'
33
+ return signature, num_regular_signatures
34
+
35
+
36
+ def dummy_tensormaps_info(n=2):
37
+ ret = []
38
+ for i in range(n):
39
+ ret.append(InfoFromBackendForTensorMap(dummy=True))
40
+ return ret
41
+
42
+
43
+ def parse_tma_info(infos, ids_of_folded_args):
44
+ ret = []
45
+ for info in infos:
46
+ e = InfoFromBackendForTensorMap(infos=info)
47
+ e.ids_of_folded_args = ids_of_folded_args
48
+ ret.append(e)
49
+ return ret
50
+
51
+
52
+ def get_tma_mapping(tensormaps_info):
53
+ ret = {}
54
+ if tensormaps_info is not None:
55
+ for i, e in enumerate(tensormaps_info):
56
+ ret.update(e.get_address_tma_mapping())
57
+ else:
58
+ ret = None
59
+ return ret
60
+
61
+
62
+ def get_ids_of_tensormaps(tensormaps_info):
63
+ ret = None
64
+ # order is not relevant
65
+ if tensormaps_info is not None:
66
+ ret = [e.get_id_of_tensormap() for e in tensormaps_info]
67
+ return ret
68
+
69
+
70
+ # decouple information for tensormap from backend
71
+ # please ignore the naming style, xx_yy is compiler.py style, xxYy is to comply with cuda tensormap style
72
+ # mixing style is for readability
73
+ class InfoFromBackendForTensorMap:
74
+ N = 2
75
+ n = 0
76
+ ntma = 0
77
+
78
+ def __init__(self, infos=None, dummy=False):
79
+ self.dummy = dummy
80
+ self.ids_of_folded_args = ()
81
+ if not dummy and not isinstance(infos, dict):
82
+ self._extract_info_from_backend(infos)
83
+ elif not dummy and isinstance(infos, dict):
84
+ self._extract_info_from_dict(infos)
85
+ elif dummy:
86
+ self._dummy()
87
+
88
+ def _dummy(self):
89
+ assert InfoFromBackendForTensorMap.n < InfoFromBackendForTensorMap.N
90
+ if InfoFromBackendForTensorMap.n == 0:
91
+ self.tensorDataType = driver.utils.CUtensorMapDataType["CU_TENSOR_MAP_DATA_TYPE_FLOAT16"]
92
+ self.tensorRank = 4
93
+ self.globalAddressArgIdx = 0
94
+ self.globalStridesArgIdx = [7, 6, -1, -1]
95
+ self.globalDimsArgIdx = [5, 3, -1, -1]
96
+ self.boxDims = [16, 64, 1, 1]
97
+ self.elementStrides = [1, 1, 1, 1]
98
+ self.interleave = driver.utils.CUtensorMapInterleave["CU_TENSOR_MAP_INTERLEAVE_NONE"]
99
+ self.swizzle = driver.utils.CUtensorMapSwizzle["CU_TENSOR_MAP_SWIZZLE_32B"]
100
+ self.l2Promotion = driver.utils.CUtensorMapL2promotion["CU_TENSOR_MAP_L2_PROMOTION_L2_128B"]
101
+ self.TMADescArgIdx = 11
102
+ self.oobFill = driver.utils.CUtensorMapFloatOOBfill["CU_TENSOR_MAP_FLOAT_OOB_FILL_NONE"]
103
+ InfoFromBackendForTensorMap.n += 1
104
+ return
105
+ if InfoFromBackendForTensorMap.n == 1:
106
+ self.tensorDataType = driver.utils.CUtensorMapDataType["CU_TENSOR_MAP_DATA_TYPE_FLOAT16"]
107
+ self.tensorRank = 4
108
+ self.globalAddressArgIdx = 1
109
+ self.globalStridesArgIdx = [7, 6, -1, -1]
110
+ self.globalDimsArgIdx = [5, 3, -1, -1]
111
+ self.boxDims = [16, 64, 1, 1]
112
+ self.elementStrides = [1, 1, 1, 1]
113
+ self.interleave = driver.utils.CUtensorMapInterleave["CU_TENSOR_MAP_INTERLEAVE_NONE"]
114
+ self.swizzle = driver.utils.CUtensorMapSwizzle["CU_TENSOR_MAP_SWIZZLE_32B"]
115
+ self.l2Promotion = driver.utils.CUtensorMapL2promotion["CU_TENSOR_MAP_L2_PROMOTION_L2_128B"]
116
+ self.TMADescArgIdx = 12
117
+ self.oobFill = driver.utils.CUtensorMapFloatOOBfill["CU_TENSOR_MAP_FLOAT_OOB_FILL_NONE"]
118
+ InfoFromBackendForTensorMap.n += 1
119
+ return
120
+
121
+ def _extract_info_from_backend(self, infos):
122
+ self.tensorDataType = infos.tensorDataType
123
+ self.tensorRank = infos.tensorRank
124
+ self.globalAddressArgIdx = infos.globalAddressArgIdx
125
+ self.globalStridesArgIdx = infos.globalStridesArgIdx
126
+ self.globalDimsArgIdx = infos.globalDimsArgIdx
127
+ self.boxDims = infos.boxDims
128
+ self.elementStrides = infos.elementStrides
129
+ self.interleave = infos.interleave
130
+ self.swizzle = infos.swizzle
131
+ self.l2Promotion = infos.l2Promotion
132
+ self.oobFill = infos.oobFill
133
+ self.TMADescArgIdx = infos.TMADescArgIdx
134
+
135
+ # dict could be from cached metadata json
136
+ def _extract_info_from_dict(self, infos: dict):
137
+ self.tensorDataType = infos['tensorDataType']
138
+ self.tensorRank = infos['tensorRank']
139
+ self.globalAddressArgIdx = infos['globalAddressArgIdx']
140
+ self.globalStridesArgIdx = infos['globalStridesArgIdx']
141
+ self.globalDimsArgIdx = infos['globalDimsArgIdx']
142
+ self.boxDims = infos['boxDims']
143
+ self.elementStrides = infos['elementStrides']
144
+ self.interleave = infos['interleave']
145
+ self.swizzle = infos['swizzle']
146
+ self.l2Promotion = infos['l2Promotion']
147
+ self.oobFill = infos['oobFill']
148
+ self.TMADescArgIdx = infos['TMADescArgIdx']
149
+
150
+ def get_address_tma_mapping(self):
151
+ return {self.globalAddressArgIdx: self.TMADescArgIdx + len(self.ids_of_folded_args)}
152
+
153
+ def get_id_of_tensormap(self):
154
+ return self.TMADescArgIdx + len(self.ids_of_folded_args)
155
+
156
+ def getTMADescArgIdx(self):
157
+ return self.TMADescArgIdx
158
+
159
+ # dtype:cuda.CUtensorMapDataType | int
160
+ def bytes_from_type(self, dtype):
161
+ return {
162
+ driver.utils.CUtensorMapDataType["CU_TENSOR_MAP_DATA_TYPE_UINT8"]: 1,
163
+ driver.utils.CUtensorMapDataType["CU_TENSOR_MAP_DATA_TYPE_UINT16"]: 2,
164
+ driver.utils.CUtensorMapDataType["CU_TENSOR_MAP_DATA_TYPE_UINT32"]: 4,
165
+ driver.utils.CUtensorMapDataType["CU_TENSOR_MAP_DATA_TYPE_INT32"]: 4,
166
+ driver.utils.CUtensorMapDataType["CU_TENSOR_MAP_DATA_TYPE_UINT64"]: 8,
167
+ driver.utils.CUtensorMapDataType["CU_TENSOR_MAP_DATA_TYPE_INT64"]: 8,
168
+ driver.utils.CUtensorMapDataType["CU_TENSOR_MAP_DATA_TYPE_FLOAT16"]: 2,
169
+ driver.utils.CUtensorMapDataType["CU_TENSOR_MAP_DATA_TYPE_FLOAT32"]: 4,
170
+ driver.utils.CUtensorMapDataType["CU_TENSOR_MAP_DATA_TYPE_FLOAT64"]: 8,
171
+ driver.utils.CUtensorMapDataType["CU_TENSOR_MAP_DATA_TYPE_BFLOAT16"]: 2,
172
+ driver.utils.CUtensorMapDataType["CU_TENSOR_MAP_DATA_TYPE_FLOAT32_FTZ"]: 4,
173
+ driver.utils.CUtensorMapDataType["CU_TENSOR_MAP_DATA_TYPE_TFLOAT32"]: 4,
174
+ driver.utils.CUtensorMapDataType["CU_TENSOR_MAP_DATA_TYPE_TFLOAT32_FTZ"]: 4
175
+ }[dtype]
176
+
177
+ def getTensorMapDataType(self):
178
+ return self.tensorDataType
179
+
180
+ def getInterleave(self):
181
+ return self.interleave
182
+
183
+ def getSwizzle(self):
184
+ return self.swizzle
185
+
186
+ def getL2Promotion(self):
187
+ return self.l2Promotion
188
+
189
+ def getOobFill(self):
190
+ return self.oobFill
191
+
192
+ def getTensorRank(self):
193
+ return self.tensorRank
194
+
195
+ def getBoxDims(self):
196
+ return self.boxDims
197
+
198
+ def getElementStrides(self):
199
+ return self.elementStrides
200
+
201
+ def getGlobalAddress(self, args):
202
+ idx = self.getOriginArgIdx(self.globalAddressArgIdx, args)
203
+ return args[idx]
204
+
205
+ # args, captured kernel args in runtime
206
+ def getGlobalDims(self, args):
207
+ shape = []
208
+ for e in self.globalDimsArgIdx:
209
+ t = 1
210
+ # < 0 means folded arg or constant (-1 - value)
211
+ # -1 means extended dim which is 1, -2 means folded arg with constant 1 (-1 - value)
212
+ if e == -1:
213
+ t = 1
214
+ elif e < 0 and e != -1:
215
+ t = -e - 1
216
+ else:
217
+ idx = self.getOriginArgIdx(e, args)
218
+ t = args[idx]
219
+ shape.append(t)
220
+ return shape
221
+
222
+ def getGlobalStrides(self, args):
223
+ t_globalDims = [int(e) for e in self.getGlobalDims(args)]
224
+ t_globalStridesArgIdx = self.globalStridesArgIdx.copy()
225
+ strides_in_elements = []
226
+ # todo: get all stride from backend even in extended mode
227
+ for i in range(self.tensorRank):
228
+ t = 1
229
+ if t_globalStridesArgIdx[i] == -1:
230
+ for ii in range(i):
231
+ t *= t_globalDims[ii]
232
+ # -2 means the sride in arguments is folded constant 1, we don't use 1 because it can not be distinguished from index 1
233
+ elif t_globalStridesArgIdx[i] < 0:
234
+ t = -1 - t_globalStridesArgIdx[i]
235
+ else:
236
+ new_idx = self.getOriginArgIdx(t_globalStridesArgIdx[i], args)
237
+ t = args[new_idx]
238
+
239
+ strides_in_elements.append(t)
240
+
241
+ strides_in_elements = strides_in_elements[1:]
242
+ strides_in_bytes = [e * self.bytes_from_type(self.tensorDataType) for e in strides_in_elements]
243
+ return strides_in_bytes
244
+
245
+ def getOriginArgIdx(self, idx, args):
246
+ if self.ids_of_folded_args:
247
+ ids_before_folding_arg = [i for i in range(len(args)) if i not in self.ids_of_folded_args]
248
+ return ids_before_folding_arg[idx]
249
+ else:
250
+ return idx
251
+
252
+ def tensormap(self, args):
253
+ return driver.utils.cuTensorMapEncodeTiled(
254
+ self.getTensorMapDataType(),
255
+ self.getTensorRank(),
256
+ self.getGlobalAddress(args),
257
+ self.getGlobalDims(args),
258
+ self.getGlobalStrides(args),
259
+ self.getBoxDims(),
260
+ self.getElementStrides(),
261
+ self.getInterleave(),
262
+ self.getSwizzle(),
263
+ self.getL2Promotion(),
264
+ self.getOobFill(),
265
+ )
266
+
267
+ # make hashable to use as partial key in cache
268
+ def __hash__(self):
269
+ return hash((self.ids_of_folded_args, self.globalAddressArgIdx, tuple(self.globalDimsArgIdx),
270
+ tuple(self.globalStridesArgIdx), self.tensorDataType, self.tensorRank, tuple(self.boxDims),
271
+ tuple(self.elementStrides), self.interleave, self.swizzle, self.l2Promotion, self.oobFill))
272
+
273
+ def __eq__(self, other):
274
+ if not isinstance(other, self.__class__):
275
+ return False
276
+ return (self.ids_of_folded_args, self.globalAddressArgIdx, self.globalDimsArgIdx, self.globalStridesArgIdx,
277
+ self.tensorDataType, self.tensorRank, self.boxDims, self.elementStrides, self.interleave, self.swizzle,
278
+ self.l2Promotion,
279
+ self.oobFill) == (other.ids_of_folded_args, other.globalAddressArgIdx, other.globalDimsArgIdx,
280
+ other.globalStridesArgIdx, other.tensorDataType, other.tensorRank, other.boxDims,
281
+ other.elementStrides, other.interleave, other.swizzle, other.l2Promotion,
282
+ other.oobFill)
llmeval-env/lib/python3.10/site-packages/triton/language/extra/__init__.py ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ from . import cuda
2
+
3
+ __all__ = ['cuda']
llmeval-env/lib/python3.10/site-packages/triton/language/extra/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (245 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/triton/language/extra/__pycache__/cuda.cpython-310.pyc ADDED
Binary file (807 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/triton/language/extra/cuda.py ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from .. import core
2
+
3
+
4
+ @core.extern
5
+ def globaltimer(_builder=None):
6
+ return core.inline_asm_elementwise("mov.u64 $0, %globaltimer;", "=l", [], dtype=core.int64, is_pure=False, pack=1,
7
+ _builder=_builder)
8
+
9
+
10
+ @core.extern
11
+ def smid(_builder=None):
12
+ return core.inline_asm_elementwise("mov.u32 $0, %smid;", "=r", [], dtype=core.int32, is_pure=True, pack=1,
13
+ _builder=_builder)
14
+
15
+
16
+ @core.builtin
17
+ def num_threads(_builder=None):
18
+ return core.constexpr(_builder.options.num_warps * 32)
llmeval-env/lib/python3.10/site-packages/triton/ops/__init__.py ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # from .conv import _conv, conv
2
+ from . import blocksparse
3
+ from .cross_entropy import _cross_entropy, cross_entropy
4
+ from .flash_attention import attention
5
+ from .matmul import _matmul, matmul
6
+
7
+ __all__ = [
8
+ "blocksparse",
9
+ "_cross_entropy",
10
+ "cross_entropy",
11
+ "_matmul",
12
+ "matmul",
13
+ "attention",
14
+ ]
llmeval-env/lib/python3.10/site-packages/triton/ops/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (427 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/triton/ops/__pycache__/cross_entropy.cpython-310.pyc ADDED
Binary file (3.4 kB). View file
 
llmeval-env/lib/python3.10/site-packages/triton/ops/__pycache__/flash_attention.cpython-310.pyc ADDED
Binary file (8.92 kB). View file
 
llmeval-env/lib/python3.10/site-packages/triton/ops/__pycache__/matmul.cpython-310.pyc ADDED
Binary file (5.58 kB). View file
 
llmeval-env/lib/python3.10/site-packages/triton/ops/__pycache__/matmul_perf_model.cpython-310.pyc ADDED
Binary file (4.28 kB). View file
 
llmeval-env/lib/python3.10/site-packages/triton/ops/blocksparse/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (287 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/triton/ops/blocksparse/__pycache__/matmul.cpython-310.pyc ADDED
Binary file (10.2 kB). View file
 
llmeval-env/lib/python3.10/site-packages/triton/ops/blocksparse/matmul.py ADDED
@@ -0,0 +1,432 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+
3
+ from ... import cdiv, heuristics, jit
4
+ from ... import language as tl
5
+
6
+ # ********************************************************
7
+ # --------------------------------------------------------
8
+ # Sparse = Dense x Dense (SDD)
9
+ # This operation uses super-blocking to make sure that
10
+ # it's done efficiently when small blocks can be grouped
11
+ # together
12
+ # --------------------------------------------------------
13
+ # ********************************************************
14
+
15
+
16
+ @heuristics({
17
+ 'EVEN_K': lambda nargs: nargs['K'] % nargs['TILE_K'] == 0,
18
+ })
19
+ @jit
20
+ def _sdd_kernel(A, B, C, #
21
+ stride_za, stride_ha, stride_ma, stride_ak, #
22
+ stride_zb, stride_hb, stride_bk, stride_nb, #
23
+ stride_zc, stride_hc, stride_mc, stride_nc, #
24
+ K, grid_offset, lut, #
25
+ TILE_M: tl.constexpr, TILE_N: tl.constexpr, TILE_K: tl.constexpr, #
26
+ BLOCK: tl.constexpr, EVEN_K: tl.constexpr #
27
+ ):
28
+ # ------------ #
29
+ # - Prologue - #
30
+ # ------------ #
31
+ block_id = tl.program_id(0) + grid_offset
32
+ lut += block_id * 3
33
+ # offsets
34
+ off_z = tl.program_id(2) # batch
35
+ off_h = tl.load(lut + 0) # head
36
+
37
+ # initialize pointers to A
38
+ start_am = tl.load(lut + 1)
39
+ offs_am = start_am * BLOCK + (tl.arange(0, TILE_M) % BLOCK)
40
+ offs_ak = tl.arange(0, TILE_K)
41
+ a_ptrs = A \
42
+ + off_z * stride_za \
43
+ + off_h * stride_ha \
44
+ + offs_am[:, None] * stride_ma \
45
+ + offs_ak[None, :] * stride_ak
46
+ # initialize pointers to B
47
+ start_bn = tl.load(lut + 2)
48
+ offs_bn = start_bn * BLOCK + (tl.arange(0, TILE_N) % BLOCK)
49
+ offs_bk = tl.arange(0, TILE_K)
50
+ b_ptrs = B \
51
+ + off_z * stride_zb \
52
+ + off_h * stride_hb \
53
+ + offs_bn[None, :] * stride_nb \
54
+ + offs_bk[:, None] * stride_bk
55
+ # ---------------- #
56
+ # Inner Loop #
57
+ # ---------------- #
58
+ acc = tl.zeros((TILE_M, TILE_N), dtype=tl.float32)
59
+ for k in range(K, 0, -TILE_K):
60
+ if EVEN_K:
61
+ a = tl.load(a_ptrs)
62
+ b = tl.load(b_ptrs)
63
+ else:
64
+ a = tl.load(a_ptrs, mask=offs_ak[None, :] < k, other=0.)
65
+ b = tl.load(b_ptrs, mask=offs_bk[:, None] < k, other=0.)
66
+ acc += tl.dot(a, b, out_dtype=tl.float32)
67
+ a_ptrs += TILE_K * stride_ak
68
+ b_ptrs += TILE_K * stride_bk
69
+ c = acc.to(C.dtype.element_ty)
70
+ # ---------------- #
71
+ # Epilogue #
72
+ # ---------------- #
73
+ offs_cm = tl.arange(0, TILE_M) % BLOCK
74
+ offs_cn = tl.arange(0, TILE_N) % BLOCK
75
+ pc = C \
76
+ + off_z * stride_zc \
77
+ + block_id * stride_hc \
78
+ + offs_cm[:, None] * stride_mc \
79
+ + offs_cn[None, :] * stride_nc
80
+ tl.store(pc, c, mask=True)
81
+
82
+
83
+ def sdd_matmul(a, b, trans_a, trans_b, trans_c, spdims, block, lut, widths, out=None):
84
+ if a.stride(2) != 1 and a.stride(3) != 1:
85
+ a = a.contiguous()
86
+ if b.stride(2) != 1 and b.stride(3) != 1:
87
+ b = b.contiguous()
88
+ # (A * B)^T = B^T * A^T
89
+ if trans_c:
90
+ a, b = b, a
91
+ trans_a, trans_b = not trans_b, not trans_a
92
+ # shape constraints
93
+ a_dim = -2 if trans_a else -1
94
+ b_dim = -1 if trans_b else -2
95
+ Ka, Kb = a.shape[a_dim], b.shape[b_dim]
96
+ if Ka != Kb:
97
+ raise ValueError(f"Inner dimension mismatch (A: {Ka} vs B: {Kb})")
98
+ # allocate output
99
+ if out is None:
100
+ c = torch.empty((a.shape[0], lut.shape[0], block, block), dtype=a.dtype, device=a.device)
101
+ else:
102
+ assert out.shape == (a.shape[0], lut.shape[0], block, block)
103
+ c = out
104
+ grid = [c.shape[1], 1, c.shape[0]]
105
+ _sdd_kernel[grid](
106
+ a, b, c, #
107
+ a.stride(0), a.stride(1), a.stride(3 if trans_a else 2), a.stride(2 if trans_a else 3), #
108
+ b.stride(0), b.stride(1), b.stride(3 if trans_b else 2), b.stride(2 if trans_b else 3), #
109
+ c.stride(0), c.stride(1), c.stride(2), c.stride(3), #
110
+ Ka, 0, lut, #
111
+ TILE_M=block, TILE_N=block, TILE_K=32, BLOCK=block, num_stages=4, #
112
+ num_warps=4 #
113
+ )
114
+ return c
115
+
116
+
117
+ def sdd_lut(layout, block, device):
118
+ lut = layout.nonzero(as_tuple=False).to(device).int()
119
+ lut = lut.contiguous()
120
+ return lut, None
121
+
122
+
123
+ # -----------------------------
124
+ # Dense = Sparse x Dense (DSD)
125
+ # This operation uses a look-up table that contains pre-computed pointer increments
126
+ # in order to minimize computations in the inner loop of the matmul kernel.
127
+ # -----------------------------
128
+
129
+
130
+ @jit
131
+ def _dsd_kernel(A, B, C, #
132
+ stride_az, stride_ha, stride_am, stride_ak, #
133
+ stride_zb, stride_hb, stride_bk, stride_bn, #
134
+ stride_zc, stride_hc, stride_cm, stride_cn, #
135
+ DS0, DS1, lut, #
136
+ TILE_M: tl.constexpr, TILE_N: tl.constexpr, TILE_K: tl.constexpr, #
137
+ GROUP_SIZE_M: tl.constexpr, BLOCK: tl.constexpr #
138
+ ):
139
+ # ------------ #
140
+ # - Prologue - #
141
+ # ------------ #
142
+ pid_m = tl.program_id(0)
143
+ pid_n = tl.program_id(1)
144
+ num_pid_m = tl.num_programs(0)
145
+ num_pid_n = tl.num_programs(1)
146
+ pid_n, pid_m = tl.swizzle2d(pid_n, pid_m, num_pid_n, num_pid_m, GROUP_SIZE_M)
147
+ pidz = tl.program_id(2)
148
+ header = lut + pid_n * 4
149
+ offset = tl.load(header + 0)
150
+ K = tl.load(header + 1)
151
+ column = tl.load(header + 2)
152
+ off_h = tl.load(header + 3)
153
+ pinc = lut + offset
154
+ # initialize pointers to A (sparse)
155
+ block_id = tl.load(pinc + 1)
156
+ block_id = tl.multiple_of(block_id, 8) # compiler hint
157
+ offs_am = tl.arange(0, TILE_M)
158
+ offs_ak = tl.arange(0, TILE_K)
159
+ pa = A + pidz * stride_az \
160
+ + block_id * stride_ha \
161
+ + offs_am[:, None] * stride_am \
162
+ + offs_ak[None, :] * stride_ak
163
+ # initialize pointers to B (dense)
164
+ offs_bn = pid_m * TILE_N + tl.arange(0, TILE_N)
165
+ offs_bn = tl.max_contiguous(tl.multiple_of(offs_bn % DS0, TILE_N), TILE_N)
166
+ start_bk = tl.load(pinc)
167
+ start_bk = tl.multiple_of(start_bk, 8) # compiler hint
168
+ offs_bk = start_bk + tl.arange(0, TILE_K)
169
+ pb = B + pidz * stride_zb \
170
+ + off_h * stride_hb \
171
+ + offs_bn[None, :] * stride_bn \
172
+ + offs_bk[:, None] * stride_bk
173
+ # ---------------- #
174
+ # Inner Loop #
175
+ # ---------------- #
176
+ acc = tl.zeros((TILE_M, TILE_N), dtype=tl.float32)
177
+ pinc += 2
178
+ inc_a = tl.load(pinc + 1)
179
+ inc_a = tl.multiple_of(inc_a, 8)
180
+ inc_b = tl.load(pinc)
181
+ inc_b = tl.multiple_of(inc_b, 8)
182
+ for k in range(K, 0, -TILE_K):
183
+ a = tl.load(pa)
184
+ b = tl.load(pb)
185
+ acc += tl.dot(a, b, out_dtype=tl.float32)
186
+ pa += inc_a
187
+ pb += inc_b * stride_bk
188
+ pinc += 2
189
+ inc_a = tl.load(pinc + 1)
190
+ inc_a = tl.multiple_of(inc_a, 8)
191
+ inc_b = tl.load(pinc)
192
+ inc_b = tl.multiple_of(inc_b, 8)
193
+ c = acc.to(C.dtype.element_ty)
194
+ # initialize pointers to C
195
+ offs_cm = column * TILE_M + tl.arange(0, TILE_M)
196
+ offs_cn = pid_m * TILE_N + tl.arange(0, TILE_N)
197
+ pc = C \
198
+ + off_h * stride_hc \
199
+ + pidz * stride_zc \
200
+ + offs_cm[:, None] * stride_cm \
201
+ + offs_cn[None, :] * stride_cn
202
+ tl.store(pc, c, mask=offs_cn[None, :] < DS0)
203
+
204
+
205
+ def dsd_matmul(a, b, trans_a, trans_b, trans_c, spdims, block, lut, width, out=None):
206
+ if a.stride(2) != 1 and a.stride(3) != 1:
207
+ a = a.contiguous()
208
+ if b.stride(2) != 1 and b.stride(3) != 1:
209
+ b = b.contiguous()
210
+ # shapes / dtypes
211
+ AS1 = block * spdims[2 if trans_a else 1]
212
+ BS0 = b.size(0)
213
+ BS1 = b.size(1)
214
+ BS3 = b.size(2 if trans_b else 3)
215
+ dtype = a.dtype
216
+ # allocate output
217
+ CS0 = BS0
218
+ CS1 = BS1
219
+ CS2 = BS3 if trans_c else AS1
220
+ CS3 = AS1 if trans_c else BS3
221
+ if out is None:
222
+ c = torch.empty((CS0, CS1, CS2, CS3), dtype=dtype, device=a.device)
223
+ else:
224
+ assert out.shape == (CS0, CS1, CS2, CS3)
225
+ c = out
226
+ # meta-parameter heuristics
227
+ TILE_N = 128
228
+ # compute output
229
+ grid = lambda meta: [cdiv(BS3, meta['TILE_N']), width, BS0]
230
+ _dsd_kernel[grid](
231
+ a, b, c, #
232
+ a.stride(0), a.stride(1), a.stride(3 if trans_a else 2), a.stride(2 if trans_a else 3), #
233
+ b.stride(0), b.stride(1), b.stride(3 if trans_b else 2), b.stride(2 if trans_b else 3), #
234
+ c.stride(0), c.stride(1), c.stride(3 if trans_c else 2), c.stride(2 if trans_c else 3), #
235
+ BS3, AS1, lut, #
236
+ TILE_M=block, TILE_N=TILE_N, TILE_K=min(block, 32), BLOCK=block, num_stages=4, #
237
+ num_warps=4, GROUP_SIZE_M=4 #
238
+ )
239
+ # exit()
240
+ return c
241
+
242
+
243
+ def dsd_lut(layout, block, step, trans, device):
244
+ """
245
+ Generates the look-up table for incrementing pointers in the DSD/DDS matmul.
246
+ Example (BLOCK=32, STEP=16)
247
+ [[1, 0, 0, 1, 0],
248
+ [0, 1, 1, 0, 1],
249
+ [1, 0, 1, 0, 0]]
250
+
251
+ Then the offsets for A are
252
+ [0 , 16, 32, 48] <- row 0
253
+ \\----/ \\----/
254
+ col=0 col=3
255
+ [64, 80, 96, 112, 128, 144] <- row 1
256
+ \\----/ \\----/ \\------/
257
+ col=1 col=2 col=3
258
+ [160, 176, 192, 208]
259
+ which leads to increments table
260
+ [0, 16, 16, 16, || 64, 16, 16, 16, 16, 16, || 160, 16, 16, 16]
261
+
262
+ Because B is dense, the offsets are
263
+ [0, 16, 96, 112] <- row 0
264
+ [32, 48, 64, 80] <- row 1
265
+ [0, 16, 64, 80] <- row 2
266
+ """
267
+ sizes = torch.sum(layout, 2 if trans else 1)
268
+ head_id, col_id = torch.ones_like(sizes).nonzero(as_tuple=True)
269
+ sizes = sizes.flatten()
270
+ segments = sizes * step
271
+ # pointer increments
272
+ if trans:
273
+ nnz = layout.nonzero(as_tuple=False)
274
+ else:
275
+ nnz = layout.transpose(1, 2).nonzero(as_tuple=False)
276
+ num_blocks = nnz.size(0)
277
+ offsets = torch.zeros_like(sizes)
278
+ offsets[1:] = torch.cumsum(sizes[:-1], dim=0)
279
+ offsets = torch.min(offsets, (num_blocks - 1) * torch.ones_like(offsets))
280
+ # -------------------------------
281
+ # dense input pointer increments
282
+ # -------------------------------
283
+ # Note that the inner loop matmul kernel may have a fixed step size (e.g., TILE_K)
284
+ # that is smaller than the block size, so we need to do a bit of extra work
285
+ # to handle this case
286
+ B_idx = nnz[:, 2] * block
287
+ B_incs = B_idx.clone()
288
+ B_incs[1:] -= B_idx[:-1]
289
+ div = block // step
290
+ B_incs = B_incs.view(-1, 1).repeat(1, div)
291
+ B_incs[:, 1:] = step
292
+ B_incs[:, 0] -= (div - 1) * step
293
+ # first increment for each reduction is actually the offset
294
+ B_incs[offsets[segments > 0], 0] = B_idx[offsets[segments > 0]]
295
+ B_incs = B_incs.view(-1)
296
+ # -------------------------------
297
+ # sparse input pointer increments
298
+ # -------------------------------
299
+ # same as above, except that the increments are in the sparse memory layout
300
+ if trans:
301
+ A_idx = torch.arange(num_blocks, device=layout.device)
302
+ else:
303
+ A_idx = torch.tensor([], dtype=torch.int64, device=layout.device)
304
+ current_offset = 0
305
+ for z in range(layout.size(0)):
306
+ layoutw = layout[z, :, :].clone().long()
307
+ msum = layoutw.sum()
308
+ layoutw[layoutw > 0] = 1 + torch.arange(msum, device=layout.device)
309
+ A_idx = torch.cat((A_idx, current_offset + layoutw.T[layoutw.T > 0] - 1))
310
+ current_offset += msum
311
+ A_incs = A_idx * block * block
312
+ A_incs[1:] -= A_idx[:-1] * block * block
313
+ A_incs = A_incs.view(-1, 1).repeat(1, div)
314
+ if trans:
315
+ A_incs[:, 1:] = step
316
+ A_incs[:, 0] -= (div - 1) * step
317
+ else:
318
+ A_incs[:, 1:] = step * block
319
+ A_incs[:, 0] -= (div - 1) * step * block
320
+ A_incs[offsets[segments > 0], 0] = A_idx[offsets[segments > 0]]
321
+ A_incs = A_incs.view(-1)
322
+ # create header
323
+ width = col_id.size(0)
324
+ offsets = offsets * 2 * div + 4 * width
325
+ segments = segments * div
326
+ header = torch.stack((offsets, segments, col_id, head_id), dim=1).view(-1).contiguous()
327
+ # create increments
328
+ incs = torch.stack((B_incs, A_incs), dim=1).view(-1).contiguous()
329
+ # pad by a factor 2*MAX_NUM_STAGES
330
+ # to accommodate pre-fetching inside the kernel
331
+ pad = torch.zeros(20, device=incs.device, dtype=incs.dtype)
332
+ incs = torch.cat((incs, pad))
333
+ # create lut
334
+ lut = torch.cat((header, incs))
335
+ lut = lut.type(torch.int32).to(device)
336
+ # create locks
337
+ return lut, width
338
+
339
+
340
# -----------------------------
# Dense = Dense x Sparse (DDS)
# -----------------------------
# AB = (B^T A^T)^T


def dds_matmul(a, b, trans_a, trans_b, trans_c, spdims, block, lut, width, out=None):
    """Dense = dense x sparse product, expressed through the DSD kernel.

    Relies on the identity AB = (B^T A^T)^T: swap the two operands and
    negate every transposition flag, then dispatch to ``dsd_matmul``.
    All remaining arguments (sparsity descriptor, block size, LUT, width,
    optional output buffer) are forwarded unchanged.
    """
    flipped = (not trans_b, not trans_a, not trans_c)
    return dsd_matmul(b, a, *flipped, spdims, block, lut, width, out=out)
348
+
349
+
350
##############
# MAIN API #
##############


class _matmul(torch.autograd.Function):
    """Autograd node dispatching to the sdd/dsd/dds block-sparse kernels.

    The forward mode string picks the kernel; the two backward products are
    obtained by permuting that mode string and flipping transposition flags.
    """

    fn = {'sdd': sdd_matmul, 'dsd': dsd_matmul, 'dds': dds_matmul}

    @staticmethod
    def forward(ctx, a, b, trans_a, trans_b, trans_c, mode, spdims, block, c_lut, c_width, da_lut, da_width, db_lut,
                db_width, out):
        c = _matmul.fn[mode](a, b, trans_a, trans_b, trans_c, spdims, block, c_lut, c_width, out=out)
        # stash tensors and every non-tensor argument backward() will need
        ctx.save_for_backward(a, b)
        for attr, value in (('da_lut', da_lut), ('da_width', da_width),
                            ('db_lut', db_lut), ('db_width', db_width),
                            ('mode', mode), ('spdims', spdims), ('block', block),
                            ('trans_a', trans_a), ('trans_b', trans_b), ('trans_c', trans_c),
                            ('has_out', out is not None)):
            setattr(ctx, attr, value)
        return c

    @staticmethod
    def backward(ctx, dc):
        a, b = ctx.saved_tensors
        da = db = None
        mode = ctx.mode
        # gradient w.r.t. a: kernel mode is forward mode with chars 0 and 1 swapped
        if ctx.needs_input_grad[0]:
            da = _matmul.fn[mode[1] + mode[0] + mode[2]](dc, b, ctx.trans_c, not ctx.trans_b, ctx.trans_a,
                                                         ctx.spdims, ctx.block, ctx.da_lut, ctx.da_width)
        # gradient w.r.t. b: kernel mode is forward mode reversed
        if ctx.needs_input_grad[1]:
            db = _matmul.fn[mode[2] + mode[1] + mode[0]](a, dc, not ctx.trans_a, ctx.trans_c, ctx.trans_b,
                                                         ctx.spdims, ctx.block, ctx.db_lut, ctx.db_width)
        # the preallocated output (if any) receives dc as its gradient
        dout = dc if ctx.has_out else None
        # one slot per forward() argument: a, b, then 12 non-differentiable args, then out
        return (da, db) + (None,) * 12 + (dout,)
398
+
399
+
400
class matmul:
    """User-facing block-sparse matmul operator.

    Precomputes, once per layout, the lookup tables for the forward product
    and for both backward products, then applies the autograd function on
    every call.
    """

    def __init__(self, layout, block, mode, device, trans_a=False, trans_b=False, trans_c=False):
        if mode not in ('sdd', 'dsd', 'dds'):
            raise NotImplementedError('Supported modes are: sdd, dsd, dds')
        self.block = block
        self.mode = mode
        self.trans_a = trans_a
        self.trans_b = trans_b
        self.trans_c = trans_c
        self.layout = layout
        self.spdims = layout.shape
        step = min(block, 32)
        # one LUT/width pair for the output (c) plus one per input gradient (da, db);
        # the dense-side LUTs flip their transposition flag for the gradient pass
        if mode == 'sdd':
            self.c_lut, self.c_width = sdd_lut(layout, block, device)
            self.da_lut, self.da_width = dsd_lut(layout, block, step, True, device)
            self.db_lut, self.db_width = dsd_lut(layout, block, step, False, device)
        elif mode == 'dsd':
            self.c_lut, self.c_width = dsd_lut(layout, block, step, not trans_a, device)
            self.da_lut, self.da_width = sdd_lut(layout, block, device)
            self.db_lut, self.db_width = dsd_lut(layout, block, step, trans_a, device)
        elif mode == 'dds':
            self.c_lut, self.c_width = dsd_lut(layout, block, step, trans_b, device)
            self.da_lut, self.da_width = dsd_lut(layout, block, step, not trans_b, device)
            self.db_lut, self.db_width = sdd_lut(layout, block, device)

    def __call__(self, a, b, out=None):
        """Compute the block-sparse product of ``a`` and ``b``.

        When ``out`` is given, the result is written into it and its
        gradient is routed back through autograd.
        """
        return _matmul.apply(a, b, self.trans_a, self.trans_b, self.trans_c, self.mode,
                             self.spdims, self.block,
                             self.c_lut, self.c_width,
                             self.da_lut, self.da_width,
                             self.db_lut, self.db_width,
                             out)