applied-ai-018 committed
Commit c90f994 · verified · 1 Parent(s): 618c5a9

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50):
  1. llmeval-env/lib/python3.10/site-packages/dataproperty/logger/__init__.py +7 -0
  2. llmeval-env/lib/python3.10/site-packages/dataproperty/logger/__pycache__/__init__.cpython-310.pyc +0 -0
  3. llmeval-env/lib/python3.10/site-packages/dataproperty/logger/__pycache__/_logger.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/dataproperty/logger/__pycache__/_null_logger.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/dataproperty/logger/_logger.py +22 -0
  6. llmeval-env/lib/python3.10/site-packages/dataproperty/logger/_null_logger.py +41 -0
  7. llmeval-env/lib/python3.10/site-packages/numexpr/__pycache__/cpuinfo.cpython-310.pyc +0 -0
  8. llmeval-env/lib/python3.10/site-packages/numexpr/__pycache__/expressions.cpython-310.pyc +0 -0
  9. llmeval-env/lib/python3.10/site-packages/numexpr/__pycache__/utils.cpython-310.pyc +0 -0
  10. llmeval-env/lib/python3.10/site-packages/numexpr/__pycache__/version.cpython-310.pyc +0 -0
  11. llmeval-env/lib/python3.10/site-packages/numexpr/expressions.py +523 -0
  12. llmeval-env/lib/python3.10/site-packages/numexpr/necompiler.py +1007 -0
  13. llmeval-env/lib/python3.10/site-packages/numexpr/tests/__init__.py +14 -0
  14. llmeval-env/lib/python3.10/site-packages/numexpr/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  15. llmeval-env/lib/python3.10/site-packages/numexpr/tests/__pycache__/test_numexpr.cpython-310.pyc +0 -0
  16. llmeval-env/lib/python3.10/site-packages/numexpr/tests/test_numexpr.py +1348 -0
  17. llmeval-env/lib/python3.10/site-packages/numexpr/utils.py +228 -0
  18. llmeval-env/lib/python3.10/site-packages/numexpr/version.py +4 -0
  19. llmeval-env/lib/python3.10/site-packages/torchgen/__init__.py +10 -0
  20. llmeval-env/lib/python3.10/site-packages/torchgen/__pycache__/gen_aoti_c_shim.cpython-310.pyc +0 -0
  21. llmeval-env/lib/python3.10/site-packages/torchgen/__pycache__/gen_functionalization_type.cpython-310.pyc +0 -0
  22. llmeval-env/lib/python3.10/site-packages/torchgen/__pycache__/gen_lazy_tensor.cpython-310.pyc +0 -0
  23. llmeval-env/lib/python3.10/site-packages/torchgen/__pycache__/local.cpython-310.pyc +0 -0
  24. llmeval-env/lib/python3.10/site-packages/torchgen/__pycache__/utils.cpython-310.pyc +0 -0
  25. llmeval-env/lib/python3.10/site-packages/torchgen/code_template.py +96 -0
  26. llmeval-env/lib/python3.10/site-packages/torchgen/context.py +128 -0
  27. llmeval-env/lib/python3.10/site-packages/torchgen/dest/__init__.py +19 -0
  28. llmeval-env/lib/python3.10/site-packages/torchgen/dest/__pycache__/__init__.cpython-310.pyc +0 -0
  29. llmeval-env/lib/python3.10/site-packages/torchgen/dest/__pycache__/lazy_ir.cpython-310.pyc +0 -0
  30. llmeval-env/lib/python3.10/site-packages/torchgen/dest/__pycache__/lazy_ts_lowering.cpython-310.pyc +0 -0
  31. llmeval-env/lib/python3.10/site-packages/torchgen/dest/__pycache__/native_functions.cpython-310.pyc +0 -0
  32. llmeval-env/lib/python3.10/site-packages/torchgen/dest/__pycache__/register_dispatch_key.cpython-310.pyc +0 -0
  33. llmeval-env/lib/python3.10/site-packages/torchgen/dest/__pycache__/ufunc.cpython-310.pyc +0 -0
  34. llmeval-env/lib/python3.10/site-packages/torchgen/dest/lazy_ir.py +707 -0
  35. llmeval-env/lib/python3.10/site-packages/torchgen/dest/lazy_ts_lowering.py +48 -0
  36. llmeval-env/lib/python3.10/site-packages/torchgen/dest/native_functions.py +64 -0
  37. llmeval-env/lib/python3.10/site-packages/torchgen/dest/register_dispatch_key.py +989 -0
  38. llmeval-env/lib/python3.10/site-packages/torchgen/dest/ufunc.py +545 -0
  39. llmeval-env/lib/python3.10/site-packages/torchgen/gen.py +0 -0
  40. llmeval-env/lib/python3.10/site-packages/torchgen/gen_aoti_c_shim.py +431 -0
  41. llmeval-env/lib/python3.10/site-packages/torchgen/gen_backend_stubs.py +609 -0
  42. llmeval-env/lib/python3.10/site-packages/torchgen/gen_executorch.py +995 -0
  43. llmeval-env/lib/python3.10/site-packages/torchgen/gen_functionalization_type.py +809 -0
  44. llmeval-env/lib/python3.10/site-packages/torchgen/gen_lazy_tensor.py +605 -0
  45. llmeval-env/lib/python3.10/site-packages/torchgen/gen_vmap_plumbing.py +265 -0
  46. llmeval-env/lib/python3.10/site-packages/torchgen/local.py +56 -0
  47. llmeval-env/lib/python3.10/site-packages/torchgen/model.py +0 -0
  48. llmeval-env/lib/python3.10/site-packages/torchgen/native_function_generation.py +643 -0
  49. llmeval-env/lib/python3.10/site-packages/torchgen/packaged/ATen/native/native_functions.yaml +0 -0
  50. llmeval-env/lib/python3.10/site-packages/torchgen/packaged/ATen/native/tags.yaml +65 -0
llmeval-env/lib/python3.10/site-packages/dataproperty/logger/__init__.py ADDED
@@ -0,0 +1,7 @@
+from ._logger import logger, set_logger
+
+
+__all__ = (
+    "logger",
+    "set_logger",
+)
llmeval-env/lib/python3.10/site-packages/dataproperty/logger/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (266 Bytes).
llmeval-env/lib/python3.10/site-packages/dataproperty/logger/__pycache__/_logger.cpython-310.pyc ADDED
Binary file (703 Bytes).
llmeval-env/lib/python3.10/site-packages/dataproperty/logger/__pycache__/_null_logger.cpython-310.pyc ADDED
Binary file (1.93 kB).
llmeval-env/lib/python3.10/site-packages/dataproperty/logger/_logger.py ADDED
@@ -0,0 +1,22 @@
+"""
+.. codeauthor:: Tsuyoshi Hombashi <[email protected]>
+"""
+
+from ._null_logger import NullLogger
+
+
+MODULE_NAME = "dataproperty"
+
+try:
+    from loguru import logger
+
+    logger.disable(MODULE_NAME)
+except ImportError:
+    logger = NullLogger()  # type: ignore
+
+
+def set_logger(is_enable: bool, propagation_depth: int = 1) -> None:
+    if is_enable:
+        logger.enable(MODULE_NAME)
+    else:
+        logger.disable(MODULE_NAME)
llmeval-env/lib/python3.10/site-packages/dataproperty/logger/_null_logger.py ADDED
@@ -0,0 +1,41 @@
+class NullLogger:
+    level_name = None
+
+    def remove(self, handler_id=None):  # pragma: no cover
+        pass
+
+    def add(self, sink, **kwargs):  # pragma: no cover
+        pass
+
+    def disable(self, name):  # pragma: no cover
+        pass
+
+    def enable(self, name):  # pragma: no cover
+        pass
+
+    def critical(self, __message, *args, **kwargs):  # pragma: no cover
+        pass
+
+    def debug(self, __message, *args, **kwargs):  # pragma: no cover
+        pass
+
+    def error(self, __message, *args, **kwargs):  # pragma: no cover
+        pass
+
+    def exception(self, __message, *args, **kwargs):  # pragma: no cover
+        pass
+
+    def info(self, __message, *args, **kwargs):  # pragma: no cover
+        pass
+
+    def log(self, __level, __message, *args, **kwargs):  # pragma: no cover
+        pass
+
+    def success(self, __message, *args, **kwargs):  # pragma: no cover
+        pass
+
+    def trace(self, __message, *args, **kwargs):  # pragma: no cover
+        pass
+
+    def warning(self, __message, *args, **kwargs):  # pragma: no cover
+        pass
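Together, these two modules give dataproperty an optional loguru-backed logger that silently degrades to a no-op when loguru is not installed. A minimal usage sketch (illustrative only, not part of this commit; it assumes the dataproperty package in this environment is importable):

# Illustrative sketch: toggling dataproperty's library logging.
# With loguru missing, every call lands on NullLogger and does nothing.
from dataproperty.logger import logger, set_logger

set_logger(True)    # loguru path: logger.enable("dataproperty")
logger.debug("visible only if a loguru sink has been configured")
set_logger(False)   # back to the library default (disabled)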
llmeval-env/lib/python3.10/site-packages/numexpr/__pycache__/cpuinfo.cpython-310.pyc ADDED
Binary file (31.8 kB).
llmeval-env/lib/python3.10/site-packages/numexpr/__pycache__/expressions.cpython-310.pyc ADDED
Binary file (14 kB).
llmeval-env/lib/python3.10/site-packages/numexpr/__pycache__/utils.cpython-310.pyc ADDED
Binary file (6.4 kB).
llmeval-env/lib/python3.10/site-packages/numexpr/__pycache__/version.cpython-310.pyc ADDED
Binary file (266 Bytes).
llmeval-env/lib/python3.10/site-packages/numexpr/expressions.py ADDED
@@ -0,0 +1,523 @@
+###################################################################
+#  Numexpr - Fast numerical array expression evaluator for NumPy.
+#
+#      License: MIT
+#      Author:  See AUTHORS.txt
+#
+#  See LICENSE.txt and LICENSES/*.txt for details about copyright and
+#  rights to use.
+####################################################################
+
+__all__ = ['E']
+
+import operator
+import sys
+import threading
+
+import numpy
+
+# Declare a double type that does not exist in Python space
+double = numpy.double
+
+# The default kind for undeclared variables
+default_kind = 'double'
+int_ = numpy.int32
+long_ = numpy.int64
+
+type_to_kind = {bool: 'bool', int_: 'int', long_: 'long', float: 'float',
+                double: 'double', complex: 'complex', bytes: 'bytes', str: 'str'}
+kind_to_type = {'bool': bool, 'int': int_, 'long': long_, 'float': float,
+                'double': double, 'complex': complex, 'bytes': bytes, 'str': str}
+kind_rank = ('bool', 'int', 'long', 'float', 'double', 'complex', 'none')
+scalar_constant_types = [bool, int_, int, float, double, complex, bytes, str]
+
+scalar_constant_types = tuple(scalar_constant_types)
+
+from numexpr import interpreter
+
+class Expression():
+
+    def __getattr__(self, name):
+        if name.startswith('_'):
+            try:
+                return self.__dict__[name]
+            except KeyError:
+                raise AttributeError
+        else:
+            return VariableNode(name, default_kind)
+
+
+E = Expression()
+
+
+class Context(threading.local):
+
+    def get(self, value, default):
+        return self.__dict__.get(value, default)
+
+    def get_current_context(self):
+        return self.__dict__
+
+    def set_new_context(self, dict_):
+        self.__dict__.update(dict_)
+
+# This will be called each time the local object is used in a separate thread
+_context = Context()
+
+
+def get_optimization():
+    return _context.get('optimization', 'none')
+
+
+# helper functions for creating __magic__ methods
+def ophelper(f):
+    def func(*args):
+        args = list(args)
+        for i, x in enumerate(args):
+            if isConstant(x):
+                args[i] = x = ConstantNode(x)
+            if not isinstance(x, ExpressionNode):
+                raise TypeError("unsupported object type: %s" % type(x))
+        return f(*args)
+
+    func.__name__ = f.__name__
+    func.__doc__ = f.__doc__
+    func.__dict__.update(f.__dict__)
+    return func
+
+
+def allConstantNodes(args):
+    "returns True if args are all ConstantNodes."
+    for x in args:
+        if not isinstance(x, ConstantNode):
+            return False
+    return True
+
+
+def isConstant(ex):
+    "Returns True if ex is a constant scalar of an allowed type."
+    return isinstance(ex, scalar_constant_types)
+
+
+def commonKind(nodes):
+    node_kinds = [node.astKind for node in nodes]
+    str_count = node_kinds.count('bytes') + node_kinds.count('str')
+    if 0 < str_count < len(node_kinds):  # some args are strings, but not all
+        raise TypeError("strings can only be operated with strings")
+    if str_count > 0:  # if there are some, all of them must be
+        return 'bytes'
+    n = -1
+    for x in nodes:
+        n = max(n, kind_rank.index(x.astKind))
+    return kind_rank[n]
+
+
+max_int32 = 2147483647
+min_int32 = -max_int32 - 1
+
+
+def bestConstantType(x):
+    # ``numpy.string_`` is a subclass of ``bytes``
+    if isinstance(x, (bytes, str)):
+        return bytes
+    # Numeric conversion to boolean values is not tried because
+    # ``bool(1) == True`` (same for 0 and False), so 0 and 1 would be
+    # interpreted as booleans when ``False`` and ``True`` are already
+    # supported.
+    if isinstance(x, (bool, numpy.bool_)):
+        return bool
+    # ``long`` objects are kept as is to allow the user to force
+    # promotion of results by using long constants, e.g. by operating
+    # a 32-bit array with a long (64-bit) constant.
+    if isinstance(x, (long_, numpy.int64)):
+        return long_
+    # ``double`` objects are kept as is to allow the user to force
+    # promotion of results by using double constants, e.g. by operating
+    # a float (32-bit) array with a double (64-bit) constant.
+    if isinstance(x, double):
+        return double
+    if isinstance(x, numpy.float32):
+        return float
+    if isinstance(x, (int, numpy.integer)):
+        # Constants needing more than 32 bits are always
+        # considered ``long``, *regardless of the platform*, so we
+        # can clearly tell 32- and 64-bit constants apart.
+        if not (min_int32 <= x <= max_int32):
+            return long_
+        return int_
+    # The duality of float and double in Python avoids that we have to list
+    # ``double`` too.
+    for converter in float, complex:
+        try:
+            y = converter(x)
+        except Exception as err:
+            continue
+        if y == x or numpy.isnan(y):
+            return converter
+
+
+def getKind(x):
+    converter = bestConstantType(x)
+    return type_to_kind[converter]
+
+
+def binop(opname, reversed=False, kind=None):
+    # Getting the named method from self (after reversal) does not
+    # always work (e.g. int constants do not have a __lt__ method).
+    opfunc = getattr(operator, "__%s__" % opname)
+
+    @ophelper
+    def operation(self, other):
+        if reversed:
+            self, other = other, self
+        if allConstantNodes([self, other]):
+            return ConstantNode(opfunc(self.value, other.value))
+        else:
+            return OpNode(opname, (self, other), kind=kind)
+
+    return operation
+
+
+def func(func, minkind=None, maxkind=None):
+    @ophelper
+    def function(*args):
+        if allConstantNodes(args):
+            return ConstantNode(func(*[x.value for x in args]))
+        kind = commonKind(args)
+        if kind in ('int', 'long'):
+            # Exception for following NumPy casting rules
+            #FIXME: this is not always desirable. The following
+            # functions which return ints (for int inputs) on numpy
+            # but not on numexpr: copy, abs, fmod, ones_like
+            kind = 'double'
+        else:
+            # Apply regular casting rules
+            if minkind and kind_rank.index(minkind) > kind_rank.index(kind):
+                kind = minkind
+            if maxkind and kind_rank.index(maxkind) < kind_rank.index(kind):
+                kind = maxkind
+        return FuncNode(func.__name__, args, kind)
+
+    return function
+
+
+@ophelper
+def where_func(a, b, c):
+    if isinstance(a, ConstantNode):
+        return b if a.value else c
+    if allConstantNodes([a, b, c]):
+        return ConstantNode(numpy.where(a, b, c))
+    return FuncNode('where', [a, b, c])
+
+
+def encode_axis(axis):
+    if isinstance(axis, ConstantNode):
+        axis = axis.value
+    if axis is None:
+        axis = interpreter.allaxes
+    else:
+        if axis < 0:
+            raise ValueError("negative axis are not supported")
+        if axis > 254:
+            raise ValueError("cannot encode axis")
+    return RawNode(axis)
+
+
+def gen_reduce_axis_func(name):
+    def _func(a, axis=None):
+        axis = encode_axis(axis)
+        if isinstance(a, ConstantNode):
+            return a
+        if isinstance(a, (bool, int_, long_, float, double, complex)):
+            a = ConstantNode(a)
+        return FuncNode(name, [a, axis], kind=a.astKind)
+    return _func
+
+
+@ophelper
+def contains_func(a, b):
+    return FuncNode('contains', [a, b], kind='bool')
+
+
+@ophelper
+def div_op(a, b):
+    if get_optimization() in ('moderate', 'aggressive'):
+        if (isinstance(b, ConstantNode) and
+                (a.astKind == b.astKind) and
+                a.astKind in ('float', 'double', 'complex')):
+            return OpNode('mul', [a, ConstantNode(1. / b.value)])
+    return OpNode('div', [a, b])
+
+
+@ophelper
+def truediv_op(a, b):
+    if get_optimization() in ('moderate', 'aggressive'):
+        if (isinstance(b, ConstantNode) and
+                (a.astKind == b.astKind) and
+                a.astKind in ('float', 'double', 'complex')):
+            return OpNode('mul', [a, ConstantNode(1. / b.value)])
+    kind = commonKind([a, b])
+    if kind in ('bool', 'int', 'long'):
+        kind = 'double'
+    return OpNode('div', [a, b], kind=kind)
+
+
+@ophelper
+def rtruediv_op(a, b):
+    return truediv_op(b, a)
+
+
+@ophelper
+def pow_op(a, b):
+
+    if isinstance(b, ConstantNode):
+        x = b.value
+        if ( a.astKind in ('int', 'long') and
+             b.astKind in ('int', 'long') and x < 0) :
+            raise ValueError(
+                'Integers to negative integer powers are not allowed.')
+        if get_optimization() == 'aggressive':
+            RANGE = 50  # Approximate break even point with pow(x,y)
+            # Optimize all integral and half integral powers in [-RANGE, RANGE]
+            # Note: for complex numbers RANGE could be larger.
+            if (int(2 * x) == 2 * x) and (-RANGE <= abs(x) <= RANGE):
+                n = int_(abs(x))
+                ishalfpower = int_(abs(2 * x)) % 2
+
+                def multiply(x, y):
+                    if x is None: return y
+                    return OpNode('mul', [x, y])
+
+                r = None
+                p = a
+                mask = 1
+                while True:
+                    if (n & mask):
+                        r = multiply(r, p)
+                    mask <<= 1
+                    if mask > n:
+                        break
+                    p = OpNode('mul', [p, p])
+                if ishalfpower:
+                    kind = commonKind([a])
+                    if kind in ('int', 'long'):
+                        kind = 'double'
+                    r = multiply(r, OpNode('sqrt', [a], kind))
+                if r is None:
+                    r = OpNode('ones_like', [a])
+                if x < 0:
+                    # Issue #428
+                    r = truediv_op(ConstantNode(1), r)
+                return r
+        if get_optimization() in ('moderate', 'aggressive'):
+            if x == -1:
+                return OpNode('div', [ConstantNode(1), a])
+            if x == 0:
+                return OpNode('ones_like', [a])
+            if x == 0.5:
+                kind = a.astKind
+                if kind in ('int', 'long'): kind = 'double'
+                return FuncNode('sqrt', [a], kind=kind)
+            if x == 1:
+                return a
+            if x == 2:
+                return OpNode('mul', [a, a])
+    return OpNode('pow', [a, b])
+
+# The functions and the minimum and maximum types accepted
+numpy.expm1x = numpy.expm1
+functions = {
+    'copy': func(numpy.copy),
+    'ones_like': func(numpy.ones_like),
+    'sqrt': func(numpy.sqrt, 'float'),
+
+    'sin': func(numpy.sin, 'float'),
+    'cos': func(numpy.cos, 'float'),
+    'tan': func(numpy.tan, 'float'),
+    'arcsin': func(numpy.arcsin, 'float'),
+    'arccos': func(numpy.arccos, 'float'),
+    'arctan': func(numpy.arctan, 'float'),
+
+    'sinh': func(numpy.sinh, 'float'),
+    'cosh': func(numpy.cosh, 'float'),
+    'tanh': func(numpy.tanh, 'float'),
+    'arcsinh': func(numpy.arcsinh, 'float'),
+    'arccosh': func(numpy.arccosh, 'float'),
+    'arctanh': func(numpy.arctanh, 'float'),
+
+    'fmod': func(numpy.fmod, 'float'),
+    'arctan2': func(numpy.arctan2, 'float'),
+
+    'log': func(numpy.log, 'float'),
+    'log1p': func(numpy.log1p, 'float'),
+    'log10': func(numpy.log10, 'float'),
+    'exp': func(numpy.exp, 'float'),
+    'expm1': func(numpy.expm1, 'float'),
+
+    'abs': func(numpy.absolute, 'float'),
+    'ceil': func(numpy.ceil, 'float', 'double'),
+    'floor': func(numpy.floor, 'float', 'double'),
+
+    'where': where_func,
+
+    'real': func(numpy.real, 'double', 'double'),
+    'imag': func(numpy.imag, 'double', 'double'),
+    'complex': func(complex, 'complex'),
+    'conj': func(numpy.conj, 'complex'),
+
+    'sum': gen_reduce_axis_func('sum'),
+    'prod': gen_reduce_axis_func('prod'),
+    'min': gen_reduce_axis_func('min'),
+    'max': gen_reduce_axis_func('max'),
+    'contains': contains_func,
+}
+
+
+class ExpressionNode():
+    """
+    An object that represents a generic number object.
+
+    This implements the number special methods so that we can keep
+    track of how this object has been used.
+    """
+    astType = 'generic'
+
+    def __init__(self, value=None, kind=None, children=None):
+        self.value = value
+        if kind is None:
+            kind = 'none'
+        self.astKind = kind
+        if children is None:
+            self.children = ()
+        else:
+            self.children = tuple(children)
+
+    def get_real(self):
+        if self.astType == 'constant':
+            return ConstantNode(complex(self.value).real)
+        return OpNode('real', (self,), 'double')
+
+    real = property(get_real)
+
+    def get_imag(self):
+        if self.astType == 'constant':
+            return ConstantNode(complex(self.value).imag)
+        return OpNode('imag', (self,), 'double')
+
+    imag = property(get_imag)
+
+    def __str__(self):
+        return '%s(%s, %s, %s)' % (self.__class__.__name__, self.value,
+                                   self.astKind, self.children)
+
+    def __repr__(self):
+        return self.__str__()
+
+    def __neg__(self):
+        return OpNode('neg', (self,))
+
+    def __invert__(self):
+        return OpNode('invert', (self,))
+
+    def __pos__(self):
+        return self
+
+    # The next check is commented out. See #24 for more info.
+
+    def __bool__(self):
+        raise TypeError("You can't use Python's standard boolean operators in "
+                        "NumExpr expressions. You should use their bitwise "
+                        "counterparts instead: '&' instead of 'and', "
+                        "'|' instead of 'or', and '~' instead of 'not'.")
+
+    __add__ = __radd__ = binop('add')
+    __sub__ = binop('sub')
+    __rsub__ = binop('sub', reversed=True)
+    __mul__ = __rmul__ = binop('mul')
+    __truediv__ = truediv_op
+    __rtruediv__ = rtruediv_op
+    __pow__ = pow_op
+    __rpow__ = binop('pow', reversed=True)
+    __mod__ = binop('mod')
+    __rmod__ = binop('mod', reversed=True)
+
+    __lshift__ = binop('lshift')
+    __rlshift__ = binop('lshift', reversed=True)
+    __rshift__ = binop('rshift')
+    __rrshift__ = binop('rshift', reversed=True)
+
+    # boolean operations
+
+    __and__ = binop('and', kind='bool')
+    __or__ = binop('or', kind='bool')
+
+    __gt__ = binop('gt', kind='bool')
+    __ge__ = binop('ge', kind='bool')
+    __eq__ = binop('eq', kind='bool')
+    __ne__ = binop('ne', kind='bool')
+    __lt__ = binop('gt', reversed=True, kind='bool')
+    __le__ = binop('ge', reversed=True, kind='bool')
+
+
+class LeafNode(ExpressionNode):
+    leafNode = True
+
+
+class VariableNode(LeafNode):
+    astType = 'variable'
+
+    def __init__(self, value=None, kind=None, children=None):
+        LeafNode.__init__(self, value=value, kind=kind)
+
+
+class RawNode():
+    """
+    Used to pass raw integers to interpreter.
+    For instance, for selecting what function to use in func1.
+    Purposely don't inherit from ExpressionNode, since we don't wan't
+    this to be used for anything but being walked.
+    """
+    astType = 'raw'
+    astKind = 'none'
+
+    def __init__(self, value):
+        self.value = value
+        self.children = ()
+
+    def __str__(self):
+        return 'RawNode(%s)' % (self.value,)
+
+    __repr__ = __str__
+
+
+class ConstantNode(LeafNode):
+    astType = 'constant'
+
+    def __init__(self, value=None, children=None):
+        kind = getKind(value)
+        # Python float constants are double precision by default
+        if kind == 'float' and isinstance(value, float):
+            kind = 'double'
+        LeafNode.__init__(self, value=value, kind=kind)
+
+    def __neg__(self):
+        return ConstantNode(-self.value)
+
+    def __invert__(self):
+        return ConstantNode(~self.value)
+
+
+class OpNode(ExpressionNode):
+    astType = 'op'
+
+    def __init__(self, opcode=None, args=None, kind=None):
+        if (kind is None) and (args is not None):
+            kind = commonKind(args)
+        ExpressionNode.__init__(self, value=opcode, kind=kind, children=args)
+
+
+class FuncNode(OpNode):
+    def __init__(self, opcode=None, args=None, kind=None):
+        if (kind is None) and (args is not None):
+            kind = commonKind(args)
+        OpNode.__init__(self, opcode, args, kind)
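expressions.py only builds the operator tree; compiling and running it is handled by necompiler.py below. A minimal sketch of how the E object and the node classes are exercised (illustrative only, not part of this diff; it assumes the bundled numexpr package and numpy are importable):

import numpy as np
import numexpr
from numexpr.expressions import E

# 2*E.a + 3*E.b builds ConstantNode/VariableNode/OpNode objects; nothing is
# evaluated until the tree is compiled into a NumExpr program.
tree = 2 * E.a + 3 * E.b
func = numexpr.NumExpr(tree, signature=[('a', np.double), ('b', np.double)])
print(func(np.arange(3.0), np.ones(3)))   # -> [3. 5. 7.]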
llmeval-env/lib/python3.10/site-packages/numexpr/necompiler.py ADDED
@@ -0,0 +1,1007 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ###################################################################
2
+ # Numexpr - Fast numerical array expression evaluator for NumPy.
3
+ #
4
+ # License: MIT
5
+ # Author: See AUTHORS.txt
6
+ #
7
+ # See LICENSE.txt and LICENSES/*.txt for details about copyright and
8
+ # rights to use.
9
+ ####################################################################
10
+
11
+ from typing import Optional, Dict
12
+ import __future__
13
+ import sys
14
+ import os
15
+ import threading
16
+ import re
17
+
18
+ import numpy
19
+
20
+ is_cpu_amd_intel = False # DEPRECATION WARNING: WILL BE REMOVED IN FUTURE RELEASE
21
+ from numexpr import interpreter, expressions, use_vml
22
+ from numexpr.utils import CacheDict
23
+
24
+ # Declare a double type that does not exist in Python space
25
+ double = numpy.double
26
+ double = numpy.double
27
+
28
+ int_ = numpy.int32
29
+ long_ = numpy.int64
30
+
31
+ typecode_to_kind = {'b': 'bool', 'i': 'int', 'l': 'long', 'f': 'float', 'd': 'double',
32
+ 'c': 'complex', 'n': 'none', 's': 'str'}
33
+ kind_to_typecode = {'bool': 'b', 'int': 'i', 'long': 'l', 'float': 'f', 'double': 'd',
34
+ 'complex': 'c', 'bytes': 's', 'str': 's', 'none': 'n'}
35
+ type_to_typecode = {bool: 'b', int_: 'i', long_: 'l', float: 'f',
36
+ double: 'd', complex: 'c', bytes: 's', str: 's'}
37
+ type_to_kind = expressions.type_to_kind
38
+ kind_to_type = expressions.kind_to_type
39
+ default_type = kind_to_type[expressions.default_kind]
40
+ scalar_constant_kinds = list(kind_to_typecode.keys())
41
+
42
+ # VML functions that are implemented in numexpr
43
+ vml_functions = [
44
+ "div", # interp_body.cpp
45
+ "inv", # interp_body.cpp
46
+ "pow", # interp_body.cpp
47
+ # Keep the rest of this list in sync with the ones listed in functions.hpp
48
+ "sqrt",
49
+ "sin",
50
+ "cos",
51
+ "tan",
52
+ "arcsin",
53
+ "arccos",
54
+ "arctan",
55
+ "sinh",
56
+ "cosh",
57
+ "tanh",
58
+ "arcsinh",
59
+ "arccosh",
60
+ "arctanh",
61
+ "log",
62
+ "log1p",
63
+ "log10",
64
+ "exp",
65
+ "expm1",
66
+ "absolute",
67
+ "conjugate",
68
+ "arctan2",
69
+ "fmod",
70
+ "ceil",
71
+ "floor"
72
+ ]
73
+
74
+
75
+ class ASTNode():
76
+ """Abstract Syntax Tree node.
77
+
78
+ Members:
79
+
80
+ astType -- type of node (op, constant, variable, raw, or alias)
81
+ astKind -- the type of the result (bool, float, etc.)
82
+ value -- value associated with this node.
83
+ An opcode, numerical value, a variable name, etc.
84
+ children -- the children below this node
85
+ reg -- the register assigned to the result for this node.
86
+ """
87
+ cmpnames = ['astType', 'astKind', 'value', 'children']
88
+
89
+ def __init__(self, astType='generic', astKind='unknown', value=None, children=()):
90
+ self.astType = astType
91
+ self.astKind = astKind
92
+ self.value = value
93
+ self.children = tuple(children)
94
+ self.reg = None
95
+
96
+ def __eq__(self, other):
97
+ if self.astType == 'alias':
98
+ self = self.value
99
+ if other.astType == 'alias':
100
+ other = other.value
101
+ if not isinstance(other, ASTNode):
102
+ return False
103
+ for name in self.cmpnames:
104
+ if getattr(self, name) != getattr(other, name):
105
+ return False
106
+ return True
107
+
108
+ def __lt__(self,other):
109
+ # RAM: this is a fix for issue #88 whereby sorting on constants
110
+ # that may be of astKind == 'complex' but type(self.value) == int or float
111
+ # Here we let NumPy sort as it will cast data properly for comparison
112
+ # when the Python built-ins will raise an error.
113
+ if self.astType == 'constant':
114
+ if self.astKind == other.astKind:
115
+ return numpy.array(self.value) < numpy.array(other.value)
116
+ return self.astKind < other.astKind
117
+ else:
118
+ raise TypeError('Sorting not implemented for astType: %s'%self.astType)
119
+
120
+ def __hash__(self):
121
+ if self.astType == 'alias':
122
+ self = self.value
123
+ return hash((self.astType, self.astKind, self.value, self.children))
124
+
125
+ def __str__(self):
126
+ return 'AST(%s, %s, %s, %s, %s)' % (self.astType, self.astKind,
127
+ self.value, self.children, self.reg)
128
+
129
+ def __repr__(self):
130
+ return '<AST object at %s>' % id(self)
131
+
132
+ def key(self):
133
+ return (self.astType, self.astKind, self.value, self.children)
134
+
135
+ def typecode(self):
136
+ return kind_to_typecode[self.astKind]
137
+
138
+ def postorderWalk(self):
139
+ for c in self.children:
140
+ for w in c.postorderWalk():
141
+ yield w
142
+ yield self
143
+
144
+ def allOf(self, *astTypes):
145
+ astTypes = set(astTypes)
146
+ for w in self.postorderWalk():
147
+ if w.astType in astTypes:
148
+ yield w
149
+
150
+
151
+ def expressionToAST(ex):
152
+ """Take an expression tree made out of expressions.ExpressionNode,
153
+ and convert to an AST tree.
154
+
155
+ This is necessary as ExpressionNode overrides many methods to act
156
+ like a number.
157
+ """
158
+ return ASTNode(ex.astType, ex.astKind, ex.value,
159
+ [expressionToAST(c) for c in ex.children])
160
+
161
+
162
+ def sigPerms(s):
163
+ """Generate all possible signatures derived by upcasting the given
164
+ signature.
165
+ """
166
+ codes = 'bilfdc'
167
+ if not s:
168
+ yield ''
169
+ elif s[0] in codes:
170
+ start = codes.index(s[0])
171
+ for x in codes[start:]:
172
+ for y in sigPerms(s[1:]):
173
+ yield x + y
174
+ elif s[0] == 's': # numbers shall not be cast to strings
175
+ for y in sigPerms(s[1:]):
176
+ yield 's' + y
177
+ else:
178
+ yield s
179
+
180
+
181
+ def typeCompileAst(ast):
182
+ """Assign appropriate types to each node in the AST.
183
+
184
+ Will convert opcodes and functions to appropriate upcast version,
185
+ and add "cast" ops if needed.
186
+ """
187
+ children = list(ast.children)
188
+ if ast.astType == 'op':
189
+ retsig = ast.typecode()
190
+ basesig = ''.join(x.typecode() for x in list(ast.children))
191
+ # Find some operation that will work on an acceptable casting of args.
192
+ for sig in sigPerms(basesig):
193
+ value = (ast.value + '_' + retsig + sig).encode('ascii')
194
+ if value in interpreter.opcodes:
195
+ break
196
+ else:
197
+ for sig in sigPerms(basesig):
198
+ funcname = (ast.value + '_' + retsig + sig).encode('ascii')
199
+ if funcname in interpreter.funccodes:
200
+ value = ('func_%sn' % (retsig + sig)).encode('ascii')
201
+ children += [ASTNode('raw', 'none',
202
+ interpreter.funccodes[funcname])]
203
+ break
204
+ else:
205
+ raise NotImplementedError(
206
+ "couldn't find matching opcode for '%s'"
207
+ % (ast.value + '_' + retsig + basesig))
208
+ # First just cast constants, then cast variables if necessary:
209
+ for i, (have, want) in enumerate(zip(basesig, sig)):
210
+ if have != want:
211
+ kind = typecode_to_kind[want]
212
+ if children[i].astType == 'constant':
213
+ children[i] = ASTNode('constant', kind, children[i].value)
214
+ else:
215
+ opname = "cast"
216
+ children[i] = ASTNode('op', kind, opname, [children[i]])
217
+ else:
218
+ value = ast.value
219
+ children = ast.children
220
+ return ASTNode(ast.astType, ast.astKind, value,
221
+ [typeCompileAst(c) for c in children])
222
+
223
+
224
+ class Register():
225
+ """Abstraction for a register in the VM.
226
+
227
+ Members:
228
+ node -- the AST node this corresponds to
229
+ temporary -- True if this isn't an input or output
230
+ immediate -- not a register, but an immediate value
231
+ n -- the physical register number.
232
+ None if no number assigned yet.
233
+ """
234
+
235
+ def __init__(self, astnode, temporary=False):
236
+ self.node = astnode
237
+ self.temporary = temporary
238
+ self.immediate = False
239
+ self.n = None
240
+
241
+ def __str__(self):
242
+ if self.temporary:
243
+ name = 'Temporary'
244
+ else:
245
+ name = 'Register'
246
+ return '%s(%s, %s, %s)' % (name, self.node.astType,
247
+ self.node.astKind, self.n,)
248
+
249
+ def __repr__(self):
250
+ return self.__str__()
251
+
252
+
253
+ class Immediate(Register):
254
+ """Representation of an immediate (integer) operand, instead of
255
+ a register.
256
+ """
257
+
258
+ def __init__(self, astnode):
259
+ Register.__init__(self, astnode)
260
+ self.immediate = True
261
+
262
+ def __str__(self):
263
+ return 'Immediate(%d)' % (self.node.value,)
264
+
265
+
266
+ _flow_pat = r'[\;\[\:]'
267
+ _dunder_pat = r'(^|[^\w])__[\w]+__($|[^\w])'
268
+ _attr_pat = r'\.\b(?!(real|imag|(\d*[eE]?[+-]?\d+)|\d*j)\b)'
269
+ _blacklist_re = re.compile(f'{_flow_pat}|{_dunder_pat}|{_attr_pat}')
270
+
271
+ def stringToExpression(s, types, context, sanitize: bool=True):
272
+ """Given a string, convert it to a tree of ExpressionNode's.
273
+ """
274
+ # sanitize the string for obvious attack vectors that NumExpr cannot
275
+ # parse into its homebrew AST. This is to protect the call to `eval` below.
276
+ # We forbid `;`, `:`. `[` and `__`, and attribute access via '.'.
277
+ # We cannot ban `.real` or `.imag` however...
278
+ # We also cannot ban `.\d*j`, where `\d*` is some digits (or none), e.g. 1.5j, 1.j
279
+ if sanitize:
280
+ no_whitespace = re.sub(r'\s+', '', s)
281
+ skip_quotes = re.sub(r'(\'[^\']*\')', '', no_whitespace)
282
+ if _blacklist_re.search(skip_quotes) is not None:
283
+ raise ValueError(f'Expression {s} has forbidden control characters.')
284
+
285
+ old_ctx = expressions._context.get_current_context()
286
+ try:
287
+ expressions._context.set_new_context(context)
288
+ # first compile to a code object to determine the names
289
+ if context.get('truediv', False):
290
+ flags = __future__.division.compiler_flag
291
+ else:
292
+ flags = 0
293
+ c = compile(s, '<expr>', 'eval', flags)
294
+ # make VariableNode's for the names
295
+ names = {}
296
+ for name in c.co_names:
297
+ if name == "None":
298
+ names[name] = None
299
+ elif name == "True":
300
+ names[name] = True
301
+ elif name == "False":
302
+ names[name] = False
303
+ else:
304
+ t = types.get(name, default_type)
305
+ names[name] = expressions.VariableNode(name, type_to_kind[t])
306
+ names.update(expressions.functions)
307
+
308
+ # now build the expression
309
+ ex = eval(c, names)
310
+
311
+ if expressions.isConstant(ex):
312
+ ex = expressions.ConstantNode(ex, expressions.getKind(ex))
313
+ elif not isinstance(ex, expressions.ExpressionNode):
314
+ raise TypeError("unsupported expression type: %s" % type(ex))
315
+ finally:
316
+ expressions._context.set_new_context(old_ctx)
317
+ return ex
318
+
319
+
320
+ def isReduction(ast):
321
+ prefixes = (b'sum_', b'prod_', b'min_', b'max_')
322
+ return any(ast.value.startswith(p) for p in prefixes)
323
+
324
+
325
+ def getInputOrder(ast, input_order=None):
326
+ """
327
+ Derive the input order of the variables in an expression.
328
+ """
329
+ variables = {}
330
+ for a in ast.allOf('variable'):
331
+ variables[a.value] = a
332
+ variable_names = set(variables.keys())
333
+
334
+ if input_order:
335
+ if variable_names != set(input_order):
336
+ raise ValueError(
337
+ "input names (%s) don't match those found in expression (%s)"
338
+ % (input_order, variable_names))
339
+
340
+ ordered_names = input_order
341
+ else:
342
+ ordered_names = list(variable_names)
343
+ ordered_names.sort()
344
+ ordered_variables = [variables[v] for v in ordered_names]
345
+ return ordered_variables
346
+
347
+
348
+ def convertConstantToKind(x, kind):
349
+ # Exception for 'float' types that will return the NumPy float32 type
350
+ if kind == 'float':
351
+ return numpy.float32(x)
352
+ elif isinstance(x,str):
353
+ return x.encode('ascii')
354
+ return kind_to_type[kind](x)
355
+
356
+
357
+ def getConstants(ast):
358
+ """
359
+ RAM: implemented magic method __lt__ for ASTNode to fix issues
360
+ #88 and #209. The following test code works now, as does the test suite.
361
+
362
+ import numexpr as ne
363
+ a = 1 + 3j; b = 5.0
364
+ ne.evaluate('a*2 + 15j - b')
365
+ """
366
+ constant_registers = set([node.reg for node in ast.allOf("constant")])
367
+ constants_order = sorted([r.node for r in constant_registers])
368
+ constants = [convertConstantToKind(a.value, a.astKind)
369
+ for a in constants_order]
370
+ return constants_order, constants
371
+
372
+
373
+ def sortNodesByOrder(nodes, order):
374
+ order_map = {}
375
+ for i, (_, v, _) in enumerate(order):
376
+ order_map[v] = i
377
+ dec_nodes = [(order_map[n.value], n) for n in nodes]
378
+ dec_nodes.sort()
379
+ return [a[1] for a in dec_nodes]
380
+
381
+
382
+ def assignLeafRegisters(inodes, registerMaker):
383
+ """
384
+ Assign new registers to each of the leaf nodes.
385
+ """
386
+ leafRegisters = {}
387
+ for node in inodes:
388
+ key = node.key()
389
+ if key in leafRegisters:
390
+ node.reg = leafRegisters[key]
391
+ else:
392
+ node.reg = leafRegisters[key] = registerMaker(node)
393
+
394
+
395
+ def assignBranchRegisters(inodes, registerMaker):
396
+ """
397
+ Assign temporary registers to each of the branch nodes.
398
+ """
399
+ for node in inodes:
400
+ node.reg = registerMaker(node, temporary=True)
401
+
402
+
403
+ def collapseDuplicateSubtrees(ast):
404
+ """
405
+ Common subexpression elimination.
406
+ """
407
+ seen = {}
408
+ aliases = []
409
+ for a in ast.allOf('op'):
410
+ if a in seen:
411
+ target = seen[a]
412
+ a.astType = 'alias'
413
+ a.value = target
414
+ a.children = ()
415
+ aliases.append(a)
416
+ else:
417
+ seen[a] = a
418
+ # Set values and registers so optimizeTemporariesAllocation
419
+ # doesn't get confused
420
+ for a in aliases:
421
+ while a.value.astType == 'alias':
422
+ a.value = a.value.value
423
+ return aliases
424
+
425
+
426
+ def optimizeTemporariesAllocation(ast):
427
+ """
428
+ Attempt to minimize the number of temporaries needed, by reusing old ones.
429
+ """
430
+ nodes = [n for n in ast.postorderWalk() if n.reg.temporary]
431
+ users_of = dict((n.reg, set()) for n in nodes)
432
+
433
+ node_regs = dict((n, set(c.reg for c in n.children if c.reg.temporary))
434
+ for n in nodes)
435
+ if nodes and nodes[-1] is not ast:
436
+ nodes_to_check = nodes + [ast]
437
+ else:
438
+ nodes_to_check = nodes
439
+ for n in nodes_to_check:
440
+ for c in n.children:
441
+ if c.reg.temporary:
442
+ users_of[c.reg].add(n)
443
+
444
+ unused = dict([(tc, set()) for tc in scalar_constant_kinds])
445
+ for n in nodes:
446
+ for c in n.children:
447
+ reg = c.reg
448
+ if reg.temporary:
449
+ users = users_of[reg]
450
+ users.discard(n)
451
+ if not users:
452
+ unused[reg.node.astKind].add(reg)
453
+ if unused[n.astKind]:
454
+ reg = unused[n.astKind].pop()
455
+ users_of[reg] = users_of[n.reg]
456
+ n.reg = reg
457
+
458
+
459
+ def setOrderedRegisterNumbers(order, start):
460
+ """
461
+ Given an order of nodes, assign register numbers.
462
+ """
463
+ for i, node in enumerate(order):
464
+ node.reg.n = start + i
465
+ return start + len(order)
466
+
467
+
468
+ def setRegisterNumbersForTemporaries(ast, start):
469
+ """
470
+ Assign register numbers for temporary registers, keeping track of
471
+ aliases and handling immediate operands.
472
+ """
473
+ seen = 0
474
+ signature = ''
475
+ aliases = []
476
+ for node in ast.postorderWalk():
477
+ if node.astType == 'alias':
478
+ aliases.append(node)
479
+ node = node.value
480
+ if node.reg.immediate:
481
+ node.reg.n = node.value
482
+ continue
483
+ reg = node.reg
484
+ if reg.n is None:
485
+ reg.n = start + seen
486
+ seen += 1
487
+ signature += reg.node.typecode()
488
+ for node in aliases:
489
+ node.reg = node.value.reg
490
+ return start + seen, signature
491
+
492
+
493
+ def convertASTtoThreeAddrForm(ast):
494
+ """
495
+ Convert an AST to a three address form.
496
+
497
+ Three address form is (op, reg1, reg2, reg3), where reg1 is the
498
+ destination of the result of the instruction.
499
+
500
+ I suppose this should be called three register form, but three
501
+ address form is found in compiler theory.
502
+ """
503
+ return [(node.value, node.reg) + tuple([c.reg for c in node.children])
504
+ for node in ast.allOf('op')]
505
+
506
+
507
+ def compileThreeAddrForm(program):
508
+ """
509
+ Given a three address form of the program, compile it a string that
510
+ the VM understands.
511
+ """
512
+
513
+ def nToChr(reg):
514
+ if reg is None:
515
+ return b'\xff'
516
+ elif reg.n < 0:
517
+ raise ValueError("negative value for register number %s" % reg.n)
518
+ else:
519
+ return bytes([reg.n])
520
+
521
+ def quadrupleToString(opcode, store, a1=None, a2=None):
522
+ cop = chr(interpreter.opcodes[opcode]).encode('ascii')
523
+ cs = nToChr(store)
524
+ ca1 = nToChr(a1)
525
+ ca2 = nToChr(a2)
526
+ return cop + cs + ca1 + ca2
527
+
528
+ def toString(args):
529
+ while len(args) < 4:
530
+ args += (None,)
531
+ opcode, store, a1, a2 = args[:4]
532
+ s = quadrupleToString(opcode, store, a1, a2)
533
+ l = [s]
534
+ args = args[4:]
535
+ while args:
536
+ s = quadrupleToString(b'noop', *args[:3])
537
+ l.append(s)
538
+ args = args[3:]
539
+ return b''.join(l)
540
+
541
+ prog_str = b''.join([toString(t) for t in program])
542
+ return prog_str
543
+
544
+
545
+ context_info = [
546
+ ('optimization', ('none', 'moderate', 'aggressive'), 'aggressive'),
547
+ ('truediv', (False, True, 'auto'), 'auto')
548
+ ]
549
+
550
+
551
+ def getContext(kwargs, _frame_depth=1):
552
+ d = kwargs.copy()
553
+ context = {}
554
+ for name, allowed, default in context_info:
555
+ value = d.pop(name, default)
556
+ if value in allowed:
557
+ context[name] = value
558
+ else:
559
+ raise ValueError("'%s' must be one of %s" % (name, allowed))
560
+
561
+ if d:
562
+ raise ValueError("Unknown keyword argument '%s'" % d.popitem()[0])
563
+ if context['truediv'] == 'auto':
564
+ caller_globals = sys._getframe(_frame_depth + 1).f_globals
565
+ context['truediv'] = caller_globals.get('division', None) == __future__.division
566
+
567
+ return context
568
+
569
+
570
+ def precompile(ex, signature=(), context={}, sanitize: bool=True):
571
+ """
572
+ Compile the expression to an intermediate form.
573
+ """
574
+ types = dict(signature)
575
+ input_order = [name for (name, type_) in signature]
576
+
577
+ if isinstance(ex, str):
578
+ ex = stringToExpression(ex, types, context, sanitize)
579
+
580
+ # the AST is like the expression, but the node objects don't have
581
+ # any odd interpretations
582
+
583
+ ast = expressionToAST(ex)
584
+
585
+ if ex.astType != 'op':
586
+ ast = ASTNode('op', value='copy', astKind=ex.astKind, children=(ast,))
587
+
588
+ ast = typeCompileAst(ast)
589
+
590
+ aliases = collapseDuplicateSubtrees(ast)
591
+
592
+ assignLeafRegisters(ast.allOf('raw'), Immediate)
593
+ assignLeafRegisters(ast.allOf('variable', 'constant'), Register)
594
+ assignBranchRegisters(ast.allOf('op'), Register)
595
+
596
+ # assign registers for aliases
597
+ for a in aliases:
598
+ a.reg = a.value.reg
599
+
600
+ input_order = getInputOrder(ast, input_order)
601
+ constants_order, constants = getConstants(ast)
602
+
603
+ if isReduction(ast):
604
+ ast.reg.temporary = False
605
+
606
+ optimizeTemporariesAllocation(ast)
607
+
608
+ ast.reg.temporary = False
609
+ r_output = 0
610
+ ast.reg.n = 0
611
+
612
+ r_inputs = r_output + 1
613
+ r_constants = setOrderedRegisterNumbers(input_order, r_inputs)
614
+ r_temps = setOrderedRegisterNumbers(constants_order, r_constants)
615
+ r_end, tempsig = setRegisterNumbersForTemporaries(ast, r_temps)
616
+
617
+ threeAddrProgram = convertASTtoThreeAddrForm(ast)
618
+ input_names = tuple([a.value for a in input_order])
619
+ signature = ''.join(type_to_typecode[types.get(x, default_type)]
620
+ for x in input_names)
621
+ return threeAddrProgram, signature, tempsig, constants, input_names
622
+
623
+
624
+ def NumExpr(ex, signature=(), sanitize: bool=True, **kwargs):
625
+ """
626
+ Compile an expression built using E.<variable> variables to a function.
627
+
628
+ ex can also be specified as a string "2*a+3*b".
629
+
630
+ The order of the input variables and their types can be specified using the
631
+ signature parameter, which is a list of (name, type) pairs.
632
+
633
+ Returns a `NumExpr` object containing the compiled function.
634
+ """
635
+
636
+ # In that case _frame_depth is wrong (it should be 2) but it doesn't matter
637
+ # since it will not be used (because truediv='auto' has already been
638
+ # translated to either True or False).
639
+ _frame_depth = 1
640
+ context = getContext(kwargs, _frame_depth=_frame_depth)
641
+ threeAddrProgram, inputsig, tempsig, constants, input_names = precompile(ex, signature, context, sanitize=sanitize)
642
+ program = compileThreeAddrForm(threeAddrProgram)
643
+ return interpreter.NumExpr(inputsig.encode('ascii'),
644
+ tempsig.encode('ascii'),
645
+ program, constants, input_names)
646
+
647
+
648
+ def disassemble(nex):
649
+ """
650
+ Given a NumExpr object, return a list which is the program disassembled.
651
+ """
652
+ rev_opcodes = {}
653
+ for op in interpreter.opcodes:
654
+ rev_opcodes[interpreter.opcodes[op]] = op
655
+ r_constants = 1 + len(nex.signature)
656
+ r_temps = r_constants + len(nex.constants)
657
+
658
+ def parseOp(op):
659
+ name, sig = [*op.rsplit(b'_', 1), ''][:2]
660
+ return name, sig
661
+
662
+ def getArg(pc, offset):
663
+ arg = nex.program[pc + (offset if offset < 4 else offset+1)]
664
+ _, sig = parseOp(rev_opcodes.get(nex.program[pc]))
665
+ try:
666
+ code = sig[offset - 1]
667
+ except IndexError:
668
+ return None
669
+
670
+ code = bytes([code])
671
+
672
+ if arg == 255:
673
+ return None
674
+ if code != b'n':
675
+ if arg == 0:
676
+ return b'r0'
677
+ elif arg < r_constants:
678
+ return ('r%d[%s]' % (arg, nex.input_names[arg - 1])).encode('ascii')
679
+ elif arg < r_temps:
680
+ return ('c%d[%s]' % (arg, nex.constants[arg - r_constants])).encode('ascii')
681
+ else:
682
+ return ('t%d' % (arg,)).encode('ascii')
683
+ else:
684
+ return arg
685
+
686
+ source = []
687
+ for pc in range(0, len(nex.program), 4):
688
+ op = rev_opcodes.get(nex.program[pc])
689
+ _, sig = parseOp(op)
690
+ parsed = [op]
691
+ for i in range(len(sig)):
692
+ parsed.append(getArg(pc, 1 + i))
693
+ while len(parsed) < 4:
694
+ parsed.append(None)
695
+ source.append(parsed)
696
+ return source
697
+
698
+
699
+ def getType(a):
700
+ kind = a.dtype.kind
701
+ if kind == 'b':
702
+ return bool
703
+ if kind in 'iu':
704
+ if a.dtype.itemsize > 4:
705
+ return long_ # ``long`` is for integers of more than 32 bits
706
+ if kind == 'u' and a.dtype.itemsize == 4:
707
+ return long_ # use ``long`` here as an ``int`` is not enough
708
+ return int_
709
+ if kind == 'f':
710
+ if a.dtype.itemsize > 4:
711
+ return double # ``double`` is for floats of more than 32 bits
712
+ return float
713
+ if kind == 'c':
714
+ return complex
715
+ if kind == 'S':
716
+ return bytes
717
+ if kind == 'U':
718
+ raise ValueError('NumExpr 2 does not support Unicode as a dtype.')
719
+ raise ValueError("unknown type %s" % a.dtype.name)
720
+
721
+
722
+ def getExprNames(text, context, sanitize: bool=True):
723
+ ex = stringToExpression(text, {}, context, sanitize)
724
+ ast = expressionToAST(ex)
725
+ input_order = getInputOrder(ast, None)
726
+ #try to figure out if vml operations are used by expression
727
+ if not use_vml:
728
+ ex_uses_vml = False
729
+ else:
730
+ for node in ast.postorderWalk():
731
+ if node.astType == 'op' and node.value in vml_functions:
732
+ ex_uses_vml = True
733
+ break
734
+ else:
735
+ ex_uses_vml = False
736
+
737
+ return [a.value for a in input_order], ex_uses_vml
738
+
739
+
740
+ def getArguments(names, local_dict=None, global_dict=None, _frame_depth: int=2):
741
+ """
742
+ Get the arguments based on the names.
743
+ """
744
+ call_frame = sys._getframe(_frame_depth)
745
+
746
+ clear_local_dict = False
747
+ if local_dict is None:
748
+ local_dict = call_frame.f_locals
749
+ clear_local_dict = True
750
+ try:
751
+ frame_globals = call_frame.f_globals
752
+ if global_dict is None:
753
+ global_dict = frame_globals
754
+
755
+ # If `call_frame` is the top frame of the interpreter we can't clear its
756
+ # `local_dict`, because it is actually the `global_dict`.
757
+ clear_local_dict = clear_local_dict and not frame_globals is local_dict
758
+
759
+ arguments = []
760
+ for name in names:
761
+ try:
762
+ a = local_dict[name]
763
+ except KeyError:
764
+ a = global_dict[name]
765
+ arguments.append(numpy.asarray(a))
766
+ finally:
767
+ # If we generated local_dict via an explicit reference to f_locals,
768
+ # clear the dict to prevent creating extra ref counts in the caller's scope
769
+ # See https://github.com/pydata/numexpr/issues/310
770
+ if clear_local_dict:
771
+ local_dict.clear()
772
+
773
+ return arguments
774
+
775
+
776
+ # Dictionaries for caching variable names and compiled expressions
777
+ _names_cache = CacheDict(256)
778
+ _numexpr_cache = CacheDict(256)
779
+ _numexpr_last = {}
780
+ evaluate_lock = threading.Lock()
781
+
782
+ # MAYBE: decorate this function to add attributes instead of having the
783
+ # _numexpr_last dictionary?
784
+ def validate(ex: str,
785
+ local_dict: Optional[Dict] = None,
786
+ global_dict: Optional[Dict] = None,
787
+ out: numpy.ndarray = None,
788
+ order: str = 'K',
789
+ casting: str = 'safe',
790
+ _frame_depth: int = 2,
791
+ sanitize: Optional[bool] = None,
792
+ **kwargs) -> Optional[Exception]:
793
+ r"""
794
+ Validate a NumExpr expression with the given `local_dict` or `locals()`.
795
+ Returns `None` on success and the Exception object if one occurs. Note that
796
+ you can proceed directly to call `re_evaluate()` if you use `validate()`
797
+ to sanitize your expressions and variables in advance.
798
+
799
+ Parameters
800
+ ----------
801
+ ex: str
802
+ a string forming an expression, like "2*a+3*b". The values for "a"
803
+ and "b" will by default be taken from the calling function's frame
804
+ (through use of sys._getframe()). Alternatively, they can be specified
805
+ using the 'local_dict' or 'global_dict' arguments.
806
+
807
+ local_dict: dictionary, optional
808
+ A dictionary that replaces the local operands in current frame.
809
+
810
+ global_dict: dictionary, optional
811
+ A dictionary that replaces the global operands in current frame.
812
+
813
+ out: NumPy array, optional
814
+ An existing array where the outcome is going to be stored. Care is
815
+ required so that this array has the same shape and type than the
816
+ actual outcome of the computation. Useful for avoiding unnecessary
817
+ new array allocations.
818
+
819
+ order: {'C', 'F', 'A', or 'K'}, optional
820
+ Controls the iteration order for operands. 'C' means C order, 'F'
821
+ means Fortran order, 'A' means 'F' order if all the arrays are
822
+ Fortran contiguous, 'C' order otherwise, and 'K' means as close to
823
+ the order the array elements appear in memory as possible. For
824
+ efficient computations, typically 'K'eep order (the default) is
825
+ desired.
826
+
827
+ casting: {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
828
+ Controls what kind of data casting may occur when making a copy or
829
+ buffering. Setting this to 'unsafe' is not recommended, as it can
830
+ adversely affect accumulations.
831
+
832
+ * 'no' means the data types should not be cast at all.
833
+ * 'equiv' means only byte-order changes are allowed.
834
+ * 'safe' means only casts which can preserve values are allowed.
835
+ * 'same_kind' means only safe casts or casts within a kind,
836
+ like float64 to float32, are allowed.
837
+ * 'unsafe' means any data conversions may be done.
838
+
839
+ sanitize: Optional[bool]
840
+ Both `validate` and by extension `evaluate` call `eval(ex)`, which is
841
+ potentially dangerous on unsanitized inputs. As such, NumExpr by default
842
+ performs simple sanitization, banning the character ':;[', the
843
+ dunder '__[\w+]__', and attribute access to all but '.real' and '.imag'.
844
+
845
+ Using `None` defaults to `True` unless the environment variable
846
+ `NUMEXPR_SANITIZE=0` is set, in which case the default is `False`.
847
+ Nominally this can be set via `os.environ` before `import numexpr`.
848
+
849
+ _frame_depth: int
850
+ The calling frame depth. Unless you are a NumExpr developer you should
851
+ not set this value.
852
+
853
+ Note
854
+ ----
855
+
856
+ """
857
+ global _numexpr_last
858
+
859
+ try:
860
+
861
+ if not isinstance(ex, str):
862
+ raise ValueError("must specify expression as a string")
863
+
864
+ if sanitize is None:
865
+ if 'NUMEXPR_SANITIZE' in os.environ:
866
+ sanitize = bool(int(os.environ['NUMEXPR_SANITIZE']))
867
+ else:
868
+ sanitize = True
869
+
870
+ # Get the names for this expression
871
+ context = getContext(kwargs)
872
+ expr_key = (ex, tuple(sorted(context.items())))
873
+ if expr_key not in _names_cache:
874
+ _names_cache[expr_key] = getExprNames(ex, context, sanitize=sanitize)
875
+ names, ex_uses_vml = _names_cache[expr_key]
876
+ arguments = getArguments(names, local_dict, global_dict, _frame_depth=_frame_depth)
877
+
878
+ # Create a signature
879
+ signature = [(name, getType(arg)) for (name, arg) in
880
+ zip(names, arguments)]
881
+
882
+ # Look up numexpr if possible.
883
+ numexpr_key = expr_key + (tuple(signature),)
884
+ try:
885
+ compiled_ex = _numexpr_cache[numexpr_key]
886
+ except KeyError:
887
+ compiled_ex = _numexpr_cache[numexpr_key] = NumExpr(ex, signature, sanitize=sanitize, **context)
888
+ kwargs = {'out': out, 'order': order, 'casting': casting,
889
+ 'ex_uses_vml': ex_uses_vml}
890
+ _numexpr_last = dict(ex=compiled_ex, argnames=names, kwargs=kwargs)
891
+ except Exception as e:
892
+ return e
893
+ return None
894
+
895
+ def evaluate(ex: str,
896
+ local_dict: Optional[Dict] = None,
897
+ global_dict: Optional[Dict] = None,
898
+ out: numpy.ndarray = None,
899
+ order: str = 'K',
900
+ casting: str = 'safe',
901
+ sanitize: Optional[bool] = None,
902
+ _frame_depth: int = 3,
903
+ **kwargs) -> numpy.ndarray:
904
+ r"""
905
+ Evaluate a simple array expression element-wise using the virtual machine.
906
+
907
+ Parameters
908
+ ----------
909
+ ex: str
910
+ a string forming an expression, like "2*a+3*b". The values for "a"
911
+ and "b" will by default be taken from the calling function's frame
912
+ (through use of sys._getframe()). Alternatively, they can be specified
913
+ using the 'local_dict' or 'global_dict' arguments.
914
+
915
+ local_dict: dictionary, optional
916
+ A dictionary that replaces the local operands in the current frame.
917
+
918
+ global_dict: dictionary, optional
919
+ A dictionary that replaces the global operands in the current frame.
920
+
921
+ out: NumPy array, optional
922
+ An existing array where the outcome is going to be stored. Care is
923
+ required so that this array has the same shape and type as the
924
+ actual outcome of the computation. Useful for avoiding unnecessary
925
+ new array allocations.
926
+
927
+ order: {'C', 'F', 'A', or 'K'}, optional
928
+ Controls the iteration order for operands. 'C' means C order, 'F'
929
+ means Fortran order, 'A' means 'F' order if all the arrays are
930
+ Fortran contiguous, 'C' order otherwise, and 'K' means as close to
931
+ the order the array elements appear in memory as possible. For
932
+ efficient computations, typically 'K'eep order (the default) is
933
+ desired.
934
+
935
+ casting: {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
936
+ Controls what kind of data casting may occur when making a copy or
937
+ buffering. Setting this to 'unsafe' is not recommended, as it can
938
+ adversely affect accumulations.
939
+
940
+ * 'no' means the data types should not be cast at all.
941
+ * 'equiv' means only byte-order changes are allowed.
942
+ * 'safe' means only casts which can preserve values are allowed.
943
+ * 'same_kind' means only safe casts or casts within a kind,
944
+ like float64 to float32, are allowed.
945
+ * 'unsafe' means any data conversions may be done.
946
+
947
+ sanitize: Optional[bool]
948
+ Both `validate` and by extension `evaluate` call `eval(ex)`, which is
949
+ potentially dangerous on unsanitized inputs. As such, NumExpr by default
950
+ performs simple sanitization, banning the characters ':;[', the
951
+ dunder '__[\w+]__', and attribute access to all but '.real' and '.imag'.
952
+
953
+ Using `None` defaults to `True` unless the environment variable
954
+ `NUMEXPR_SANITIZE=0` is set, in which case the default is `False`.
955
+ Nominally this can be set via `os.environ` before `import numexpr`.
956
+
957
+ _frame_depth: int
958
+ The calling frame depth. Unless you are a NumExpr developer you should
959
+ not set this value.
960
+
961
+ Note
962
+ ----
963
+ Both `validate` and by extension `evaluate` call `eval(ex)`, which is
964
+ potentially dangerous on unsanitized inputs. As such, NumExpr does some
965
+ sanitization, banning the characters ':;[', the dunder '__', and attribute
966
+ access to all but '.real' and '.imag' on complex numbers.
967
+ """
968
+ # We could avoid code duplication if we called validate and then re_evaluate
969
+ # here, but then we would have difficulties with the `sys._getframe(2)` call in
970
+ # `getArguments`
971
+ e = validate(ex, local_dict=local_dict, global_dict=global_dict,
972
+ out=out, order=order, casting=casting,
973
+ _frame_depth=_frame_depth, sanitize=sanitize, **kwargs)
974
+ if e is None:
975
+ return re_evaluate(local_dict=local_dict, global_dict=global_dict, _frame_depth=_frame_depth)
976
+ else:
977
+ raise e
978
+
979
+ def re_evaluate(local_dict: Optional[Dict] = None,
980
+ global_dict: Optional[Dict] = None,
981
+ _frame_depth: int=2) -> numpy.ndarray:
982
+ """
983
+ Re-evaluate the previously executed array expression without any checks.
984
+
985
+ This is meant for accelerating loops that are re-evaluating the same
986
+ expression repeatedly without changing anything other than the operands.
987
+ If unsure, use evaluate() which is safer.
988
+
989
+ Parameters
990
+ ----------
991
+ local_dict: dictionary, optional
992
+ A dictionary that replaces the local operands in the current frame.
+ global_dict: dictionary, optional
+ A dictionary that replaces the global operands in the current frame.
993
+ _frame_depth: int
994
+ The calling frame depth. Unless you are a NumExpr developer you should
995
+ not set this value.
996
+ """
997
+ global _numexpr_last
998
+
999
+ try:
1000
+ compiled_ex = _numexpr_last['ex']
1001
+ except KeyError:
1002
+ raise RuntimeError("A previous evaluate() execution was not found, please call `validate` or `evaluate` once before `re_evaluate`")
1003
+ argnames = _numexpr_last['argnames']
1004
+ args = getArguments(argnames, local_dict, global_dict, _frame_depth=_frame_depth)
1005
+ kwargs = _numexpr_last['kwargs']
1006
+ with evaluate_lock:
1007
+ return compiled_ex(*args, **kwargs)
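
The pair of functions above is the intended fast path for loops: the first `evaluate` call parses, compiles and caches the program, and `re_evaluate` re-runs the cached program with whatever values the operands currently hold, skipping parsing and validation. A small sketch (array names are illustrative):

import numpy as np
import numexpr as ne

a = np.arange(1e6)
b = np.arange(1e6) * 0.1

out = ne.evaluate("2*a + 3*b")          # parse + compile + cache + run
ne.evaluate("2*a + 3*b", out=out)       # reuse an existing, correctly shaped array
for _ in range(5):
    b += 0.01                           # only the operand values change...
    out = ne.re_evaluate()              # ...so re-run the cached program directly
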
llmeval-env/lib/python3.10/site-packages/numexpr/tests/__init__.py ADDED
@@ -0,0 +1,14 @@
1
+ ###################################################################
2
+ # Numexpr - Fast numerical array expression evaluator for NumPy.
3
+ #
4
+ # License: MIT
5
+ # Author: See AUTHORS.txt
6
+ #
7
+ # See LICENSE.txt and LICENSES/*.txt for details about copyright and
8
+ # rights to use.
9
+ ####################################################################
10
+
11
+ from numexpr.tests.test_numexpr import test, print_versions
12
+
13
+ if __name__ == '__main__':
14
+ test()
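
For reference, the entry points imported above can also be driven directly from an interpreter; a short sketch (assumes an installed numexpr with its compiled extension):

from numexpr.tests import test, print_versions

print_versions()            # dump numexpr/NumPy/VML versions and thread settings
result = test(verbosity=2)  # returns a unittest.TestResult
assert result.wasSuccessful()
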
llmeval-env/lib/python3.10/site-packages/numexpr/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (310 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/numexpr/tests/__pycache__/test_numexpr.cpython-310.pyc ADDED
Binary file (43.5 kB). View file
 
llmeval-env/lib/python3.10/site-packages/numexpr/tests/test_numexpr.py ADDED
@@ -0,0 +1,1348 @@
1
+
2
+ ###################################################################
3
+ # Numexpr - Fast numerical array expression evaluator for NumPy.
4
+ #
5
+ # License: MIT
6
+ # Author: See AUTHORS.txt
7
+ #
8
+ # See LICENSE.txt and LICENSES/*.txt for details about copyright and
9
+ # rights to use.
10
+ ####################################################################
11
+
12
+
13
+ import os
14
+ import sys
15
+ import platform
16
+ import warnings
17
+ from contextlib import contextmanager
18
+ import subprocess
19
+
20
+ import numpy as np
21
+ from numpy import (
22
+ array, arange, empty, zeros, int32, int64, uint16, cdouble, float64, rec,
23
+ copy, ones_like, where, all as alltrue, linspace,
24
+ sum, prod, sqrt, fmod, floor, ceil,
25
+ sin, cos, tan, arcsin, arccos, arctan, arctan2,
26
+ sinh, cosh, tanh, arcsinh, arccosh, arctanh,
27
+ log, log1p, log10, exp, expm1, conj)
28
+ import numpy
29
+ from numpy.testing import (assert_equal, assert_array_equal,
30
+ assert_array_almost_equal, assert_allclose)
31
+ from numpy import shape, allclose, array_equal, ravel, isnan, isinf
32
+
33
+ import numexpr
34
+ from numexpr import E, NumExpr, evaluate, re_evaluate, validate, disassemble, use_vml
35
+ from numexpr.expressions import ConstantNode
36
+ from numexpr.utils import detect_number_of_cores
37
+
38
+ import unittest
39
+
40
+ TestCase = unittest.TestCase
41
+
42
+ double = np.double
43
+ long = int
44
+
45
+
46
+ class test_numexpr(TestCase):
47
+ """Testing with 1 thread"""
48
+ nthreads = 1
49
+
50
+ def setUp(self):
51
+ numexpr.set_num_threads(self.nthreads)
52
+
53
+ def test_simple(self):
54
+ ex = 2.0 * E.a + 3.0 * E.b * E.c
55
+ sig = [('a', double), ('b', double), ('c', double)]
56
+ func = NumExpr(ex, signature=sig)
57
+ x = func(array([1., 2, 3]), array([4., 5, 6]), array([7., 8, 9]))
58
+ assert_array_equal(x, array([86., 124., 168.]))
59
+
60
+ def test_simple_expr_small_array(self):
61
+ func = NumExpr(E.a)
62
+ x = arange(100.0)
63
+ y = func(x)
64
+ assert_array_equal(x, y)
65
+
66
+ def test_simple_expr(self):
67
+ func = NumExpr(E.a)
68
+ x = arange(1e6)
69
+ y = func(x)
70
+ assert_array_equal(x, y)
71
+
72
+ def test_rational_expr(self):
73
+ func = NumExpr((E.a + 2.0 * E.b) / (1 + E.a + 4 * E.b * E.b))
74
+ a = arange(1e6)
75
+ b = arange(1e6) * 0.1
76
+ x = (a + 2 * b) / (1 + a + 4 * b * b)
77
+ y = func(a, b)
78
+ assert_array_almost_equal(x, y)
79
+
80
+ def test_reductions(self):
81
+ # Check that they compile OK.
82
+ assert_equal(disassemble(
83
+ NumExpr("sum(x**2+2, axis=None)", [('x', double)])),
84
+ [(b'mul_ddd', b't3', b'r1[x]', b'r1[x]'),
85
+ (b'add_ddd', b't3', b't3', b'c2[2.0]'),
86
+ (b'sum_ddn', b'r0', b't3', None)])
87
+ assert_equal(disassemble(
88
+ NumExpr("sum(x**2+2, axis=1)", [('x', double)])),
89
+ [(b'mul_ddd', b't3', b'r1[x]', b'r1[x]'),
90
+ (b'add_ddd', b't3', b't3', b'c2[2.0]'),
91
+ (b'sum_ddn', b'r0', b't3', 1)])
92
+ assert_equal(disassemble(
93
+ NumExpr("prod(x**2+2, axis=2)", [('x', double)])),
94
+ [(b'mul_ddd', b't3', b'r1[x]', b'r1[x]'),
95
+ (b'add_ddd', b't3', b't3', b'c2[2.0]'),
96
+ (b'prod_ddn', b'r0', b't3', 2)])
97
+ # Check that full reductions work.
98
+ x = zeros(100000) + .01 # checks issue #41
99
+ assert_allclose(evaluate("sum(x+2,axis=None)"), sum(x + 2, axis=None))
100
+ assert_allclose(evaluate("sum(x+2,axis=0)"), sum(x + 2, axis=0))
101
+ assert_allclose(evaluate("prod(x,axis=0)"), prod(x, axis=0))
102
+ assert_allclose(evaluate("min(x)"), np.min(x))
103
+ assert_allclose(evaluate("max(x,axis=0)"), np.max(x, axis=0))
104
+
105
+ # Fix for #277, array with leading singleton dimension
106
+ x = np.arange(10).reshape(1,10)
107
+ assert_allclose(evaluate("sum(x,axis=None)"), sum(x, axis=None) )
108
+ assert_allclose(evaluate("sum(x,axis=0)"), sum(x, axis=0) )
109
+ assert_allclose(evaluate("sum(x,axis=1)"), sum(x, axis=1) )
110
+
111
+ x = arange(10.0)
112
+ assert_allclose(evaluate("sum(x**2+2,axis=0)"), sum(x ** 2 + 2, axis=0))
113
+ assert_allclose(evaluate("prod(x**2+2,axis=0)"), prod(x ** 2 + 2, axis=0))
114
+ assert_allclose(evaluate("min(x**2+2,axis=0)"), np.min(x ** 2 + 2, axis=0))
115
+ assert_allclose(evaluate("max(x**2+2,axis=0)"), np.max(x ** 2 + 2, axis=0))
116
+
117
+ x = arange(100.0)
118
+ assert_allclose(evaluate("sum(x**2+2,axis=0)"), sum(x ** 2 + 2, axis=0))
119
+ assert_allclose(evaluate("prod(x-1,axis=0)"), prod(x - 1, axis=0))
120
+ assert_allclose(evaluate("min(x-1,axis=0)"), np.min(x - 1, axis=0))
121
+ assert_allclose(evaluate("max(x-1,axis=0)"), np.max(x - 1, axis=0))
122
+ x = linspace(0.1, 1.0, 2000)
123
+ assert_allclose(evaluate("sum(x**2+2,axis=0)"), sum(x ** 2 + 2, axis=0))
124
+ assert_allclose(evaluate("prod(x-1,axis=0)"), prod(x - 1, axis=0))
125
+ assert_allclose(evaluate("min(x-1,axis=0)"), np.min(x - 1, axis=0))
126
+ assert_allclose(evaluate("max(x-1,axis=0)"), np.max(x - 1, axis=0))
127
+
128
+ # Check that reductions along an axis work
129
+ y = arange(9.0).reshape(3, 3)
130
+ assert_allclose(evaluate("sum(y**2, axis=1)"), sum(y ** 2, axis=1))
131
+ assert_allclose(evaluate("sum(y**2, axis=0)"), sum(y ** 2, axis=0))
132
+ assert_allclose(evaluate("sum(y**2, axis=None)"), sum(y ** 2, axis=None))
133
+ assert_allclose(evaluate("prod(y**2, axis=1)"), prod(y ** 2, axis=1))
134
+ assert_allclose(evaluate("prod(y**2, axis=0)"), prod(y ** 2, axis=0))
135
+ assert_allclose(evaluate("prod(y**2, axis=None)"), prod(y ** 2, axis=None))
136
+ assert_allclose(evaluate("min(y**2, axis=1)"), np.min(y ** 2, axis=1))
137
+ assert_allclose(evaluate("min(y**2, axis=0)"), np.min(y ** 2, axis=0))
138
+ assert_allclose(evaluate("min(y**2, axis=None)"), np.min(y ** 2, axis=None))
139
+ assert_allclose(evaluate("max(y**2, axis=1)"), np.max(y ** 2, axis=1))
140
+ assert_allclose(evaluate("max(y**2, axis=0)"), np.max(y ** 2, axis=0))
141
+ assert_allclose(evaluate("max(y**2, axis=None)"), np.max(y ** 2, axis=None))
142
+ # Check integers
143
+ x = arange(10.)
144
+ x = x.astype(int)
145
+ assert_allclose(evaluate("sum(x**2+2,axis=0)"), sum(x ** 2 + 2, axis=0))
146
+ assert_allclose(evaluate("prod(x**2+2,axis=0)"), prod(x ** 2 + 2, axis=0))
147
+ assert_allclose(evaluate("min(x**2+2,axis=0)"), np.min(x ** 2 + 2, axis=0))
148
+ assert_allclose(evaluate("max(x**2+2,axis=0)"), np.max(x ** 2 + 2, axis=0))
149
+ # Check longs
150
+ x = x.astype(int)
151
+ assert_allclose(evaluate("sum(x**2+2,axis=0)"), sum(x ** 2 + 2, axis=0))
152
+ assert_allclose(evaluate("prod(x**2+2,axis=0)"), prod(x ** 2 + 2, axis=0))
153
+ assert_allclose(evaluate("min(x**2+2,axis=0)"), np.min(x ** 2 + 2, axis=0))
154
+ assert_allclose(evaluate("max(x**2+2,axis=0)"), np.max(x ** 2 + 2, axis=0))
155
+ # Check complex
156
+ x = x + .1j
157
+ assert_allclose(evaluate("sum(x**2+2,axis=0)"), sum(x ** 2 + 2, axis=0))
158
+ assert_allclose(evaluate("prod(x-1,axis=0)"), prod(x - 1, axis=0))
159
+
160
+ def test_in_place(self):
161
+ x = arange(10000.).reshape(1000, 10)
162
+ evaluate("x + 3", out=x)
163
+ assert_equal(x, arange(10000.).reshape(1000, 10) + 3)
164
+ y = arange(10)
165
+ evaluate("(x - 3) * y + (x - 3)", out=x)
166
+ assert_equal(x, arange(10000.).reshape(1000, 10) * (arange(10) + 1))
167
+
168
+ def test_axis(self):
169
+ y = arange(9.0).reshape(3, 3)
170
+ try:
171
+ evaluate("sum(y, axis=2)")
172
+ except ValueError:
173
+ pass
174
+ else:
175
+ raise ValueError("should raise exception!")
176
+ try:
177
+ evaluate("sum(y, axis=-3)")
178
+ except ValueError:
179
+ pass
180
+ else:
181
+ raise ValueError("should raise exception!")
182
+ try:
183
+ # Negative axis are not supported
184
+ evaluate("sum(y, axis=-1)")
185
+ except ValueError:
186
+ pass
187
+ else:
188
+ raise ValueError("should raise exception!")
189
+
190
+ def test_r0_reuse(self):
191
+ assert_equal(disassemble(NumExpr("x * x + 2", [('x', double)])),
192
+ [(b'mul_ddd', b'r0', b'r1[x]', b'r1[x]'),
193
+ (b'add_ddd', b'r0', b'r0', b'c2[2.0]')])
194
+
195
+ def test_str_contains_basic0(self):
196
+ res = evaluate('contains(b"abc", b"ab")')
197
+ assert_equal(res, True)
198
+
199
+ def test_str_contains_basic1(self):
200
+ haystack = array([b'abc', b'def', b'xyz', b'x11', b'za'])
201
+ res = evaluate('contains(haystack, b"ab")')
202
+ assert_equal(res, [True, False, False, False, False])
203
+
204
+ def test_str_contains_basic2(self):
205
+ haystack = array([b'abc', b'def', b'xyz', b'x11', b'za'])
206
+ res = evaluate('contains(b"abcd", haystack)')
207
+ assert_equal(res, [True, False, False, False, False])
208
+
209
+ def test_str_contains_basic3(self):
210
+ haystacks = array(
211
+ [b'abckkk', b'adef', b'xyz', b'x11abcp', b'za', b'abc'])
212
+ needles = array(
213
+ [b'abc', b'def', b'aterr', b'oot', b'zu', b'ab'])
214
+ res = evaluate('contains(haystacks, needles)')
215
+ assert_equal(res, [True, True, False, False, False, True])
216
+
217
+ def test_str_contains_basic4(self):
218
+ needles = array(
219
+ [b'abc', b'def', b'aterr', b'oot', b'zu', b'ab c', b' abc',
220
+ b'abc '])
221
+ res = evaluate('contains(b"test abc here", needles)')
222
+ assert_equal(res, [True, False, False, False, False, False, True, True])
223
+
224
+ def test_str_contains_basic5(self):
225
+ needles = array(
226
+ [b'abc', b'ab c', b' abc', b' abc ', b'\tabc', b'c h'])
227
+ res = evaluate('contains(b"test abc here", needles)')
228
+ assert_equal(res, [True, False, True, True, False, True])
229
+
230
+ # Compare operation of Python 'in' operator with 'contains' using a
231
+ # product of two lists of strings.
232
+
233
+ def test_str_contains_listproduct(self):
234
+ from itertools import product
235
+
236
+ small = [
237
+ 'It w', 'as th', 'e Whit', 'e Rab', 'bit,', ' tro', 'tting',
238
+ ' sl', 'owly', ' back ', 'again,', ' and', ' lo', 'okin', 'g a',
239
+ 'nxious', 'ly a', 'bou', 't a', 's it w', 'ent,', ' as i', 'f it',
240
+ ' had l', 'ost', ' some', 'thi', 'ng; a', 'nd ', 'she ', 'heard ',
241
+ 'it mut', 'terin', 'g to ', 'its', 'elf ', "'The",
242
+ ' Duch', 'ess! T', 'he ', 'Duches', 's! Oh ', 'my dea', 'r paws',
243
+ '! Oh ', 'my f', 'ur ', 'and ', 'whiske', 'rs! ', 'She', "'ll g",
244
+ 'et me', ' ex', 'ecu', 'ted, ', 'as su', 're a', 's f', 'errets',
245
+ ' are f', 'errets', '! Wh', 'ere ', 'CAN', ' I hav', 'e d',
246
+ 'roppe', 'd t', 'hem,', ' I wo', 'nder?', "' A", 'lice',
247
+ ' gu', 'essed', ' in a', ' mom', 'ent ', 'tha', 't it w', 'as ',
248
+ 'looki', 'ng f', 'or ', 'the fa', 'n and ', 'the', ' pai',
249
+ 'r of w', 'hit', 'e kid', ' glo', 'ves', ', and ', 'she ',
250
+ 'very g', 'ood', '-na', 'turedl', 'y be', 'gan h', 'unt', 'ing',
251
+ ' about', ' for t', 'hem', ', but', ' they ', 'wer', 'e nowh',
252
+ 'ere to', ' be', ' se', 'en--', 'ever', 'ythin', 'g seem', 'ed ',
253
+ 'to ', 'have c', 'hang', 'ed ', 'since', ' he', 'r swim', ' in',
254
+ ' the', ' pool,', ' and', ' the g', 'reat ', 'hal', 'l, w', 'ith',
255
+ ' th', 'e gl', 'ass t', 'abl', 'e and ', 'the', ' li', 'ttle',
256
+ ' doo', 'r, ha', 'd v', 'ani', 'shed c', 'omp', 'lete', 'ly.']
257
+ big = [
258
+ 'It wa', 's the', ' W', 'hit', 'e ', 'Ra', 'bb', 'it, t', 'ro',
259
+ 'tting s', 'lowly', ' back ', 'agai', 'n, and', ' l', 'ookin',
260
+ 'g ', 'an', 'xiously', ' about ', 'as it w', 'ent, as', ' if ',
261
+ 'it had', ' los', 't ', 'so', 'mething', '; and', ' she h',
262
+ 'eard ', 'it ', 'mutteri', 'ng to', ' itself', " 'The ",
263
+ 'Duchess', '! ', 'Th', 'e ', 'Duchess', '! Oh m', 'y de',
264
+ 'ar paws', '! ', 'Oh my ', 'fu', 'r and w', 'hiskers', "! She'",
265
+ 'll ', 'get', ' me ', 'execute', 'd,', ' a', 's ', 'su', 're as ',
266
+ 'fe', 'rrets', ' are f', 'errets!', ' Wher', 'e CAN', ' I ha',
267
+ 've dro', 'pped t', 'hem', ', I ', 'won', "der?' A",
268
+ 'lice g', 'uess', 'ed ', 'in a m', 'omen', 't that', ' i',
269
+ 't was l', 'ook', 'ing f', 'or th', 'e ', 'fan and', ' th', 'e p',
270
+ 'air o', 'f whit', 'e ki', 'd glove', 's, and ', 'she v', 'ery ',
271
+ 'good-na', 'tu', 'redl', 'y be', 'gan hun', 'ti', 'ng abou',
272
+ 't for t', 'he', 'm, bu', 't t', 'hey ', 'were n', 'owhere',
273
+ ' to b', 'e s', 'een-', '-eve', 'rythi', 'ng see', 'me', 'd ',
274
+ 'to ha', 've', ' c', 'hanged', ' sinc', 'e her s', 'wim ',
275
+ 'in the ', 'pool,', ' an', 'd the g', 'rea', 't h', 'all, wi',
276
+ 'th the ', 'glas', 's t', 'able an', 'd th', 'e littl', 'e door,',
277
+ ' had va', 'ni', 'shed co', 'mpletel', 'y.']
278
+ p = list(product(small, big))
279
+ python_in = [x[0] in x[1] for x in p]
280
+ a = [x[0].encode() for x in p]
281
+ b = [x[1].encode() for x in p]
282
+ res = [bool(x) for x in evaluate('contains(b, a)')]
283
+ assert_equal(res, python_in)
284
+
285
+ def test_str_contains_withemptystr1(self):
286
+ withemptystr = array([b'abc', b'def', b''])
287
+ res = evaluate('contains(b"abcd", withemptystr)')
288
+ assert_equal(res, [True, False, True])
289
+
290
+ def test_str_contains_withemptystr2(self):
291
+ withemptystr = array([b'abc', b'def', b''])
292
+ res = evaluate('contains(withemptystr, b"")')
293
+ assert_equal(res, [True, True, True])
294
+
295
+ def test_str_contains_long_needle(self):
296
+ a = b'1' + b'a' * 40
297
+ b = b'a' * 40
298
+ res = evaluate('contains(a, b)')
299
+ assert_equal(res, True)
300
+
301
+ def test_where_scalar_bool(self):
302
+ a = True
303
+ b = array([1, 2])
304
+ c = array([3, 4])
305
+ res = evaluate('where(a, b, c)')
306
+ assert_array_equal(res, b)
307
+ a = False
308
+ res = evaluate('where(a, b, c)')
309
+ assert_array_equal(res, c)
310
+
311
+ @unittest.skipIf(hasattr(sys, "pypy_version_info"),
312
+ "PyPy does not have sys.getrefcount()")
313
+ def test_refcount(self):
314
+ # Regression test for issue #310
315
+ a = array([1])
316
+ assert sys.getrefcount(a) == 2
317
+ evaluate('1')
318
+ assert sys.getrefcount(a) == 2
319
+
320
+ def test_locals_clears_globals(self):
321
+ # Check for issue #313, whereby clearing f_locals also clears f_globals
322
+ # if in the top-frame. This cannot be done inside `unittest` as it is always
323
+ # executing code in a child frame.
324
+ script = r';'.join([
325
+ r"import numexpr as ne",
326
+ r"a=10",
327
+ r"ne.evaluate('1')",
328
+ r"a += 1",
329
+ r"ne.evaluate('2', local_dict={})",
330
+ r"a += 1",
331
+ r"ne.evaluate('3', global_dict={})",
332
+ r"a += 1",
333
+ r"ne.evaluate('4', local_dict={}, global_dict={})",
334
+ r"a += 1",
335
+ ])
336
+ # Raises CalledProcessError on a non-normal exit
337
+ check = subprocess.check_call([sys.executable, '-c', script])
338
+ # Ideally this test should also be done against ipython but it's not
339
+ # a requirement.
340
+
341
+
342
+
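Several tests in the class above construct `NumExpr` programs directly rather than going through `evaluate`; a minimal sketch of that lower-level pattern (values chosen only for illustration): build a symbolic expression from the `E` namespace and compile it once against an explicit signature.

import numpy as np
from numexpr import E, NumExpr

func = NumExpr(2.0 * E.a + 3.0 * E.b,
               signature=[('a', np.double), ('b', np.double)])
print(func(np.array([1.0, 2.0]), np.array([3.0, 4.0])))   # -> [11. 16.]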
343
+ class test_numexpr2(test_numexpr):
344
+ """Testing with 2 threads"""
345
+ nthreads = 2
346
+
347
+
348
+ class test_evaluate(TestCase):
349
+ def test_simple(self):
350
+ a = array([1., 2., 3.])
351
+ b = array([4., 5., 6.])
352
+ c = array([7., 8., 9.])
353
+ x = evaluate("2*a + 3*b*c")
354
+ assert_array_equal(x, array([86., 124., 168.]))
355
+
356
+ def test_simple_expr_small_array(self):
357
+ x = arange(100.0)
358
+ y = evaluate("x")
359
+ assert_array_equal(x, y)
360
+
361
+ def test_simple_expr(self):
362
+ x = arange(1e6)
363
+ y = evaluate("x")
364
+ assert_array_equal(x, y)
365
+
366
+ def test_re_evaluate(self):
367
+ a = array([1., 2., 3.])
368
+ b = array([4., 5., 6.])
369
+ c = array([7., 8., 9.])
370
+ x = evaluate("2*a + 3*b*c")
371
+ x = re_evaluate()
372
+ assert_array_equal(x, array([86., 124., 168.]))
373
+
374
+ def test_re_evaluate_dict(self):
375
+ a1 = array([1., 2., 3.])
376
+ b1 = array([4., 5., 6.])
377
+ c1 = array([7., 8., 9.])
378
+ local_dict={'a': a1, 'b': b1, 'c': c1}
379
+ x = evaluate("2*a + 3*b*c", local_dict=local_dict)
380
+ x = re_evaluate(local_dict=local_dict)
381
+ assert_array_equal(x, array([86., 124., 168.]))
382
+
383
+ def test_validate(self):
384
+ a = array([1., 2., 3.])
385
+ b = array([4., 5., 6.])
386
+ c = array([7., 8., 9.])
387
+ retval = validate("2*a + 3*b*c")
388
+ assert(retval is None)
389
+ x = re_evaluate()
390
+ assert_array_equal(x, array([86., 124., 168.]))
391
+
392
+ def test_validate_missing_var(self):
393
+ a = array([1., 2., 3.])
394
+ b = array([4., 5., 6.])
395
+ retval = validate("2*a + 3*b*c")
396
+ assert(isinstance(retval, KeyError))
397
+
398
+ def test_validate_syntax(self):
399
+ retval = validate("2+")
400
+ assert(isinstance(retval, SyntaxError))
401
+
402
+ def test_validate_dict(self):
403
+ a1 = array([1., 2., 3.])
404
+ b1 = array([4., 5., 6.])
405
+ c1 = array([7., 8., 9.])
406
+ local_dict={'a': a1, 'b': b1, 'c': c1}
407
+ retval = validate("2*a + 3*b*c", local_dict=local_dict)
408
+ assert(retval is None)
409
+ x = re_evaluate(local_dict=local_dict)
410
+ assert_array_equal(x, array([86., 124., 168.]))
411
+
412
+ # Test for issue #22
413
+ def test_true_div(self):
414
+ x = arange(10, dtype='i4')
415
+ assert_array_equal(evaluate("x/2"), x / 2)
416
+ assert_array_equal(evaluate("x/2", truediv=False), x / 2)
417
+ assert_array_equal(evaluate("x/2", truediv='auto'), x / 2)
418
+ assert_array_equal(evaluate("x/2", truediv=True), x / 2.0)
419
+
420
+ def test_left_shift(self):
421
+ x = arange(10, dtype='i4')
422
+ assert_array_equal(evaluate("x<<2"), x << 2)
423
+
424
+ def test_right_shift(self):
425
+ x = arange(10, dtype='i4')
426
+ assert_array_equal(evaluate("x>>2"), x >> 2)
427
+
428
+ # PyTables uses __nonzero__ among ExpressionNode objects internally
429
+ # so this should be commented out for the moment. See #24.
430
+ def test_boolean_operator(self):
431
+ x = arange(10, dtype='i4')
432
+ try:
433
+ evaluate("(x > 1) and (x < 9)")
434
+ except TypeError:
435
+ pass
436
+ else:
437
+ raise ValueError("should raise exception!")
438
+
439
+ def test_rational_expr(self):
440
+ a = arange(1e6)
441
+ b = arange(1e6) * 0.1
442
+ x = (a + 2 * b) / (1 + a + 4 * b * b)
443
+ y = evaluate("(a + 2*b) / (1 + a + 4*b*b)")
444
+ assert_array_almost_equal(x, y)
445
+
446
+ def test_complex_expr(self):
447
+ def complex(a, b):
448
+ c = zeros(a.shape, dtype=cdouble)
449
+ c.real = a
450
+ c.imag = b
451
+ return c
452
+
453
+ a = arange(1e4)
454
+ b = arange(1e4) ** 1e-5
455
+ z = a + 1j * b
456
+ x = z.imag
457
+ x = sin(complex(a, b)).real + z.imag
458
+ y = evaluate("sin(complex(a, b)).real + z.imag")
459
+ assert_array_almost_equal(x, y)
460
+
461
+ def test_complex_strides(self):
462
+ a = arange(100).reshape(10, 10)[::2]
463
+ b = arange(50).reshape(5, 10)
464
+ assert_array_equal(evaluate("a+b"), a + b)
465
+ c = empty([10], dtype=[('c1', int32), ('c2', uint16)])
466
+ c['c1'] = arange(10)
467
+ c['c2'].fill(0xaaaa)
468
+ c1 = c['c1']
469
+ a0 = a[0]
470
+ assert_array_equal(evaluate("c1"), c1)
471
+ assert_array_equal(evaluate("a0+c1"), a0 + c1)
472
+
473
+ def test_recarray_strides(self):
474
+ a = arange(100)
475
+ b = arange(100,200)
476
+ recarr = np.rec.array(None, formats='f4,f4', shape=(100,))
477
+ recarr['f0'] = a
478
+ recarr['f1'] = b
479
+ c = recarr['f1']
480
+ assert_array_almost_equal(evaluate("sqrt(c) > 1."), sqrt(c) > 1.)
481
+ assert_array_almost_equal(evaluate("log10(c)"), log10(c))
482
+
483
+ def test_broadcasting(self):
484
+ a = arange(100).reshape(10, 10)[::2]
485
+ c = arange(10)
486
+ d = arange(5).reshape(5, 1)
487
+ assert_array_equal(evaluate("a+c"), a + c)
488
+ assert_array_equal(evaluate("a+d"), a + d)
489
+ expr = NumExpr("2.0*a+3.0*c", [('a', double), ('c', double)])
490
+ assert_array_equal(expr(a, c), 2.0 * a + 3.0 * c)
491
+
492
+ def test_all_scalar(self):
493
+ a = 3.
494
+ b = 4.
495
+ assert_allclose(evaluate("a+b"), a + b)
496
+ expr = NumExpr("2*a+3*b", [('a', double), ('b', double)])
497
+ assert_equal(expr(a, b), 2 * a + 3 * b)
498
+
499
+ def test_run(self):
500
+ a = arange(100).reshape(10, 10)[::2]
501
+ b = arange(10)
502
+ expr = NumExpr("2*a+3*b", [('a', double), ('b', double)])
503
+ assert_array_equal(expr(a, b), expr.run(a, b))
504
+
505
+ def test_illegal_value(self):
506
+ a = arange(3)
507
+ try:
508
+ evaluate("a < [0, 0, 0]")
509
+ except (ValueError, TypeError):
510
+ pass
511
+ else:
512
+ self.fail()
513
+
514
+ def test_sanitize(self):
515
+ with _environment('NUMEXPR_SANITIZE', '1'):
516
+ # Forbid dunder
517
+ try:
518
+ evaluate('__builtins__')
519
+ except ValueError:
520
+ pass
521
+ else:
522
+ self.fail()
523
+
524
+ # Forbid colon for lambda funcs
525
+ try:
526
+ evaluate('lambda x: x')
527
+ except ValueError:
528
+ pass
529
+ else:
530
+ self.fail()
531
+
532
+ # Forbid indexing
533
+ try:
534
+ evaluate('locals()["evaluate"]')
535
+ except ValueError:
536
+ pass
537
+ else:
538
+ self.fail()
539
+
540
+ # Forbid semicolon
541
+ try:
542
+ evaluate('import os;')
543
+ except ValueError:
544
+ pass
545
+ else:
546
+ self.fail()
547
+
548
+ # Attribute access with spaces
549
+ try:
550
+ evaluate('os. cpu_count()')
551
+ except ValueError:
552
+ pass
553
+ else:
554
+ self.fail()
555
+
556
+ # Attribute access with funny unicode characters that eval translates
557
+ # into ASCII.
558
+ try:
559
+ evaluate("(3+1).ᵇit_length()")
560
+ except ValueError:
561
+ pass
562
+ else:
563
+ self.fail()
564
+
565
+ # Pass decimal points including scientific notation
566
+ a = 3.0
567
+ evaluate('a*2.e-5')
568
+ evaluate('a*2.e+5')
569
+ evaluate('a*2e-5')
570
+ evaluate('a*2e+5')
571
+ evaluate('a*2E-5')
572
+ evaluate('a*2.0e5')
573
+ evaluate('a*2.2e5')
574
+ evaluate('2.+a')
575
+
576
+ # pass .real and .imag
577
+ c = 2.5 + 1.5j
578
+ evaluate('c.real')
579
+ evaluate('c.imag')
580
+
581
+ # pass imaginary unit j
582
+ evaluate('1.5j')
583
+ evaluate('3.j')
584
+
585
+ # pass forbidden characters within quotes
586
+ x = np.array(['a', 'b'], dtype=bytes)
587
+ evaluate("x == 'b:'")
588
+
589
+
590
+ def test_no_sanitize(self):
591
+ try: # Errors on compile() after eval()
592
+ evaluate('import os;', sanitize=False)
593
+ except SyntaxError:
594
+ pass
595
+ else:
596
+ self.fail()
597
+
598
+ with _environment('NUMEXPR_SANITIZE', '0'):
599
+ try: # Errors on compile() after eval()
600
+ evaluate('import os;', sanitize=None)
601
+ except SyntaxError:
602
+ pass
603
+ else:
604
+ self.fail()
605
+
606
+ def test_disassemble(self):
607
+ assert_equal(disassemble(NumExpr(
608
+ "where(m, a, -1)", [('m', bool), ('a', float)])),
609
+ [[b'where_fbff', b'r0', b'r1[m]', b'r2[a]', b'c3[-1.0]'],
610
+ [b'noop', None, None, None]])
611
+
612
+ def test_constant_deduplication(self):
613
+ assert_equal(NumExpr("(a + 1)*(a - 1)", [('a', np.int32)]).constants, (1,))
614
+
615
+ def test_nan_constant(self):
616
+ assert_equal(str(ConstantNode(float("nan")).value), 'nan')
617
+
618
+ # check de-duplication works for nan
619
+ _nan = ConstantNode(float("nan"))
620
+ expr = (E.a + _nan)*(E.b + _nan)
621
+ assert_equal(NumExpr(expr, [('a', double), ('b', double)]).constants, (float("nan"),))
622
+
623
+
624
+ def test_f32_constant(self):
625
+ assert_equal(ConstantNode(numpy.float32(1)).astKind, "float")
626
+ assert_equal(ConstantNode(numpy.float32("nan")).astKind, "float")
627
+ assert_equal(ConstantNode(numpy.float32(3)).value.dtype, numpy.dtype("float32"))
628
+ assert_array_equal(NumExpr(ConstantNode(numpy.float32(1))).run(),
629
+ numpy.array(1, dtype="float32"))
630
+
631
+ def test_unaligned_singleton(self):
632
+ # Test for issue #397 whether singletons outputs assigned to consts must be
633
+ # aligned or not.
634
+ a = np.empty(5, dtype=np.uint8)[1:].view(np.int32)
635
+ evaluate('3', out=a)
636
+ assert_equal(a, 3)
637
+
638
+ def test_negative_mod(self):
639
+ # Test for issue #413, modulus of negative integers. C modulus is
640
+ # actually remainder op, and hence different from Python modulus.
641
+ a = np.array([-500, -135, 0, 0, 135, 500], dtype=np.int32)
642
+ n = np.array([-360, -360, -360, 360, 360, 360], dtype=np.int32)
643
+ out_i = evaluate('a % n')
644
+ assert_equal(out_i, np.mod(a, n))
645
+
646
+ b = a.astype(np.int64)
647
+ m = n.astype(np.int64)
648
+ out_l = evaluate('b % m')
649
+ assert_equal(out_l, np.mod(b, m))
650
+
651
+ def test_negative_power_scalar(self):
652
+ # Test for issue #428, where the power is negative and the base is an
653
+ # integer. This was running afoul of the precomputation in `expressions.py:pow_op()`
654
+ base = np.array([-2, -1, 1, 2, 3], dtype=np.int32)
655
+ out_i = evaluate('base ** -1.0')
656
+ assert_equal(out_i, np.power(base, -1.0))
657
+
658
+ base = np.array([-2, -1, 1, 2, 3], dtype=np.int64)
659
+ out_l = evaluate('base ** -1.0')
660
+ assert_equal(out_l, np.power(base, -1.0))
661
+
662
+
663
+ def test_ex_uses_vml(self):
664
+ vml_funcs = [ "sin", "cos", "tan", "arcsin", "arccos", "arctan",
665
+ "sinh", "cosh", "tanh", "arcsinh", "arccosh", "arctanh",
666
+ "log", "log1p","log10", "exp", "expm1", "abs", "conj",
667
+ "arctan2", "fmod"]
668
+ for func in vml_funcs:
669
+ strexpr = func+'(a)'
670
+ _, ex_uses_vml = numexpr.necompiler.getExprNames(strexpr, {})
671
+ assert_equal(ex_uses_vml, use_vml, strexpr)
672
+
673
+ if 'sparc' not in platform.machine():
674
+ # Execution order set here so as to not use too many threads
675
+ # during the rest of the execution. See #33 for details.
676
+ def test_changing_nthreads_00_inc(self):
677
+ a = linspace(-1, 1, 1000000)
678
+ b = ((.25 * a + .75) * a - 1.5) * a - 2
679
+ for nthreads in range(1, 7):
680
+ numexpr.set_num_threads(nthreads)
681
+ c = evaluate("((.25*a + .75)*a - 1.5)*a - 2")
682
+ assert_array_almost_equal(b, c)
683
+
684
+ def test_changing_nthreads_01_dec(self):
685
+ a = linspace(-1, 1, 1000000)
686
+ b = ((.25 * a + .75) * a - 1.5) * a - 2
687
+ for nthreads in range(6, 1, -1):
688
+ numexpr.set_num_threads(nthreads)
689
+ c = evaluate("((.25*a + .75)*a - 1.5)*a - 2")
690
+ assert_array_almost_equal(b, c)
691
+
692
+
693
+ tests = [
694
+ ('MISC', ['b*c+d*e',
695
+ '2*a+3*b',
696
+ '-a',
697
+ 'sinh(a)',
698
+ '2*a + (cos(3)+5)*sinh(cos(b))',
699
+ '2*a + arctan2(a, b)',
700
+ 'arcsin(0.5)',
701
+ 'where(a != 0.0, 2, a)',
702
+ 'where(a > 10, b < a, b > a)',
703
+ 'where((a-10).real != 0.0, a, 2)',
704
+ '0.25 * (a < 5) + 0.33 * (a >= 5)',
705
+ 'cos(1+1)',
706
+ '1+1',
707
+ '1',
708
+ 'cos(a2)',
709
+ ])]
710
+
711
+ optests = []
712
+ for op in list('+-*/%') + ['**']:
713
+ optests.append("(a+1) %s (b+3)" % op)
714
+ optests.append("3 %s (b+3)" % op)
715
+ optests.append("(a+1) %s 4" % op)
716
+ optests.append("2 %s (b+3)" % op)
717
+ optests.append("(a+1) %s 2" % op)
718
+ optests.append("(a+1) %s -1" % op)
719
+ optests.append("(a+1) %s 0.5" % op)
720
+ # Check divisions and modulus by zero (see ticket #107)
721
+ optests.append("(a+1) %s 0" % op)
722
+ tests.append(('OPERATIONS', optests))
723
+
724
+ cmptests = []
725
+ for op in ['<', '<=', '==', '>=', '>', '!=']:
726
+ cmptests.append("a/2+5 %s b" % op)
727
+ cmptests.append("a/2+5 %s 7" % op)
728
+ cmptests.append("7 %s b" % op)
729
+ cmptests.append("7.0 %s 5" % op)
730
+ tests.append(('COMPARISONS', cmptests))
731
+
732
+ func1tests = []
733
+ for func in ['copy', 'ones_like', 'sqrt',
734
+ 'sin', 'cos', 'tan', 'arcsin', 'arccos', 'arctan',
735
+ 'sinh', 'cosh', 'tanh', 'arcsinh', 'arccosh', 'arctanh',
736
+ 'log', 'log1p', 'log10', 'exp', 'expm1', 'abs', 'conj',
737
+ 'ceil', 'floor']:
738
+ func1tests.append("a + %s(b+c)" % func)
739
+ tests.append(('1_ARG_FUNCS', func1tests))
740
+
741
+ func2tests = []
742
+ for func in ['arctan2', 'fmod']:
743
+ func2tests.append("a + %s(b+c, d+1)" % func)
744
+ func2tests.append("a + %s(b+c, 1)" % func)
745
+ func2tests.append("a + %s(1, d+1)" % func)
746
+ tests.append(('2_ARG_FUNCS', func2tests))
747
+
748
+ powtests = []
749
+ # n = -1, 0.5, 2, 4 already handled in section "OPERATIONS"
750
+ for n in (-7, -2.5, -1.5, -1.3, -.5, 0, 0.0, 1, 2.3, 2.5, 3):
751
+ powtests.append("(a+1)**%s" % n)
752
+ tests.append(('POW_TESTS', powtests))
753
+
754
+
755
+ def equal(a, b, exact):
756
+ if array_equal(a, b):
757
+ return True
758
+
759
+ if hasattr(a, 'dtype') and a.dtype in ['f4', 'f8']:
760
+ nnans = isnan(a).sum()
761
+ if nnans > 0:
762
+ # For results containing NaNs, just check that the number
763
+ # of NaNs is the same in both arrays. This check could be
764
+ # made more exhaustive, but checking element by element in
765
+ # python space is very expensive in general.
766
+ return nnans == isnan(b).sum()
767
+ ninfs = isinf(a).sum()
768
+ if ninfs > 0:
769
+ # Ditto for Inf's
770
+ return ninfs == isinf(b).sum()
771
+ if exact:
772
+ return (shape(a) == shape(b)) and alltrue(ravel(a) == ravel(b), axis=0)
773
+ else:
774
+ if hasattr(a, 'dtype') and a.dtype == 'f4':
775
+ atol = 1e-5 # Relax precision for special opcodes, like fmod
776
+ else:
777
+ atol = 1e-8
778
+ return (shape(a) == shape(b) and
779
+ allclose(ravel(a), ravel(b), atol=atol))
780
+
781
+
782
+ class Skip(Exception): pass
783
+
784
+
785
+ def test_expressions():
786
+ test_no = [0]
787
+
788
+ def make_test_method(a, a2, b, c, d, e, x, expr,
789
+ test_scalar, dtype, optimization, exact, section):
790
+ this_locals = locals()
791
+
792
+ def method():
793
+ try:
794
+ # We don't want to listen at RuntimeWarnings like
795
+ # "overflows" or "divide by zero" in plain eval().
796
+ warnings.simplefilter("ignore")
797
+ npval = eval(expr, globals(), this_locals)
798
+ warnings.simplefilter("always")
799
+ npval = eval(expr, globals(), this_locals)
800
+ except Exception as ex:
801
+ # just store the exception in a variable
802
+ # compatibility with numpy v1.12
803
+ # see also https://github.com/pydata/numexpr/issues/239
804
+ np_exception = ex
805
+ npval = None
806
+ else:
807
+ np_exception = None
808
+
809
+ try:
810
+ neval = evaluate(expr, local_dict=this_locals,
811
+ optimization=optimization)
812
+ except AssertionError:
813
+ raise
814
+ except NotImplementedError:
815
+ print('%r not implemented for %s (scalar=%d, opt=%s)'
816
+ % (expr, dtype.__name__, test_scalar, optimization))
817
+ except Exception as ne_exception:
818
+ same_exc_type = issubclass(type(ne_exception),
819
+ type(np_exception))
820
+ if np_exception is None or not same_exc_type:
821
+ print('numexpr error for expression %r' % (expr,))
822
+ raise
823
+ except:
824
+ print('numexpr error for expression %r' % (expr,))
825
+ raise
826
+ else:
827
+ msg = ('expected numexpr error not raised for expression '
828
+ '%r' % (expr,))
829
+ assert np_exception is None, msg
830
+
831
+ assert equal(npval, neval, exact), """%r
832
+ (test_scalar=%r, dtype=%r, optimization=%r, exact=%r,
833
+ npval=%r (%r - %r)\n neval=%r (%r - %r))""" % (expr, test_scalar, dtype.__name__,
834
+ optimization, exact,
835
+ npval, type(npval), shape(npval),
836
+ neval, type(neval), shape(neval))
837
+
838
+ method.description = ('test_expressions(%s, test_scalar=%r, '
839
+ 'dtype=%r, optimization=%r, exact=%r)') % (expr, test_scalar, dtype.__name__, optimization, exact)
840
+ test_no[0] += 1
841
+ method.__name__ = 'test_scalar%d_%s_%s_%s_%04d' % (test_scalar,
842
+ dtype.__name__,
843
+ optimization.encode('ascii'),
844
+ section.encode('ascii'),
845
+ test_no[0])
846
+ return method
847
+
848
+ x = None
849
+ for test_scalar in (0, 1, 2):
850
+ for dtype in (int, int, np.float32, double, complex):
851
+ array_size = 100
852
+ a = arange(2 * array_size, dtype=dtype)[::2]
853
+ a2 = zeros([array_size, array_size], dtype=dtype)
854
+ b = arange(array_size, dtype=dtype) / array_size
855
+ c = arange(array_size, dtype=dtype)
856
+ d = arange(array_size, dtype=dtype)
857
+ e = arange(array_size, dtype=dtype)
858
+ if dtype == complex:
859
+ a = a.real
860
+ for x in [a2, b, c, d, e]:
861
+ x += 1j
862
+ x *= 1 + 1j
863
+ if test_scalar == 1:
864
+ a = a[array_size // 2]
865
+ if test_scalar == 2:
866
+ b = b[array_size // 2]
867
+ for optimization, exact in [
868
+ ('none', False), ('moderate', False), ('aggressive', False)]:
869
+ for section_name, section_tests in tests:
870
+ for expr in section_tests:
871
+ if (dtype == complex and
872
+ ('<' in expr or '>' in expr or '%' in expr
873
+ or "arctan2" in expr or "fmod" in expr
874
+ or "floor" in expr or "ceil" in expr)):
875
+ # skip complex comparisons or functions not
876
+ # defined in complex domain.
877
+ continue
878
+ if (dtype in (int, int) and test_scalar and
879
+ expr == '(a+1) ** -1'):
880
+ continue
881
+
882
+ m = make_test_method(a, a2, b, c, d, e, x,
883
+ expr, test_scalar, dtype,
884
+ optimization, exact,
885
+ section_name)
886
+ yield m
887
+
888
+
889
+ class test_int64(TestCase):
890
+ def test_neg(self):
891
+ a = array([2 ** 31 - 1, 2 ** 31, 2 ** 32, 2 ** 63 - 1], dtype=int64)
892
+ res = evaluate('-a')
893
+ assert_array_equal(res, [1 - 2 ** 31, -(2 ** 31), -(2 ** 32), 1 - 2 ** 63])
894
+ self.assertEqual(res.dtype.name, 'int64')
895
+
896
+
897
+ class test_int32_int64(TestCase):
898
+
899
+ def test_small_int(self):
900
+ # Small ints (32-bit ones) should not be promoted to longs.
901
+ res = evaluate('2')
902
+ assert_array_equal(res, 2)
903
+ self.assertEqual(res.dtype.name, 'int32')
904
+
905
+ def test_big_int(self):
906
+ # Big ints should be promoted to longs.
907
+ res = evaluate('2**40')
908
+ assert_array_equal(res, 2 ** 40)
909
+ self.assertEqual(res.dtype.name, 'int64')
910
+
911
+ def test_long_constant_promotion(self):
912
+ int32array = arange(100, dtype='int32')
913
+ itwo = np.int32(2)
914
+ ltwo = np.int64(2)
915
+ res = int32array * 2
916
+ res32 = evaluate('int32array * itwo')
917
+ res64 = evaluate('int32array * ltwo')
918
+ assert_array_equal(res, res32)
919
+ assert_array_equal(res, res64)
920
+ self.assertEqual(res32.dtype.name, 'int32')
921
+ self.assertEqual(res64.dtype.name, 'int64')
922
+
923
+ def test_int64_array_promotion(self):
924
+ int32array = arange(100, dtype='int32')
925
+ int64array = arange(100, dtype='int64')
926
+ respy = int32array * int64array
927
+ resnx = evaluate('int32array * int64array')
928
+ assert_array_equal(respy, resnx)
929
+ self.assertEqual(resnx.dtype.name, 'int64')
930
+
931
+
932
+ class test_uint32_int64(TestCase):
933
+ def test_small_uint32(self):
934
+ # Small uint32 should not be downgraded to ints.
935
+ a = np.uint32(42)
936
+ res = evaluate('a')
937
+ assert_array_equal(res, 42)
938
+ self.assertEqual(res.dtype.name, 'int64')
939
+
940
+ def test_uint32_constant_promotion(self):
941
+ int32array = arange(100, dtype='int32')
942
+ stwo = np.int32(2)
943
+ utwo = np.uint32(2)
944
+ res = int32array * utwo
945
+ res32 = evaluate('int32array * stwo')
946
+ res64 = evaluate('int32array * utwo')
947
+ assert_array_equal(res, res32)
948
+ assert_array_equal(res, res64)
949
+ self.assertEqual(res32.dtype.name, 'int32')
950
+ self.assertEqual(res64.dtype.name, 'int64')
951
+
952
+ def test_int64_array_promotion(self):
953
+ uint32array = arange(100, dtype='uint32')
954
+ int64array = arange(100, dtype='int64')
955
+ respy = uint32array * int64array
956
+ resnx = evaluate('uint32array * int64array')
957
+ assert_array_equal(respy, resnx)
958
+ self.assertEqual(resnx.dtype.name, 'int64')
959
+
960
+
961
+ class test_strings(TestCase):
962
+ BLOCK_SIZE1 = 128
963
+ BLOCK_SIZE2 = 8
964
+ str_list1 = [b'foo', b'bar', b'', b' ']
965
+ str_list2 = [b'foo', b'', b'x', b' ']
966
+ str_nloops = len(str_list1) * (BLOCK_SIZE1 + BLOCK_SIZE2 + 1)
967
+ str_array1 = array(str_list1 * str_nloops)
968
+ str_array2 = array(str_list2 * str_nloops)
969
+ str_constant = b'doodoo'
970
+
971
+ def test_null_chars(self):
972
+ str_list = [
973
+ b'\0\0\0', b'\0\0foo\0', b'\0\0foo\0b', b'\0\0foo\0b\0',
974
+ b'foo\0', b'foo\0b', b'foo\0b\0', b'foo\0bar\0baz\0\0']
975
+ for s in str_list:
976
+ r = evaluate('s')
977
+ self.assertEqual(s, r.tobytes()) # check *all* stored data
978
+
979
+ def test_compare_copy(self):
980
+ sarr = self.str_array1
981
+ expr = 'sarr'
982
+ res1 = eval(expr)
983
+ res2 = evaluate(expr)
984
+ assert_array_equal(res1, res2)
985
+
986
+ def test_compare_array(self):
987
+ sarr1 = self.str_array1
988
+ sarr2 = self.str_array2
989
+ expr = 'sarr1 >= sarr2'
990
+ res1 = eval(expr)
991
+ res2 = evaluate(expr)
992
+ assert_array_equal(res1, res2)
993
+
994
+ def test_compare_variable(self):
995
+ sarr = self.str_array1
996
+ svar = self.str_constant
997
+ expr = 'sarr >= svar'
998
+ res1 = eval(expr)
999
+ res2 = evaluate(expr)
1000
+ assert_array_equal(res1, res2)
1001
+
1002
+ def test_compare_constant(self):
1003
+ sarr = self.str_array1
1004
+ expr = 'sarr >= %r' % self.str_constant
1005
+ res1 = eval(expr)
1006
+ res2 = evaluate(expr)
1007
+ assert_array_equal(res1, res2)
1008
+
1009
+ def test_add_string_array(self):
1010
+ sarr1 = self.str_array1
1011
+ sarr2 = self.str_array2
1012
+ expr = 'sarr1 + sarr2'
1013
+ self.assert_missing_op('add_sss', expr, locals())
1014
+
1015
+ def test_empty_string1(self):
1016
+ a = np.array([b"", b"pepe"])
1017
+ b = np.array([b"pepe2", b""])
1018
+ res = evaluate("(a == b'') & (b == b'pepe2')")
1019
+ assert_array_equal(res, np.array([True, False]))
1020
+ res2 = evaluate("(a == b'pepe') & (b == b'')")
1021
+ assert_array_equal(res2, np.array([False, True]))
1022
+
1023
+ def test_empty_string2(self):
1024
+ a = np.array([b"p", b"pepe"])
1025
+ b = np.array([b"pepe2", b""])
1026
+ res = evaluate("(a == b'') & (b == b'pepe2')")
1027
+ assert_array_equal(res, np.array([False, False]))
1028
+ res2 = evaluate("(a == b'pepe') & (b == b'')")
1029
+ assert_array_equal(res2, np.array([False, True]))
1030
+
1031
+ def test_add_numeric_array(self):
1032
+ sarr = self.str_array1
1033
+ narr = arange(len(sarr), dtype='int32')
1034
+ expr = 'sarr >= narr'
1035
+ self.assert_missing_op('ge_bsi', expr, locals())
1036
+
1037
+ def assert_missing_op(self, op, expr, local_dict):
1038
+ msg = "expected NotImplementedError regarding '%s'" % op
1039
+ try:
1040
+ evaluate(expr, local_dict)
1041
+ except NotImplementedError as nie:
1042
+ if "'%s'" % op not in nie.args[0]:
1043
+ self.fail(msg)
1044
+ else:
1045
+ self.fail(msg)
1046
+
1047
+ def test_compare_prefix(self):
1048
+ # Check comparing two strings where one is a prefix of the
1049
+ # other.
1050
+ for s1, s2 in [(b'foo', b'foobar'), (b'foo', b'foo\0bar'),
1051
+ (b'foo\0a', b'foo\0bar')]:
1052
+ self.assertTrue(evaluate('s1 < s2'))
1053
+ self.assertTrue(evaluate('s1 <= s2'))
1054
+ self.assertTrue(evaluate('~(s1 == s2)'))
1055
+ self.assertTrue(evaluate('~(s1 >= s2)'))
1056
+ self.assertTrue(evaluate('~(s1 > s2)'))
1057
+
1058
+ # Check for NumPy array-style semantics in string equality.
1059
+ s1, s2 = b'foo', b'foo\0\0'
1060
+ self.assertTrue(evaluate('s1 == s2'))
1061
+
1062
+
1063
+ # Case for testing selections in fields which are aligned but whose
1064
+ # data length is not an exact multiple of the length of the record.
1065
+ # The following test exposes the problem only in 32-bit machines,
1066
+ # because in 64-bit machines 'c2' is unaligned. However, this should
1067
+ # check most platforms where, while not unaligned, 'len(datatype) >
1068
+ boundary_alignment' is fulfilled.
1069
+ class test_irregular_stride(TestCase):
1070
+ def test_select(self):
1071
+ f0 = arange(10, dtype=int32)
1072
+ f1 = arange(10, dtype=float64)
1073
+
1074
+ irregular = rec.fromarrays([f0, f1])
1075
+
1076
+ f0 = irregular['f0']
1077
+ f1 = irregular['f1']
1078
+
1079
+ i0 = evaluate('f0 < 5')
1080
+ i1 = evaluate('f1 < 5')
1081
+
1082
+ assert_array_equal(f0[i0], arange(5, dtype=int32))
1083
+ assert_array_equal(f1[i1], arange(5, dtype=float64))
1084
+
1085
+
1086
+ # Cases for testing arrays with dimensions that can be zero.
1087
+ class test_zerodim(TestCase):
1088
+ def test_zerodim1d(self):
1089
+ a0 = array([], dtype=int32)
1090
+ a1 = array([], dtype=float64)
1091
+
1092
+ r0 = evaluate('a0 + a1')
1093
+ r1 = evaluate('a0 * a1')
1094
+
1095
+ assert_array_equal(r0, a1)
1096
+ assert_array_equal(r1, a1)
1097
+
1098
+ def test_zerodim3d(self):
1099
+ a0 = array([], dtype=int32).reshape(0, 2, 4)
1100
+ a1 = array([], dtype=float64).reshape(0, 2, 4)
1101
+
1102
+ r0 = evaluate('a0 + a1')
1103
+ r1 = evaluate('a0 * a1')
1104
+
1105
+ assert_array_equal(r0, a1)
1106
+ assert_array_equal(r1, a1)
1107
+
1108
+
1109
+ @contextmanager
1110
+ def _environment(key, value):
1111
+ old = os.environ.get(key)
1112
+ os.environ[key] = value
1113
+ try:
1114
+ yield
1115
+ finally:
1116
+ if old:
1117
+ os.environ[key] = old
1118
+ else:
1119
+ del os.environ[key]
1120
+
1121
+ # Test cases for the threading configuration
1122
+ class test_threading_config(TestCase):
1123
+ def test_max_threads_unset(self):
1124
+ # Has to be done in a subprocess as `importlib.reload` doesn't let us
1125
+ # re-initialize the threadpool
1126
+ script = '\n'.join([
1127
+ "import os",
1128
+ "if 'NUMEXPR_MAX_THREADS' in os.environ: os.environ.pop('NUMEXPR_MAX_THREADS')",
1129
+ "if 'OMP_NUM_THREADS' in os.environ: os.environ.pop('OMP_NUM_THREADS')",
1130
+ "import numexpr",
1131
+ "assert(numexpr.nthreads <= 8)",
1132
+ "exit(0)"])
1133
+ subprocess.check_call([sys.executable, '-c', script])
1134
+
1135
+ def test_max_threads_set(self):
1136
+ # Has to be done in a subprocess as `importlib.reload` doesn't let us
1137
+ # re-initialize the threadpool
1138
+ script = '\n'.join([
1139
+ "import os",
1140
+ "os.environ['NUMEXPR_MAX_THREADS'] = '4'",
1141
+ "import numexpr",
1142
+ "assert(numexpr.MAX_THREADS == 4)",
1143
+ "exit(0)"])
1144
+ subprocess.check_call([sys.executable, '-c', script])
1145
+
1146
+ def test_numexpr_num_threads(self):
1147
+ with _environment('OMP_NUM_THREADS', '5'):
1148
+ # NUMEXPR_NUM_THREADS has priority
1149
+ with _environment('NUMEXPR_NUM_THREADS', '3'):
1150
+ if 'sparc' in platform.machine():
1151
+ self.assertEqual(1, numexpr._init_num_threads())
1152
+ else:
1153
+ self.assertEqual(3, numexpr._init_num_threads())
1154
+
1155
+ def test_omp_num_threads(self):
1156
+ with _environment('OMP_NUM_THREADS', '5'):
1157
+ if 'sparc' in platform.machine():
1158
+ self.assertEqual(1, numexpr._init_num_threads())
1159
+ else:
1160
+ self.assertEqual(5, numexpr._init_num_threads())
1161
+
1162
+ def test_omp_num_threads_empty_string(self):
1163
+ with _environment('OMP_NUM_THREADS', ''):
1164
+ if 'sparc' in platform.machine():
1165
+ self.assertEqual(1, numexpr._init_num_threads())
1166
+ else:
1167
+ self.assertEqual(detect_number_of_cores(), numexpr._init_num_threads())
1168
+
1169
+ def test_numexpr_max_threads_empty_string(self):
1170
+ with _environment('NUMEXPR_MAX_THREADS', ''):
1171
+ if 'sparc' in platform.machine():
1172
+ self.assertEqual(1, numexpr._init_num_threads())
1173
+ else:
1174
+ self.assertEqual(detect_number_of_cores(), numexpr._init_num_threads())
1175
+
1176
+ def test_vml_threads_round_trip(self):
1177
+ n_threads = 3
1178
+ if use_vml:
1179
+ numexpr.utils.set_vml_num_threads(n_threads)
1180
+ set_threads = numexpr.utils.get_vml_num_threads()
1181
+ self.assertEqual(n_threads, set_threads)
1182
+ else:
1183
+ self.assertIsNone(numexpr.utils.set_vml_num_threads(n_threads))
1184
+ self.assertIsNone(numexpr.utils.get_vml_num_threads())
1185
+
1186
+
1187
+ # Case test for threads
1188
+ class test_threading(TestCase):
1189
+
1190
+ def test_thread(self):
1191
+ import threading
1192
+
1193
+ class ThreadTest(threading.Thread):
1194
+ def run(self):
1195
+ a = arange(3)
1196
+ assert_array_equal(evaluate('a**3'), array([0, 1, 8]))
1197
+
1198
+ test = ThreadTest()
1199
+ test.start()
1200
+ test.join()
1201
+
1202
+ def test_multithread(self):
1203
+ import threading
1204
+
1205
+ # Running evaluate() from multiple threads shouldn't crash
1206
+ def work(n):
1207
+ a = arange(n)
1208
+ evaluate('a+a')
1209
+
1210
+ work(10) # warm compilation cache
1211
+
1212
+ nthreads = 30
1213
+ threads = [threading.Thread(target=work, args=(1e5,))
1214
+ for i in range(nthreads)]
1215
+ for t in threads:
1216
+ t.start()
1217
+ for t in threads:
1218
+ t.join()
1219
+
1220
+
1221
+ # The worker function for the subprocess (needs to be here because Windows
1222
+ # has problems pickling nested functions with the multiprocessing module :-/)
1223
+ def _worker(qout=None):
1224
+ ra = np.arange(1e3)
1225
+ rows = evaluate('ra > 0')
1226
+ #print "Succeeded in evaluation!\n"
1227
+ if qout is not None:
1228
+ qout.put("Done")
1229
+
1230
+
1231
+ # Case test for subprocesses (via multiprocessing module)
1232
+ class test_subprocess(TestCase):
1233
+ def test_multiprocess(self):
1234
+ try:
1235
+ import multiprocessing as mp
1236
+ except ImportError:
1237
+ return
1238
+ # Check for two threads at least
1239
+ numexpr.set_num_threads(2)
1240
+ #print "**** Running from main process:"
1241
+ _worker()
1242
+ #print "**** Running from subprocess:"
1243
+ qout = mp.Queue()
1244
+ ps = mp.Process(target=_worker, args=(qout,))
1245
+ ps.daemon = True
1246
+ ps.start()
1247
+
1248
+ result = qout.get()
1249
+ #print result
1250
+
1251
+
1252
+ def print_versions():
1253
+ """Print the versions of software that numexpr relies on."""
1254
+ # from pkg_resources import parse_version
1255
+ from numexpr.cpuinfo import cpu
1256
+ import platform
1257
+
1258
+ print('-=' * 38)
1259
+ print('Numexpr version: %s' % numexpr.__version__)
1260
+ print('NumPy version: %s' % np.__version__)
1261
+ print('Python version: %s' % sys.version)
1262
+ (sysname, nodename, release, os_version, machine, processor) = platform.uname()
1263
+ print('Platform: %s-%s-%s' % (sys.platform, machine, os_version))
1264
+ try:
1265
+ # cpuinfo doesn't work on OSX well it seems, so protect these outputs
1266
+ # with a try block
1267
+ cpu_info = cpu.info[0]
1268
+ print('CPU vendor: %s' % cpu_info.get('VendorIdentifier', ''))
1269
+ print('CPU model: %s' % cpu_info.get('ProcessorNameString', ''))
1270
+ print('CPU clock speed: %s MHz' % cpu_info.get('~MHz',''))
1271
+ except KeyError:
1272
+ pass
1273
+ print('VML available? %s' % use_vml)
1274
+ if use_vml:
1275
+ print('VML/MKL version: %s' % numexpr.get_vml_version())
1276
+ print('Number of threads used by default: %d '
1277
+ '(out of %d detected cores)' % (numexpr.nthreads, numexpr.ncores))
1278
+ print('Maximum number of threads: %s' % numexpr.MAX_THREADS)
1279
+ print('-=' * 38)
1280
+
1281
+
1282
+ def test(verbosity=1):
1283
+ """
1284
+ Run all the tests in the test suite.
1285
+ """
1286
+ print_versions()
1287
+ # For some reason, NumPy issues all kinds of warnings when using Python3.
1288
+ # Ignoring them in tests should be ok, as all results are checked out.
1289
+ # See https://github.com/pydata/numexpr/issues/183 for details.
1290
+ np.seterr(divide='ignore', invalid='ignore', over='ignore', under='ignore')
1291
+ return unittest.TextTestRunner(verbosity=verbosity).run(suite())
1292
+
1293
+
1294
+ test.__test__ = False
1295
+
1296
+
1297
+ def suite():
1298
+ import unittest
1299
+ import platform as pl
1300
+
1301
+ theSuite = unittest.TestSuite()
1302
+ niter = 1
1303
+
1304
+ class TestExpressions(TestCase):
1305
+ pass
1306
+
1307
+ def add_method(func):
1308
+ def method(self):
1309
+ return func()
1310
+
1311
+ setattr(TestExpressions, func.__name__,
1312
+ method.__get__(None, TestExpressions))
1313
+
1314
+ for func in test_expressions():
1315
+ add_method(func)
1316
+
1317
+ for n in range(niter):
1318
+ theSuite.addTest(unittest.makeSuite(test_numexpr))
1319
+ if 'sparc' not in platform.machine():
1320
+ theSuite.addTest(unittest.makeSuite(test_numexpr2))
1321
+ theSuite.addTest(unittest.makeSuite(test_evaluate))
1322
+ theSuite.addTest(unittest.makeSuite(TestExpressions))
1323
+ theSuite.addTest(unittest.makeSuite(test_int32_int64))
1324
+ theSuite.addTest(unittest.makeSuite(test_uint32_int64))
1325
+ theSuite.addTest(unittest.makeSuite(test_strings))
1326
+ theSuite.addTest(
1327
+ unittest.makeSuite(test_irregular_stride))
1328
+ theSuite.addTest(unittest.makeSuite(test_zerodim))
1329
+ theSuite.addTest(unittest.makeSuite(test_threading_config))
1330
+
1331
+ # multiprocessing module is not supported on Hurd/kFreeBSD
1332
+ if (pl.system().lower() not in ('gnu', 'gnu/kfreebsd')):
1333
+ theSuite.addTest(unittest.makeSuite(test_subprocess))
1334
+
1335
+ # I need to put this test after test_subprocess because
1336
+ # if not, the test suite locks immediately before test_subprocess.
1337
+ # This only happens with Windows, so I suspect a subtle bad
1338
+ # interaction with threads and subprocess :-/
1339
+ theSuite.addTest(unittest.makeSuite(test_threading))
1340
+
1341
+ return theSuite
1342
+
1343
+
1344
+ if __name__ == '__main__':
1345
+ print_versions()
1346
+ unittest.main(defaultTest='suite')
1347
+ # suite = suite()
1348
+ # unittest.TextTestRunner(verbosity=2).run(suite)
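These helpers are re-exported by the top-level `numexpr` package, so the environment report and the test suite can be driven from a short script. A minimal sketch (assuming `numexpr` from this environment is importable; the exact output depends on the machine):

```python
# Minimal sketch: print environment info and run the bundled numexpr self-tests.
import numexpr

numexpr.print_versions()              # numexpr/NumPy/Python versions, CPU and VML details
result = numexpr.test(verbosity=2)    # returns the unittest.TestResult from the runner
print("all tests passed:", result.wasSuccessful())
```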
llmeval-env/lib/python3.10/site-packages/numexpr/utils.py ADDED
@@ -0,0 +1,228 @@
+ ###################################################################
+ # Numexpr - Fast numerical array expression evaluator for NumPy.
+ #
+ # License: MIT
+ # Author: See AUTHORS.txt
+ #
+ # See LICENSE.txt and LICENSES/*.txt for details about copyright and
+ # rights to use.
+ ####################################################################
+
+ import logging
+ log = logging.getLogger(__name__)
+
+ import os
+ import subprocess
+
+ from numexpr.interpreter import _set_num_threads, _get_num_threads, MAX_THREADS
+ from numexpr import use_vml
+ from . import version
+
+ if use_vml:
+     from numexpr.interpreter import (
+         _get_vml_version, _set_vml_accuracy_mode, _set_vml_num_threads,
+         _get_vml_num_threads)
+
+
+ def get_vml_version():
+     """
+     Get the VML/MKL library version.
+     """
+     if use_vml:
+         return _get_vml_version()
+     else:
+         return None
+
+
+ def set_vml_accuracy_mode(mode):
+     """
+     Set the accuracy mode for VML operations.
+
+     The `mode` parameter can take the values:
+     - 'high': high accuracy mode (HA), <1 least significant bit
+     - 'low': low accuracy mode (LA), typically 1-2 least significant bits
+     - 'fast': enhanced performance mode (EP)
+     - None: mode settings are ignored
+
+     This call is equivalent to the `vmlSetMode()` in the VML library.
+     See:
+
+     http://www.intel.com/software/products/mkl/docs/webhelp/vml/vml_DataTypesAccuracyModes.html
+
+     for more info on the accuracy modes.
+
+     Returns old accuracy settings.
+     """
+     if use_vml:
+         acc_dict = {None: 0, 'low': 1, 'high': 2, 'fast': 3}
+         acc_reverse_dict = {1: 'low', 2: 'high', 3: 'fast'}
+         if mode not in list(acc_dict.keys()):
+             raise ValueError(
+                 "mode argument must be one of: None, 'high', 'low', 'fast'")
+         retval = _set_vml_accuracy_mode(acc_dict.get(mode, 0))
+         return acc_reverse_dict.get(retval)
+     else:
+         return None
+
+
+ def set_vml_num_threads(nthreads):
+     """
+     Suggests a maximum number of threads to be used in VML operations.
+
+     This function is equivalent to the call
+     `mkl_domain_set_num_threads(nthreads, MKL_DOMAIN_VML)` in the MKL
+     library. See:
+
+     http://www.intel.com/software/products/mkl/docs/webhelp/support/functn_mkl_domain_set_num_threads.html
+
+     for more info about it.
+     """
+     if use_vml:
+         _set_vml_num_threads(nthreads)
+
+
+ def get_vml_num_threads():
+     """
+     Gets the maximum number of threads to be used in VML operations.
+
+     This function is equivalent to the call
+     `mkl_domain_get_max_threads (MKL_DOMAIN_VML)` in the MKL
+     library. See:
+
+     http://software.intel.com/en-us/node/522118
+
+     for more info about it.
+     """
+     if use_vml:
+         return _get_vml_num_threads()
+     return None
+
+
+ def set_num_threads(nthreads):
+     """
+     Sets a number of threads to be used in operations.
+
+     DEPRECATED: returns the previous setting for the number of threads.
+
+     During initialization time NumExpr sets this number to the number
+     of detected cores in the system (see `detect_number_of_cores()`).
+     """
+     old_nthreads = _set_num_threads(nthreads)
+     return old_nthreads
+
+
+ def get_num_threads():
+     """
+     Gets the number of threads currently in use for operations.
+     """
+     return _get_num_threads()
+
+
+ def _init_num_threads():
+     """
+     Detects the environment variable 'NUMEXPR_MAX_THREADS' to set the threadpool
+     size, and if necessary the slightly redundant 'NUMEXPR_NUM_THREADS' or
+     'OMP_NUM_THREADS' env vars to set the initial number of threads used by
+     the virtual machine.
+     """
+     # Any platform-specific short-circuits
+     if 'sparc' in version.platform_machine:
+         log.warning('The number of threads has been set to 1 because problems related '
+                     'to threading have been reported on some sparc machines. '
+                     'The number of threads can be changed using the "set_num_threads" '
+                     'function.')
+         set_num_threads(1)
+         return 1
+
+     env_configured = False
+     n_cores = detect_number_of_cores()
+     if ('NUMEXPR_MAX_THREADS' in os.environ and os.environ['NUMEXPR_MAX_THREADS'] != '' or
+             'OMP_NUM_THREADS' in os.environ and os.environ['OMP_NUM_THREADS'] != ''):
+         # The user has configured NumExpr in the expected way, so suppress logs.
+         env_configured = True
+         n_cores = MAX_THREADS
+     else:
+         # The user has not set 'NUMEXPR_MAX_THREADS', so likely they have not
+         # configured NumExpr as desired, so we emit info logs.
+         if n_cores > MAX_THREADS:
+             log.info('Note: detected %d virtual cores but NumExpr set to maximum of %d, check "NUMEXPR_MAX_THREADS" environment variable.' % (n_cores, MAX_THREADS))
+         if n_cores > 8:
+             # The historical 'safety' limit.
+             log.info('Note: NumExpr detected %d cores but "NUMEXPR_MAX_THREADS" not set, so enforcing safe limit of 8.' % n_cores)
+             n_cores = 8
+
+     # Now we check for 'NUMEXPR_NUM_THREADS' or 'OMP_NUM_THREADS' to set the
+     # actual number of threads used.
+     if 'NUMEXPR_NUM_THREADS' in os.environ and os.environ['NUMEXPR_NUM_THREADS'] != '':
+         requested_threads = int(os.environ['NUMEXPR_NUM_THREADS'])
+     elif 'OMP_NUM_THREADS' in os.environ and os.environ['OMP_NUM_THREADS'] != '':
+         # Empty string is commonly used to unset the variable
+         requested_threads = int(os.environ['OMP_NUM_THREADS'])
+     else:
+         requested_threads = n_cores
+         if not env_configured:
+             log.info('NumExpr defaulting to %d threads.' % n_cores)
+
+     # The C-extension function performs its own checks against `MAX_THREADS`
+     set_num_threads(requested_threads)
+     return requested_threads
+
+
+ def detect_number_of_cores():
+     """
+     Detects the number of cores on a system. Cribbed from pp.
+     """
+     # Linux, Unix and MacOS:
+     if hasattr(os, "sysconf"):
+         if "SC_NPROCESSORS_ONLN" in os.sysconf_names:
+             # Linux & Unix:
+             ncpus = os.sysconf("SC_NPROCESSORS_ONLN")
+             if isinstance(ncpus, int) and ncpus > 0:
+                 return ncpus
+         else:  # OSX:
+             return int(subprocess.check_output(["sysctl", "-n", "hw.ncpu"]))
+     # Windows:
+     try:
+         ncpus = int(os.environ.get("NUMBER_OF_PROCESSORS", ""))
+         if ncpus > 0:
+             return ncpus
+     except ValueError:
+         pass
+     return 1  # Default
+
+
+ def detect_number_of_threads():
+     """
+     DEPRECATED: use `_init_num_threads` instead.
+     If this is modified, please update the note in: https://github.com/pydata/numexpr/wiki/Numexpr-Users-Guide
+     """
+     log.warning('Deprecated, use `_init_num_threads` instead.')
+     try:
+         nthreads = int(os.environ.get('NUMEXPR_NUM_THREADS', ''))
+     except ValueError:
+         try:
+             nthreads = int(os.environ.get('OMP_NUM_THREADS', ''))
+         except ValueError:
+             nthreads = detect_number_of_cores()
+
+     # Check that we don't surpass the MAX_THREADS in interpreter.cpp
+     if nthreads > MAX_THREADS:
+         nthreads = MAX_THREADS
+     return nthreads
+
+
+ class CacheDict(dict):
+     """
+     A dictionary that prevents itself from growing too much.
+     """
+
+     def __init__(self, maxentries):
+         self.maxentries = maxentries
+         super(CacheDict, self).__init__(self)
+
+     def __setitem__(self, key, value):
+         # Protection against growing the cache too much
+         if len(self) > self.maxentries:
+             # Remove 10% of the (arbitrary) elements from the cache
+             entries_to_remove = self.maxentries // 10
+             for k in list(self.keys())[:entries_to_remove]:
+                 super(CacheDict, self).__delitem__(k)
+         super(CacheDict, self).__setitem__(key, value)
+
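As a usage sketch of the threading knobs above: `NUMEXPR_MAX_THREADS` is read once by `_init_num_threads()` at import time, `set_num_threads()` adjusts the active pool afterwards, and `CacheDict` bounds the expression cache. The names come straight from this module; the printed numbers are machine-dependent:

```python
# Sketch of the threading and cache helpers defined above (machine-dependent output).
import os

# Must be set before numexpr is imported; caps the pool built by _init_num_threads().
os.environ.setdefault("NUMEXPR_MAX_THREADS", "8")

import numexpr

previous = numexpr.set_num_threads(4)        # returns the prior thread count
print(previous, numexpr.get_num_threads())   # e.g. "8 4", depending on the machine

# CacheDict evicts ~10% of its (arbitrary) entries once it grows past maxentries.
from numexpr.utils import CacheDict

cache = CacheDict(256)
for i in range(300):
    cache[i] = i * i
print(len(cache) <= 257)                     # stays bounded near maxentries
```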
llmeval-env/lib/python3.10/site-packages/numexpr/version.py ADDED
@@ -0,0 +1,4 @@
+ # THIS FILE IS GENERATED BY `SETUP.PY`
+ version = '2.10.0'
+ numpy_build_version = '2.0.0rc1'
+ platform_machine = 'x86_64'
llmeval-env/lib/python3.10/site-packages/torchgen/__init__.py ADDED
@@ -0,0 +1,10 @@
+ """torchgen
+
+ This module contains codegeneration utilities for PyTorch. It is used to
+ build PyTorch from source, but may also be used for out-of-tree projects
+ that extend PyTorch.
+
+ Note well that we provide no BC guarantees for torchgen. If you're interested
+ in using torchgen and want the PyTorch team to be aware, please reach out
+ on GitHub.
+ """
llmeval-env/lib/python3.10/site-packages/torchgen/__pycache__/gen_aoti_c_shim.cpython-310.pyc ADDED
Binary file (9.98 kB).
 
llmeval-env/lib/python3.10/site-packages/torchgen/__pycache__/gen_functionalization_type.cpython-310.pyc ADDED
Binary file (22.7 kB).
 
llmeval-env/lib/python3.10/site-packages/torchgen/__pycache__/gen_lazy_tensor.cpython-310.pyc ADDED
Binary file (14 kB).
 
llmeval-env/lib/python3.10/site-packages/torchgen/__pycache__/local.cpython-310.pyc ADDED
Binary file (1.37 kB).
 
llmeval-env/lib/python3.10/site-packages/torchgen/__pycache__/utils.cpython-310.pyc ADDED
Binary file (14.9 kB).
 
llmeval-env/lib/python3.10/site-packages/torchgen/code_template.py ADDED
@@ -0,0 +1,96 @@
+ import re
+ from typing import Mapping, Match, Optional, Sequence
+
+ # Match $identifier or ${identifier} and replace it with the value in env.
+ # If the identifier is preceded only by whitespace on its line and its value
+ # is a list, it is treated as a block substitution: each element of the list
+ # is placed on its own line, indented to that depth.
+ # If the identifier appears on a line starting with non-whitespace and the
+ # value is a list, the elements are comma separated; ${,foo} inserts a comma
+ # before the list (if the list is not empty) and ${foo,} inserts one after it.
+
+
+ class CodeTemplate:
+     substitution_str = r"(^[^\n\S]*)?\$([^\d\W]\w*|\{,?[^\d\W]\w*\,?})"
+     substitution = re.compile(substitution_str, re.MULTILINE)
+
+     pattern: str
+     filename: str
+
+     @staticmethod
+     def from_file(filename: str) -> "CodeTemplate":
+         with open(filename) as f:
+             return CodeTemplate(f.read(), filename)
+
+     def __init__(self, pattern: str, filename: str = "") -> None:
+         self.pattern = pattern
+         self.filename = filename
+
+     def substitute(
+         self, env: Optional[Mapping[str, object]] = None, **kwargs: object
+     ) -> str:
+         if env is None:
+             env = {}
+
+         def lookup(v: str) -> object:
+             assert env is not None
+             return kwargs[v] if v in kwargs else env[v]
+
+         def indent_lines(indent: str, v: Sequence[object]) -> str:
+             return "".join(
+                 [indent + l + "\n" for e in v for l in str(e).splitlines()]
+             ).rstrip()
+
+         def replace(match: Match[str]) -> str:
+             indent = match.group(1)
+             key = match.group(2)
+             comma_before = ""
+             comma_after = ""
+             if key[0] == "{":
+                 key = key[1:-1]
+                 if key[0] == ",":
+                     comma_before = ", "
+                     key = key[1:]
+                 if key[-1] == ",":
+                     comma_after = ", "
+                     key = key[:-1]
+             v = lookup(key)
+             if indent is not None:
+                 if not isinstance(v, list):
+                     v = [v]
+                 return indent_lines(indent, v)
+             elif isinstance(v, list):
+                 middle = ", ".join([str(x) for x in v])
+                 if len(v) == 0:
+                     return middle
+                 return comma_before + middle + comma_after
+             else:
+                 return str(v)
+
+         return self.substitution.sub(replace, self.pattern)
+
+
+ if __name__ == "__main__":
+     c = CodeTemplate(
+         """\
+ int foo($args) {
+
+     $bar
+         $bar
+     $a+$b
+ }
+ int commatest(int a${,stuff})
+ int notest(int a${,empty,})
+ """
+     )
+     print(
+         c.substitute(
+             args=["hi", 8],
+             bar=["what", 7],
+             a=3,
+             b=4,
+             stuff=["things...", "others"],
+             empty=[],
+         )
+     )
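A small sketch of what the substitution rules in the header comment produce; the template text and names here are made up for illustration, only `CodeTemplate` itself comes from the file above:

```python
# Illustrative use of CodeTemplate's three substitution forms.
from torchgen.code_template import CodeTemplate

t = CodeTemplate("""\
void f($args) {
    $body
}
int g(int a${,extra})
""")

print(t.substitute(
    args=["int x", "int y"],      # inline list -> comma separated: "int x, int y"
    body=["x += 1;", "y += 2;"],  # list at start of line -> one element per line, indented
    extra=["bool flag"],          # ${,extra} -> ", bool flag" only when the list is non-empty
))
```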
llmeval-env/lib/python3.10/site-packages/torchgen/context.py ADDED
@@ -0,0 +1,128 @@
+ import contextlib
+
+ import functools
+ from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple, TypeVar, Union
+
+ import torchgen.local as local
+ from torchgen.model import (
+     BackendIndex,
+     DispatchKey,
+     NativeFunction,
+     NativeFunctionsGroup,
+     NativeFunctionsViewGroup,
+ )
+ from torchgen.utils import context, S, T
+
+ # Helper functions for defining generators on things in the model
+
+ F = TypeVar(
+     "F",
+     NativeFunction,
+     NativeFunctionsGroup,
+     NativeFunctionsViewGroup,
+     Union[NativeFunction, NativeFunctionsGroup],
+     Union[NativeFunction, NativeFunctionsViewGroup],
+ )
+
+ F2 = TypeVar(
+     "F2",
+     NativeFunction,
+     NativeFunctionsGroup,
+     Optional[NativeFunction],
+     bool,
+     str,
+ )
+
+ F3 = TypeVar("F3", Tuple[NativeFunction, Any], List[NativeFunction])
+
+
+ @contextlib.contextmanager
+ def native_function_manager(
+     g: Union[NativeFunctionsGroup, NativeFunctionsViewGroup, NativeFunction]
+ ) -> Iterator[None]:
+     if isinstance(g, NativeFunctionsGroup):
+         # By default, we associate all errors with structured native functions
+         # with the out variant. In some cases, it might be better to have
+         # a more specific place to hang things; if so, use
+         # native_function_manager again on the inside
+         f = g.out
+     elif isinstance(g, NativeFunctionsViewGroup):
+         # We associate errors with the view operator
+         f = g.view
+     else:
+         f = g
+     with context(lambda: f"in native_functions.yaml line {f.loc}:\n {f.func}"):
+         with local.parametrize(
+             use_const_ref_for_mutable_tensors=f.use_const_ref_for_mutable_tensors,
+             use_ilistref_for_tensor_lists=f.part_of_structured_group,
+         ):
+             yield
+
+
+ # Given a function that operates on NativeFunction, wrap it into a new function
+ # that sets some appropriate context managers for that native function.
+ # YOU MUST WRAP FUNCTIONS IN THIS for calls to api modules to be sound
+ # (you will get an error if we try to access the local variables without having
+ # set them).
+ def with_native_function(func: Callable[[F], T]) -> Callable[[F], T]:
+     @functools.wraps(func)
+     def wrapper(f: F) -> T:
+         with native_function_manager(f):
+             return func(f)
+
+     return wrapper
+
+
+ def with_native_function_and(func: Callable[[F, F2], T]) -> Callable[[F, F2], T]:
+     @functools.wraps(func)
+     def wrapper(f: F, f2: F2) -> T:
+         # The first native_function is assumed to be the one with the appropriate context.
+         with native_function_manager(f):
+             return func(f, f2)
+
+     return wrapper
+
+
+ def method_with_native_function(func: Callable[[S, F], T]) -> Callable[[S, F], T]:
+     @functools.wraps(func)
+     def wrapper(slf: S, f: F) -> T:
+         with native_function_manager(f):
+             return func(slf, f)
+
+     return wrapper
+
+
+ def method_with_nested_native_function(
+     func: Callable[[S, F3], T]
+ ) -> Callable[[S, F3], T]:
+     @functools.wraps(func)
+     def wrapper(slf: S, f: F3) -> T:
+         with native_function_manager(f[0]):
+             return func(slf, f)
+
+     return wrapper
+
+
+ # Convenience decorator for functions that explicitly take in a BackendIndex,
+ # instead of indirectly taking one in as a closure
+ def with_native_function_and_index(
+     func: Callable[[F, BackendIndex], T]
+ ) -> Callable[[F, BackendIndex], T]:
+     @functools.wraps(func)
+     def wrapper(f: F, backend_index: BackendIndex) -> T:
+         with native_function_manager(f):
+             return func(f, backend_index)
+
+     return wrapper
+
+
+ # Convenience decorator for functions that explicitly take in a Dict of BackendIndices
+ def with_native_function_and_indices(
+     func: Callable[[F, Dict[DispatchKey, BackendIndex]], T]
+ ) -> Callable[[F, Dict[DispatchKey, BackendIndex]], T]:
+     @functools.wraps(func)
+     def wrapper(f: F, backend_indices: Dict[DispatchKey, BackendIndex]) -> T:
+         with native_function_manager(f):
+             return func(f, backend_indices)
+
+     return wrapper
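A hedged sketch of how these decorators are meant to be used: the decorated generator below is hypothetical (torchgen's real generators live in `torchgen.gen` and `torchgen.dest`), but the wrapping pattern is the one required by the comment above:

```python
# Hypothetical torchgen-style generator wrapped with with_native_function.
# The wrapper attaches the native_functions.yaml location to any error raised
# inside and sets the per-function "local" flags before the body runs.
from torchgen.context import with_native_function
from torchgen.model import NativeFunction


@with_native_function
def declare(f: NativeFunction) -> str:
    # Calls into torchgen.api.* are only sound inside this wrapper, because they
    # consult the local settings established by native_function_manager.
    return f"// generated for {f.func.name}"
```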
llmeval-env/lib/python3.10/site-packages/torchgen/dest/__init__.py ADDED
@@ -0,0 +1,19 @@
+ from .lazy_ir import (
+     generate_non_native_lazy_ir_nodes as generate_non_native_lazy_ir_nodes,
+     GenLazyIR as GenLazyIR,
+     GenLazyNativeFuncDefinition as GenLazyNativeFuncDefinition,
+     GenLazyShapeInferenceDefinition as GenLazyShapeInferenceDefinition,
+ )
+ from .native_functions import (
+     compute_native_function_declaration as compute_native_function_declaration,
+ )
+ from .register_dispatch_key import (
+     gen_registration_headers as gen_registration_headers,
+     gen_registration_helpers as gen_registration_helpers,
+     RegisterDispatchKey as RegisterDispatchKey,
+ )
+ from .ufunc import (
+     compute_ufunc_cpu as compute_ufunc_cpu,
+     compute_ufunc_cpu_kernel as compute_ufunc_cpu_kernel,
+     compute_ufunc_cuda as compute_ufunc_cuda,
+ )
llmeval-env/lib/python3.10/site-packages/torchgen/dest/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (673 Bytes).
 
llmeval-env/lib/python3.10/site-packages/torchgen/dest/__pycache__/lazy_ir.cpython-310.pyc ADDED
Binary file (23.4 kB).
 
llmeval-env/lib/python3.10/site-packages/torchgen/dest/__pycache__/lazy_ts_lowering.cpython-310.pyc ADDED
Binary file (2.19 kB).
 
llmeval-env/lib/python3.10/site-packages/torchgen/dest/__pycache__/native_functions.cpython-310.pyc ADDED
Binary file (2.24 kB).
 
llmeval-env/lib/python3.10/site-packages/torchgen/dest/__pycache__/register_dispatch_key.cpython-310.pyc ADDED
Binary file (23.9 kB).
 
llmeval-env/lib/python3.10/site-packages/torchgen/dest/__pycache__/ufunc.cpython-310.pyc ADDED
Binary file (14 kB).
 
llmeval-env/lib/python3.10/site-packages/torchgen/dest/lazy_ir.py ADDED
@@ -0,0 +1,707 @@
1
+ import itertools
2
+ from abc import ABC
3
+ from dataclasses import dataclass
4
+ from typing import Any, Dict, List, Optional, Tuple, Union
5
+
6
+ import torchgen.api.dispatcher as dispatcher
7
+ from torchgen.api.lazy import (
8
+ getValueT,
9
+ isValueType,
10
+ LazyArgument,
11
+ LazyIrProperties,
12
+ LazyIrSchema,
13
+ tensorListValueT,
14
+ )
15
+ from torchgen.api.translate import translate
16
+ from torchgen.api.types import (
17
+ BaseCType,
18
+ Binding,
19
+ deviceT,
20
+ DispatcherSignature,
21
+ kernel_signature,
22
+ NativeSignature,
23
+ OptionalCType,
24
+ VectorCType,
25
+ )
26
+ from torchgen.context import method_with_native_function
27
+ from torchgen.dest.lazy_ts_lowering import ts_lowering_body
28
+ from torchgen.model import (
29
+ Argument,
30
+ BackendIndex,
31
+ BackendMetadata,
32
+ BaseTy,
33
+ BaseType,
34
+ FunctionSchema,
35
+ ListType,
36
+ NativeFunction,
37
+ NativeFunctionsGroup,
38
+ )
39
+
40
+
41
+ def node_ctor_arg_rvalue_string(arg: LazyArgument) -> str:
42
+ """
43
+ Given a LazyArgument,
44
+ generate a c++ string for materializing an rvalue of that arg for passing into
45
+ a lazy Node constructor.
46
+ """
47
+
48
+ # TODO: Matching on CType seems wrong; should be matching on Type
49
+ if isValueType(arg.lazy_type):
50
+ if isinstance(arg.lazy_type, BaseCType):
51
+ if arg.is_wrapped_scalar:
52
+ return f"node_{arg.name}"
53
+ elif arg.lazy_type.type is tensorListValueT:
54
+ return f"lazy_{arg.name}_tensorlist"
55
+ elif arg.is_symint_or_list:
56
+ return f"GetSymIntValue({arg.name})"
57
+ return f"lazy_{arg.name}->GetIrValue()"
58
+ elif isinstance(arg.lazy_type, OptionalCType):
59
+ if arg.is_symint_or_list:
60
+ # TODO: I don't understand when you should put lazy_ in the name
61
+ # or not
62
+ return f"{arg.name} ? c10::make_optional(GetSymIntValue(*{arg.name})) : c10::nullopt"
63
+ elif arg.is_wrapped_scalar:
64
+ return f"node_{arg.name}"
65
+ return (
66
+ f"lazy_{arg.name} ? "
67
+ f"c10::make_optional(lazy_{arg.name}->GetIrValue()) : "
68
+ "c10::nullopt"
69
+ )
70
+ else:
71
+ raise AssertionError(
72
+ f"TODO not sure if there are other valid types to handle here ({arg.lazy_type})"
73
+ )
74
+ else:
75
+ # NB: this is here because right now we aren't treating SymInt[] as a
76
+ # value type; when we do this needs to move above
77
+ # NB: we cannot test arg.lazy_type as we've already specified it is an
78
+ # int64_t and so we cannot distinguish between SymInt and int64_t
79
+ if isinstance(arg.orig_type, ListType) and arg.orig_type.elem == BaseType(
80
+ BaseTy.SymInt
81
+ ):
82
+ if arg.symint:
83
+ return f"GetSymIntArrayRefValue({arg.name})"
84
+ else:
85
+ return f"std::vector<int64_t>({arg.name}.begin(), {arg.name}.end())"
86
+ elif isinstance(arg.lazy_type, VectorCType) and isinstance(
87
+ arg.lazy_type.elem, BaseCType
88
+ ):
89
+ return f"std::vector<{arg.lazy_type.elem.type}>({arg.name}.begin(), {arg.name}.end())"
90
+ elif (
91
+ isinstance(arg.lazy_type, OptionalCType)
92
+ and isinstance(arg.lazy_type.elem, VectorCType)
93
+ and isinstance(arg.lazy_type.elem.elem, BaseCType)
94
+ ):
95
+ return f"torch::lazy::ToOptionalVector<{arg.lazy_type.elem.elem.type}>({arg.name})"
96
+ else:
97
+ return f"{arg.name}"
98
+
99
+
100
+ def node_ctor_inputs(schema: LazyIrSchema) -> str:
101
+ """
102
+ Produce a formatted string with the arguments as passed into the constructor of a node class.
103
+ """
104
+ node_ctor_values = [
105
+ node_ctor_arg_rvalue_string(arg) for arg in schema.filtered_args()
106
+ ]
107
+ return ", ".join(node_ctor_values)
108
+
109
+
110
+ def gen_fallback_code(
111
+ schema: LazyIrSchema,
112
+ sig: Union[DispatcherSignature, NativeSignature],
113
+ overload_name: str,
114
+ ) -> str:
115
+ """
116
+ Generate code that falls back to eager conditioned on a predicate
117
+ """
118
+ dispatcher_sig = DispatcherSignature.from_schema(schema.func)
119
+ exprs = translate(sig.arguments(), dispatcher_sig.arguments())
120
+ fallback_args = ",\n ".join([a.expr for a in exprs])
121
+ if len(overload_name):
122
+ aten_op_str = f"ATEN_OP2({schema.aten_name}, {overload_name})"
123
+ else:
124
+ aten_op_str = f"ATEN_OP({schema.aten_name})"
125
+ return f"""
126
+ if (force_eager_fallback({aten_symbol(schema)})) {{
127
+ return at::native::call_fallback_fn_symint<&ltc_eager_fallback, {aten_op_str}>::call(
128
+ {fallback_args}
129
+ );
130
+ }}
131
+ """
132
+
133
+
134
+ def aten_symbol(schema: LazyIrSchema) -> str:
135
+ missing_interned_strings = {
136
+ "sigmoid_backward",
137
+ }
138
+ if schema.aten_name in missing_interned_strings:
139
+ return f'c10::Symbol::fromQualString("aten::{schema.aten_name}")'
140
+
141
+ if not schema.aten_name.startswith("at::"):
142
+ return f"at::aten::{schema.aten_name}"
143
+ else:
144
+ return schema.aten_name
145
+
146
+
147
+ # converts all tensor-like arguments to meta tensors. Returns:
148
+ # (1) a string containing all of the logic that does the conversions.
149
+ # (2) a context, to be used by translate(), with all of the relevant bindings.
150
+ def convert_to_meta_tensors(sig: DispatcherSignature) -> Tuple[str, List[Binding]]:
151
+ context: List[Binding] = []
152
+ unwrapped_tensor_args: List[str] = []
153
+ for arg in sig.arguments():
154
+ if isinstance(arg.argument, Argument) and arg.argument.type.is_tensor_like():
155
+ unwrapped_name = f"{arg.name}_meta"
156
+ unwrapped_tensor_args.append(
157
+ f"auto {unwrapped_name} = to_meta({arg.name});"
158
+ )
159
+ context.append(arg.with_name(unwrapped_name))
160
+ else:
161
+ context.append(arg)
162
+ unwrap_tensor_args_str = "\n ".join(unwrapped_tensor_args)
163
+ return unwrap_tensor_args_str, context
164
+
165
+
166
+ @dataclass(frozen=True)
167
+ class GenLazyIR(ABC):
168
+ backend_index: BackendIndex
169
+ backend_name: str
170
+ node_base: str
171
+ use_lazy_shape: bool
172
+
173
+ @method_with_native_function
174
+ def __call__(self, f: Union[NativeFunctionsGroup, NativeFunction]) -> List[str]:
175
+ func = f.functional.func if isinstance(f, NativeFunctionsGroup) else f.func
176
+ metadata = self.backend_index.get_kernel(
177
+ f.functional if isinstance(f, NativeFunctionsGroup) else f
178
+ )
179
+ schema = LazyIrSchema(
180
+ func, symint=metadata is not None and metadata.supports_symint()
181
+ )
182
+ return self.gen(schema)
183
+
184
+ # there is no lowering functionality generated unless this IR base class is subclassed and
185
+ # implemented as a backend-specific node
186
+ def lowering_function(self, schema: LazyIrSchema) -> str:
187
+ return ""
188
+
189
+ def create_function(self, schema: LazyIrSchema, node_ctor_args: str) -> str:
190
+ return ""
191
+
192
+ def can_be_reused_function(self, schema: LazyIrSchema, node_ctor_args: str) -> str:
193
+ return f"""bool CanBeReused({node_ctor_args}) const {{
194
+ return false;
195
+ }}"""
196
+
197
+ def node_base_ctor_call(self, schema: LazyIrSchema) -> str:
198
+ value_args = schema.filtered_args(values=True, scalars=False)
199
+ # backends can customize the way the node base class constructor is called,
200
+ # as long as all of its arguments can be generated from information available from the schema
201
+ base_ctor_value_args_list = []
202
+ for arg in value_args:
203
+ if isinstance(arg.lazy_type, (BaseCType, VectorCType)):
204
+ base_ctor_value_args_list.append(f"{arg.name}")
205
+ elif isinstance(arg.lazy_type, OptionalCType):
206
+ base_ctor_value_args_list.append(f"{arg.name}.value_or(kNullValue)")
207
+ else:
208
+ raise AssertionError(
209
+ f"Unsupported type ({arg.lazy_type}) - add support if necessary"
210
+ )
211
+ base_ctor_value_args = ", ".join(base_ctor_value_args_list)
212
+
213
+ scalar_args = schema.filtered_args(values=False, scalars=True)
214
+
215
+ # Shape construction.
216
+ # Conditionally build shape depending on specified shape property
217
+ if schema.properties.ShapePrecompute:
218
+ shape_ctor_arg = "std::move(shapes),"
219
+ elif schema.properties.ShapeCompute:
220
+ shape_args = [a.name for a in value_args]
221
+ shape_args.extend(a.name for a in scalar_args)
222
+ shape_ctor_arg = f"compute_shape_{schema.name}({', '.join(shape_args)}),"
223
+ elif schema.properties.ShapeCache:
224
+ shape_args = [f"operand({i})" for i in range(len(value_args))]
225
+ shape_args.extend(a.name for a in scalar_args)
226
+ shape_ctor_arg = f"[&](){{ return compute_shape_{schema.name}({', '.join(shape_args)})[0]; }},"
227
+ else:
228
+ shape_ctor_arg = ""
229
+
230
+ scalar_hashes = ", ".join(f"{a.name}" for a in scalar_args)
231
+
232
+ return f"""{self.node_base}(
233
+ {schema.node_name}::ClassOpKind(),
234
+ OpList{{{base_ctor_value_args}}},
235
+ {shape_ctor_arg}
236
+ /* num_outputs */ {len(schema.returns)},
237
+ torch::lazy::MHash({scalar_hashes}))"""
238
+
239
+ def gen(self, schema: LazyIrSchema) -> List[str]:
240
+ opkind = schema.opkind or aten_symbol(schema)
241
+
242
+ # for now, we just want one IR class decl and soon after also the method defs
243
+ # and we use the functional version not out/inplace.
244
+ all_args = schema.filtered_args()
245
+ value_args = schema.filtered_args(values=True, scalars=False)
246
+ scalar_args = schema.filtered_args(values=False, scalars=True)
247
+
248
+ ctor_args = [f"const {i.lazy_type.cpp_type()}& {i.name}" for i in all_args]
249
+ reuse_ctor_args = ", ".join(ctor_args)
250
+ if self.use_lazy_shape and schema.properties.ShapePrecompute:
251
+ ctor_args.append("std::vector<torch::lazy::Shape>&& shapes")
252
+ node_ctor_args = ", ".join(ctor_args)
253
+
254
+ scalar_initializers = ",\n ".join(
255
+ [
256
+ # This code is just special casing the mapping from string_view -> strings
257
+ f"{a.name}({a.name}.has_value() ? c10::make_optional(std::string(*{a.name})) : c10::nullopt)"
258
+ if a.lazy_type.cpp_type() == "c10::optional<c10::string_view>"
259
+ else f"{a.name}({a.name})"
260
+ for a in scalar_args
261
+ ]
262
+ )
263
+ if len(scalar_initializers):
264
+ scalar_initializers = f",\n {scalar_initializers}"
265
+ scalar_decls = "\n ".join(
266
+ [
267
+ f"std::string {a.name};"
268
+ if a.lazy_type.cpp_type() == "c10::string_view"
269
+ else f"c10::optional<std::string> {a.name};"
270
+ if a.lazy_type.cpp_type() == "c10::optional<c10::string_view>"
271
+ else f"{a.lazy_type.cpp_type()} {a.name};"
272
+ for a in scalar_args
273
+ ]
274
+ )
275
+ optional_values = [
276
+ arg.name
277
+ for arg in schema.filtered_args(values=True, scalars=False)
278
+ if isinstance(arg.lazy_type, OptionalCType)
279
+ ]
280
+ has_optional_decls = "\n ".join(
281
+ [f"bool has_{value}: 1;" for value in optional_values]
282
+ )
283
+ has_optional_defs = "\n ".join(
284
+ [f"has_{value} = !!{value};" for value in optional_values]
285
+ )
286
+ members_to_string = []
287
+ for arg in scalar_args:
288
+ if isinstance(arg.lazy_type, OptionalCType):
289
+ value = f"{arg.name}.value()"
290
+ if arg.is_generator:
291
+ value = '"torch.Generator()"'
292
+ members_to_string.append(
293
+ f"""if ({arg.name}.has_value()) {{
294
+ ss << ", {arg.name}=" << {value};
295
+ }} else {{
296
+ ss << ", {arg.name}=null";
297
+ }}"""
298
+ )
299
+ else:
300
+ members_to_string.append(f'ss << ", {arg.name}=" << {arg.name};')
301
+ members_to_string_str = "\n ".join(members_to_string)
302
+
303
+ return [
304
+ f"""\
305
+ class {schema.node_name} : public {self.node_base} {{
306
+ public:
307
+ static torch::lazy::OpKind ClassOpKind() {{
308
+ return torch::lazy::OpKind({opkind});
309
+ }}
310
+
311
+ {schema.node_name}({node_ctor_args})
312
+ : {self.node_base_ctor_call(schema)}{scalar_initializers}
313
+ {{
314
+ {has_optional_defs}
315
+ }}
316
+
317
+ std::string ToString() const override {{
318
+ std::stringstream ss;
319
+ ss << {self.node_base}::ToString();
320
+ {members_to_string_str}
321
+ return ss.str();
322
+ }}
323
+
324
+ {self.create_function(schema, reuse_ctor_args)}
325
+
326
+ {self.can_be_reused_function(schema, reuse_ctor_args)}
327
+
328
+ {self.lowering_function(schema)}
329
+
330
+ {scalar_decls}
331
+ {has_optional_decls}
332
+
333
+ }};
334
+
335
+ """,
336
+ ]
337
+
338
+
339
+ @dataclass(frozen=True)
340
+ class GenTSLazyIR(GenLazyIR):
341
+ def lowering_function(self, schema: LazyIrSchema) -> str:
342
+ signature = """
343
+ torch::lazy::TSOpVector Lower(
344
+ std::shared_ptr<torch::jit::GraphFunction> function,
345
+ torch::lazy::TSLoweringContext* loctx) const override"""
346
+
347
+ if schema.properties.LowerDeclOnly:
348
+ return f"{signature};"
349
+ elif schema.properties.Lower:
350
+ return f"""{signature} {{
351
+ {ts_lowering_body(schema)}
352
+ }}
353
+ """
354
+ else:
355
+ return ""
356
+
357
+ def create_function(self, schema: LazyIrSchema, node_ctor_args: str) -> str:
358
+ signature = f"static NodePtr Create({node_ctor_args})"
359
+ if schema.properties.CreateFnDeclOnly:
360
+ return f"{signature};"
361
+ elif not schema.properties.CreateFn:
362
+ return ""
363
+ return f"""{signature} {{
364
+ return ReuseOrMakeNode<{schema.node_name}>(data);
365
+ }}"""
366
+
367
+ def can_be_reused_function(self, schema: LazyIrSchema, node_ctor_args: str) -> str:
368
+ signature = f"bool CanBeReused({node_ctor_args}) const"
369
+ if schema.properties.CanBeReusedDeclOnly:
370
+ return f"{signature};"
371
+ elif not schema.properties.CanBeReused:
372
+ return ""
373
+ value_comparison = []
374
+ for arg in itertools.chain(schema.positional_values, schema.keyword_values):
375
+ if isinstance(arg.lazy_type, OptionalCType):
376
+ value_comparison.append(
377
+ f"nullable_operand(i++) == {arg.name}.value_or(kNullValue)"
378
+ )
379
+ else:
380
+ value_comparison.append(f"operand(i++) == {arg.name}")
381
+ for arg in itertools.chain(schema.positional_scalars, schema.keyword_scalars):
382
+ if isinstance(arg.lazy_type, OptionalCType):
383
+ value_comparison.append(
384
+ f"((!this->{arg.name}&&!{arg.name}) || (this->{arg.name}&&{arg.name} && *(this->{arg.name}) == *{arg.name}))"
385
+ )
386
+ else:
387
+ value_comparison.append(f"this->{arg.name} == {arg.name}")
388
+ value_comparison_str = " &&\n ".join(value_comparison)
389
+
390
+ return f"""{signature} {{
391
+ size_t i = 0;
392
+ return ({value_comparison_str});
393
+ }}"""
394
+
395
+
396
+ @dataclass(frozen=True)
397
+ class GenLazyNativeFuncDefinition:
398
+ class_method_name: str
399
+ backend_index: BackendIndex
400
+ tensor_class: str
401
+ gen_forced_fallback_code: bool
402
+ backend_namespace: str
403
+ get_tensorlist: str
404
+ get_tensor_or_wrap_number: str
405
+ try_get_tensor: str
406
+ metrics_counter: str
407
+ create_tensor: str
408
+ create_from_first_tensor: bool
409
+ create_aten_from_ltc_tensor: str
410
+ tuple_aten_from_ltc_tensors: str
411
+ lazy_tensor_ptr: str
412
+ get_device_fn: str
413
+
414
+ def lazy_tensor_decls(self, func: NativeFunction, schema: LazyIrSchema) -> str:
415
+ value_args = schema.filtered_args(values=True, scalars=False)
416
+ # Generates lazy_{name} variables for LazyTensors wrapping input tensors
417
+ lazy_tensor_decls: List[str] = []
418
+ for arg in value_args:
419
+ if arg.is_wrapped_scalar:
420
+ if isinstance(arg.lazy_type, OptionalCType):
421
+ lazy_tensor_decls.append(
422
+ f"""auto node_{arg.name} = {arg.name} ?
423
+ c10::make_optional(torch::lazy::LazyGraphExecutor::Get()->
424
+ GetIrValueForScalarFromCodegen(*{arg.name}, *common_device)):
425
+ c10::nullopt;"""
426
+ )
427
+ else:
428
+ lazy_tensor_decls.append(
429
+ f"""auto node_{arg.name} = torch::lazy::LazyGraphExecutor::Get()->
430
+ GetIrValueForScalarFromCodegen({arg.name}, *common_device);"""
431
+ )
432
+ elif arg.is_symint_or_list:
433
+ continue # values are extracted in isValueType
434
+ elif isinstance(arg.lazy_type, BaseCType):
435
+ if arg.lazy_type.type is tensorListValueT:
436
+ lazy_tensor_decls.append(
437
+ f"auto lazy_{arg.name}_tensorlist = "
438
+ f"{self.backend_namespace}::{self.get_tensorlist}({arg.name});"
439
+ )
440
+ else:
441
+ lazy_tensor_decls.append(
442
+ f"{self.lazy_tensor_ptr} lazy_{arg.name} = "
443
+ f"{self.backend_namespace}::{self.get_tensor_or_wrap_number}({arg.name}, *common_device);"
444
+ )
445
+ elif isinstance(arg.lazy_type, OptionalCType):
446
+ assert arg.lazy_type.elem == BaseCType(getValueT()), arg.lazy_type.elem
447
+ # TODO(alanwaketan): Maybe we want to apply GetLtcTensorOrCreateForWrappedNumber here, but hold it
448
+ # until we encounter a real world example.
449
+ lazy_tensor_decls.append(
450
+ f"{self.lazy_tensor_ptr} lazy_{arg.name} = "
451
+ f"{self.backend_namespace}::{self.try_get_tensor}({arg.name}.value_or(at::Tensor()));"
452
+ )
453
+ else:
454
+ raise AssertionError(
455
+ f"TODO not sure if there are other valid types to handle here ({arg.lazy_type})"
456
+ )
457
+ return ("\n ").join(lazy_tensor_decls)
458
+
459
+ def force_eager_fallback(
460
+ self,
461
+ func: NativeFunction,
462
+ schema: LazyIrSchema,
463
+ metadata: BackendMetadata,
464
+ sig: Union[DispatcherSignature, NativeSignature],
465
+ ) -> str:
466
+ if self.gen_forced_fallback_code:
467
+ return gen_fallback_code(
468
+ schema, sig, overload_name=func.func.name.overload_name
469
+ )
470
+ return ""
471
+
472
+ def metrics(self, func: NativeFunction, schema: LazyIrSchema) -> str:
473
+ return f"{self.metrics_counter};"
474
+
475
+ def get_device(self, func: NativeFunction, schema: LazyIrSchema) -> str:
476
+ value_args = schema.filtered_args(values=True, scalars=False)
477
+ scalar_args = schema.filtered_args(values=False, scalars=True)
478
+ value_types_names = [f"{a.name}" for a in value_args if not a.is_wrapped_scalar]
479
+ optional_device = OptionalCType(BaseCType(deviceT))
480
+ optional_devices = [
481
+ a.name for a in scalar_args if a.lazy_type == optional_device
482
+ ]
483
+ assert (
484
+ len(value_types_names) > 0 or len(optional_devices) > 0
485
+ ), "Expected at least one Value or Device type"
486
+ get_device_str = (
487
+ f"{self.get_device_fn}({', '.join(value_types_names + optional_devices)})"
488
+ )
489
+ return f"""auto common_device = {get_device_str};
490
+ TORCH_INTERNAL_ASSERT(common_device);
491
+ """
492
+
493
+ def shape_inference(self, func: NativeFunction, schema: LazyIrSchema) -> str:
494
+ metadata = self.backend_index.get_kernel(func)
495
+ assert metadata is not None
496
+ all_args = schema.filtered_args()
497
+ returns_length = len(schema.returns)
498
+ # call the meta kernel if it exists, to compute output shape/dtype for our IR
499
+ # Note [Generated LTC Shape Functions]
500
+ # LTC uses meta tensors from core to do shape inference when possible, and otherwise
501
+ # we generate a shape function declaration that needs to be manually implemented.
502
+ # How do we detect which ops are eligible to use meta tensors?
503
+ # In general we should be able to use meta tensors not just on structured operators,
504
+ # but also on composite operators that are implemented in terms of structured kernels.
505
+ # We don't currently have a way of knowing at codegen time which ops are implemented that way.
506
+ # This is the case for all view and view_copy operators however, so we're going to
507
+ # use them specifically for all of the view_copy ops (instead of manually writing shape rules for all of them).
508
+ is_view_copy_op = "view_copy" in func.tags
509
+ is_structured = func.structured or func.structured_delegate is not None
510
+ if is_structured or is_view_copy_op:
511
+ meta_out = """
512
+ std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};"""
513
+ if returns_length > 1:
514
+
515
+ def this_shape(i: int) -> str:
516
+ return f"torch::lazy::Shape(std::get<{i}>(out_meta).scalar_type(), std::get<{i}>(out_meta).sizes().vec())"
517
+
518
+ shapes_str = ",".join([this_shape(i) for i in range(returns_length)])
519
+ meta_out = "std::vector<torch::lazy::Shape> shapes{" + shapes_str + "};"
520
+
521
+ # Convert tensor args to the meta device and call it.
522
+ # (We can't pass in the input tensors directly, because they are "functional wrappers".
523
+ # If any of the meta kernels call a tensor op and redispatch, we don't want to hit the functionalize kernels.)
524
+ # Even at::meta:: functions might redispatch, e.g. if they call into view ops.
525
+ dispatcher_sig = DispatcherSignature.from_schema(func.func)
526
+ meta_conversion_str, meta_call_ctx = convert_to_meta_tensors(dispatcher_sig)
527
+ meta_call_args = [
528
+ e.expr
529
+ for e in translate(
530
+ meta_call_ctx, dispatcher_sig.arguments(), method=False
531
+ )
532
+ ]
533
+ if is_view_copy_op:
534
+ # view_copy ops always have a CompositeExplicitAutogradNonFunctional kernel
535
+ assert func.has_composite_explicit_autograd_non_functional_kernel
536
+ dispatch_ns = "compositeexplicitautogradnonfunctional"
537
+ else:
538
+ dispatch_ns = "meta"
539
+ aten_name = schema.aten_name
540
+ # TODO: this is trolling
541
+ if func.func.has_symint() and metadata.supports_symint():
542
+ aten_name += "_symint"
543
+ shape_str = f"""\
544
+ {meta_conversion_str}
545
+ auto out_meta = at::{dispatch_ns}::{aten_name}({', '.join(meta_call_args)});
546
+ {meta_out}"""
547
+ else:
548
+ shape_sig = ComputeShapeSignature(
549
+ metadata.kernel, func, symint=metadata.supports_symint()
550
+ )
551
+ shape_str = f"""
552
+ auto shapes = {shape_sig.shape_call};"""
553
+
554
+ shape_str += f"""
555
+ TORCH_INTERNAL_ASSERT(shapes.size() == {returns_length});"""
556
+
557
+ # Calculating which dimensions are symbolic
558
+ func_schema_str = "aten::" + str(func.func)
559
+ shape_str += f"""
560
+ if(torch::lazy::symbolicShapeEnabled()){{
561
+ std::vector<torch::jit::IValue> inputs = {{ {', '.join(str(a.name) for a in all_args)} }};
562
+ const char* schema_str = "{func_schema_str}";
563
+ applySymbolicShapesOnLT(schema_str, inputs, shapes);
564
+ }}
565
+ """
566
+ return shape_str
567
+
568
+ def build_ir_node(self, func: NativeFunction, schema: LazyIrSchema) -> str:
569
+ node_ctor_input_str = node_ctor_inputs(schema)
570
+ return f"""torch::lazy::NodePtr node = torch::lazy::ReuseNode<{schema.node_name}>({node_ctor_input_str});
571
+ if (!node) {{
572
+ {self.shape_inference(func, schema)}
573
+ node = torch::lazy::MakeNode<{schema.node_name}>({node_ctor_input_str}, std::move(shapes));
574
+ CacheNode(node);
575
+ }}
576
+ """
577
+
578
+ def create_lazy_tensor(self, first_tensor_name: Optional[str] = None) -> str:
579
+ # xla uses an instance method for tensor creation, for the time being
580
+ if self.create_from_first_tensor:
581
+ # TODO(whc) remove this if XLA switches to using static method for creation
582
+ assert (
583
+ first_tensor_name is not None
584
+ ), "Requires first tensor to create lazy tensor"
585
+ return f"{first_tensor_name}.{self.create_tensor}"
586
+ return f"{self.backend_namespace}::{self.create_tensor}"
587
+
588
+ def return_aten_tensor(self, func: NativeFunction, schema: LazyIrSchema) -> str:
589
+ returns_length = len(schema.returns)
590
+ value_args = schema.filtered_args(values=True, scalars=False)
591
+ value_types_names = [f"{a.name}" for a in value_args if not a.is_wrapped_scalar]
592
+ first_tensor_name = value_types_names[0] if len(value_types_names) > 0 else None
593
+ bridge_str = f"""auto result = {self.create_aten_from_ltc_tensor}(
594
+ {self.create_lazy_tensor(first_tensor_name)}(std::move(node), *common_device));"""
595
+
596
+ if returns_length > 1:
597
+ assert (
598
+ len(value_types_names) > 0
599
+ ), "Code below assumes there is at least one tensor arg"
600
+ bridge_str = f"""std::vector<{self.lazy_tensor_ptr}> lazy_tensors;
601
+ for (int i = 0; i < {returns_length}; i++) {{
602
+ lazy_tensors.push_back({self.create_lazy_tensor(first_tensor_name)}({getValueT()}(node, i), *common_device));
603
+ }}
604
+ auto result = {self.tuple_aten_from_ltc_tensors}<{returns_length}>(lazy_tensors);"""
605
+
606
+ if schema.name.name.inplace or func.func.is_out_fn():
607
+ assert returns_length == 1, (
608
+ "We assumed there was no such case where an op is an in-place variant "
609
+ f"and has tuple outputs, but got tuple of len {returns_length}."
610
+ )
611
+ bridge_str = f"""lazy_{first_tensor_name}->SetInPlaceIrValue(node);
612
+ auto& result = {first_tensor_name};"""
613
+
614
+ bridge_str += """
615
+ return result;"""
616
+ return bridge_str
617
+
618
+ @method_with_native_function
619
+ def __call__(self, func: NativeFunction) -> List[str]:
620
+ sig = kernel_signature(func, self.backend_index)
621
+ metadata = self.backend_index.get_kernel(func)
622
+ assert metadata is not None
623
+ schema = LazyIrSchema(func.func, symint=metadata.supports_symint())
624
+ return [
625
+ f"""\
626
+ {sig.decl(name=f"{self.class_method_name}::{metadata.kernel}")} {{
627
+ {self.force_eager_fallback(func, schema, metadata, sig)}
628
+ {self.metrics(func, schema)}
629
+ {self.get_device(func, schema)}
630
+ {self.lazy_tensor_decls(func, schema)}
631
+ {self.build_ir_node(func, schema)}
632
+ {self.return_aten_tensor(func, schema)}
633
+ }}\n
634
+ """
635
+ ]
636
+
637
+
638
+ class ComputeShapeSignature:
639
+ """
640
+ Here we use the base name as the suffix of the signature to avoid generating for in-place variants.
641
+ """
642
+
643
+ def __init__(self, kernel_name: str, f: NativeFunction, *, symint: bool):
644
+ self.__schema = LazyIrSchema(f.func, symint=symint)
645
+ self.__dispatch_args = ", ".join(
646
+ [a.decl() for a in dispatcher.arguments(f.func, symint=symint)]
647
+ )
648
+ self.__call_args = ", ".join(
649
+ [f"{arg.name}" for arg in self.__schema.filtered_args(generator=True)]
650
+ )
651
+ self.__kernel_name = kernel_name
652
+
653
+ def __decl_suffix(self) -> str:
654
+ return f"{self.__kernel_name}({self.__dispatch_args})"
655
+
656
+ def __call_suffix(self) -> str:
657
+ return f"{self.__kernel_name}({self.__call_args})"
658
+
659
+ @property
660
+ def shape_decl(self) -> str:
661
+ return f"TORCH_API std::vector<torch::lazy::Shape> compute_shape_{self.__decl_suffix()}"
662
+
663
+ @property
664
+ def shape_call(self) -> str:
665
+ return f"torch::lazy::compute_shape_{self.__call_suffix()}"
666
+
667
+
668
+ @dataclass(frozen=True)
669
+ class GenLazyShapeInferenceDefinition:
670
+ backend_index: BackendIndex
671
+ tensor_class: str
672
+
673
+ @method_with_native_function
674
+ def __call__(self, f: NativeFunction) -> List[str]:
675
+ sig = kernel_signature(f, self.backend_index)
676
+ metadata = self.backend_index.get_kernel(f)
677
+ assert metadata is not None
678
+
679
+ # See Note [Generated LTC Shape Functions]
680
+ is_view_copy_op = "view_copy" in f.tags
681
+ is_structured = f.structured or f.structured_delegate is not None
682
+ if is_structured or is_view_copy_op:
683
+ return []
684
+ else:
685
+ shape_sig = ComputeShapeSignature(
686
+ metadata.kernel, f, symint=metadata.supports_symint()
687
+ )
688
+ return ["\n".join([f"{shape_sig.shape_decl};"])]
689
+
690
+
691
+ def generate_non_native_lazy_ir_nodes(
692
+ non_native: List[Dict[str, Any]], gen_lazy_ir: GenLazyIR
693
+ ) -> List[str]:
694
+ """Generate the non-native lazy IR node classes"""
695
+ nodes = []
696
+ for op in non_native:
697
+ # Set default properties for Non-Native IRs
698
+ properties = LazyIrProperties("ShapeCache", "CanBeReused", "LowerDeclOnly")
699
+ for p in op.get("properties", []):
700
+ setattr(properties, p, True)
701
+
702
+ # non-native is assumed to want symint bindings if you wrote symint
703
+ schema = LazyIrSchema(FunctionSchema.parse(op["func"]), properties, symint=True)
704
+ schema.opkind = op.get("opkind")
705
+ nodes.append(gen_lazy_ir.gen(schema)[0])
706
+
707
+ return nodes
llmeval-env/lib/python3.10/site-packages/torchgen/dest/lazy_ts_lowering.py ADDED
@@ -0,0 +1,48 @@
+ from torchgen.api.lazy import LazyArgument, LazyIrSchema
+ from torchgen.api.types import OptionalCType
+
+
+ def ts_lowering_body(schema: LazyIrSchema) -> str:
+     # for now, we just want one IR class decl and soon after also the method defs
+     # and we use the functional version not out/inplace.
+     emplace_arguments = []
+
+     def get_value(arg: LazyArgument) -> str:
+         if isinstance(arg.lazy_type, OptionalCType):
+             return f"has_{arg.name} ? loctx->GetOutputOp(operand(i++)) : nullptr"
+         return "loctx->GetOutputOp(operand(i++))"
+
+     for arg in schema.positional_args:
+         if arg.is_lazy_value:
+             emplace_arguments.append(get_value(arg))
+             continue
+         emplace_arguments.append(f'"{arg.name}", {arg.name}')
+
+     emplace_arguments_str = "\n ".join(
+         [f"arguments.emplace_back({a});" for a in emplace_arguments]
+     )
+     emplace_kwarg_values = [
+         f'"{arg.name}", {get_value(arg)}' for arg in schema.keyword_values
+     ]
+     emplace_kwarg_scalars = [
+         f'"{arg.name}", {arg.name}' for arg in schema.keyword_scalars
+     ]
+     emplace_kwarguments = "\n ".join(
+         [
+             f"kwarguments.emplace_back({a});"
+             for a in emplace_kwarg_values + emplace_kwarg_scalars
+         ]
+     )
+     return f"""\
+ std::vector<torch::jit::NamedValue> arguments;
+ std::vector<torch::jit::NamedValue> kwarguments;
+ arguments.reserve({len(emplace_arguments)});
+ kwarguments.reserve({len(emplace_kwarg_values + emplace_kwarg_scalars)});
+ size_t i = 0;
+ {emplace_arguments_str}
+ {emplace_kwarguments}
+ torch::lazy::TSOpVector {schema.aten_name}_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
+ TORCH_CHECK_EQ({schema.aten_name}_out.size(), {len(schema.returns)});
+
+ return {schema.aten_name}_out;
+ """
llmeval-env/lib/python3.10/site-packages/torchgen/dest/native_functions.py ADDED
@@ -0,0 +1,64 @@
+ from typing import List, Optional, Union
+
+ import torchgen.api.meta as meta
+ import torchgen.api.structured as structured
+ from torchgen.api.types import kernel_signature
+
+ from torchgen.context import with_native_function_and_index
+ from torchgen.model import BackendIndex, NativeFunction, NativeFunctionsGroup
+ from torchgen.utils import mapMaybe
+
+
+ @with_native_function_and_index
+ def gen_unstructured(f: NativeFunction, backend_index: BackendIndex) -> Optional[str]:
+     sig = kernel_signature(f, backend_index)
+     metadata = backend_index.get_kernel(f)
+     if metadata is None:
+         return None
+     if "legacy::" in metadata.kernel:
+         return None
+     else:
+         prefix = "static" if backend_index.external else "TORCH_API"
+         return f"{prefix} {sig.decl(name=metadata.kernel)};"
+
+
+ @with_native_function_and_index
+ def gen_structured(g: NativeFunctionsGroup, backend_index: BackendIndex) -> List[str]:
+     meta_name = meta.name(g)
+     out_args = structured.impl_arguments(g)
+     metadata = backend_index.get_kernel(g)
+     if metadata is None:
+         return []
+     prefix = "" if backend_index.external else "TORCH_API "
+     return [
+         f"""\
+ struct {prefix}structured_{metadata.kernel} : public at::meta::structured_{meta_name} {{
+ void impl({', '.join(a.decl() for a in out_args)});
+ }};
+ """
+     ]
+
+
+ # Generates NativeFunctions.h, a list of forward declarations of all
+ # actual kernel definitions we keep in aten/src/ATen/native/
+ @with_native_function_and_index
+ def compute_native_function_declaration(
+     g: Union[NativeFunctionsGroup, NativeFunction], backend_index: BackendIndex
+ ) -> List[str]:
+     metadata = backend_index.get_kernel(g)
+     if isinstance(g, NativeFunctionsGroup):
+         if metadata is not None and metadata.structured:
+             if backend_index.external:
+                 # Structured hasn't been tested with external backends yet.
+                 raise AssertionError(
+                     "Structured external backend functions are not implemented yet."
+                 )
+             else:
+                 return gen_structured(g, backend_index)
+         else:
+             return list(
+                 mapMaybe(lambda f: gen_unstructured(f, backend_index), g.functions())
+             )
+     else:
+         x = gen_unstructured(g, backend_index)
+         return [] if x is None else [x]
llmeval-env/lib/python3.10/site-packages/torchgen/dest/register_dispatch_key.py ADDED
@@ -0,0 +1,989 @@
1
+ import itertools
2
+ import textwrap
3
+ from dataclasses import dataclass
4
+ from typing import List, Literal, Optional, Tuple, Union
5
+
6
+ import torchgen.api.cpp as cpp
7
+ import torchgen.api.meta as meta
8
+ import torchgen.api.structured as structured
9
+ from torchgen.api.translate import translate
10
+ from torchgen.api.types import (
11
+ BaseCType,
12
+ Binding,
13
+ ConstRefCType,
14
+ CppSignature,
15
+ CppSignatureGroup,
16
+ DispatcherSignature,
17
+ Expr,
18
+ kernel_signature,
19
+ MutRefCType,
20
+ NamedCType,
21
+ NativeSignature,
22
+ tensorT,
23
+ )
24
+
25
+ from torchgen.context import method_with_native_function, native_function_manager
26
+ from torchgen.model import (
27
+ Argument,
28
+ BackendIndex,
29
+ DeviceCheckType,
30
+ DispatchKey,
31
+ gets_generated_out_inplace_wrapper,
32
+ is_cuda_dispatch_key,
33
+ NativeFunction,
34
+ NativeFunctionsGroup,
35
+ SchemaKind,
36
+ TensorOptionsArguments,
37
+ )
38
+ from torchgen.selective_build.selector import SelectiveBuilder
39
+ from torchgen.utils import assert_never, mapMaybe, Target
40
+
41
+
42
+ def gen_registration_headers(
43
+ backend_index: BackendIndex,
44
+ per_operator_headers: bool,
45
+ rocm: bool,
46
+ ) -> List[str]:
47
+ if per_operator_headers:
48
+ headers = ["#include <ATen/ops/as_strided_native.h>"]
49
+ else:
50
+ headers = ["#include <ATen/NativeFunctions.h>"]
51
+
52
+ if backend_index.dispatch_key in (DispatchKey.CPU, DispatchKey.Meta):
53
+ headers.append("#include <ATen/EmptyTensor.h>")
54
+ elif backend_index.dispatch_key == DispatchKey.CUDA:
55
+ if rocm:
56
+ headers.append("#include <ATen/hip/EmptyTensor.h>")
57
+ else:
58
+ headers.append("#include <ATen/cuda/EmptyTensor.h>")
59
+ elif backend_index.dispatch_key == DispatchKey.MPS:
60
+ headers.append("#include <ATen/mps/EmptyTensor.h>")
61
+ elif per_operator_headers:
62
+ headers += [
63
+ "#include <ATen/ops/empty.h>",
64
+ "#include <ATen/ops/empty_strided.h>",
65
+ "#include <ATen/ops/_copy_from_and_resize.h>",
66
+ "#include <ATen/ops/_copy_from.h>",
67
+ ]
68
+ else:
69
+ headers.append("#include <ATen/Functions.h>")
70
+
71
+ return headers
72
+
73
+
74
+ def gen_empty_impl_names(
75
+ backend_index: BackendIndex,
76
+ ) -> Tuple[Optional[str], Optional[str]]:
77
+ empty_impl = None
78
+ empty_strided_impl = None
79
+
80
+ if backend_index.dispatch_key in (
81
+ DispatchKey.Meta,
82
+ DispatchKey.CPU,
83
+ DispatchKey.CUDA,
84
+ DispatchKey.MPS,
85
+ ):
86
+ dispatch = str(backend_index.dispatch_key).lower()
87
+ empty_impl = f"at::detail::empty_{dispatch}"
88
+ empty_strided_impl = f"at::detail::empty_strided_{dispatch}"
89
+ elif backend_index.dispatch_key in (
90
+ DispatchKey.CompositeExplicitAutogradNonFunctional,
91
+ DispatchKey.QuantizedCPU,
92
+ DispatchKey.QuantizedCUDA,
93
+ ):
94
+ empty_impl = "at::empty"
95
+ empty_strided_impl = "at::empty_strided"
96
+
97
+ return empty_impl, empty_strided_impl
98
+
99
+
100
+ def gen_create_out_helper(backend_index: BackendIndex) -> List[str]:
101
+ if backend_index.dispatch_key == DispatchKey.Meta:
102
+ empty_options = "options.device(at::kMeta)"
103
+ else:
104
+ empty_options = "options"
105
+
106
+ empty_impl, empty_strided_impl = gen_empty_impl_names(backend_index)
107
+ if empty_impl is None:
108
+ return []
109
+
110
+ return [
111
+ f"""
112
+ Tensor create_out(IntArrayRef sizes, IntArrayRef strides, const TensorOptions &options) {{
113
+ if (strides.empty()) {{
114
+ return {empty_impl}(sizes, {empty_options});
115
+ }} else {{
116
+ return {empty_strided_impl}(sizes, strides, {empty_options});
117
+ }}
118
+ }}
119
+ """
120
+ ]
121
+
122
+
123
+ def gen_maybe_create_proxy_helper(backend_index: BackendIndex) -> List[str]:
124
+ _, empty_strided_impl = gen_empty_impl_names(backend_index)
125
+ return (
126
+ []
127
+ if empty_strided_impl is None
128
+ else [
129
+ f"""
130
+ c10::optional<Tensor> maybe_create_proxy(const Tensor &out, IntArrayRef sizes, IntArrayRef strides, const TensorOptions &options) {{
131
+ if (out.strides() != strides) {{
132
+ return {empty_strided_impl}(sizes, strides, options);
133
+ }}
134
+ return c10::nullopt;
135
+ }}
136
+ """
137
+ ]
138
+ )
139
+
140
+
141
+ def gen_resize_out_helper(backend_index: BackendIndex) -> List[str]:
142
+ if backend_index.dispatch_key == DispatchKey.CompositeExplicitAutogradNonFunctional:
143
+ # The function isn't used by this key (since only functional ops have a kernel for this key),
144
+ # so we need to not include it to avoid a defined-but-not-used error.
145
+ return []
146
+ return [
147
+ """
148
+ void resize_out(const Tensor &out, IntArrayRef sizes, IntArrayRef strides, const TensorOptions &options) {
149
+ TORCH_CHECK(options.dtype() == out.dtype(),
150
+ "Expected out tensor to have dtype ", options.dtype(), ", but got ", out.dtype(), " instead");
151
+ TORCH_CHECK(options.device() == out.device(),
152
+ "Expected out tensor to have device ", options.device(), ", but got ", out.device(), " instead");
153
+ const bool resized = at::native::resize_output(out, sizes);
154
+ // Only restride if a resize occurred; otherwise we ignore the (advisory)
155
+ // strides from the meta function and directly use the output tensor's
156
+ // preexisting strides
157
+ if (resized) {
158
+ if (!strides.empty()) {
159
+ TORCH_INTERNAL_ASSERT(!options.memory_format_opt().has_value());
160
+ // TODO: avoid the redispatch here
161
+ out.as_strided_(sizes, strides);
162
+ } else if (options.memory_format_opt().has_value()) {
163
+ out.unsafeGetTensorImpl()->empty_tensor_restride(*options.memory_format_opt());
164
+ }
165
+ }
166
+ }
167
+ """
168
+ ]
169
+
170
+
171
+ def gen_check_inplace_helper(backend_index: BackendIndex) -> List[str]:
172
+ return [
173
+ """
174
+ void check_inplace(const Tensor &self, IntArrayRef sizes, const TensorOptions &options) {
175
+ // These checks are needed on those operators that:
176
+ // 1) don't use 'TensorIterator' (e.g. 'addmm' and 'baddbmm')
177
+ // 2) have particular typing rules (e.g. 'cumsum' and 'cumprod')
178
+ // For other operators (e.g. 'add'), 'TensorIterator' already checks
179
+ // these things separately.
180
+ TORCH_CHECK(options.dtype() == self.dtype(),
181
+ "Bad in-place call: ",
182
+ "input tensor dtype ", self.dtype(), " and output tensor dtype ", options.dtype(), " should match");
183
+ TORCH_CHECK(options.device() == self.device(),
184
+ "Bad in-place call: ",
185
+ "input tensor device ", self.device(), " and output tensor device ", options.device(), " should match");
186
+ TORCH_CHECK(sizes == self.sizes(),
187
+ "Bad in-place call: ",
188
+ "input tensor size ", self.sizes(), " and output tensor size ", sizes, " should match");
189
+ }
190
+ """
191
+ ]
192
+
193
+
194
+ def gen_registration_helpers(backend_index: BackendIndex) -> List[str]:
195
+ return [
196
+ *gen_create_out_helper(backend_index),
197
+ *gen_resize_out_helper(backend_index),
198
+ *gen_check_inplace_helper(backend_index),
199
+ *gen_maybe_create_proxy_helper(backend_index),
200
+ ]
201
+
202
+
203
+ # Generates Register{dispatch}.cpp (e.g., RegisterCPU.cpp).
204
+ #
205
+ # - The primary function of this file is to register all of the
206
+ # implementations for the given dispatch key to the dispatcher,
207
+ # so they are available for use in PyTorch. If dispatch is
208
+ # None, we generate schema (def) registrations and catchall
209
+ # registrations.
210
+ # - The secondary function of this file is to generate a wrapper
211
+ # around functions. In CPUType these wrappers do nothing
212
+ # (and should be removed), but in other cases they handle
213
+ # DeviceGuard. A small extra benefit of wrappers is that they
214
+ # are not overloaded, so they can be used in the registration
215
+ # API without having to disambiguate which overload you want
216
+ # (as would be the case if you directly registered native::
217
+ # functions).
218
+ # - The tertiary function of this file is to generate *static*
219
+ # cpp API bindings which can be used to bypass dispatcher
220
+ # directly to kernels, but with user-friendly cpp-style API
221
+ @dataclass(frozen=True)
222
+ class RegisterDispatchKey:
223
+ backend_index: BackendIndex
224
+
225
+ target: Literal[
226
+ Target.ANONYMOUS_DEFINITION,
227
+ Target.NAMESPACED_DEFINITION,
228
+ Target.NAMESPACED_DECLARATION,
229
+ Target.REGISTRATION,
230
+ ]
231
+
232
+ # Selector object to determine which operators to generate
233
+ # registration code for.
234
+ selector: SelectiveBuilder
235
+
236
+ # Whether or not we are actually code-genning for ROCm
237
+ rocm: bool
238
+
239
+ # Whether or not to generate symint registrations. External users
240
+ # of codegen who don't care about symints can set this to false to get
241
+ # non-SymInt codegen
242
+ symint: bool
243
+
244
+ # The class that all unstructured native functions live under. This is used to improve
245
+ # compiler error messages when a kernel writer adds a native function with the wrong signature.
246
+ # This is only used in unstructured kernels, since structured kernels already live in a class.
247
+ # Finally, this field is currently Optional because it is only used by external backends.
248
+ # It would be nice if we could add the same logic to in-tree kernels too, but that requires updating
249
+ # all of the existing kernel signatures scattered across aten/src/ATen/native.
250
+ class_method_name: Optional[str]
251
+
252
+ # Only set to true in lightweight dispatch. If lightweight dispatch is enabled we are registering
253
+ # operators into JIT op registry, thus we need to avoid generating code to register into the dispatcher.
254
+ skip_dispatcher_op_registration: bool
255
+
256
+ @staticmethod
257
+ def gen_device_check(
258
+ type: DeviceCheckType, args: List[Argument], method_name: str
259
+ ) -> str:
260
+ if type == DeviceCheckType.NoCheck:
261
+ return " // No device check\n"
262
+
263
+ device_check = "c10::optional<Device> common_device = nullopt;\n"
264
+ device_check += "(void)common_device; // Suppress unused variable warning\n"
265
+ for arg in args:
266
+ # Only tensor like arguments are eligible
267
+ if arg.type.is_tensor_like():
268
+ device_check += f"""
269
+ c10::impl::check_and_update_common_device(common_device, {arg.name}, "{method_name}", "{arg.name}");"""
270
+ return device_check
271
+
272
+ @method_with_native_function
273
+ def __call__(self, f: Union[NativeFunctionsGroup, NativeFunction]) -> List[str]:
274
+ if isinstance(f, NativeFunctionsGroup):
275
+ g: NativeFunctionsGroup = f
276
+ # Note: We call gen_structured() if the operator is marked structured, regardless of the backend.
277
+ # gen_structured() has special logic to handle auto-generated kernels.
278
+ if g.structured:
279
+ return self.gen_structured(g)
280
+ else:
281
+ return list(
282
+ mapMaybe(lambda f: self.gen_unstructured(f, g), g.functions())
283
+ )
284
+ elif isinstance(f, NativeFunction):
285
+ r = self.gen_unstructured(f)
286
+ return [] if r is None else [r]
287
+ else:
288
+ assert_never(f)
289
+
290
+ def wrapper_kernel_sig(
291
+ self, f: NativeFunction
292
+ ) -> Union[NativeSignature, DispatcherSignature]:
293
+ # The prefix is just to ensure uniqueness. The Dispatcher API doesn't guarantee unique kernel names.
294
+ return DispatcherSignature.from_schema(
295
+ f.func,
296
+ prefix=f"wrapper_{self.backend_index.dispatch_key}_{f.func.name.overload_name}_",
297
+ symint=self.symint,
298
+ )
299
+
300
+ def gen_out_inplace_wrapper(
301
+ self, f: NativeFunction, g: Optional[NativeFunctionsGroup]
302
+ ) -> Optional[str]:
303
+ if g is None:
304
+ return None
305
+ k = f.func.kind()
306
+ if k is SchemaKind.inplace:
307
+ copy_op = "at::_copy_from"
308
+ elif k is SchemaKind.out:
309
+ copy_op = "at::_copy_from_and_resize"
310
+ else:
311
+ raise AssertionError("gen_out_inplace_wrapper called on a functional op")
312
+
313
+ sig = self.wrapper_kernel_sig(f)
314
+ name = sig.name()
315
+
316
+ func_res = f"{name}_tmp"
317
+ return_names = cpp.return_names(f)
318
+ if len(return_names) > 1:
319
+ updates = "\n ".join(
320
+ f"{copy_op}(std::get<{i}>({func_res}), {ret_name});"
321
+ for i, ret_name in enumerate(return_names)
322
+ )
323
+ returns = f'{sig.returns_type().cpp_type()}({", ".join(return_names)})'
324
+ elif len(return_names) == 1:
325
+ ret_name = return_names[0]
326
+ updates = f"{copy_op}({func_res}, {ret_name});"
327
+ returns = ret_name
328
+ else:
329
+ assert len(f.func.arguments.out) == 1
330
+ returns = ""
331
+ out_arg = f.func.arguments.out[0]
332
+ if out_arg.type.is_list_like():
333
+ updates = f"""\
334
+ for (int64_t i = 0; i < {func_res}.size(); ++i) {{
335
+ {copy_op}({func_res}[i], {out_arg.name}[i]);
336
+ }}"""
337
+ else:
338
+ updates = f"{copy_op}({func_res}, {out_arg.name});"
339
+
340
+ functional_sig = self.wrapper_kernel_sig(g.functional)
341
+ wrapper_name = sig.name()
342
+
343
+ return f"""\
344
+ {sig.defn(name=wrapper_name)} {{
345
+ auto {func_res} = {functional_sig.name()}({", ".join(e.expr for e in translate(sig.arguments(), functional_sig.arguments()))});
346
+ {updates}
347
+ return {returns};
348
+ }}
349
+ """
350
+
351
+ def gen_structured(self, g: NativeFunctionsGroup) -> List[str]:
352
+ metadata = self.backend_index.get_kernel(g)
353
+ if self.backend_index.dispatch_key == DispatchKey.Meta:
354
+ assert not self.backend_index.has_kernel(g.out), (
355
+ "Do not explicitly specify Meta dispatch key on structured "
356
+ "functions, they will be automatically generated for you"
357
+ )
358
+ elif (
359
+ self.backend_index.dispatch_key
360
+ == DispatchKey.CompositeExplicitAutogradNonFunctional
361
+ ):
362
+ assert not self.backend_index.has_kernel(g.out), (
363
+ "Do not explicitly specify CompositeExplicitAutograd dispatch key on structured "
364
+ "functions, they will be automatically generated for you"
365
+ )
366
+ elif metadata is None or not metadata.structured:
367
+ return list(mapMaybe(lambda f: self.gen_unstructured(f, g), g.functions()))
368
+ structured_gen = StructuredRegisterDispatchKey(
369
+ self.backend_index,
370
+ self.target,
371
+ self.selector,
372
+ self.rocm,
373
+ self.symint,
374
+ self.class_method_name,
375
+ self.skip_dispatcher_op_registration,
376
+ g,
377
+ )
378
+ return list(mapMaybe(structured_gen.gen_one, g.functions()))
379
+
380
+ def gen_unstructured(
381
+ self, f: NativeFunction, g: Optional[NativeFunctionsGroup] = None
382
+ ) -> Optional[str]:
383
+ with native_function_manager(f):
384
+ inplace_meta = False
385
+ gets_out_inplace_wrapper = False
386
+ if not self.backend_index.has_kernel(f):
387
+ if (
388
+ self.backend_index.dispatch_key == DispatchKey.Meta
389
+ and f.func.kind() is SchemaKind.inplace
390
+ and
391
+ # Defer to composites for meta implementation
392
+ not f.has_composite_kernel
393
+ and
394
+ # Inplace list operations are not supported
395
+ len(f.func.returns) == 1
396
+ ):
397
+ inplace_meta = True
398
+ elif (
399
+ not self.backend_index.use_out_as_primary
400
+ and g is not None
401
+ and gets_generated_out_inplace_wrapper(f, g, self.backend_index)
402
+ ):
403
+ # We want to generate inplace/out wrappers for ops that don't have a kernel for the backend.
404
+ gets_out_inplace_wrapper = True
405
+ else:
406
+ return None
407
+ if f.manual_kernel_registration:
408
+ return None
409
+
410
+ if (
411
+ self.target is Target.REGISTRATION
412
+ and not self.selector.is_native_function_selected(f)
413
+ ):
414
+ return None
415
+
416
+ sig = self.wrapper_kernel_sig(f)
417
+
418
+ name = sig.name()
419
+ returns_type = sig.returns_type().cpp_type()
420
+ args = sig.arguments()
421
+ args_str = ", ".join(a.defn() for a in args)
422
+
423
+ # See Note [Direct dispatch bindings]
424
+ cpp_sig_group = CppSignatureGroup.from_native_function(
425
+ f, method=False, fallback_binding=False
426
+ )
427
+
428
+ # TODO: dedupe this with the structured codegen
429
+ if self.target is Target.NAMESPACED_DECLARATION:
430
+ result = ""
431
+ for cpp_sig in cpp_sig_group.signatures(symint=self.symint):
432
+ result += f"TORCH_API {cpp_sig.decl()};\n"
433
+ return result
434
+ elif self.target is Target.NAMESPACED_DEFINITION:
435
+
436
+ def generate_defn(cpp_sig: CppSignature) -> str:
437
+ return f"""
438
+ {cpp_sig.defn()} {{
439
+ return {sig.name()}({', '.join(e.expr for e in translate(cpp_sig.arguments(), sig.arguments()))});
440
+ }}
441
+ """
442
+
443
+ result = ""
444
+ for cpp_sig in cpp_sig_group.signatures(symint=self.symint):
445
+ result += generate_defn(cpp_sig)
446
+ return result
447
+
448
+ elif self.target is Target.ANONYMOUS_DEFINITION:
449
+ # short circuit for inplace_meta
450
+ if inplace_meta:
451
+ assert f.func.arguments.self_arg is not None
452
+ self_arg_name = f.func.arguments.self_arg.argument.name
453
+ # TODO: handle in place on tensor list
454
+ return f"""
455
+ {returns_type} {name}({args_str}) {{
456
+ TORCH_CHECK_NOT_IMPLEMENTED({self_arg_name}.is_meta(),
457
+ "Cannot inplace into non-meta tensor with meta tensor argument");
458
+ return {self_arg_name};
459
+ }}
460
+ """
461
+
462
+ # short circuit for generated inplace/out wrappers
463
+ if gets_out_inplace_wrapper:
464
+ return self.gen_out_inplace_wrapper(f, g)
465
+
466
+ metadata = self.backend_index.get_kernel(f)
467
+ if metadata is None:
468
+ return None
469
+ if self.class_method_name is None:
470
+ impl_name = f"{metadata.cpp_namespace}::{metadata.kernel}"
471
+ else:
472
+ impl_name = f"{metadata.cpp_namespace}::{self.class_method_name}::{metadata.kernel}"
473
+
474
+ kernel_sig = kernel_signature(f, self.backend_index)
475
+
476
+ args_exprs_str = ", ".join(
477
+ e.expr
478
+ for e in translate(
479
+ sig.arguments(), kernel_sig.arguments(), method=False
480
+ )
481
+ )
482
+
483
+ device_check = " // No device check\n"
484
+ # Backends that require device guards presumably also require device checks.
485
+ if self.backend_index.device_guard:
486
+ device_check_args = itertools.chain(
487
+ f.func.arguments.out, f.func.arguments.flat_positional
488
+ )
489
+ device_check = RegisterDispatchKey.gen_device_check(
490
+ f.device_check, list(device_check_args), name
491
+ )
492
+
493
+ device_guard = "// DeviceGuard omitted" # default
494
+ if f.device_guard and self.backend_index.device_guard:
495
+ has_tensor_options = any(
496
+ isinstance(a, TensorOptionsArguments)
497
+ for a in f.func.arguments.non_out
498
+ )
499
+ if has_tensor_options:
500
+ # kernel is creating a tensor
501
+ device_guard = """
502
+ const DeviceGuard device_guard(device_or_default(device));"""
503
+
504
+ # CUDA requires special handling
505
+ if is_cuda_dispatch_key(self.backend_index.dispatch_key):
506
+ device_guard = (
507
+ f"globalContext().lazyInitCUDA();\n{device_guard}"
508
+ )
509
+ else:
510
+ # kernel is operating on existing tensors
511
+
512
+ # There is an order of precedence for which argument we use to do the
513
+ # device guard. This describes the precedence order.
514
+ self_arg = (
515
+ [f.func.arguments.self_arg.argument]
516
+ if f.func.arguments.self_arg is not None
517
+ else []
518
+ )
519
+ candidate_args = itertools.chain(
520
+ self_arg,
521
+ f.func.arguments.out,
522
+ f.func.arguments.flat_positional,
523
+ )
524
+
525
+ # Only tensor like arguments are eligible
526
+ device_of = next(
527
+ (
528
+ f"{a.name}"
529
+ for a in candidate_args
530
+ if a.type.is_tensor_like()
531
+ ),
532
+ None,
533
+ )
534
+ if device_of is not None:
535
+ device_guard = f"const OptionalDeviceGuard device_guard(device_of({device_of}));"
536
+
537
+ return f"""\
538
+ namespace {{
539
+
540
+ {returns_type} {name}({args_str}) {{
541
+ {device_check}
542
+
543
+ {device_guard}
544
+ return {impl_name}({args_exprs_str});
545
+ }}
546
+
547
+ }} // anonymous namespace
548
+ """
549
+
550
+ elif self.target is Target.REGISTRATION:
551
+ if f.manual_kernel_registration or self.skip_dispatcher_op_registration:
552
+ return None
553
+ else:
554
+ payload = f"TORCH_FN({name})"
555
+ return f'm.impl("{f.func.name}",\n{payload});\n'
556
+ else:
557
+ assert_never(self.target)
558
+
559
+
560
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
561
+ #
562
+ # STRUCTURED
563
+ #
564
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
565
+
566
+
567
+ @dataclass(frozen=True)
568
+ class StructuredRegisterDispatchKey(RegisterDispatchKey):
569
+ g: NativeFunctionsGroup
570
+
571
+ def gen_class_set_output_functions(
572
+ self, k: SchemaKind, parent_class: str, generate_super: bool
573
+ ) -> str:
574
+ if generate_super:
575
+ set_output_super = f"{parent_class}::set_output_raw_strided(output_idx, sizes, strides, options, names);"
576
+ else:
577
+ set_output_super = ""
578
+
579
+ def gen_set_output_function(name: str, maybe_create_proxy: bool) -> str:
580
+ return f"""
581
+ void set_output_{name}(
582
+ int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
583
+ TensorOptions options, DimnameList names
584
+ ) override {{
585
+ {textwrap.indent(self.gen_class_set_output_body(k, maybe_create_proxy), " ")}
586
+ if (!names.empty()) {{
587
+ namedinference::propagate_names(outputs_[output_idx], names);
588
+ }}
589
+ // super must happen after, so that downstream can use maybe_get_output
590
+ // to retrieve the output
591
+ {textwrap.indent(set_output_super, " ")}
592
+ }}
593
+ """
594
+
595
+ return f"""
596
+ {gen_set_output_function("strided", maybe_create_proxy=True)}
597
+ {gen_set_output_function("raw_strided", maybe_create_proxy=False)}
598
+ """
599
+
600
+ def gen_class_set_output_body(self, k: SchemaKind, maybe_create_proxy: bool) -> str:
601
+ if self.backend_index.dispatch_key in [
602
+ DispatchKey.CUDA,
603
+ DispatchKey.MPS,
604
+ DispatchKey.CompositeExplicitAutogradNonFunctional,
605
+ ]:
606
+ maybe_set_guard = """
607
+ auto current_device = guard_.current_device();
608
+ if (C10_UNLIKELY(current_device.has_value())) {
609
+ TORCH_INTERNAL_ASSERT(*current_device == options.device(),
610
+ "structured kernels don't support multi-device outputs");
611
+ } else {
612
+ guard_.reset_device(options.device());
613
+ }
614
+ """
615
+ maybe_set_guard_line = maybe_set_guard + "\n"
616
+ else:
617
+ maybe_set_guard_line = maybe_set_guard = ""
618
+
619
+ if maybe_create_proxy:
620
+ create_proxy = """
621
+ auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
622
+ if (C10_UNLIKELY(maybe_proxy.has_value())) {
623
+ proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
624
+ }
625
+ """
626
+ else:
627
+ create_proxy = ""
628
+
629
+ if k is SchemaKind.functional:
630
+ assert self.backend_index.dispatch_key in (
631
+ DispatchKey.Meta,
632
+ DispatchKey.CPU,
633
+ DispatchKey.CUDA,
634
+ DispatchKey.MPS,
635
+ DispatchKey.CompositeExplicitAutogradNonFunctional,
636
+ )
637
+ return f"""{maybe_set_guard_line}
638
+ outputs_[output_idx] = create_out(sizes, strides, options);"""
639
+ elif k is SchemaKind.inplace:
640
+ return f"""{maybe_set_guard_line}
641
+ const auto& out = outputs_[output_idx].get();
642
+ check_inplace(out, sizes, options);
643
+ {create_proxy}"""
644
+ elif k is SchemaKind.out:
645
+ return f"""{maybe_set_guard_line}
646
+ const auto& out = outputs_[output_idx].get();
647
+ resize_out(out, sizes, strides, options);
648
+ {create_proxy}"""
649
+ elif k is SchemaKind.mutable or k is SchemaKind.scratch:
650
+ raise AssertionError(
651
+ f"{k} structured operators are currently not supported"
652
+ )
653
+ else:
654
+ assert_never(k)
655
+
656
+ # returns the definition of a ctor, as well as how to construct
657
+ # this class to a variable named op
658
+ def gen_class_ctor(self, k: SchemaKind, class_name: str, returns: int) -> str:
659
+ if k is SchemaKind.functional:
660
+ return ""
661
+ elif k is SchemaKind.inplace:
662
+ # TODO: Make sure out argument is guaranteed to be self
663
+ return f"{class_name}(Tensor& self) : outputs_{{std::ref(self)}} {{}}"
664
+ elif k is SchemaKind.out:
665
+ out_args = ", ".join(f"Tensor& out{i}" for i in range(returns))
666
+ out_refs = ", ".join(f"std::ref(out{i})" for i in range(returns))
667
+ return f"{class_name}({out_args}) : outputs_{{ {out_refs} }} {{}}"
668
+ elif k is SchemaKind.mutable or k is SchemaKind.scratch:
669
+ raise AssertionError(
670
+ f"{k} structured operators are currently not supported"
671
+ )
672
+ else:
673
+ assert_never(k)
674
+
675
+ def gen_class(
676
+ self,
677
+ f: NativeFunction,
678
+ k: SchemaKind,
679
+ *,
680
+ class_name: str,
681
+ parent_class: str,
682
+ generate_super: bool,
683
+ ) -> str:
684
+ if k is SchemaKind.functional:
685
+ output_type = "Tensor"
686
+ output_value = "outputs_[output_idx]"
687
+ proxy_field = ""
688
+ elif k is SchemaKind.inplace:
689
+ output_type = "std::reference_wrapper<Tensor>"
690
+ output_value = "proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get()"
691
+ proxy_field = f"std::array<c10::optional<Tensor>, {len(f.func.returns)}> proxy_outputs_;"
692
+ elif k is SchemaKind.out:
693
+ output_type = "std::reference_wrapper<Tensor>"
694
+ output_value = "proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get()"
695
+ proxy_field = f"std::array<c10::optional<Tensor>, {len(f.func.returns)}> proxy_outputs_;"
696
+
697
+ if self.backend_index.dispatch_key == DispatchKey.CUDA:
698
+ if self.rocm:
699
+ guard_field = "c10::hip::OptionalHIPGuardMasqueradingAsCUDA guard_;"
700
+ else:
701
+ guard_field = "c10::cuda::OptionalCUDAGuard guard_;"
702
+ elif (
703
+ self.backend_index.dispatch_key
704
+ == DispatchKey.CompositeExplicitAutogradNonFunctional
705
+ ):
706
+ guard_field = "c10::OptionalDeviceGuard guard_;"
707
+ elif self.backend_index.dispatch_key == DispatchKey.MPS:
708
+ # TODO: Move to OptionalMPSGuard.
709
+ guard_field = "c10::OptionalDeviceGuard guard_;"
710
+ else:
711
+ guard_field = ""
712
+
713
+ indent = " " * 4
714
+ class_ctor_str = self.gen_class_ctor(k, class_name, len(f.func.returns))
715
+ lines = (
716
+ f"struct {class_name} final : public {parent_class} {{",
717
+ f"{textwrap.indent(class_ctor_str, indent)}",
718
+ f"{textwrap.indent(self.gen_class_set_output_functions(k, parent_class, generate_super), indent)}",
719
+ " const Tensor& maybe_get_output(int64_t output_idx) override {",
720
+ f" return {output_value};\n", # type: ignore[possibly-undefined] # TODO: audit
721
+ " }",
722
+ f" std::array<{output_type}, {len(f.func.returns)}> outputs_;", # type: ignore[possibly-undefined] # TODO: audit
723
+ f"{textwrap.indent(proxy_field, indent)}", # type: ignore[possibly-undefined] # TODO: audit
724
+ f"{textwrap.indent(guard_field, indent)}",
725
+ "};",
726
+ )
727
+ return "\n".join(line for line in lines if line)
728
+
729
+ @method_with_native_function
730
+ def gen_one(self, f: NativeFunction) -> Optional[str]:
731
+ assert not f.manual_kernel_registration
732
+
733
+ if (
734
+ self.target is Target.REGISTRATION
735
+ and not self.selector.is_native_function_selected(f)
736
+ ):
737
+ return None
738
+
739
+ # TODO: Now, there is something interesting going on here. In the code below,
740
+ # we generate CompositeExplicitAutogradNonFunctional implementations of functional and inplace
741
+ # based on the out implementation. But in fact, out is definable by
742
+ # functional too (just not very efficiently), and this is honestly the
743
+ # MORE likely situation for a backend implementor. How do we pick?
744
+ # Well, taking a page from Haskell type classes and default methods,
745
+ # we could conceivably register a circular definition (out in terms
746
+ # of functional, and functional in terms of out) and just require
747
+ # someone to implement one or the other. We'd have to do a little bit
748
+ # of work to not register one of these "weak" definitions unless there
749
+ # is a strong definition somewhere in the DAG! So it's not implemented yet.
750
+ if (
751
+ self.backend_index.dispatch_key
752
+ == DispatchKey.CompositeExplicitAutogradNonFunctional
753
+ and f.func.kind() is SchemaKind.out
754
+ ):
755
+ # Never generate a default implementation for out, that's what you
756
+ # have to define as a backend implementor
757
+ return None
758
+
759
+ # Note [Direct dispatch bindings]
760
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
761
+ # Signature of the non-dispatched function we'll expose in a header
762
+ # (e.g., at::cpu::add). We don't generate methods (TODO: do this
763
+ # when CPUTensor class is a thing); nor do we generate fallback
764
+ # bindings for manual_cpp_binding functions.
765
+ cpp_sig_group = CppSignatureGroup.from_native_function(
766
+ f, method=False, fallback_binding=False
767
+ )
768
+
769
+ # Signature of the wrapper function we'll register to the dispatcher
770
+ kern = self.backend_index.get_kernel(f)
771
+ sig = NativeSignature(
772
+ f.func,
773
+ prefix=f"wrapper_{self.backend_index.dispatch_key}_",
774
+ symint=kern is not None and kern.supports_symint(),
775
+ )
776
+
777
+ if self.target is Target.NAMESPACED_DECLARATION:
778
+ result = ""
779
+ for cpp_sig in cpp_sig_group.signatures(symint=self.symint):
780
+ result += f"TORCH_API {cpp_sig.decl()};\n"
781
+ return result
782
+
783
+ elif self.target is Target.NAMESPACED_DEFINITION:
784
+
785
+ def generate_defn(cpp_sig: CppSignature) -> str:
786
+ return f"""
787
+ {cpp_sig.defn()} {{
788
+ return {sig.name()}({', '.join(e.expr for e in translate(cpp_sig.arguments(), sig.arguments()))});
789
+ }}
790
+ """
791
+
792
+ result = ""
793
+ for cpp_sig in cpp_sig_group.signatures(symint=self.symint):
794
+ result += generate_defn(cpp_sig)
795
+ return result
796
+
797
+ elif self.target is Target.ANONYMOUS_DEFINITION:
798
+ k = f.func.kind()
799
+
800
+ # Construct the body of the wrapper function with signature sig
801
+ sig_body = []
802
+ # We'll use context to keep track of any variables we've brought
803
+ # into scope while generating code
804
+ context: List[Union[Binding, Expr]] = list(sig.arguments())
805
+
806
+ # Initialize the class corresponding to this structured
807
+ # operator; feeding it the output argument(s) if it is known
808
+ if self.backend_index.dispatch_key is DispatchKey.Meta:
809
+ class_name = f"structured_{meta.name(self.g)}_meta_{k.name}"
810
+ parent_class = f"at::meta::structured_{meta.name(self.g)}"
811
+ elif (
812
+ self.backend_index.dispatch_key
813
+ is DispatchKey.CompositeExplicitAutogradNonFunctional
814
+ ):
815
+ # TODO: dedup this branch
816
+ class_name = f"structured_{meta.name(self.g)}_default_backend_{k.name}"
817
+ parent_class = f"at::meta::structured_{meta.name(self.g)}"
818
+ else:
819
+ metadata = self.backend_index.get_kernel(self.g)
820
+ assert metadata is not None
821
+ class_name = f"structured_{metadata.kernel}_{k.name}"
822
+ parent_class = f"{metadata.cpp_namespace}::structured_{metadata.kernel}"
823
+
824
+ if self.backend_index.device_guard:
825
+ device_check_args = itertools.chain(
826
+ f.func.arguments.out, f.func.arguments.flat_positional
827
+ )
828
+ sig_body.append(
829
+ RegisterDispatchKey.gen_device_check(
830
+ f.device_check, list(device_check_args), sig.name()
831
+ )
832
+ )
833
+
834
+ if k is SchemaKind.functional:
835
+ sig_body.append(f"{class_name} op;")
836
+ elif k is SchemaKind.inplace:
837
+ sig_body.append(f"{class_name} op(self);")
838
+ elif k is SchemaKind.out:
839
+ out_args_str = ", ".join(a.name for a in f.func.arguments.out)
840
+ sig_body.append(f"{class_name} op({out_args_str});")
841
+
842
+ # Translate the input native arguments into structured
843
+ # arguments for the meta call
844
+ meta_exprs = ", ".join(
845
+ e.expr
846
+ for e in translate(
847
+ context, structured.meta_arguments(self.g), method=False
848
+ )
849
+ )
850
+
851
+ if self.g.out.precomputed:
852
+ # If this function group has precomputed elements, the meta function
853
+ # returns a struct containing them which must be saved so that it
854
+ # can be unpacked when generating code to call the impl.
855
+ sig_body.append(f"auto precompute = op.meta({meta_exprs});")
856
+
857
+ # Put all of the contents of the precompute struct into the context
858
+ # so that translate will be able to return the correct args for the
859
+ # call to the impl.
860
+ precomputed_values = [
861
+ *self.g.out.precomputed.replace.values(),
862
+ self.g.out.precomputed.add,
863
+ ]
864
+ for precomputed_elems in precomputed_values:
865
+ for arg in precomputed_elems:
866
+ context.append(
867
+ Expr(
868
+ expr=f"precompute.{arg.name}",
869
+ type=structured.argument_type(arg, binds=arg.name),
870
+ )
871
+ )
872
+
873
+ # Add a use of the precompute struct so FB internal compilers don't
874
+ # complain that there is an unused variable.
875
+ sig_body.append("(void)precompute;")
876
+ else:
877
+ sig_body.append(f"op.meta({meta_exprs});")
878
+
879
+ # After running meta, op.outputs_ is guaranteed to be valid;
880
+ # add it to the context
881
+ out_args = structured.out_arguments(self.g)
882
+ for i, out_arg in enumerate(out_args):
883
+ assert ConstRefCType(BaseCType(tensorT)) == out_arg.nctype.type
884
+
885
+ if k is SchemaKind.out:
886
+ expr = f"op.maybe_get_output({i})"
887
+ else:
888
+ expr = f"op.outputs_[{i}]"
889
+
890
+ context.append(
891
+ Expr(
892
+ expr=expr,
893
+ # TODO: Stop hardcoding that the output type is a Tensor. Note
894
+ # that for the codegen here this is fine because outputs_ is
895
+ # hardcoded to be tensor already
896
+ type=NamedCType(
897
+ out_arg.nctype.name, MutRefCType(BaseCType(tensorT))
898
+ ),
899
+ )
900
+ )
901
+
902
+ # With the expanded context, do the impl call (if not a meta
903
+ # function)
904
+ if (
905
+ self.backend_index.dispatch_key
906
+ == DispatchKey.CompositeExplicitAutogradNonFunctional
907
+ ):
908
+ # TODO: https://github.com/pytorch/pytorch/issues/53023
909
+ out_sig_group = CppSignatureGroup.from_native_function(
910
+ self.g.out, method=False, fallback_binding=f.manual_cpp_binding
911
+ )
912
+ out_sig = out_sig_group.most_faithful_signature()
913
+ api_name = out_sig.name()
914
+ out_exprs = ", ".join(
915
+ e.expr
916
+ for e in translate(context, out_sig.arguments(), method=False)
917
+ )
918
+ # TODO: I think this means structured won't work with method
919
+ # only functions (but maybe you're saved by faithful? iunno.)
920
+ # NB: Originally I wrote this as an at::redispatch call, but
921
+ # I got in trouble because that meant I needed a DispatchKeySet
922
+ # in the wrapper function, which meant I needed a DispatchKeySet
923
+ # in the DispatchKeyFunctions declarations, but the defined API
924
+ # there does NOT permit a dispatch key set. I think you can
925
+ # probably unwind this by calling some function to do the TLS
926
+ # fetch and get the DispatchKeySet when you don't have it, but
927
+ # I didn't do it for this version
928
+ sig_body.append(f"at::{api_name}({out_exprs});")
929
+ elif self.backend_index.dispatch_key != DispatchKey.Meta:
930
+ impl_exprs = ", ".join(
931
+ e.expr
932
+ for e in translate(
933
+ context, structured.impl_arguments(self.g), method=False
934
+ )
935
+ )
936
+ sig_body.append(f"op.impl({impl_exprs});")
937
+
938
+ # Go over each output, and check if there is a proxy created for it.
939
+ # If so, copy it over to the original output.
940
+ if k is SchemaKind.out or k is SchemaKind.inplace:
941
+ for i in range(len(f.func.returns)):
942
+ sig_body.append(
943
+ f"if (op.proxy_outputs_[{i}].has_value()) op.outputs_[{i}].get().copy_(*op.proxy_outputs_[{i}]);"
944
+ )
945
+
946
+ # Destructively return the final tensors
947
+ # TODO: Do this in translate instead
948
+ if k is SchemaKind.functional:
949
+ if len(f.func.returns) == 1:
950
+ ret_expr = "std::move(op.outputs_[0])" # small optimization
951
+ else:
952
+ moved = ", ".join(
953
+ f"std::move(op.outputs_[{i}])"
954
+ for i in range(len(f.func.returns))
955
+ )
956
+ ret_expr = f"std::make_tuple({moved})"
957
+ elif k is SchemaKind.inplace:
958
+ ret_expr = "self"
959
+ elif k is SchemaKind.out:
960
+ if len(f.func.returns) == 1:
961
+ ret_expr = f.func.arguments.out[0].name
962
+ else:
963
+ refs = ", ".join(a.name for a in f.func.arguments.out)
964
+ ret_expr = f"std::forward_as_tuple({refs})"
965
+ sig_body.append(f"return {ret_expr};") # type: ignore[possibly-undefined] # TODO: audit
966
+
967
+ sig_body_str = "\n".join(sig_body)
968
+
969
+ # For an overview of what this template code looks like, see
970
+ # https://github.com/pytorch/rfcs/pull/9
971
+ return f"""\
972
+ {self.gen_class(
973
+ f, k,
974
+ class_name=class_name,
975
+ parent_class=parent_class,
976
+ generate_super=self.g.out.structured_inherits is not None
977
+ )}
978
+
979
+ {sig.defn()} {{
980
+ {sig_body_str}
981
+ }}
982
+ """
983
+
984
+ elif self.target is Target.REGISTRATION:
985
+ return f'm.impl("{f.func.name}", TORCH_FN({sig.name()}));'
986
+ else:
987
+ assert_never(self.target)
988
+ # Silence mypy's "Missing return statement" error
989
+ return None
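
The helpers above are pure string generation over torchgen's model objects. As a minimal, hedged sketch of how they might be exercised in isolation (assuming this file is torchgen/dest/register_dispatch_key.py, as the package layout suggests), the snippet below builds a bare BackendIndex for the CPU dispatch key and prints the emitted C++ registration headers and helper functions. The BackendIndex constructor fields are read off torchgen.model in this tree and are an assumption, not a documented public API.

from torchgen.model import BackendIndex, DispatchKey
from torchgen.dest.register_dispatch_key import (
    gen_registration_headers,
    gen_registration_helpers,
)

# Assumed field layout of torchgen.model.BackendIndex in this source tree.
cpu_index = BackendIndex(
    dispatch_key=DispatchKey.CPU,
    use_out_as_primary=True,   # assumed default for an in-tree backend
    device_guard=False,        # CPU kernels do not need a device guard
    external=False,            # in-tree backend
    index={},                  # no per-operator kernel metadata needed here
)

# Headers included by the generated RegisterCPU.cpp, followed by the
# create_out/resize_out/check_inplace/maybe_create_proxy helper definitions.
print("\n".join(gen_registration_headers(cpu_index, per_operator_headers=False, rocm=False)))
print("\n".join(gen_registration_helpers(cpu_index)))
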
llmeval-env/lib/python3.10/site-packages/torchgen/dest/ufunc.py ADDED
@@ -0,0 +1,545 @@
1
+ from dataclasses import dataclass
2
+ from typing import Dict, List, Optional, Sequence, Tuple, Union
3
+
4
+ import torchgen.api.ufunc as ufunc
5
+ from torchgen.api.translate import translate
6
+ from torchgen.api.types import (
7
+ BaseCType,
8
+ Binding,
9
+ CType,
10
+ Expr,
11
+ NamedCType,
12
+ opmath_t,
13
+ scalar_t,
14
+ StructuredImplSignature,
15
+ VectorizedCType,
16
+ )
17
+ from torchgen.api.ufunc import UfunctorBindings
18
+ from torchgen.context import with_native_function
19
+ from torchgen.model import (
20
+ Argument,
21
+ BaseTy,
22
+ BaseType,
23
+ DispatchKey,
24
+ NativeFunctionsGroup,
25
+ ScalarType,
26
+ UfuncKey,
27
+ )
28
+ from torchgen.utils import OrderedSet
29
+
30
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
31
+ #
32
+ # CUDA STUFF
33
+ #
34
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
35
+
36
+ # NB: not bothering to generate dispatch stub forward declaration in header,
37
+ # we can just paste it wherever necessary
38
+
39
+ # TODO: use BackendIndex
40
+ # dispatch_key: DispatchKey # only CPU/CUDA right now
41
+
42
+
43
+ # Represents functors for implementing CUDA ufuncs.
44
+ # Functors are templated by scalar_t because when USERS instantiate functors
45
+ # they are templated. A functor looks something like this:
46
+ #
47
+ # template <typename scalar_t>
48
+ # struct CUDAFunctorOnSelf_add {
49
+ # using opmath_t = at::opmath_type<scalar_t>;
50
+ # opmath_t other_;
51
+ # opmath_t alpha_;
52
+ # CUDAFunctorOnSelf_add(opmath_t other, opmath_t alpha)
53
+ # : other_(other), alpha_(alpha) {}
54
+ # __device__ scalar_t operator()(scalar_t self) {
55
+ # return ufunc::add(static_cast<opmath_t>(self), other_, alpha_);
56
+ # }
57
+ # };
58
+ #
59
+ @dataclass(frozen=True)
60
+ class UfunctorSignature:
61
+ g: NativeFunctionsGroup
62
+ scalar_tensor_idx: Optional[int]
63
+ name: str
64
+
65
+ def arguments(self) -> UfunctorBindings:
66
+ return ufunc.ufunctor_arguments(
67
+ self.g, scalar_tensor_idx=self.scalar_tensor_idx, scalar_t=scalar_t
68
+ )
69
+
70
+ def fields(self) -> List[Binding]:
71
+ # fields are renamed to have a trailing underscore, as is conventional
72
+ return [b.rename(f"{b.name}_") for b in self.arguments().ctor]
73
+
74
+ def returns_type(self) -> CType:
75
+ # TODO: don't hardcode; return type will be inferred based on tags on
76
+ # the native function
77
+ return BaseCType(scalar_t)
78
+
79
+ def decl_fields(self) -> str:
80
+ return "\n".join(f"{f.type} {f.name};" for f in self.fields())
81
+
82
+ def inline_defn_ctor(self) -> str:
83
+ args_str = ", ".join(a.decl() for a in self.arguments().ctor)
84
+ # NB: hypothetically could do this with translate but the
85
+ # transition here is very regular
86
+ init_str = ", ".join(f"{a.name}_({a.name})" for a in self.arguments().ctor)
87
+ return f"{self.name}({args_str}) : {init_str} {{}}"
88
+
89
+ def decl_apply(self) -> str:
90
+ args_str = ", ".join(a.decl() for a in self.arguments().apply)
91
+ return f"{self.returns_type().cpp_type()} operator()({args_str}) const"
92
+
93
+
94
+ @dataclass(frozen=True)
95
+ class UfuncSignature:
96
+ g: NativeFunctionsGroup
97
+ name: str
98
+ compute_t: CType
99
+
100
+ def arguments(self) -> List[Binding]:
101
+ return ufunc.ufunc_arguments(self.g, compute_t=self.compute_t)
102
+
103
+ def call(self, ctx: Sequence[Union[Binding, Expr]]) -> str:
104
+ return f"{self.name}({', '.join(a.expr for a in translate(ctx, self.arguments()))})"
105
+
106
+
107
+ # steps:
108
+ # 1. take the functional signature
109
+ # 2. use api.ufunc to convert it to template signature. this establishes
110
+ # the type of the template function
111
+ # 3. use api.ufunc (II) to generate a split struct / operator() signature.
112
+ # this establish context in which we call the template signature
113
+ #
114
+ # StructuredImplSignature context
115
+ # ~> functor constructor sig
116
+ #
117
+ # Functor constructor context
118
+ # ~> functor fields sig
119
+ #
120
+ # Functor apply context (functor fields + functor apply sig)
121
+ # ~> template sig
122
+ #
123
+
124
+
125
+ def eligible_for_binary_scalar_specialization(g: NativeFunctionsGroup) -> bool:
126
+ num_tensors = sum(
127
+ 1 for a in g.functional.func.arguments.flat_non_out if a.type.is_tensor_like()
128
+ )
129
+ return num_tensors == 2
130
+
131
+
132
+ def compute_ufunc_cuda_functors(
133
+ g: NativeFunctionsGroup,
134
+ ) -> Tuple[Dict[ScalarType, Dict[UfuncKey, UfunctorSignature]], str]:
135
+ # First, build the functors.
136
+ ufunctor_sigs: Dict[ScalarType, Dict[UfuncKey, UfunctorSignature]] = {}
137
+ ufunctors: List[str] = []
138
+ loops = g.out.ufunc_inner_loop
139
+ scalar_tensor_idx_lookup = {
140
+ UfuncKey.CUDAFunctorOnSelf: 1,
141
+ UfuncKey.CUDAFunctorOnOther: 0,
142
+ UfuncKey.CUDAFunctor: None,
143
+ }
144
+ if eligible_for_binary_scalar_specialization(g):
145
+ keys = [
146
+ UfuncKey.CUDAFunctorOnSelf,
147
+ UfuncKey.CUDAFunctorOnOther,
148
+ UfuncKey.CUDAFunctor,
149
+ ]
150
+ else:
151
+ keys = [UfuncKey.CUDAFunctor]
152
+ for k in [UfuncKey.CUDAFunctorOnSelf, UfuncKey.CUDAFunctorOnOther]:
153
+ assert k not in loops, f"cannot use {k} on non-binary function"
154
+ for k in keys:
155
+ # If the key was directly defined, skip functor codegen; we assume the
156
+ # user has already done it for us
157
+ if k in loops:
158
+ ufunctor_sig = UfunctorSignature(
159
+ g, scalar_tensor_idx=scalar_tensor_idx_lookup[k], name=loops[k].name
160
+ )
161
+ for dtype in loops[k].supported_dtypes:
162
+ ufunctor_sigs.setdefault(dtype, {})[k] = ufunctor_sig
163
+ continue
164
+
165
+ # Note [ScalarOnly and Generic must match names for CUDA]
166
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
167
+ # Otherwise, look in ANY of the generic entries. For simplicity of
168
+ # codegen, if both ScalarOnly and Generic are defined, the ufunc name
169
+ # must match (if they didn't match, we'd have to generate distinct
170
+ # functors per dtype, which is awful, so we're not going to do it unless
171
+ # someone really forces us to)
172
+ ufunc_name = None
173
+ supported_dtypes: OrderedSet[ScalarType] = OrderedSet()
174
+ for lk in [UfuncKey.ScalarOnly, UfuncKey.Generic]:
175
+ if lk not in loops:
176
+ continue
177
+ if ufunc_name is None:
178
+ ufunc_name = loops[lk].name
179
+ else:
180
+ # See Note [ScalarOnly and Generic must match names for CUDA]
181
+ assert (
182
+ ufunc_name == loops[lk].name
183
+ ), "ScalarOnly and Generic must have same ufunc name"
184
+ supported_dtypes |= loops[lk].supported_dtypes
185
+ assert ufunc_name is not None
186
+
187
+ name = f"{k}_{ufunc_name}"
188
+ ufunctor_sig = UfunctorSignature(
189
+ g, scalar_tensor_idx=scalar_tensor_idx_lookup[k], name=name
190
+ )
191
+ for dtype in supported_dtypes:
192
+ ufunctor_sigs.setdefault(dtype, {})[k] = ufunctor_sig
193
+
194
+ ufunc_sig = UfuncSignature(
195
+ g, name=f"ufunc::{ufunc_name}", compute_t=BaseCType(opmath_t)
196
+ )
197
+ apply_ctx = ufunctor_sig.fields() + ufunctor_sig.arguments().apply
198
+ ufunctors.append(
199
+ f"""
200
+ template <typename scalar_t>
201
+ struct {ufunctor_sig.name} {{
202
+ using opmath_t = at::opmath_type<scalar_t>;
203
+ {ufunctor_sig.decl_fields()}
204
+ {ufunctor_sig.inline_defn_ctor()}
205
+ __device__ {ufunctor_sig.decl_apply()} {{
206
+ return {ufunc_sig.call(apply_ctx)};
207
+ }}
208
+ }};
209
+ """
210
+ )
211
+
212
+ return ufunctor_sigs, "\n".join(ufunctors)
213
+
214
+
215
+ @dataclass(frozen=True)
216
+ class BinaryScalarSpecializationConfig:
217
+ scalar_idx: int
218
+ ctor_tensor: str
219
+ ufunc_key: UfuncKey
220
+
221
+
222
+ BinaryScalarSpecializationConfigs = [
223
+ BinaryScalarSpecializationConfig(
224
+ scalar_idx=0,
225
+ ctor_tensor="self",
226
+ ufunc_key=UfuncKey.CUDAFunctorOnOther,
227
+ ),
228
+ BinaryScalarSpecializationConfig(
229
+ scalar_idx=1,
230
+ ctor_tensor="other",
231
+ ufunc_key=UfuncKey.CUDAFunctorOnSelf,
232
+ ),
233
+ ]
234
+
235
+
236
+ def compute_ufunc_cuda_dtype_body(
237
+ g: NativeFunctionsGroup,
238
+ dtype: ScalarType,
239
+ inner_loops: Dict[UfuncKey, UfunctorSignature],
240
+ parent_ctx: Sequence[Binding],
241
+ ) -> str:
242
+ body = "using opmath_t = at::opmath_type<scalar_t>;"
243
+ body += "if (false) {}\n" # for ease of codegen
244
+ for config in BinaryScalarSpecializationConfigs:
245
+ if config.ufunc_key not in inner_loops:
246
+ continue
247
+ ufunctor_sig = inner_loops[config.ufunc_key]
248
+ scalar_idx = config.scalar_idx + 1
249
+ # Make a copy and at the same time widen the type (not permissible
250
+ # without copy; we don't want to mutate the input argument anyway)
251
+ ctx: List[Union[Expr, Binding]] = list(parent_ctx)
252
+ ctx.append(
253
+ Expr(
254
+ expr=f"iter.scalar_value<opmath_t>({scalar_idx})",
255
+ type=NamedCType(config.ctor_tensor, BaseCType(opmath_t)),
256
+ )
257
+ )
258
+ ufunctor_ctor_exprs_str = ", ".join(
259
+ a.expr for a in translate(ctx, ufunctor_sig.arguments().ctor)
260
+ )
261
+
262
+ # NB: ufunctor must be allocated before iter.remove_operand is called,
263
+ # as it relies on iter
264
+ body += f"""\
265
+ else if (iter.is_cpu_scalar({scalar_idx})) {{
266
+ {ufunctor_sig.name}<scalar_t> ufunctor({ufunctor_ctor_exprs_str});
267
+ iter.remove_operand({scalar_idx});
268
+ gpu_kernel(iter, ufunctor);
269
+ }}"""
270
+
271
+ ufunctor_sig = inner_loops[UfuncKey.CUDAFunctor]
272
+ ufunctor_ctor_exprs_str = ", ".join(
273
+ a.expr for a in translate(parent_ctx, ufunctor_sig.arguments().ctor)
274
+ )
275
+ body += f"""
276
+ else {{
277
+ gpu_kernel(iter, {ufunctor_sig.name}<scalar_t>({ufunctor_ctor_exprs_str}));
278
+ }}
279
+ """
280
+ return body
281
+
282
+
283
+ @with_native_function
284
+ def compute_ufunc_cuda(g: NativeFunctionsGroup) -> str:
285
+ # First, build the functors, indexing them by dtype
286
+ ufunctor_sigs, ufunctors = compute_ufunc_cuda_functors(g)
287
+
288
+ # Next, build the conditionals
289
+ sig = StructuredImplSignature(g, ufunc.kernel_name(g, DispatchKey.CUDA))
290
+ dtype_cases = []
291
+ for dtype, inner_ufunc_sigs in ufunctor_sigs.items():
292
+ dtype_cases.append(
293
+ f"""
294
+ AT_DISPATCH_CASE(at::ScalarType::{dtype},
295
+ [&]() {{
296
+ {compute_ufunc_cuda_dtype_body(g, dtype, inner_ufunc_sigs, sig.arguments())}
297
+ }}
298
+ )
299
+ """
300
+ )
301
+
302
+ dtype_cases_str = "\n".join(dtype_cases)
303
+
304
+ stub_sig = StubSignature(g)
305
+
306
+ return f"""
307
+ {ufunctors}
308
+
309
+ {stub_sig.type_defn()};
310
+ {stub_sig.dispatch_decl()};
311
+
312
+ {stub_sig.kernel_defn()} {{
313
+ AT_DISPATCH_SWITCH(iter.common_dtype(), "{sig.name}",
314
+ {dtype_cases_str}
315
+ );
316
+ }}
317
+ REGISTER_DISPATCH({stub_sig.name}, &{stub_sig.kernel_name});
318
+
319
+ {sig.defn()} {{
320
+ {stub_sig.direct_call(sig.arguments())};
321
+ }}
322
+ """
323
+
324
+
325
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
326
+ #
327
+ # CPU STUFF
328
+ #
329
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
330
+
331
+
332
+ @dataclass(frozen=True)
333
+ class StubSignature:
334
+ g: NativeFunctionsGroup
335
+
336
+ @property
337
+ def name(self) -> str:
338
+ return f"{str(self.g.functional.func.name.name)}_stub"
339
+
340
+ @property
341
+ def kernel_name(self) -> str:
342
+ return f"{str(self.g.functional.func.name.name)}_kernel"
343
+
344
+ @property
345
+ def type_name(self) -> str:
346
+ return f"{str(self.g.functional.func.name.name)}_fn"
347
+
348
+ def arguments(self) -> List[Binding]:
349
+ return ufunc.stub_arguments(self.g)
350
+
351
+ def type(self) -> str:
352
+ cpp_args = self.arguments()
353
+ return f"void(*)(TensorIteratorBase&, {', '.join(a.type for a in cpp_args)})"
354
+
355
+ def dispatch_decl(self) -> str:
356
+ return f"DECLARE_DISPATCH({self.type_name}, {self.name})"
357
+
358
+ def dispatch_defn(self) -> str:
359
+ return f"DEFINE_DISPATCH({self.name})"
360
+
361
+ def kernel_defn(self) -> str:
362
+ return f"void {self.kernel_name}(TensorIteratorBase& iter, {', '.join(a.defn() for a in self.arguments())})"
363
+
364
+ def type_defn(self) -> str:
365
+ return f"using {self.type_name} = {self.type()}"
366
+
367
+ # must be called from context where this is TensorIteratorBase*
368
+ def call(self, ctx: Sequence[Binding]) -> str:
369
+ return f"{self.name}(device_type(), *this, {', '.join(a.expr for a in translate(ctx, self.arguments()))})"
370
+
371
+ # used in CUDA to skip the unnecessary dynamic dispatch
372
+ def direct_call(self, ctx: Sequence[Binding]) -> str:
373
+ return f"{self.kernel_name}(*this, {', '.join(a.expr for a in translate(ctx, self.arguments()))})"
374
+
375
+
376
+ @with_native_function
377
+ def compute_ufunc_cpu(g: NativeFunctionsGroup) -> str:
378
+ stub_sig = StubSignature(g)
379
+ sig = StructuredImplSignature(g, ufunc.kernel_name(g, DispatchKey.CPU))
380
+
381
+ return f"""
382
+ {stub_sig.type_defn()};
383
+ {stub_sig.dispatch_decl()};
384
+ {stub_sig.dispatch_defn()};
385
+
386
+ {sig.defn()} {{
387
+ {stub_sig.call(sig.arguments())};
388
+ }}
389
+ """
390
+
391
+
392
+ def compute_ufunc_cpu_dtype_body(
393
+ g: NativeFunctionsGroup,
394
+ dtype: ScalarType,
395
+ inner_loops: Dict[UfuncKey, UfuncSignature],
396
+ parent_ctx: Sequence[Binding],
397
+ ) -> str:
398
+ assert UfuncKey.CPUScalar in inner_loops, f"{dtype}, {inner_loops.keys()}"
399
+ assert inner_loops.keys() <= {UfuncKey.CPUScalar, UfuncKey.CPUVector}
400
+ scalar_loop = inner_loops[UfuncKey.CPUScalar]
401
+ vec_loop = None
402
+ if UfuncKey.CPUVector in inner_loops:
403
+ vec_loop = inner_loops[UfuncKey.CPUVector]
404
+
405
+ # NB: We DON'T use translate here, because translate is
406
+ # incapable of CSE'ing the scalar accesses in case it is also
407
+ # used by Vectorized; also, the unpacking here is very simple
408
+ # and only affects Scalar; everything else is implicitly captured
409
+ # by the lambda
410
+
411
+ # Setup scalar in scope
412
+ body = []
413
+ ctx = []
414
+ for b in parent_ctx:
415
+ if isinstance(b.argument, Argument) and b.argument.type != BaseType(
416
+ BaseTy.Scalar
417
+ ):
418
+ continue
419
+ body.append(f"auto _s_{b.name} = {b.name}.to<scalar_t>();")
420
+ ctx.append(Expr(f"_s_{b.name}", NamedCType(b.nctype.name, BaseCType(scalar_t))))
421
+ if vec_loop is not None:
422
+ for b in parent_ctx:
423
+ if isinstance(b.argument, Argument) and b.argument.type != BaseType(
424
+ BaseTy.Scalar
425
+ ):
426
+ continue
427
+ body.append(
428
+ f"auto _v_{b.name} = at::vec::Vectorized<scalar_t>(_s_{b.name});"
429
+ )
430
+ ctx.append(
431
+ Expr(
432
+ f"_v_{b.name}",
433
+ NamedCType(b.nctype.name, VectorizedCType(BaseCType(scalar_t))),
434
+ )
435
+ )
436
+
437
+ # Setup lambda signature
438
+ # NB: simplified version of ufunctor_arguments
439
+ scalar_bindings = []
440
+ vec_bindings = []
441
+ for a in g.functional.func.arguments.flat_non_out:
442
+ if not a.type.is_tensor_like():
443
+ continue
444
+ assert a.type == BaseType(BaseTy.Tensor)
445
+ scalar_bindings.append(
446
+ Binding(
447
+ name=a.name,
448
+ nctype=NamedCType(a.name, BaseCType(scalar_t)),
449
+ argument=a,
450
+ )
451
+ )
452
+ if vec_loop is not None:
453
+ vec_bindings.append(
454
+ Binding(
455
+ name=a.name,
456
+ nctype=NamedCType(a.name, VectorizedCType(BaseCType(scalar_t))),
457
+ argument=a,
458
+ )
459
+ )
460
+
461
+ def with_ctx(b: Sequence[Binding]) -> List[Union[Expr, Binding]]:
462
+ r: List[Union[Expr, Binding]] = []
463
+ r.extend(ctx)
464
+ r.extend(b)
465
+ return r
466
+
467
+ body_str = "\n".join(body)
468
+ if vec_loop is not None:
469
+ return f"""
470
+ {body_str}
471
+ cpu_kernel_vec(iter,
472
+ [=]({', '.join(b.decl() for b in scalar_bindings)}) {{ return {scalar_loop.call(with_ctx(scalar_bindings))}; }},
473
+ [=]({', '.join(b.decl() for b in vec_bindings)}) {{ return {vec_loop.call(with_ctx(vec_bindings))}; }}
474
+ );
475
+ """
476
+ else:
477
+ return f"""
478
+ {body_str}
479
+ cpu_kernel(iter,
480
+ [=]({', '.join(b.decl() for b in scalar_bindings)}) {{ return {scalar_loop.call(with_ctx(scalar_bindings))}; }}
481
+ );
482
+ """
483
+
484
+
485
+ @with_native_function
486
+ def compute_ufunc_cpu_kernel(g: NativeFunctionsGroup) -> str:
487
+ stub_sig = StubSignature(g)
488
+
489
+ # Reindex the ufunc by dtypes; processing generic/scalaronly as well
490
+ loops = g.out.ufunc_inner_loop
491
+ ufunc_sigs: Dict[ScalarType, Dict[UfuncKey, UfuncSignature]] = {}
492
+ for k in [UfuncKey.CPUScalar, UfuncKey.CPUVector]:
493
+ lks = []
494
+ # ORDER MATTERS: this specifies overriding precedence
495
+ if k in loops: # should happen rarely
496
+ lks.append(k)
497
+ if UfuncKey.ScalarOnly in loops and k is UfuncKey.CPUScalar:
498
+ lks.append(UfuncKey.ScalarOnly)
499
+ if UfuncKey.Generic in loops:
500
+ lks.append(UfuncKey.Generic)
501
+ # TODO: don't hardcode ufunc:: namespace here, should be centralized smh
502
+ for lk in lks:
503
+ for dtype in loops[lk].supported_dtypes:
504
+ compute_t: CType
505
+ if k is UfuncKey.CPUScalar:
506
+ compute_t = BaseCType(scalar_t)
507
+ elif k is UfuncKey.CPUVector:
508
+ compute_t = VectorizedCType(BaseCType(scalar_t))
509
+ else:
510
+ raise AssertionError()
511
+ inner_ufunc_sigs = ufunc_sigs.setdefault(dtype, {})
512
+ if k not in inner_ufunc_sigs:
513
+ inner_ufunc_sigs[k] = UfuncSignature(
514
+ g, name=f"ufunc::{loops[lk].name}", compute_t=compute_t
515
+ )
516
+
517
+ # Build the conditionals
518
+ dtype_cases = []
519
+ for dtype, inner_ufunc_sigs in ufunc_sigs.items():
520
+ dtype_cases.append(
521
+ f"""
522
+ AT_DISPATCH_CASE(at::ScalarType::{dtype},
523
+ [&]() {{
524
+ {compute_ufunc_cpu_dtype_body(g, dtype, inner_ufunc_sigs, stub_sig.arguments())}
525
+ }}
526
+ )
527
+ """
528
+ )
529
+
530
+ dtype_cases_str = "\n".join(dtype_cases)
531
+ return f"""
532
+ namespace {{
533
+
534
+ {stub_sig.kernel_defn()} {{
535
+ AT_DISPATCH_SWITCH(iter.common_dtype(), "{stub_sig.name}",
536
+ {dtype_cases_str}
537
+ );
538
+ }}
539
+
540
+ }} // anonymous namespace
541
+
542
+ {stub_sig.type_defn()};
543
+ {stub_sig.dispatch_decl()};
544
+ REGISTER_DISPATCH({stub_sig.name}, &{stub_sig.kernel_name});
545
+ """
llmeval-env/lib/python3.10/site-packages/torchgen/gen.py ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/torchgen/gen_aoti_c_shim.py ADDED
@@ -0,0 +1,431 @@
1
+ import textwrap
2
+ from dataclasses import dataclass
3
+ from typing import Dict, List, Optional, Sequence, Tuple, Union
4
+
5
+ from torchgen.api.types import DispatcherSignature
6
+ from torchgen.api.types.signatures import CppSignature, CppSignatureGroup
7
+
8
+ from torchgen.context import method_with_native_function
9
+ from torchgen.model import (
10
+ Argument,
11
+ BackendIndex,
12
+ BaseTy,
13
+ BaseType,
14
+ DispatchKey,
15
+ FunctionSchema,
16
+ ListType,
17
+ NativeFunction,
18
+ OptionalType,
19
+ Type,
20
+ )
21
+ from torchgen.utils import mapMaybe
22
+
23
+
24
+ def returns_are_all_tensor(schema: FunctionSchema) -> bool:
25
+ return len(schema.returns) != 0 and all(
26
+ ret.type.is_tensor_like() for ret in schema.returns
27
+ )
28
+
29
+
30
+ base_type_to_c_type = {
31
+ BaseTy.Tensor: "AtenTensorHandle",
32
+ BaseTy.bool: "int32_t", # Use int to pass bool
33
+ BaseTy.int: "int64_t",
34
+ BaseTy.SymInt: "int64_t", # Inductor-generated code won't see a SymInt
35
+ BaseTy.Scalar: "double", # Use double to pass both integer and floating point
36
+ BaseTy.float: "double", # TODO: how about other floating point types?
37
+ BaseTy.str: "const char*",
38
+ BaseTy.DeviceIndex: "int32_t",
39
+ BaseTy.Layout: "int32_t", # Represent enum as int
40
+ BaseTy.MemoryFormat: "int32_t", # Represent enum as int
41
+ BaseTy.ScalarType: "int32_t", # Represent enum as int
42
+ }
43
+
44
+ base_type_to_aten_type = {
45
+ BaseTy.Tensor: "at::Tensor",
46
+ BaseTy.bool: "bool",
47
+ BaseTy.int: "int64_t",
48
+ BaseTy.SymInt: "c10::SymInt",
49
+ BaseTy.Scalar: "c10::Scalar",
50
+ BaseTy.float: "double",
51
+ BaseTy.str: "c10::string_view",
52
+ BaseTy.DeviceIndex: "c10::DeviceIndex",
53
+ BaseTy.Layout: "c10::Layout",
54
+ BaseTy.MemoryFormat: "c10::MemoryFormat",
55
+ BaseTy.ScalarType: "c10::ScalarType",
56
+ }
57
+
58
+ base_type_to_callsite_expr = {
59
+ BaseTy.Tensor: "*tensor_handle_to_tensor_pointer",
60
+ BaseTy.bool: "",
61
+ BaseTy.int: "",
62
+ BaseTy.SymInt: "",
63
+ BaseTy.Scalar: "",
64
+ BaseTy.float: "",
65
+ BaseTy.str: "",
66
+ BaseTy.DeviceIndex: "static_cast<c10::DeviceIndex>",
67
+ BaseTy.Layout: "static_cast<c10::Layout>",
68
+ BaseTy.MemoryFormat: "static_cast<c10::MemoryFormat>",
69
+ BaseTy.ScalarType: "static_cast<c10::ScalarType>",
70
+ }
71
+
72
+
73
+ # Convert an argument into C types, declaration names, ATen types, and the callsite expressions used in the shim body
74
+ def convert_arg_type_and_name(typ: Type, name: str) -> Tuple[List[str], List[str], List[str], List[str]]: # type: ignore[return]
75
+ if isinstance(typ, BaseType):
76
+ if typ.name in base_type_to_c_type:
77
+ return (
78
+ [base_type_to_c_type[typ.name]],
79
+ [name],
80
+ [base_type_to_aten_type[typ.name]],
81
+ [
82
+ f"{base_type_to_callsite_expr[typ.name]}({name})"
83
+ if base_type_to_callsite_expr[typ.name]
84
+ else name
85
+ ],
86
+ )
87
+ elif typ.name == BaseTy.Device:
88
+ return (
89
+ ["int32_t", "int32_t"],
90
+ [name, name + "_index_"],
91
+ ["c10::Device"],
92
+ [
93
+ f"c10::Device(static_cast<c10::DeviceType>({name}), static_cast<c10::DeviceIndex>({name}_index_))"
94
+ ],
95
+ )
96
+ else:
97
+ # TODO: BaseTy.Dimname, BaseTy.Generator, etc.
98
+ raise NotImplementedError(f"TODO: add support for arg type {repr(typ)}")
99
+ elif isinstance(typ, OptionalType):
100
+ c_types, names, aten_types, callsite_exprs = convert_arg_type_and_name(
101
+ typ.elem, name
102
+ )
103
+ j = 0 # index for names
104
+ new_aten_types = []
105
+ new_callsite_exprs = []
106
+ for i, aten_type in enumerate(aten_types):
107
+ # Use pointer to denote optional type
108
+ c_types[j] = c_types[j] + "*"
109
+ if aten_type.startswith("c10::ArrayRef<"):
110
+ # ArrayRef is passed as pointer + size, but no need to add "*" to the size argument
111
+ new_aten_types.append(f"c10::optional<{aten_type}>")
112
+ base_type = aten_type[len("c10::ArrayRef<") : -1]
113
+ new_callsite_exprs.append(
114
+ f"pointer_to_optional_list<{base_type}>({names[j]}, {names[j+1]})"
115
+ )
116
+ j += 2
117
+ elif aten_type == "c10::Device":
118
+ # Device is passed as device_type + device_index
119
+ new_aten_types.append("c10::optional<c10::Device>")
120
+ new_callsite_exprs.append(
121
+ f"pointer_to_optional_device({names[j]}, {names[j+1]})"
122
+ )
123
+ j += 2
124
+ else:
125
+ new_aten_types.append(f"c10::optional<{aten_type}>")
126
+ new_callsite_exprs.append(
127
+ f"pointer_to_optional<{aten_type}>({names[j]})"
128
+ )
129
+ j += 1
130
+
131
+ return (
132
+ c_types,
133
+ names,
134
+ new_aten_types,
135
+ new_callsite_exprs,
136
+ )
137
+ elif isinstance(typ, ListType):
138
+ # Need to explicitly pass the list as pointer + length
139
+ c_types, names, aten_types, _ = convert_arg_type_and_name(typ.elem, name)
140
+ assert len(c_types) == 1, "ListType with unsupported element type " + repr(typ)
141
+
142
+ # The list content should never be modified
143
+ c_types[0] = f"const {c_types[0]}*"
144
+ c_types.append("int64_t")
145
+ name = names[0]
146
+ names.append(name + "_len_")
147
+
148
+ atype = aten_types[0]
149
+ callsite_exprs = []
150
+ if atype == "bool":
151
+ # no converter from std::vector<bool> to c10::ArrayRef<bool>
152
+ # construct std::array<bool, N> instead
153
+ assert typ.size is not None
154
+ callsite_exprs.append(f"pointer_to_list<{typ.size}>({name})")
155
+ elif atype == "c10::optional<at::Tensor>":
156
+ # convert from std::vector<c10::optional<at::Tensor>> to c10::List<c10::optional<at::Tensor>>
157
+ callsite_exprs.append(
158
+ f"c10::List<{atype}>(c10::ArrayRef<{atype}>(pointer_to_list<{atype}>({name}, {name}_len_)))"
159
+ )
160
+ else:
161
+ callsite_exprs.append(f"pointer_to_list<{atype}>({name}, {name}_len_)")
162
+
163
+ aten_types = [f"c10::ArrayRef<{t}>" for t in aten_types]
164
+ return (
165
+ c_types,
166
+ names,
167
+ aten_types,
168
+ callsite_exprs,
169
+ )
170
+
171
+
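A minimal usage sketch of convert_arg_type_and_name (illustrative only, not part of the diff): the expected values below are read off the type tables and the optional-handling branch above, and the BaseType/OptionalType constructors are assumed to take the fields shown in the torchgen.model import list.

from torchgen.gen_aoti_c_shim import convert_arg_type_and_name
from torchgen.model import BaseTy, BaseType, OptionalType

# An optional int argument named "dim": the trailing "*" in the C type marks it
# optional, and the callsite expression unwraps it back into a c10::optional.
c_types, names, aten_types, exprs = convert_arg_type_and_name(
    OptionalType(elem=BaseType(name=BaseTy.int)), "dim"
)
# Expected, per the code above:
#   c_types    == ["int64_t*"]
#   names      == ["dim"]
#   aten_types == ["c10::optional<int64_t>"]
#   exprs      == ["pointer_to_optional<int64_t>(dim)"]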
172
+ def zip_type_and_name(types: List[str], names: List[str]) -> List[str]:
173
+ return [typ + " " + name for typ, name in zip(types, names)]
174
+
175
+
176
+ # Generate argument declarations and callsite expressions
177
+ def gen_arguments(flat_arguments: Sequence[Argument]) -> Tuple[List[str], List[str]]:
178
+ types = []
179
+ new_names = []
180
+ callsite_exprs = []
181
+ for arg in flat_arguments:
182
+ new_types, names, _, new_callsite_exprs = convert_arg_type_and_name(
183
+ arg.type, arg.name
184
+ )
185
+ types.extend(new_types)
186
+ new_names.extend(names)
187
+ callsite_exprs.extend(new_callsite_exprs)
188
+ return zip_type_and_name(types, new_names), callsite_exprs
189
+
190
+
191
+ # Return values are passed out as pointer arguments because all the C shim functions
192
+ # are expected to return AOTITorchError.
193
+ # Generate returns as declarations and callsite expressions
194
+ def gen_returns(schema: FunctionSchema) -> Tuple[List[str], List[str]]:
195
+ types = []
196
+ names = []
197
+ for idx, ret in enumerate(schema.returns):
198
+ names.append(f"ret{idx}")
199
+ if isinstance(ret.type, BaseType) and ret.type.name in base_type_to_c_type:
200
+ types.append(base_type_to_c_type[ret.type.name] + "*")
201
+ else:
202
+ raise NotImplementedError(
203
+ f"TODO: add support for return type {repr(ret.type)}"
204
+ )
205
+
206
+ def convert_return(typ: BaseType, val: str) -> str:
207
+ if typ.name == BaseTy.Tensor:
208
+ return f"new_tensor_handle(std::move({val}));"
209
+ elif typ.name == BaseTy.SymInt:
210
+ return f"{val}.expect_int()"
211
+ elif typ.name == BaseTy.Scalar:
212
+ return f"{val}.toDouble()"
213
+ else:
214
+ return val
215
+
216
+ ret_pointer_can_be_null = False
217
+ unambiguous_name = schema.name.unambiguous_name()
218
+ for name in ["_scaled_dot_product_flash_attention"]:
219
+ if name in unambiguous_name:
220
+ ret_pointer_can_be_null = True
221
+ break
222
+
223
+ callsite_exprs: List[str] = []
224
+ for idx, ret in enumerate(schema.returns):
225
+ tmp = "tmp_result" if len(names) == 1 else f"std::get<{idx}>(tmp_result)"
226
+ assert isinstance(ret.type, BaseType)
227
+ rval = convert_return(ret.type, tmp)
228
+ if ret_pointer_can_be_null:
229
+ callsite_exprs.append(f"if ({names[idx]}) {{ *{names[idx]} = {rval}; }}")
230
+ else:
231
+ callsite_exprs.append(f"*{names[idx]} = {rval};")
232
+
233
+ return zip_type_and_name(types, names), callsite_exprs
234
+
235
+
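To make the pointer-out-argument convention above concrete, here is a hedged sketch of what gen_returns yields for a single Tensor return; FunctionSchema.parse is assumed to accept the native_functions.yaml-style schema string.

from torchgen.gen_aoti_c_shim import gen_returns
from torchgen.model import FunctionSchema

decls, assigns = gen_returns(FunctionSchema.parse("sin(Tensor self) -> Tensor"))
# decls   == ["AtenTensorHandle* ret0"]
# assigns == ["*ret0 = new_tensor_handle(std::move(tmp_result));;"]
#   (convert_return already appends a ';' for Tensor, so the generated C++ ends up
#    with a harmless double semicolon)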
236
+ # gen.py generates header first and then src, so caching the result here to avoid duplicate work
237
+ declaration_definition_cache: Dict[Tuple[str, str, str], Tuple[str, str]] = {}
238
+
239
+
240
+ def gen_declaration_and_definition(
241
+ schema: FunctionSchema, device: str, backend_call: str
242
+ ) -> Tuple[str, str]:
243
+ func_name = schema.name.unambiguous_name()
244
+
245
+ global declaration_definition_cache
246
+ if (func_name, device, backend_call) in declaration_definition_cache:
247
+ return declaration_definition_cache[(func_name, device, backend_call)]
248
+
249
+ if schema.is_out_fn():
250
+ # out_variant has out arguments in the front, and it's ok to ignore return value
251
+ # because C shim functions only return AOTITorchError
252
+ # Somehow at::native out-variant functions have out arguments in the back
253
+ args, callsite_exprs = gen_arguments(
254
+ [*schema.arguments.flat_non_out, *schema.arguments.out]
255
+ if "at::native" in backend_call
256
+ else [*schema.arguments.out, *schema.arguments.flat_non_out],
257
+ )
258
+ ret_assignments: List[str] = []
259
+ else:
260
+ args, callsite_exprs = gen_arguments(schema.arguments.flat_all)
261
+ ret_declarations, ret_assignments = gen_returns(schema)
262
+ args.extend(ret_declarations)
263
+
264
+ declaration = f"AOTITorchError aoti_torch_{device}_{func_name}({', '.join(args)})"
265
+
266
+ tmp_result = "auto tmp_result = " if ret_assignments else ""
267
+ ret_assignments_str = "\n" + "\n".join(ret_assignments) if ret_assignments else ""
268
+ definition = f"""
269
+ {declaration} {{
270
+ AOTI_TORCH_CONVERT_EXCEPTION_TO_ERROR_CODE({{
271
+ {tmp_result}{backend_call}(
272
+ {textwrap.indent(', '.join(callsite_exprs), " ")}
273
+ );{textwrap.indent(ret_assignments_str, " ")}
274
+ }});
275
+ }}
276
+ """
277
+ declaration_definition_cache[(func_name, device, backend_call)] = (
278
+ declaration,
279
+ definition,
280
+ )
281
+ return declaration, definition
282
+
283
+
284
+ def gen_static_dispatch_backend_call_signature(
285
+ sig: Union[CppSignature, DispatcherSignature],
286
+ f: NativeFunction,
287
+ ) -> CppSignature:
288
+ sig = DispatcherSignature.from_schema(f.func)
289
+ cpp_sigs = CppSignatureGroup.from_native_function(
290
+ f, method=False, fallback_binding=False
291
+ )
292
+ if sig.symint and f.func.has_symint():
293
+ cpp_sig = cpp_sigs.symint_signature
294
+ else:
295
+ cpp_sig = cpp_sigs.signature
296
+ assert cpp_sig is not None
297
+ return cpp_sig
298
+
299
+
300
+ def gen_static_dispatch_backend_call(
301
+ f: NativeFunction,
302
+ backend_index: BackendIndex,
303
+ ) -> str:
304
+ assert backend_index.has_kernel(f)
305
+ sig = DispatcherSignature.from_schema(f.func)
306
+ cpp_sig = gen_static_dispatch_backend_call_signature(sig, f)
307
+ return f"at::{backend_index.dispatch_key.lower()}::{cpp_sig.name()}"
308
+
309
+
310
+ def get_backend_index_for_aoti(
311
+ f: NativeFunction,
312
+ dispatch_key: DispatchKey,
313
+ backend_indices: Dict[DispatchKey, BackendIndex],
314
+ ) -> Optional[BackendIndex]:
315
+ if "pointwise" in f.tags:
316
+ # TODO: No need to generate C shim for Inductor lowered ops.
317
+ # Only skip pointwise kernels for now, and we can add more tags later.
318
+ return None
319
+
320
+ backend_index = None
321
+ if backend_indices[dispatch_key].has_kernel(f):
322
+ backend_index = backend_indices[dispatch_key]
323
+ elif backend_indices[DispatchKey.CompositeExplicitAutograd].has_kernel(f):
324
+ # We need to create C shim wrappers for CompositeExplicitAutograd kernels
325
+ backend_index = backend_indices[DispatchKey.CompositeExplicitAutograd]
326
+ elif backend_indices[DispatchKey.CompositeExplicitAutogradNonFunctional].has_kernel(
327
+ f
328
+ ):
329
+ # We need to create C shim wrappers for CompositeExplicitAutogradNonFunctional kernels
330
+ backend_index = backend_indices[
331
+ DispatchKey.CompositeExplicitAutogradNonFunctional
332
+ ]
333
+ return backend_index
334
+
335
+
336
+ def gen_c_shim(
337
+ f: NativeFunction,
338
+ dispatch_key: DispatchKey,
339
+ backend_indices: Dict[DispatchKey, BackendIndex],
340
+ header: bool,
341
+ ) -> Optional[str]:
342
+ backend_index = get_backend_index_for_aoti(f, dispatch_key, backend_indices)
343
+ if backend_index is None:
344
+ return None
345
+
346
+ schema = f.func
347
+ device = dispatch_key.lower()
348
+ backend_call = gen_static_dispatch_backend_call(
349
+ f,
350
+ backend_index,
351
+ )
352
+
353
+ try:
354
+ if header:
355
+ declaration, _ = gen_declaration_and_definition(
356
+ schema, device, backend_call
357
+ )
358
+ return f"AOTI_TORCH_EXPORT {declaration};"
359
+ else:
360
+ _, definition = gen_declaration_and_definition(schema, device, backend_call)
361
+ return definition
362
+
363
+ except NotImplementedError:
364
+ return None
365
+
366
+
367
+ @dataclass(frozen=True)
368
+ class ShimGenerator:
369
+ dispatch_key: DispatchKey
370
+ backend_indices: Dict[DispatchKey, BackendIndex]
371
+ header: bool # True to generate .h and False to generate .cpp
372
+
373
+ @method_with_native_function
374
+ def __call__(self, f: NativeFunction) -> Optional[str]:
375
+ result = gen_c_shim(f, self.dispatch_key, self.backend_indices, self.header)
376
+ return result
377
+
378
+
379
+ def gen_aoti_c_shim(
380
+ native_functions: Sequence[NativeFunction],
381
+ dispatch_key: DispatchKey,
382
+ backend_indices: Dict[DispatchKey, BackendIndex],
383
+ header: bool,
384
+ includes: str = "",
385
+ ) -> str:
386
+ body = "\n".join(
387
+ list(
388
+ mapMaybe(
389
+ ShimGenerator(dispatch_key, backend_indices, header),
390
+ native_functions,
391
+ )
392
+ )
393
+ )
394
+
395
+ if header:
396
+ return f"""
397
+ #pragma once
398
+
399
+ #include <torch/csrc/inductor/aoti_torch/c/shim.h>
400
+
401
+ #ifdef __cplusplus
402
+ extern "C" {{
403
+ #endif
404
+
405
+ {body}
406
+
407
+ #ifdef __cplusplus
408
+ }} // extern "C"
409
+ #endif
410
+
411
+ """
412
+ else:
413
+ device = dispatch_key.lower()
414
+ return f"""
415
+ #include <torch/csrc/inductor/aoti_torch/tensor_converter.h>
416
+ #include <torch/csrc/inductor/aoti_torch/utils.h>
417
+ #include <torch/csrc/inductor/aoti_torch/generated/c_shim_{device}.h>
418
+
419
+ #ifndef AT_PER_OPERATOR_HEADERS
420
+ #include <ATen/{str(dispatch_key)}Functions.h>
421
+ #include <ATen/CompositeExplicitAutogradFunctions.h>
422
+ #include <ATen/CompositeExplicitAutogradNonFunctionalFunctions.h>
423
+ #else
424
+ {includes}
425
+ #endif
426
+
427
+ using namespace torch::aot_inductor;
428
+
429
+ {body}
430
+
431
+ """
llmeval-env/lib/python3.10/site-packages/torchgen/gen_backend_stubs.py ADDED
@@ -0,0 +1,609 @@
1
+ import argparse
2
+ import os
3
+ import pathlib
4
+ import re
5
+ from collections import Counter, defaultdict, namedtuple
6
+ from typing import Dict, List, Optional, Sequence, Set, Union
7
+
8
+ import yaml
9
+
10
+ import torchgen.api.dispatcher as dispatcher
11
+ import torchgen.dest as dest
12
+ from torchgen.api.types import DispatcherSignature
13
+ from torchgen.code_template import CodeTemplate
14
+ from torchgen.context import native_function_manager
15
+ from torchgen.gen import get_grouped_native_functions, parse_native_yaml
16
+ from torchgen.model import (
17
+ BackendIndex,
18
+ BackendMetadata,
19
+ DispatchKey,
20
+ NativeFunction,
21
+ NativeFunctionsGroup,
22
+ OperatorName,
23
+ )
24
+ from torchgen.selective_build.selector import SelectiveBuilder
25
+ from torchgen.utils import concatMap, context, FileManager, NamespaceHelper, Target
26
+ from torchgen.yaml_utils import YamlLoader
27
+
28
+
29
+ # Parses the external backend's yaml, and adds a new BackendIndex for the backend's dispatch key.
30
+ # Returns a Tuple of (backend_key, autograd_key, class_name, cpp_namespace, updated BackendIndex mapping)
31
+ ParsedExternalYaml = namedtuple(
32
+ "ParsedExternalYaml",
33
+ ["backend_key", "autograd_key", "class_name", "cpp_namespace", "backend_indices"],
34
+ )
35
+
36
+
37
+ def parse_backend_yaml(
38
+ backend_yaml_path: str,
39
+ grouped_native_functions: Sequence[Union[NativeFunction, NativeFunctionsGroup]],
40
+ backend_indices: Dict[DispatchKey, BackendIndex],
41
+ ) -> ParsedExternalYaml:
42
+ native_functions_map: Dict[OperatorName, NativeFunction] = {
43
+ f.func.name: f
44
+ for f in concatMap(
45
+ lambda f: [f] if isinstance(f, NativeFunction) else list(f.functions()),
46
+ grouped_native_functions,
47
+ )
48
+ }
49
+
50
+ with open(backend_yaml_path) as f:
51
+ yaml_values = yaml.load(f, Loader=YamlLoader)
52
+ assert isinstance(yaml_values, dict)
53
+
54
+ valid_keys = [
55
+ "backend",
56
+ "class_name",
57
+ "cpp_namespace",
58
+ "extra_headers",
59
+ "supported",
60
+ "autograd",
61
+ "full_codegen",
62
+ "non_native",
63
+ "ir_gen",
64
+ "symint",
65
+ ]
66
+
67
+ backend = yaml_values.pop("backend", None)
68
+ assert backend is not None, 'You must provide a value for "backend"'
69
+
70
+ class_name = yaml_values.pop("class_name", None)
71
+
72
+ cpp_namespace = yaml_values.pop("cpp_namespace", None)
73
+ assert cpp_namespace is not None, 'You must provide a value for "cpp_namespace"'
74
+
75
+ # Mostly just defaulting to false to stick with LazyTensor convention.
76
+ use_out_as_primary = yaml_values.pop("use_out_as_primary", False)
77
+ assert isinstance(
78
+ use_out_as_primary, bool
79
+ ), f"You must provide either True or False for use_out_as_primary. Provided: {use_out_as_primary}"
80
+
81
+ use_device_guard = yaml_values.pop("device_guard", False)
82
+ assert isinstance(
83
+ use_device_guard, bool
84
+ ), f"You must provide either True or False for device_guard. Provided: {use_device_guard}"
85
+
86
+ supported = yaml_values.pop("supported", [])
87
+ if supported is None:
88
+ supported = [] # Allow an empty list of supported ops
89
+ assert isinstance(
90
+ supported, list
91
+ ), f'expected "supported" to be a list, but got: {supported} (of type {type(supported)})'
92
+
93
+ symint = yaml_values.pop("symint", [])
94
+ if symint is None:
95
+ symint = [] # Allow an empty list of symint ops
96
+ assert isinstance(
97
+ symint, list
98
+ ), f'expected "symint" to be a list, but got: {supported} (of type {type(supported)})'
99
+ symint_set = set(symint)
100
+
101
+ supported_autograd = yaml_values.pop("autograd", [])
102
+ assert isinstance(
103
+ supported_autograd, list
104
+ ), f'expected "autograd" to be a list, but got: {supported_autograd}'
105
+
106
+ # full_codegen is ignored by parse_backend_yaml, and re-parsed in gen_lazy_tensor.py
107
+ full_codegen = yaml_values.pop("full_codegen", [])
108
+ supported.extend(full_codegen)
109
+
110
+ # non_native is ignored by parse_backend_yaml, and re-parsed in gen_lazy_tensor.py
111
+ non_native = yaml_values.pop("non_native", {})
112
+
113
+ # ir_gen is ignored by parse_backend_yaml, and re-parsed in gen_lazy_tensor.py
114
+ _ = yaml_values.pop("ir_gen", {})
115
+
116
+ assert (
117
+ len(yaml_values.keys()) == 0
118
+ ), f'{backend_yaml_path} contains unexpected keys: {", ".join(yaml_values.keys())}. \
119
+ Only the following keys are supported: {", ".join(valid_keys)}'
120
+
121
+ def create_backend_index(
122
+ backend_ops: List[str],
123
+ symint_ops: Set[str],
124
+ dispatch_key: DispatchKey,
125
+ *,
126
+ use_out_as_primary: bool,
127
+ use_device_guard: bool,
128
+ ) -> BackendIndex:
129
+ metadata: Dict[OperatorName, BackendMetadata] = {}
130
+ for op in backend_ops:
131
+ op_name = OperatorName.parse(op)
132
+ assert (
133
+ op_name in native_functions_map
134
+ ), f"Found an invalid operator name: {op_name}"
135
+ # See Note [External Backends Follow Dispatcher API]
136
+ kernel_name = dispatcher.name(native_functions_map[op_name].func)
137
+ if op in symint_ops:
138
+ kernel_name += "_symint"
139
+ # TODO: allow structured external backends later.
140
+ m = BackendMetadata(
141
+ kernel=kernel_name, structured=False, cpp_namespace=cpp_namespace
142
+ )
143
+ metadata[op_name] = m
144
+ return BackendIndex(
145
+ dispatch_key=dispatch_key,
146
+ use_out_as_primary=use_out_as_primary,
147
+ external=True,
148
+ device_guard=use_device_guard,
149
+ index=metadata,
150
+ )
151
+
152
+ backend_key: Optional[DispatchKey] = None
153
+ if len(supported) > 0:
154
+ with context(
155
+ lambda: f'The provided value for "backend" must be a valid DispatchKey, but got {backend}.'
156
+ ):
157
+ backend_key = DispatchKey.parse(backend)
158
+
159
+ backend_idx = create_backend_index(
160
+ supported,
161
+ symint_set,
162
+ backend_key,
163
+ use_out_as_primary=use_out_as_primary,
164
+ use_device_guard=use_device_guard,
165
+ )
166
+ assert backend_key not in backend_indices
167
+ backend_indices[backend_key] = backend_idx
168
+
169
+ autograd_key: Optional[DispatchKey] = None
170
+ if len(supported_autograd) > 0:
171
+ with context(
172
+ lambda: f'The "autograd" key was specified, which indicates that you would like to override \
173
+ the behavior of autograd for some operators on your backend. However "Autograd{backend}" is not a valid DispatchKey.'
174
+ ):
175
+ autograd_key = DispatchKey.parse(f"Autograd{backend}")
176
+
177
+ autograd_idx = create_backend_index(
178
+ supported_autograd,
179
+ symint_set,
180
+ autograd_key,
181
+ use_out_as_primary=use_out_as_primary,
182
+ use_device_guard=use_device_guard,
183
+ )
184
+ assert autograd_key not in backend_indices
185
+ backend_indices[autograd_key] = autograd_idx
186
+
187
+ for g in grouped_native_functions:
188
+ if isinstance(g, NativeFunction):
189
+ forward_kernels = (
190
+ []
191
+ if backend_key is None
192
+ else [
193
+ m
194
+ for m in [backend_indices[backend_key].get_kernel(g)]
195
+ if m is not None
196
+ ]
197
+ )
198
+ backward_kernels = (
199
+ []
200
+ if autograd_key is None
201
+ else [
202
+ m
203
+ for m in [backend_indices[autograd_key].get_kernel(g)]
204
+ if m is not None
205
+ ]
206
+ )
207
+ else:
208
+ forward_kernels = (
209
+ []
210
+ if backend_key is None
211
+ else [
212
+ m
213
+ for m in [
214
+ backend_indices[backend_key].get_kernel(f)
215
+ for f in g.functions()
216
+ ]
217
+ if m is not None
218
+ ]
219
+ )
220
+ backward_kernels = (
221
+ []
222
+ if autograd_key is None
223
+ else [
224
+ m
225
+ for m in [
226
+ backend_indices[autograd_key].get_kernel(f)
227
+ for f in g.functions()
228
+ ]
229
+ if m is not None
230
+ ]
231
+ )
232
+
233
+ forward_kernels = [f for f in forward_kernels if f is not None]
234
+ backward_kernels = [f for f in backward_kernels if f is not None]
235
+ assert (
236
+ len(forward_kernels) == 0 or len(backward_kernels) == 0
237
+ ), f'Currently, all variants of an op must either be registered to a backend key, or to a backend\'s \
238
+ autograd key. They cannot be mixed and matched. If this is something you need, feel free to create an issue! \
239
+ {forward_kernels[0].kernel} is listed under "supported", but {backward_kernels[0].kernel} is listed under "autograd".'
240
+
241
+ return ParsedExternalYaml(
242
+ backend_key, autograd_key, class_name, cpp_namespace, backend_indices
243
+ )
244
+
245
+
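A minimal sketch of feeding parse_backend_yaml a hand-written backend yaml; the XLA backend, the torch_xla namespace, and the temporary file are illustrative, and the native yaml paths assume a PyTorch checkout just as run() does further down.

import tempfile

from torchgen.gen import get_grouped_native_functions, parse_native_yaml
from torchgen.gen_backend_stubs import parse_backend_yaml

example_yaml = """\
backend: XLA
cpp_namespace: torch_xla
supported:
- add.Tensor
"""
with tempfile.NamedTemporaryFile("w", suffix=".yaml", delete=False) as tmp:
    tmp.write(example_yaml)

parsed_native = parse_native_yaml(
    "aten/src/ATen/native/native_functions.yaml",
    "aten/src/ATen/native/tags.yaml",
)
grouped = get_grouped_native_functions(parsed_native.native_functions)
parsed = parse_backend_yaml(tmp.name, grouped, parsed_native.backend_indices)
# parsed.backend_key  -> DispatchKey.XLA
# parsed.autograd_key -> None (the example yaml has no "autograd" section)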
246
+ def error_on_missing_kernels(
247
+ native_functions: Sequence[NativeFunction],
248
+ backend_indices: Dict[DispatchKey, BackendIndex],
249
+ backend_key: DispatchKey,
250
+ autograd_key: Optional[DispatchKey],
251
+ class_name: str,
252
+ kernel_defn_file_path: str,
253
+ full_codegen: Optional[List[OperatorName]] = None,
254
+ ) -> None:
255
+ try:
256
+ with open(kernel_defn_file_path) as f:
257
+ backend_defns = f.read()
258
+ except OSError as e:
259
+ raise AssertionError(
260
+ f"Unable to read from the specified impl_path file: {kernel_defn_file_path}"
261
+ ) from e
262
+
263
+ if full_codegen is None:
264
+ full_codegen = []
265
+
266
+ indices = [backend_indices[backend_key].index] + (
267
+ [] if autograd_key is None else [backend_indices[autograd_key].index]
268
+ )
269
+ # Quick mapping from each OperatorName used by the external backend
270
+ # to its backend kernel name
271
+ expected_backend_op_names: Dict[OperatorName, str] = dict(
272
+ list(
273
+ concatMap(
274
+ lambda index: [
275
+ (op_name, metadata.kernel) for op_name, metadata in index.items()
276
+ ],
277
+ indices,
278
+ )
279
+ )
280
+ )
281
+ expected_backend_native_funcs: List[NativeFunction] = [
282
+ f
283
+ for f in native_functions
284
+ if f.func.name in expected_backend_op_names.keys()
285
+ and f.func.name not in full_codegen
286
+ ]
287
+ expected_backend_kernel_name_counts: Dict[str, List[NativeFunction]] = defaultdict(
288
+ list
289
+ )
290
+ for native_f in expected_backend_native_funcs:
291
+ expected_backend_kernel_name_counts[
292
+ expected_backend_op_names[native_f.func.name]
293
+ ].append(native_f)
294
+
295
+ # This just looks for lines containing "foo(", and assumes that the kernel foo has been implemented.
296
+ # It might cause false negatives (we won't catch all cases), but that's ok - if we catch a missing kernel
297
+ # here, then we get a nicer error message. If we miss it, you get a linker error.
298
+ kernel_defn_regex = rf"(.*){class_name}::\s*([\w\d]*)\("
299
+ actual_backend_kernel_name_counts = Counter(
300
+ # A bit unwieldy (this could probably be moved into regex),
301
+ # but we don't want to include kernel names that come from function calls,
302
+ # like "return torch_xla::XLANativeFunctions::empty_strided_symint(...)".
303
+ # Easy check is to ignore any lines with colons before the class name.
304
+ [
305
+ y
306
+ for (x, y) in re.findall(kernel_defn_regex, backend_defns)
307
+ if not x.endswith(":")
308
+ ]
309
+ )
310
+
311
+ missing_kernels_err_msg = ""
312
+ for expected_name, funcs in expected_backend_kernel_name_counts.items():
313
+ expected_overload_count = len(funcs)
314
+ actual_overload_count = actual_backend_kernel_name_counts[expected_name]
315
+ if expected_overload_count != actual_overload_count:
316
+
317
+ def create_decl(f: NativeFunction) -> str:
318
+ with native_function_manager(f):
319
+ return DispatcherSignature.from_schema(f.func).decl()
320
+
321
+ expected_schemas_str = "\n".join([create_decl(f) for f in funcs])
322
+ missing_kernels_err_msg += f"""
323
+ {class_name} is missing a kernel definition for {expected_name}. We found {actual_overload_count} kernel(s) with that name,
324
+ but expected {expected_overload_count} kernel(s). The expected function schemas for the missing operator are:
325
+ {expected_schemas_str}
326
+
327
+ """
328
+ assert missing_kernels_err_msg == "", missing_kernels_err_msg
329
+
330
+
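To make the kernel-counting heuristic above concrete, a self-contained check of the same regex and leading-colon filter against two sample lines; XLANativeFunctions is a hypothetical backend class name.

import re

class_name = "XLANativeFunctions"  # hypothetical external-backend class
kernel_defn_regex = rf"(.*){class_name}::\s*([\w\d]*)\("
backend_defns = """
at::Tensor XLANativeFunctions::add(const at::Tensor& self, const at::Tensor& other) {
  return torch_xla::XLANativeFunctions::empty_strided_symint(sizes, strides);
"""

counted = [
    y for (x, y) in re.findall(kernel_defn_regex, backend_defns) if not x.endswith(":")
]
# counted == ["add"]: the call through "::" on the second line is filtered out
# because the text before the class name ends with a colon.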
331
+ def main() -> None:
332
+ parser = argparse.ArgumentParser(description="Generate backend stub files")
333
+ parser.add_argument(
334
+ "-s",
335
+ "--source-yaml",
336
+ "--source_yaml",
337
+ help="path to source yaml file containing operator external definitions",
338
+ )
339
+ parser.add_argument("-o", "--output-dir", "--output_dir", help="output directory")
340
+ parser.add_argument(
341
+ "--dry-run", "--dry_run", type=bool, default=False, help="output directory"
342
+ )
343
+ parser.add_argument(
344
+ "--impl-path",
345
+ "--impl_path",
346
+ type=str,
347
+ default=None,
348
+ help="path to the source C++ file containing kernel definitions",
349
+ )
350
+ options = parser.parse_args()
351
+
352
+ run(options.source_yaml, options.output_dir, options.dry_run, options.impl_path)
353
+
354
+
355
+ def gen_dispatchkey_nativefunc_headers(
356
+ fm: FileManager,
357
+ class_name: str,
358
+ cpp_namespace: str,
359
+ backend_indices: Dict[DispatchKey, BackendIndex],
360
+ grouped_native_functions: Sequence[Union[NativeFunction, NativeFunctionsGroup]],
361
+ backend_dispatch_key: DispatchKey,
362
+ autograd_dispatch_key: Optional[DispatchKey],
363
+ backend_name: str = "",
364
+ ) -> None:
365
+ assert class_name is not None
366
+ generated_comment = (
367
+ "Autogenerated file by gen_backend_stubs.py. Do not edit directly!"
368
+ )
369
+
370
+ # Convert to a set first to remove duplicate kernel names.
371
+ # Backends are allowed to repeat kernel names; only generate the declaration once!
372
+ # Sort for deterministic output.
373
+ backend_declarations = sorted(
374
+ set(
375
+ concatMap(
376
+ lambda f: dest.compute_native_function_declaration(
377
+ f, backend_indices[backend_dispatch_key]
378
+ ),
379
+ grouped_native_functions,
380
+ )
381
+ )
382
+ )
383
+ autograd_declarations = sorted(
384
+ set(
385
+ concatMap(
386
+ lambda f: []
387
+ if autograd_dispatch_key is None
388
+ else dest.compute_native_function_declaration(
389
+ f, backend_indices[autograd_dispatch_key]
390
+ ),
391
+ grouped_native_functions,
392
+ )
393
+ )
394
+ )
395
+
396
+ ns_helper = NamespaceHelper(cpp_namespace)
397
+ fm.write_with_template(
398
+ f"{backend_dispatch_key}NativeFunctions.h",
399
+ "DispatchKeyNativeFunctions.h",
400
+ lambda: {
401
+ "generated_comment": generated_comment,
402
+ "namespace_prologue": ns_helper.prologue,
403
+ "class_name": class_name,
404
+ "namespace_epilogue": ns_helper.epilogue,
405
+ "dispatch_declarations": backend_declarations + autograd_declarations,
406
+ "BackendName": backend_name,
407
+ "DispatchKey": backend_dispatch_key,
408
+ },
409
+ )
410
+
411
+
412
+ def gen_dispatcher_registrations(
413
+ fm: FileManager,
414
+ output_dir: str,
415
+ class_name: str,
416
+ backend_indices: Dict[DispatchKey, BackendIndex],
417
+ grouped_native_functions: Sequence[Union[NativeFunction, NativeFunctionsGroup]],
418
+ backend_dispatch_key: DispatchKey,
419
+ dispatch_key: DispatchKey,
420
+ selector: "SelectiveBuilder",
421
+ # build_in_tree is true for lazy TS backend and affects include paths, not used for external backends
422
+ build_in_tree: bool = False,
423
+ per_operator_headers: bool = False,
424
+ backend_name: str = "",
425
+ eager_registration: bool = True,
426
+ ) -> None:
427
+ headers = [
428
+ f"{output_dir}/{backend_dispatch_key}NativeFunctions.h",
429
+ ]
430
+ if build_in_tree:
431
+ external_backend_headers_str = "\n".join(f"#include <{h}>" for h in headers)
432
+ else:
433
+ external_backend_headers_str = "\n".join(f'#include "{h}"' for h in headers)
434
+
435
+ assert class_name is not None
436
+ backend_index = backend_indices[dispatch_key]
437
+
438
+ dispatch_registrations_body = list(
439
+ concatMap(
440
+ dest.RegisterDispatchKey(
441
+ backend_index,
442
+ Target.REGISTRATION,
443
+ selector,
444
+ rocm=False,
445
+ symint=True,
446
+ class_method_name=f"{class_name}",
447
+ skip_dispatcher_op_registration=False,
448
+ ),
449
+ grouped_native_functions,
450
+ )
451
+ )
452
+ newline = "\n"
453
+ ns_helper = NamespaceHelper(namespace_str="at")
454
+ deferred_dispatch_registrations = ""
455
+ static_init_dispatch_registrations = ""
456
+ if eager_registration:
457
+ static_template = CodeTemplate(
458
+ """\
459
+ TORCH_LIBRARY_IMPL(aten, $dispatch_key, m) {
460
+ $dispatch_registrations_body
461
+ };"""
462
+ )
463
+ static_init_dispatch_registrations = static_template.substitute(
464
+ dispatch_key=dispatch_key,
465
+ dispatch_registrations_body=dispatch_registrations_body,
466
+ )
467
+ else:
468
+ deferred_template = CodeTemplate(
469
+ """\
470
+ TORCH_API void Register${backend_name}${dispatch_key}NativeFunctions();
471
+ TORCH_API void Register${backend_name}${dispatch_key}NativeFunctions() {
472
+ static auto m = MAKE_TORCH_LIBRARY_IMPL(aten, $dispatch_key);
473
+ $dispatch_registrations_body
474
+ }"""
475
+ )
476
+ deferred_dispatch_registrations = deferred_template.substitute(
477
+ backend_name=backend_name,
478
+ dispatch_key=dispatch_key,
479
+ dispatch_registrations_body=dispatch_registrations_body,
480
+ )
481
+
482
+ fm.write_with_template(
483
+ f"Register{dispatch_key}.cpp",
484
+ "RegisterDispatchKey.cpp",
485
+ lambda: {
486
+ "extra_cuda_headers": "",
487
+ "external_backend_headers": external_backend_headers_str,
488
+ "ops_headers": "#include <ATen/Functions.h>"
489
+ if not per_operator_headers
490
+ else "",
491
+ "DispatchKey": dispatch_key,
492
+ "dispatch_namespace": dispatch_key.lower(),
493
+ "dispatch_headers": dest.gen_registration_headers(
494
+ backend_index, per_operator_headers=per_operator_headers, rocm=False
495
+ ),
496
+ "dispatch_definitions": fm.substitute_with_template(
497
+ "RegisterDispatchDefinitions.ini",
498
+ lambda: {
499
+ "ns_prologue": ns_helper.prologue,
500
+ "ns_epilogue": ns_helper.epilogue,
501
+ "static_init_dispatch_registrations": static_init_dispatch_registrations,
502
+ "deferred_dispatch_registrations": deferred_dispatch_registrations,
503
+ "dispatch_helpers": dest.gen_registration_helpers(backend_index),
504
+ "dispatch_namespace": dispatch_key.lower(),
505
+ "dispatch_namespaced_definitions": "",
506
+ "dispatch_anonymous_definitions": list(
507
+ concatMap(
508
+ dest.RegisterDispatchKey(
509
+ backend_index,
510
+ Target.ANONYMOUS_DEFINITION,
511
+ selector,
512
+ rocm=False,
513
+ symint=True,
514
+ class_method_name=f"{class_name}",
515
+ skip_dispatcher_op_registration=False,
516
+ ),
517
+ grouped_native_functions,
518
+ )
519
+ ),
520
+ },
521
+ ).split(newline),
522
+ },
523
+ )
524
+
525
+
526
+ def run(
527
+ source_yaml: str, output_dir: str, dry_run: bool, impl_path: Optional[str] = None
528
+ ) -> None:
529
+ # Assumes that this file lives at PYTORCH_ROOT/torchgen/gen_backend_stubs.py
530
+ pytorch_root = pathlib.Path(__file__).parent.parent.absolute()
531
+ template_dir = os.path.join(pytorch_root, "aten/src/ATen/templates")
532
+
533
+ def make_file_manager(install_dir: str) -> FileManager:
534
+ return FileManager(
535
+ install_dir=install_dir, template_dir=template_dir, dry_run=dry_run
536
+ )
537
+
538
+ fm = make_file_manager(output_dir)
539
+
540
+ native_yaml_path = os.path.join(
541
+ pytorch_root, "aten/src/ATen/native/native_functions.yaml"
542
+ )
543
+ tags_yaml_path = os.path.join(pytorch_root, "aten/src/ATen/native/tags.yaml")
544
+ parsed_yaml = parse_native_yaml(native_yaml_path, tags_yaml_path)
545
+ native_functions, backend_indices = (
546
+ parsed_yaml.native_functions,
547
+ parsed_yaml.backend_indices,
548
+ )
549
+ grouped_native_functions = get_grouped_native_functions(native_functions)
550
+ parsed_backend_yaml = parse_backend_yaml(
551
+ source_yaml, grouped_native_functions, backend_indices
552
+ )
553
+ backend_key = parsed_backend_yaml.backend_key
554
+ autograd_key = parsed_backend_yaml.autograd_key
555
+ cpp_namespace = parsed_backend_yaml.cpp_namespace
556
+ class_name = parsed_backend_yaml.class_name
557
+ backend_indices = parsed_backend_yaml.backend_indices
558
+
559
+ selector = SelectiveBuilder.get_nop_selector()
560
+
561
+ if backend_key is None:
562
+ # This could be useful if a backend wants to quickly set up a noop yaml file but doesn't have any kernels ready yet.
563
+ return
564
+
565
+ if class_name is None:
566
+ # class_name is an optional argument to backend yaml file.
567
+ # if specified it allows an external backend to override
568
+ # the name of the class that all generated kernel definitions live under.
569
+ # if not specified, its value is given as native_function_class_name.
570
+ class_name = backend_indices[backend_key].native_function_class_name()
571
+ assert class_name is not None
572
+
573
+ if impl_path is not None:
574
+ error_on_missing_kernels(
575
+ native_functions,
576
+ backend_indices,
577
+ backend_key,
578
+ autograd_key,
579
+ class_name,
580
+ impl_path,
581
+ )
582
+
583
+ gen_dispatchkey_nativefunc_headers(
584
+ fm,
585
+ class_name,
586
+ cpp_namespace,
587
+ backend_indices,
588
+ grouped_native_functions,
589
+ backend_key,
590
+ autograd_key,
591
+ )
592
+
593
+ for dispatch_key in (
594
+ [backend_key] if autograd_key is None else [backend_key, autograd_key]
595
+ ):
596
+ gen_dispatcher_registrations(
597
+ fm,
598
+ output_dir,
599
+ class_name,
600
+ backend_indices,
601
+ grouped_native_functions,
602
+ backend_key,
603
+ dispatch_key,
604
+ selector,
605
+ )
606
+
607
+
608
+ if __name__ == "__main__":
609
+ main()
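A hedged sketch of invoking the generator programmatically instead of through main(); the source yaml name and output directory are hypothetical.

from torchgen.gen_backend_stubs import run

run(
    source_yaml="xla_native_functions.yaml",  # hypothetical external-backend yaml
    output_dir="generated",                   # hypothetical output directory
    dry_run=True,    # with dry_run the FileManager skips the actual file writes
    impl_path=None,  # skip error_on_missing_kernels when no kernel .cpp is supplied
)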
llmeval-env/lib/python3.10/site-packages/torchgen/gen_executorch.py ADDED
@@ -0,0 +1,995 @@
1
+ import argparse
2
+ import os
3
+ import pathlib
4
+ from collections import defaultdict
5
+ from dataclasses import dataclass
6
+ from typing import Any, Callable, Dict, List, Optional, Sequence, TextIO, Tuple, Union
7
+
8
+ import yaml
9
+
10
+ # Parse native_functions.yaml into a sequence of NativeFunctions and Backend Indices.
11
+ from torchgen import dest
12
+ from torchgen.api import cpp as aten_cpp
13
+ from torchgen.api.types import CppSignature, CppSignatureGroup, CType, NamedCType
14
+ from torchgen.context import (
15
+ method_with_native_function,
16
+ method_with_nested_native_function,
17
+ with_native_function_and_index,
18
+ )
19
+ from torchgen.executorch.api import et_cpp
20
+ from torchgen.executorch.api.custom_ops import (
21
+ ComputeNativeFunctionStub,
22
+ gen_custom_ops_registration,
23
+ )
24
+ from torchgen.executorch.api.types import contextArg, ExecutorchCppSignature
25
+ from torchgen.executorch.api.unboxing import Unboxing
26
+ from torchgen.executorch.model import ETKernelIndex, ETKernelKey, ETParsedYaml
27
+ from torchgen.executorch.parse import ET_FIELDS, parse_et_yaml, parse_et_yaml_struct
28
+ from torchgen.gen import (
29
+ get_custom_build_selector,
30
+ get_native_function_declarations,
31
+ get_native_function_declarations_from_ns_grouped_kernels,
32
+ get_native_function_schema_registrations,
33
+ LineLoader,
34
+ parse_native_yaml,
35
+ )
36
+ from torchgen.model import (
37
+ BackendIndex,
38
+ BackendMetadata,
39
+ DEFAULT_KERNEL_NAMESPACE,
40
+ DispatchKey,
41
+ FunctionSchema,
42
+ Location,
43
+ NativeFunction,
44
+ NativeFunctionsGroup,
45
+ OperatorName,
46
+ Variant,
47
+ )
48
+ from torchgen.selective_build.selector import SelectiveBuilder
49
+ from torchgen.utils import (
50
+ context,
51
+ FileManager,
52
+ make_file_manager,
53
+ mapMaybe,
54
+ NamespaceHelper,
55
+ )
56
+
57
+
58
+ def _sig_decl_wrapper(sig: Union[CppSignature, ExecutorchCppSignature]) -> str:
59
+ """
60
+ A wrapper function to basically get `sig.decl(include_context=True)`.
61
+ For ATen kernels, the codegen has no idea about the ET contextArg, so we
62
+ use this wrapper to add it.
63
+ """
64
+ if isinstance(sig, ExecutorchCppSignature):
65
+ return sig.decl()
66
+
67
+ returns_type = aten_cpp.returns_type(sig.func.returns).cpp_type()
68
+ cpp_args = [a.decl() for a in sig.arguments()]
69
+ cpp_args_str = ", ".join([contextArg.decl()] + cpp_args)
70
+ sig_decl = f"{returns_type} {sig.name()}({cpp_args_str})"
71
+ return sig_decl
72
+
73
+
74
+ def static_dispatch(
75
+ sig: Union[CppSignature, ExecutorchCppSignature],
76
+ f: NativeFunction,
77
+ backend_indices: List[BackendIndex],
78
+ ) -> str:
79
+ """
80
+ For a given `NativeFunction`, find the corresponding native function and dispatch to it. If zero or more than one
81
+ native function exists, error out. A simplified version of register_dispatch_key.py
82
+ Arguments:
83
+ sig: A CppSignature for this native function we want to use.
84
+ f: NativeFunction to generate static dispatch.
85
+ backend_indices: All available backends.
86
+ Return:
87
+ C++ code to call backend-specific functions, e.g., "return at::native::add(self, other, scale);"
88
+ """
89
+ if len(backend_indices) == 0 or f.manual_kernel_registration:
90
+ return ""
91
+
92
+ backends = [b for b in backend_indices if b.has_kernel(f)]
93
+ static_block = None
94
+ if len(backends) == 1:
95
+ backend_metadata = backends[0].get_kernel(f)
96
+ if backend_metadata:
97
+ args = ", ".join(a.name for a in sig.arguments())
98
+ # Here we are assuming there's no difference between CppSignature and NativeSignature for Executorch.
99
+ static_block = f"return ::{backend_metadata.cpp_namespace}::{backend_metadata.kernel}({args});"
100
+ else:
101
+ static_block = f"""
102
+ ET_ASSERT_UNREACHABLE_MSG("The number of native function(s) binding to {f.func.name} is {len(backends)}.");
103
+ """
104
+ return f"""
105
+ // {f.namespace}::{f.func}
106
+ TORCH_API inline {_sig_decl_wrapper(sig)} {{
107
+ {static_block}
108
+ }}
109
+ """
110
+
111
+
112
+ # Generates Functions.h, which provides the functional public C++ API,
113
+ # and the scaffolding to call into the dispatcher from these functions.
114
+ @dataclass(frozen=True)
115
+ class ComputeFunction:
116
+ static_dispatch_backend_indices: List[BackendIndex]
117
+
118
+ selector: SelectiveBuilder
119
+
120
+ use_aten_lib: bool
121
+
122
+ is_custom_op: Callable[[NativeFunction], bool]
123
+
124
+ @method_with_native_function
125
+ def __call__(self, f: NativeFunction) -> Optional[str]:
126
+ is_method_variant = False
127
+ if not self.selector.is_root_operator(f"{f.namespace}::{f.func.name}"):
128
+ return None
129
+
130
+ if Variant.function not in f.variants and Variant.method in f.variants:
131
+ is_method_variant = True
132
+
133
+ # the only remaining valid case is that only the function variant is in f.variants
134
+ elif not (Variant.function in f.variants and Variant.method not in f.variants):
135
+ raise Exception(
136
+ f"Can't handle native function {f.func} with the following variant specification {f.variants}."
137
+ )
138
+
139
+ sig: Union[CppSignature, ExecutorchCppSignature] = (
140
+ CppSignatureGroup.from_native_function(
141
+ f, method=False, fallback_binding=f.manual_cpp_binding
142
+ ).most_faithful_signature()
143
+ if self.use_aten_lib
144
+ else ExecutorchCppSignature.from_native_function(f)
145
+ )
146
+ if self.use_aten_lib and not self.is_custom_op(f):
147
+ comma = ", "
148
+
149
+ if is_method_variant:
150
+ return f"""
151
+ // {f.namespace}::{f.func}
152
+ TORCH_API inline {_sig_decl_wrapper(sig)} {{
153
+ return {sig.arguments()[0].name}.{sig.name()}({comma.join(e.name for e in sig.arguments()[1:])});
154
+ }}
155
+ """
156
+ else:
157
+ return f"""
158
+ // {f.namespace}::{f.func}
159
+ TORCH_API inline {_sig_decl_wrapper(sig)} {{
160
+ return at::{sig.name()}({comma.join(e.name for e in sig.arguments())});
161
+ }}
162
+ """
163
+
164
+ else:
165
+ return static_dispatch(
166
+ sig,
167
+ f,
168
+ backend_indices=self.static_dispatch_backend_indices,
169
+ )
170
+
171
+
172
+ # Generates RegisterCodegenUnboxedKernels.cpp.
173
+ @dataclass(frozen=True)
174
+ class ComputeCodegenUnboxedKernels:
175
+ selector: SelectiveBuilder
176
+
177
+ use_aten_lib: bool
178
+
179
+ @method_with_nested_native_function
180
+ def __call__(
181
+ self,
182
+ unbox_kernel_entry: Tuple[NativeFunction, Tuple[ETKernelKey, BackendMetadata]],
183
+ ) -> str:
184
+ f: NativeFunction = unbox_kernel_entry[0]
185
+ kernel_key: Union[ETKernelKey, List[ETKernelKey]] = unbox_kernel_entry[1][0]
186
+ kernel_meta: BackendMetadata = unbox_kernel_entry[1][1]
187
+
188
+ op_name = f"{f.namespace}::{f.func.name}"
189
+ if not self.selector.is_root_operator(op_name):
190
+ return ""
191
+
192
+ if not isinstance(kernel_key, list):
193
+ kernel_key = [kernel_key]
194
+ used_kernel_keys = self.selector.et_get_selected_kernels(
195
+ op_name, [k.to_native_string() for k in kernel_key]
196
+ )
197
+ if not used_kernel_keys:
198
+ return ""
199
+ sig: Union[CppSignature, ExecutorchCppSignature]
200
+ argument_type_gen: Callable[..., NamedCType]
201
+ return_type_gen: Callable[..., CType]
202
+ if self.use_aten_lib:
203
+ sig = CppSignatureGroup.from_native_function(
204
+ f, method=False, fallback_binding=f.manual_cpp_binding
205
+ ).most_faithful_signature()
206
+ argument_type_gen = aten_cpp.argumenttype_type
207
+ return_type_gen = aten_cpp.returns_type
208
+ arguments = sig.arguments()
209
+ kernel_call = f"torch::executor::{f.namespace}::{sig.name()}"
210
+ else:
211
+ sig = ExecutorchCppSignature.from_native_function(f)
212
+ argument_type_gen = et_cpp.argumenttype_type
213
+ return_type_gen = et_cpp.returns_type
214
+ arguments = sig.arguments(include_context=False)
215
+ kernel_call = f"{kernel_meta.cpp_namespace}::{kernel_meta.kernel}"
216
+ # parse arguments into C++ code
217
+ binding_list, code_list = Unboxing(
218
+ argument_type_gen=argument_type_gen
219
+ ).convert_arguments(arguments)
220
+
221
+ # for each C++ argument, generate the conversion code
222
+ code_connector = "\n\t"
223
+ arg_connector = ", "
224
+
225
+ args_str = f"{arg_connector.join(e.name for e in binding_list)}"
226
+ event_tracer_output_logging = ""
227
+ output_ids = []
228
+
229
+ if len(f.func.returns) == 0:
230
+ if len(f.func.arguments.out) == 0:
231
+ raise Exception(
232
+ f"Can't handle native function {f.func} with no returns and no out yet."
233
+ )
234
+ out = f.func.arguments.out[0]
235
+ return_assignment = f"""stack[{len(binding_list)}] = &{out.name};"""
236
+ ret_prefix = ""
237
+ output_ids = [len(binding_list)]
238
+ else:
239
+ if len(f.func.arguments.out) == 0:
240
+ return_assignment = (
241
+ f"""*stack[{len(binding_list)}] = EValue(result_);"""
242
+ )
243
+ ret_prefix = return_type_gen(f.func.returns).cpp_type() + " result_ = "
244
+ output_ids = [len(binding_list)]
245
+ else:
246
+ return_assignment = ""
247
+ ret_prefix = ""
248
+ output_ids = [
249
+ len(binding_list) - (i + 1)
250
+ for i in reversed(range(len(f.func.arguments.out)))
251
+ ]
252
+
253
+ for output_id in output_ids:
254
+ event_tracer_output_logging += (
255
+ f"internal::event_tracer_log_evalue("
256
+ f"context.internal_event_tracer(), "
257
+ f"*stack[{output_id}]);\n"
258
+ )
259
+
260
+ newline = "\n "
261
+ return "\n".join(
262
+ [
263
+ f"""
264
+ Kernel(
265
+ "{f.namespace}::{f.func.name}",{newline + '"' + (k + '",') if k != 'default' else ''}
266
+ []({contextArg.defn()}, EValue** stack) {{
267
+ {code_connector.join(code_list)}
268
+
269
+ internal::EventTracerProfileScope event_tracer_scope(context.internal_event_tracer(), "native_call_{f.func.name}");
270
+ EXECUTORCH_SCOPE_PROF("native_call_{f.func.name}");
271
+ {ret_prefix}{kernel_call}(context, {args_str});
272
+ {event_tracer_output_logging}
273
+ {return_assignment}
274
+ }}
275
+ ),
276
+ """
277
+ for k in used_kernel_keys
278
+ ]
279
+ )
280
+
281
+
282
+ def gen_unboxing(
283
+ *,
284
+ native_functions: Sequence[NativeFunction],
285
+ cpu_fm: FileManager,
286
+ selector: SelectiveBuilder,
287
+ use_aten_lib: bool,
288
+ kernel_index: ETKernelIndex,
289
+ manual_registration: bool,
290
+ ) -> None:
291
+ # Iterable type for write_sharded is a Tuple of (native_function, (kernel_key, metadata))
292
+ def key_func(
293
+ item: Tuple[NativeFunction, Tuple[ETKernelKey, BackendMetadata]]
294
+ ) -> str:
295
+ return item[0].root_name + ":" + item[1][0].to_native_string()
296
+
297
+ items: List[Tuple[NativeFunction, Tuple[ETKernelKey, BackendMetadata]]] = [
298
+ (native_function, (kernel_key, metadata))
299
+ for native_function in native_functions
300
+ for kernel_key, metadata in kernel_index.get_kernels(native_function).items()
301
+ ]
302
+
303
+ header = ["Functions.h" if use_aten_lib else "NativeFunctions.h"]
304
+ filename = (
305
+ "RegisterKernels.cpp"
306
+ if manual_registration
307
+ else "RegisterCodegenUnboxedKernels.cpp"
308
+ )
309
+ cpu_fm.write_sharded(
310
+ filename,
311
+ items,
312
+ key_fn=key_func,
313
+ env_callable=lambda unbox_kernel_entry: {
314
+ "unboxed_kernels": [
315
+ ComputeCodegenUnboxedKernels(selector, use_aten_lib)(unbox_kernel_entry)
316
+ ],
317
+ "fn_header": header
318
+ if unbox_kernel_entry == items[0]
319
+ else [], # Only write header once
320
+ },
321
+ num_shards=1,
322
+ sharded_keys={"unboxed_kernels", "fn_header"},
323
+ )
324
+
325
+
326
+ @with_native_function_and_index # type: ignore[arg-type]
327
+ def compute_native_function_declaration(
328
+ g: Union[NativeFunctionsGroup, NativeFunction], kernel_index: ETKernelIndex
329
+ ) -> List[str]:
330
+ assert isinstance(g, NativeFunction)
331
+ sig = ExecutorchCppSignature.from_native_function(f=g)
332
+ metadata_list = kernel_index.get_kernels(g).values()
333
+ if metadata_list is None:
334
+ return []
335
+ prefix = "TORCH_API"
336
+
337
+ # for kernels in lean mode, we declare two versions, one with context and one without.
338
+ # In the end we will clean up the unused one.
339
+ def gen_decl(metadata: BackendMetadata, include_context: bool) -> str:
340
+ return f"{prefix} {sig.decl(name=metadata.kernel, include_context=include_context)};"
341
+
342
+ return [
343
+ gen_decl(metadata, include_context)
344
+ for include_context in [False, True]
345
+ for metadata in metadata_list
346
+ ]
347
+
348
+
349
+ def gen_functions_declarations(
350
+ *,
351
+ native_functions: Sequence[NativeFunction],
352
+ kernel_index: ETKernelIndex,
353
+ selector: SelectiveBuilder,
354
+ use_aten_lib: bool,
355
+ custom_ops_native_functions: Optional[Sequence[NativeFunction]] = None,
356
+ ) -> str:
357
+ """
358
+ Generates namespace separated C++ function API inline declaration/definitions.
359
+ Native functions are grouped by namespaces and the generated code is wrapped inside
360
+ namespace blocks.
361
+
362
+ E.g., for `custom_1::foo.out` in yaml file we will generate a C++ API as a symbol
363
+ in `torch::executor::custom_1::foo_out`. This way we avoid symbol conflict when
364
+ the other `custom_2::foo.out` is available.
365
+ """
366
+
367
+ # convert kernel index to BackendIndex. This is because we can't handle ETKernelIndex yet.
368
+ # TODO larryliu: evaluate if this code is still needed. If yes let it handle ETKernelIndex.
369
+
370
+ dispatch_key = DispatchKey.CPU
371
+ backend_index = kernel_index._to_backend_index()
372
+
373
+ ns_grouped_functions = defaultdict(list)
374
+ for native_function in native_functions:
375
+ ns_grouped_functions[native_function.namespace].append(native_function)
376
+ functions_declarations = ""
377
+ newline = "\n"
378
+ for namespace in ns_grouped_functions:
379
+ ns_helper = NamespaceHelper(
380
+ namespace_str=namespace,
381
+ entity_name="",
382
+ max_level=3,
383
+ )
384
+ declarations = list(
385
+ mapMaybe(
386
+ ComputeFunction(
387
+ static_dispatch_backend_indices=[backend_index],
388
+ selector=selector,
389
+ use_aten_lib=use_aten_lib,
390
+ is_custom_op=lambda f: custom_ops_native_functions is not None
391
+ and f in custom_ops_native_functions,
392
+ ),
393
+ ns_grouped_functions[namespace],
394
+ )
395
+ )
396
+ functions_declarations += f"""
397
+ {ns_helper.prologue}
398
+ {newline.join(declarations)}
399
+ {ns_helper.epilogue}
400
+ """
401
+ return functions_declarations
402
+
403
+
404
+ def get_ns_grouped_kernels(
405
+ *,
406
+ native_functions: Sequence[NativeFunction],
407
+ kernel_index: ETKernelIndex,
408
+ native_function_decl_gen: Callable[
409
+ [
410
+ Union[NativeFunctionsGroup, NativeFunction],
411
+ ETKernelIndex,
412
+ ],
413
+ List[str],
414
+ ],
415
+ ) -> Dict[str, List[str]]:
416
+ ns_grouped_kernels: Dict[str, List[str]] = defaultdict(list)
417
+ for f in native_functions:
418
+ native_function_namespaces = set()
419
+ op_kernels = kernel_index.get_kernels(f)
420
+ for backend_metadata in op_kernels.values():
421
+ if backend_metadata:
422
+ namespace = backend_metadata.cpp_namespace
423
+ native_function_namespaces.add(namespace)
424
+ else:
425
+ namespace = DEFAULT_KERNEL_NAMESPACE
426
+ assert (
427
+ len(native_function_namespaces) <= 1
428
+ ), f"Codegen only supports one namespace per operator, got {native_function_namespaces}"
429
+ ns_grouped_kernels[namespace].extend(
430
+ native_function_decl_gen(f, kernel_index)
431
+ )
432
+ return ns_grouped_kernels
433
+
434
+
435
+ def gen_headers(
436
+ *,
437
+ native_functions: Sequence[NativeFunction],
438
+ gen_custom_ops_header: bool,
439
+ custom_ops_native_functions: Sequence[NativeFunction],
440
+ selector: SelectiveBuilder,
441
+ kernel_index: ETKernelIndex,
442
+ cpu_fm: FileManager,
443
+ use_aten_lib: bool,
444
+ ) -> None:
445
+ """Generate headers.
446
+
447
+ Args:
448
+ native_functions (Sequence[NativeFunction]): a collection of NativeFunction for ATen ops.
449
+ gen_custom_ops_header (bool): whether we should generate CustomOpsNativeFunctions.h
450
+ custom_ops_native_functions (Sequence[NativeFunction]): a collection of NativeFunction for custom ops.
451
+ kernel_index (ETKernelIndex): kernel collection
452
+ cpu_fm (FileManager): file manager manages output stream
453
+ use_aten_lib (bool): whether we are generating for PyTorch types or Executorch types.
454
+ """
455
+ aten_headers = ["#include <ATen/Functions.h>"]
456
+ backend_indices = {DispatchKey.CPU: kernel_index._to_backend_index()}
457
+ if gen_custom_ops_header:
458
+ cpu_fm.write_with_template(
459
+ "CustomOpsNativeFunctions.h",
460
+ "NativeFunctions.h",
461
+ lambda: {
462
+ "nativeFunctions_declarations": get_native_function_declarations(
463
+ grouped_native_functions=custom_ops_native_functions,
464
+ backend_indices=backend_indices,
465
+ native_function_decl_gen=dest.compute_native_function_declaration,
466
+ ),
467
+ "headers": [
468
+ "#include <ATen/ATen.h>",
469
+ "#include <torch/torch.h>",
470
+ ],
471
+ },
472
+ )
473
+ aten_headers.append('#include "CustomOpsNativeFunctions.h"')
474
+ cpu_fm.write(
475
+ "Functions.h",
476
+ lambda: {
477
+ "static_dispatch_extra_headers": aten_headers
478
+ if use_aten_lib
479
+ else ['#include "NativeFunctions.h"'],
480
+ "Functions_declarations": gen_functions_declarations(
481
+ native_functions=native_functions,
482
+ kernel_index=kernel_index,
483
+ selector=selector,
484
+ use_aten_lib=use_aten_lib,
485
+ custom_ops_native_functions=custom_ops_native_functions,
486
+ ),
487
+ },
488
+ )
489
+ cpu_fm.write(
490
+ "RegisterKernels.h",
491
+ lambda: {
492
+ "generated_comment": "@" + "generated by torchgen/gen_executorch.py",
493
+ },
494
+ )
495
+ headers = {
496
+ "headers": [
497
+ "#include <executorch/runtime/core/exec_aten/exec_aten.h> // at::Tensor etc.",
498
+ "#include <executorch/codegen/macros.h> // TORCH_API",
499
+ "#include <executorch/runtime/kernel/kernel_runtime_context.h>",
500
+ ],
501
+ }
502
+ if use_aten_lib:
503
+ cpu_fm.write(
504
+ "NativeFunctions.h",
505
+ lambda: dict(
506
+ {
507
+ "nativeFunctions_declarations": get_native_function_declarations(
508
+ grouped_native_functions=native_functions,
509
+ backend_indices=backend_indices,
510
+ native_function_decl_gen=dest.compute_native_function_declaration,
511
+ ),
512
+ },
513
+ **headers,
514
+ ),
515
+ )
516
+ else:
517
+ ns_grouped_kernels = get_ns_grouped_kernels(
518
+ native_functions=native_functions,
519
+ kernel_index=kernel_index,
520
+ native_function_decl_gen=compute_native_function_declaration, # type: ignore[arg-type]
521
+ )
522
+ cpu_fm.write(
523
+ "NativeFunctions.h",
524
+ lambda: dict(
525
+ {
526
+ "nativeFunctions_declarations": get_native_function_declarations_from_ns_grouped_kernels(
527
+ ns_grouped_kernels=ns_grouped_kernels,
528
+ ),
529
+ },
530
+ **headers,
531
+ ),
532
+ )
533
+
534
+
535
+ def gen_custom_ops(
536
+ *,
537
+ native_functions: Sequence[NativeFunction],
538
+ selector: SelectiveBuilder,
539
+ kernel_index: ETKernelIndex,
540
+ cpu_fm: FileManager,
541
+ rocm: bool,
542
+ ) -> None:
543
+ dispatch_key = DispatchKey.CPU
544
+ (
545
+ anonymous_definition,
546
+ static_init_dispatch_registrations,
547
+ ) = gen_custom_ops_registration(
548
+ native_functions=native_functions,
549
+ selector=selector,
550
+ kernel_index=kernel_index,
551
+ rocm=rocm,
552
+ )
553
+ cpu_fm.write_with_template(
554
+ f"Register{dispatch_key}CustomOps.cpp",
555
+ "RegisterDispatchKeyCustomOps.cpp",
556
+ lambda: {
557
+ "ops_headers": '#include "CustomOpsNativeFunctions.h"',
558
+ "DispatchKey": dispatch_key,
559
+ "dispatch_namespace": dispatch_key.lower(),
560
+ "dispatch_namespaced_definitions": "",
561
+ "dispatch_anonymous_definitions": anonymous_definition,
562
+ "static_init_dispatch_registrations": static_init_dispatch_registrations,
563
+ },
564
+ )
565
+ cpu_fm.write_with_template(
566
+ f"Register{dispatch_key}Stub.cpp",
567
+ "RegisterDispatchKeyCustomOps.cpp",
568
+ lambda: {
569
+ "ops_headers": "",
570
+ "DispatchKey": dispatch_key,
571
+ "dispatch_namespace": dispatch_key.lower(),
572
+ "dispatch_namespaced_definitions": "",
573
+ "dispatch_anonymous_definitions": list(
574
+ mapMaybe(ComputeNativeFunctionStub(), native_functions)
575
+ ),
576
+ "static_init_dispatch_registrations": static_init_dispatch_registrations,
577
+ },
578
+ )
579
+
580
+ (
581
+ aten_schema_registrations,
582
+ schema_registrations,
583
+ ) = get_native_function_schema_registrations(
584
+ native_functions=native_functions,
585
+ schema_selector=selector,
586
+ )
587
+ cpu_fm.write(
588
+ "RegisterSchema.cpp",
589
+ lambda: {
590
+ "schema_registrations": schema_registrations,
591
+ "aten_schema_registrations": aten_schema_registrations,
592
+ },
593
+ )
594
+
595
+
596
+ def translate_native_yaml(
597
+ tags_yaml_path: str,
598
+ aten_yaml_path: str,
599
+ native_yaml_path: Optional[str],
600
+ use_aten_lib: bool,
601
+ out_file: TextIO,
602
+ ) -> None:
603
+ """Translates Executorch DSL dialect to use the same syntax as
604
+ native_functions.yaml. The major difference is that Executorch DSL dialect
605
+ supports "op" key, where it refers to the operator name in native_functions.yaml.
606
+
607
+ For example, a functions.yaml may have the following entry:
608
+
609
+ - op: add.out
610
+ ...
611
+
612
+ It needs to be translated to the following:
613
+
614
+ - func: add.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
615
+ ...
616
+
617
+ We look up the operator schema for "add.out" in aten_yaml_path and add it
618
+ to the original functions.yaml. We also add the required field "variants", where for
619
+ Executorch it will always be "function".
620
+
621
+ For ATen mode we don't have to do the translation because native_yaml_path is
622
+ the same as native_functions.yaml.
623
+
624
+ Args:
625
+ tags_yaml_path: Path to a tags.yaml file to satisfy codegen parsing.
626
+ It is not optional.
627
+ aten_yaml_path: Path to ATen operator yaml file native_functions.yaml.
628
+ native_yaml_path: Path to a functions.yaml file to parse.
629
+ If the path does not exist in the filesystem, it is treated as an
630
+ empty file. If `custom_ops_yaml_path` exists, the contents of that
631
+ file are appended to the yaml input to be parsed.
632
+ use_aten_lib: We use this flag to determine if we want to generate native
633
+ functions. In ATen mode we should generate out= variants.
634
+ out_file: The IO object that we are writing into.
635
+ Returns:
636
+ None
637
+ """
638
+ if use_aten_lib:
639
+ with open(aten_yaml_path) as aten_yaml:
640
+ out_file.writelines(aten_yaml.readlines())
641
+ return
642
+
643
+ native_functions, persisted_fields = parse_et_yaml(
644
+ aten_yaml_path,
645
+ tags_yaml_path,
646
+ None,
647
+ skip_native_fns_gen=False,
648
+ )
649
+
650
+ func_to_scoped_name: Dict[FunctionSchema, str] = {
651
+ f.func: f"{f.namespace}::{f.func.name}" for f in native_functions
652
+ }
653
+ op_to_scoped_name: Dict[OperatorName, str] = {
654
+ func.name: name for func, name in func_to_scoped_name.items()
655
+ }
656
+
657
+ schema_dict = {name: str(func) for func, name in func_to_scoped_name.items()}
658
+ kernel_persist_dict: Dict[str, Dict[str, Any]] = {
659
+ op_to_scoped_name[op]: v for op, v in persisted_fields.items()
660
+ }
661
+
662
+ if (
663
+ not native_yaml_path
664
+ or not os.path.exists(native_yaml_path)
665
+ or os.stat(native_yaml_path).st_size == 0
666
+ ):
667
+ return
668
+ with open(native_yaml_path) as native_yaml:
669
+ native_es = yaml.load(native_yaml, Loader=LineLoader)
670
+ if not native_es:
671
+ return
672
+ for e in native_es:
673
+ assert isinstance(e.get("__line__"), int), e
674
+ loc = Location(native_yaml_path, e.pop("__line__"))
675
+ with context(lambda: f"in {loc}:\n "):
676
+ if "variants" not in e:
677
+ e["variants"] = "function"
678
+ if "func" in e:
679
+ continue
680
+ assert isinstance(e.get("op"), str), e
681
+ opname = e.pop("op")
682
+ if "::" not in opname:
683
+ opname = "aten::" + opname
684
+ assert opname in schema_dict
685
+ e["func"] = schema_dict.get(opname)
686
+
687
+ # Write out persisted kernel information
688
+ if opname in kernel_persist_dict:
689
+ for k, v in kernel_persist_dict[opname].items():
690
+ e[k] = v
691
+
692
+ yaml.dump(native_es, out_file, width=1000)
693
+
694
+
695
+ def parse_yaml(
696
+ path: Optional[str],
697
+ tags_yaml_path: str,
698
+ function_filter: Callable[[NativeFunction], bool],
699
+ skip_native_fns_gen: bool = False,
700
+ ) -> Tuple[
701
+ List[NativeFunction],
702
+ Union[Dict[DispatchKey, Dict[OperatorName, BackendMetadata]], ETKernelIndex],
703
+ ]:
704
+ if path and os.path.exists(path) and os.stat(path).st_size > 0:
705
+ with open(path) as f:
706
+ es = yaml.load(f, Loader=LineLoader)
707
+
708
+ # Check for kernel index structure
709
+ kernel_index = (
710
+ parse_et_yaml_struct(es) if any("kernels" in e for e in es) else None
711
+ )
712
+
713
+ # Remove ET specific fields from entries for BC compatibility
714
+ for entry in es:
715
+ for field in ET_FIELDS:
716
+ entry.pop(field, None)
717
+
718
+ parsed_yaml = parse_native_yaml(
719
+ path,
720
+ tags_yaml_path,
721
+ None,
722
+ skip_native_fns_gen=skip_native_fns_gen,
723
+ loaded_yaml=es,
724
+ )
725
+ native_functions = list(filter(function_filter, parsed_yaml.native_functions))
726
+ op_names = [f.func.name for f in native_functions]
727
+
728
+ # (1) Return ETKernelIndex if kernel index is present
729
+ if kernel_index is not None:
730
+ filtered_index = {
731
+ op_name: kernel_mapping
732
+ for op_name, kernel_mapping in kernel_index.index.items()
733
+ if op_name in op_names
734
+ }
735
+ return native_functions, ETKernelIndex(index=filtered_index)
736
+
737
+ # (2) Return BackendIndices if kernel index is absent
738
+ def map_index(
739
+ m: Dict[OperatorName, BackendMetadata]
740
+ ) -> Dict[OperatorName, BackendMetadata]:
741
+ return {op: m[op] for op in m if op in op_names}
742
+
743
+ backend_indices = {
744
+ k: map_index(b.index) for (k, b) in parsed_yaml.backend_indices.items()
745
+ }
746
+
747
+ return native_functions, backend_indices
748
+ else:
749
+ return [], {}
750
+
751
+
752
+ def parse_yaml_files(
753
+ tags_yaml_path: str,
754
+ aten_yaml_path: str,
755
+ native_yaml_path: Optional[str],
756
+ custom_ops_yaml_path: Optional[str],
757
+ selector: SelectiveBuilder,
758
+ use_aten_lib: bool,
759
+ ) -> Tuple[ETParsedYaml, Optional[ETParsedYaml]]:
760
+ """Parses functions.yaml and custom_ops.yaml files.
761
+
762
+ Args:
763
+ tags_yaml_path: Path to a tags.yaml file to satisfy codegen parsing.
764
+ It is not optional.
765
+ aten_yaml_path: Path to ATen operator yaml file native_functions.yaml.
766
+ native_yaml_path: Path to a functions.yaml file to parse.
767
+ If the path does not exist in the filesystem, it is treated as an
768
+ empty file. If `custom_ops_yaml_path` exists, the contents of that
769
+ file are appended to the yaml input to be parsed.
770
+ custom_ops_yaml_path: Path to a custom_ops.yaml file to parse. If
771
+ the path does not exist in the filesystem, it is ignored.
772
+ selector: For selective build.
773
+ use_aten_lib: We use this flag to determine if we want to generate native
774
+ functions. In ATen mode we should generate out= variants.
775
+ Returns:
776
+ A tuple with two elements:
777
+ [0]: The parsed results of concatenating the contents of
778
+ `native_yaml_path` and `custom_ops_yaml_path`.
779
+ [1]: The parsed results of the contents of `custom_ops_yaml_path`, if
780
+ present. If not present, None.
781
+ """
782
+ import tempfile
783
+
784
+ # Only include selected ops: we want to avoid generating code for operators that were not selected.
785
+ def function_filter(f: NativeFunction) -> bool:
786
+ return selector.is_native_function_selected(f)
787
+
788
+ with tempfile.TemporaryDirectory() as tmpdirname:
789
+ translated_yaml_path = os.path.join(tmpdirname, "translated.yaml")
790
+ with open(translated_yaml_path, "w") as translated:
791
+ translate_native_yaml(
792
+ tags_yaml_path,
793
+ aten_yaml_path,
794
+ native_yaml_path,
795
+ use_aten_lib,
796
+ translated,
797
+ )
798
+
799
+ translated_functions, translated_indices = parse_yaml(
800
+ translated_yaml_path, tags_yaml_path, function_filter, not use_aten_lib
801
+ )
802
+ custom_ops_functions, custom_ops_indices = parse_yaml(
803
+ custom_ops_yaml_path, tags_yaml_path, function_filter, True
804
+ )
805
+
806
+ # Convert BackendIndices to ETKernelIndex
807
+ if not isinstance(translated_indices, ETKernelIndex):
808
+ translated_indices = ETKernelIndex.from_backend_indices(translated_indices)
809
+ if not isinstance(custom_ops_indices, ETKernelIndex):
810
+ custom_ops_indices = ETKernelIndex.from_backend_indices(custom_ops_indices)
811
+
812
+ combined_functions = translated_functions + custom_ops_functions
813
+ combined_kernel_index = ETKernelIndex.merge_indices(
814
+ translated_indices, custom_ops_indices
815
+ )
816
+ combined_yaml = ETParsedYaml(combined_functions, combined_kernel_index)
817
+ custom_ops_parsed_yaml = ETParsedYaml(custom_ops_functions, custom_ops_indices)
818
+
819
+ return combined_yaml, custom_ops_parsed_yaml
820
+
821
+
822
+ def main() -> None:
823
+ parser = argparse.ArgumentParser(description="Generate operator source files")
824
+ # Although we don't refer to --source-path directly, make_file_manager()
825
+ # expects it to point to a directory that contains a templates/ subdirectory
826
+ # containing the file templates.
827
+ parser.add_argument(
828
+ "-s",
829
+ "--source-path",
830
+ help="path to source directory for kernel templates",
831
+ )
832
+ parser.add_argument(
833
+ "--functions-yaml-path",
834
+ "--functions_yaml_path",
835
+ help="path to the functions.yaml file to use. Optional, but at least "
836
+ "one of --functions-yaml-path and --custom-ops-yaml-path must be "
837
+ "specified.",
838
+ )
839
+ parser.add_argument(
840
+ "--custom-ops-yaml-path",
841
+ "--custom_ops_yaml_path",
842
+ help="path to the custom_ops.yaml file to use. Optional, but at least "
843
+ "one of --functions-yaml-path and --custom-ops-yaml-path must be "
844
+ "specified.",
845
+ )
846
+ parser.add_argument(
847
+ "--aten-yaml-path",
848
+ "--aten_yaml_path",
849
+ help="path to native_functions.yaml file.",
850
+ )
851
+ # Note that make_file_manager() also looks at --install-dir.
852
+ parser.add_argument(
853
+ "-d",
854
+ "--install-dir",
855
+ "--install_dir",
856
+ help="output directory",
857
+ default="build/generated",
858
+ )
859
+ parser.add_argument(
860
+ "-o",
861
+ "--output-dependencies",
862
+ help="output a list of dependencies into the given file and exit",
863
+ )
864
+ # Although we don't refer to --dry-run directly, make_file_manager() looks
865
+ # for it.
866
+ parser.add_argument(
867
+ "--dry-run",
868
+ action="store_true",
869
+ help="run without writing any files (still updates outputs)",
870
+ )
871
+ parser.add_argument(
872
+ "--static-dispatch-backend",
873
+ "--static_dispatch_backend",
874
+ nargs="*",
875
+ help="generate static dispatch code for the specific backend (if set)",
876
+ )
877
+ parser.add_argument(
878
+ "--op-registration-whitelist",
879
+ "--op_registration_whitelist",
880
+ nargs="*",
881
+ help="filter op registrations by the whitelist (if set); "
882
+ "each item is `namespace`::`operator name` without overload name; "
883
+ "e.g.: aten::empty aten::conv2d ...",
884
+ )
885
+ parser.add_argument(
886
+ "--op-selection-yaml-path",
887
+ "--op_selection_yaml_path",
888
+ help="Provide a path to the operator selection (for custom build) YAML "
889
+ "that contains the information about the set of selected operators "
890
+ "and their categories (training, ...). Each operator is either a "
891
+ "full operator name with overload or just a bare operator name. "
892
+ "The operator names also contain the namespace prefix (e.g. aten::)",
893
+ )
894
+ parser.add_argument(
895
+ "--tags-path",
896
+ help="Path to tags.yaml. Required by yaml parsing in codegen system.",
897
+ )
898
+ parser.add_argument(
899
+ "--rocm",
900
+ action="store_true",
901
+ help="reinterpret CUDA as ROCm/HIP and adjust filepaths accordingly",
902
+ )
903
+ parser.add_argument(
904
+ "--use-aten-lib",
905
+ "--use_aten_lib",
906
+ action="store_true",
907
+ help="a boolean flag to indicate whether we use ATen kernels or not, in the future this flag will be per "
908
+ "operator",
909
+ )
910
+ parser.add_argument(
911
+ "--manual_registration",
912
+ "--manual-registration",
913
+ action="store_true",
914
+ help="a boolean flag to indicate whether we want to manually call"
915
+ "register_kernels() or rely on static init. ",
916
+ )
917
+ parser.add_argument(
918
+ "--generate",
919
+ type=str,
920
+ nargs="*",
921
+ choices=["headers", "sources"],
922
+ default=["headers", "sources"],
923
+ help="Generate only a subset of files",
924
+ )
925
+ options = parser.parse_args()
926
+ assert options.tags_path, "tags.yaml is required by codegen yaml parsing."
927
+
928
+ selector = get_custom_build_selector(
929
+ options.op_registration_whitelist,
930
+ options.op_selection_yaml_path,
931
+ )
932
+
933
+ parsed_yaml, custom_ops_parsed_yaml = parse_yaml_files(
934
+ aten_yaml_path=options.aten_yaml_path,
935
+ tags_yaml_path=options.tags_path,
936
+ native_yaml_path=options.functions_yaml_path,
937
+ custom_ops_yaml_path=options.custom_ops_yaml_path,
938
+ selector=selector,
939
+ use_aten_lib=options.use_aten_lib,
940
+ )
941
+ native_functions, kernel_index = (
942
+ parsed_yaml.native_functions,
943
+ parsed_yaml.kernel_index,
944
+ )
945
+ custom_ops_native_functions = (
946
+ custom_ops_parsed_yaml.native_functions if custom_ops_parsed_yaml else []
947
+ )
948
+
949
+ cpu_fm = make_file_manager(options=options)
950
+
951
+ if "headers" in options.generate:
952
+ # generate CustomOpsNativeFunctions.h when custom_ops.yaml is present, to match the build system.
953
+ gen_headers(
954
+ native_functions=native_functions,
955
+ gen_custom_ops_header=options.custom_ops_yaml_path,
956
+ custom_ops_native_functions=custom_ops_native_functions,
957
+ selector=selector,
958
+ kernel_index=kernel_index,
959
+ cpu_fm=cpu_fm,
960
+ use_aten_lib=options.use_aten_lib,
961
+ )
962
+
963
+ if "sources" in options.generate:
964
+ gen_unboxing(
965
+ native_functions=native_functions,
966
+ cpu_fm=cpu_fm,
967
+ selector=selector,
968
+ use_aten_lib=options.use_aten_lib,
969
+ kernel_index=kernel_index,
970
+ manual_registration=options.manual_registration,
971
+ )
972
+ if custom_ops_native_functions:
973
+ gen_custom_ops(
974
+ native_functions=custom_ops_native_functions,
975
+ selector=selector,
976
+ kernel_index=kernel_index,
977
+ cpu_fm=cpu_fm,
978
+ rocm=options.rocm,
979
+ )
980
+
981
+ if options.output_dependencies:
982
+ depfile_path = pathlib.Path(options.output_dependencies).resolve()
983
+ depfile_name = depfile_path.name
984
+ depfile_stem = depfile_path.stem
985
+
986
+ for fm, prefix in [
987
+ (cpu_fm, ""),
988
+ ]:
989
+ varname = prefix + depfile_stem
990
+ path = depfile_path.parent / (prefix + depfile_name)
991
+ fm.write_outputs(varname, str(path))
992
+
993
+
994
+ if __name__ == "__main__":
995
+ main()
llmeval-env/lib/python3.10/site-packages/torchgen/gen_functionalization_type.py ADDED
@@ -0,0 +1,809 @@
1
+ from dataclasses import dataclass
2
+ from typing import Callable, List, Optional, Tuple, Union
3
+
4
+ from torchgen.api import cpp, dispatcher
5
+ from torchgen.api.translate import translate
6
+ from torchgen.api.types import (
7
+ BaseCType,
8
+ Binding,
9
+ CType,
10
+ DispatcherSignature,
11
+ FunctionalizationLambda,
12
+ iTensorListRefT,
13
+ NativeSignature,
14
+ tensorListT,
15
+ tensorT,
16
+ VectorCType,
17
+ ViewInverseSignature,
18
+ )
19
+ from torchgen.context import (
20
+ method_with_native_function,
21
+ native_function_manager,
22
+ with_native_function,
23
+ with_native_function_and,
24
+ )
25
+ from torchgen.model import (
26
+ Argument,
27
+ BackendIndex,
28
+ BaseTy,
29
+ BaseType,
30
+ FunctionSchema,
31
+ ListType,
32
+ NativeFunction,
33
+ NativeFunctionsGroup,
34
+ NativeFunctionsViewGroup,
35
+ Return,
36
+ SchemaKind,
37
+ SelfArgument,
38
+ TensorOptionsArguments,
39
+ )
40
+ from torchgen.native_function_generation import (
41
+ INPLACE_OPS_THAT_DONT_GET_GROUPED_PROPERLY,
42
+ MUTABLE_OPS_THAT_CANNOT_GET_AN_OUT_VARIANT,
43
+ OUT_OPS_THAT_DONT_GET_GROUPED_PROPERLY,
44
+ )
45
+
46
+ from torchgen.selective_build.selector import SelectiveBuilder
47
+
48
+
49
+ # Note: [Mutable Ops Not Using Functionalization]
50
+ # Ops in this list currently do not work with functionalization and should be fixed.
51
+ MUTABLE_OPS_NOT_USING_FUNCTIONALIZATION = (
52
+ OUT_OPS_THAT_DONT_GET_GROUPED_PROPERLY
53
+ + MUTABLE_OPS_THAT_CANNOT_GET_AN_OUT_VARIANT
54
+ + INPLACE_OPS_THAT_DONT_GET_GROUPED_PROPERLY
55
+ + [
56
+ # It will be BC-breaking, but we should fix their schemas.
57
+ # should be inplace?
58
+ "record_stream",
59
+ # See Note [resize_ in Functionalization]
60
+ "resize_",
61
+ "resize_as_",
62
+ # This function is used for testing purposes only.
63
+ "_fill_mem_eff_dropout_mask_",
64
+ ]
65
+ )
66
+
67
+ # This file contains codegen that relates to the functionalization pass.
68
+ # It includes:
69
+ # - gen_functionalization_definition
70
+ # Generates dispatcher kernel definitions for the functionalization pass.
71
+ # - gen_functionalization_registration
72
+ # Generates dispatcher kernel registrations for the functionalization pass.
73
+ # - gen_functionalization_view_inverse_declaration
74
+ # Generates a declaration for an "inverse view", for every view op
75
+ # that is needed in functionalization. We manually implement their definitions.
76
+ # - gen_composite_view_copy_kernel
77
+ # Generates view_copy() composite kernels for all view_copy operators.
78
+
79
+
80
+ # Generates the body of the default composite C++ kernel for a {view}_copy NativeFunction
81
+ # See Note [view_copy NativeFunctions]
82
+ @dataclass(frozen=True)
83
+ class GenCompositeViewCopyKernel:
84
+ backend_index: BackendIndex
85
+
86
+ @method_with_native_function
87
+ def __call__(self, g: NativeFunctionsViewGroup) -> Optional[str]:
88
+ if g.view_copy is None:
89
+ return None
90
+ elif g.view_copy.func.name.name.base != f"{g.view.func.name.name}_copy":
91
+ # If the view_copy doesn't match the standard naming scheme of <op>_copy,
92
+ # assume it already exists and doesn't need to be generated.
93
+ # Example: slice_inverse() with the copy variant named slice_scatter()
94
+ # instead of slice_inverse_copy()
95
+ return None
96
+
97
+ metadata = self.backend_index.get_kernel(g.view_copy)
98
+ assert metadata is not None
99
+
100
+ # We can make view_copy work in more cases by using reshape()
101
+ # when a normal view call would ordinarily fail.
102
+ # This also makes LTC more efficient, because they don't need to include
103
+ # clone() calls in their graph (which is normally needed by reshape).
104
+ if str(g.view_copy.func.name) == "view_copy":
105
+ assert metadata.kernel == "view_copy_symint"
106
+ return """\
107
+ at::Tensor view_copy_symint(const at::Tensor & self, at::SymIntArrayRef size) {
108
+ c10::SymDimVector shape = infer_size_dv(size, self.sym_numel());
109
+ if (!at::detail::computeStride(self.sym_sizes(), self.sym_strides(), shape).has_value()) {
110
+ return self.reshape_symint(size);
111
+ } else {
112
+ auto output = at::_ops::view::call(self, size);
113
+ return output.clone(/*memory_format=*/at::MemoryFormat::Contiguous);
114
+ }
115
+ }
116
+ """
117
+ # view_copy is a native signature, since we're generating an at::native:: kernel
118
+ # Functionalization always operates on symints though
119
+ view_copy_sig = NativeSignature(
120
+ g.view_copy.func, symint=metadata.supports_symint()
121
+ )
122
+
123
+ # view is a dispatcher signature, since we're calling into the at::_ops API
124
+ view_sig = DispatcherSignature(g.view.func)
125
+
126
+ view_api_name = g.view.func.name.unambiguous_name()
127
+ exprs = ", ".join(
128
+ [e.expr for e in translate(view_copy_sig.arguments(), view_sig.arguments())]
129
+ )
130
+
131
+ # view ops today always return either a Tensor or a list of Tensors
132
+ assert len(g.view.func.returns) == 1
133
+ assert g.view.func.returns[0].type == BaseType(
134
+ BaseTy.Tensor
135
+ ) or g.view.func.returns[0].type == ListType(BaseType(BaseTy.Tensor), None)
136
+
137
+ if g.view.func.returns[0].type == BaseType(BaseTy.Tensor):
138
+ return_cloned_output = """\
139
+ return output.clone(/*memory_format=*/at::MemoryFormat::Contiguous);"""
140
+ else:
141
+ # If the return type is a list, we need to clone each tensor in the list.
142
+ return_cloned_output = f"""\
143
+ {view_copy_sig.returns_type().cpp_type()} out_clone;
144
+ for (const auto i : c10::irange(output.size())) {{
145
+ out_clone.push_back(output[i].clone(/*memory_format=*/at::MemoryFormat::Contiguous));
146
+ }}
147
+ return out_clone;"""
148
+
149
+ # The default generated composite kernel for {view}_copy() operators just clones
150
+ # the input tensor, and runs the underlying view on the clone.
151
+ return f"""
152
+ {view_copy_sig.defn(name=metadata.kernel)} {{
153
+ auto output = at::_ops::{view_api_name}::call({exprs});
154
+ {return_cloned_output}
155
+ }}
156
+ """
157
+
158
+
159
+ def return_str(rets: Tuple[Return, ...], names: List[str]) -> str:
160
+ assert len(rets) == len(names)
161
+ if len(rets) == 0:
162
+ return ""
163
+ elif len(rets) == 1:
164
+ return f"return {names[0]};"
165
+ else:
166
+ return f"return {dispatcher.returns_type(rets).cpp_type()}({', '.join(names)});"
167
+
168
+
169
+ def modifies_arguments(f: NativeFunction) -> bool:
170
+ return any(
171
+ a.annotation is not None and a.annotation.is_write
172
+ for a in f.func.arguments.flat_all
173
+ )
174
+
175
+
176
+ def wrapper_name(func: FunctionSchema) -> str:
177
+ if func.name.overload_name:
178
+ return f"{cpp.name(func)}_{func.name.overload_name}"
179
+ else:
180
+ return cpp.name(func)
181
+
182
+
183
+ def is_tensor_like(a: Union[Argument, TensorOptionsArguments, SelfArgument]) -> bool:
184
+ return isinstance(a, SelfArgument) or (
185
+ isinstance(a, Argument) and a.type.is_tensor_like()
186
+ )
187
+
188
+
189
+ # We need to wrap / unwrap various arguments from the op in the functionalization kernels.
190
+ # Some op schemas include non-owning types though (like TensorList),
191
+ # and when we unwrap them we expect to get out an owning type!
192
+ # We also return a lambda that tells you how to convert the non-owning type argument into the owning type.
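+ # (For example, a TensorList argument maps to std::vector<Tensor> and is materialized
+ # with `.vec()`, as encoded in the table below.)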
193
+ def get_owning_type(t: CType) -> Tuple[CType, Callable[[str], str]]:
194
+ if t == BaseCType(tensorListT):
195
+ return VectorCType(BaseCType(tensorT)), lambda x: f"{x}.vec()"
196
+ if t == BaseCType(iTensorListRefT):
197
+ return VectorCType(BaseCType(tensorT)), lambda x: f"{{{x}.begin(), {x}.end()}}"
198
+ # There are technically other non-owning types out there (like IntArrayRef),
199
+ # but functionalization only actually cares about the ones involving tensors.
200
+ return t, lambda x: x
201
+
202
+
203
+ # unwraps all tensor-like arguments, returning:
204
+ # (1) a string containing all of the logic that does the unwrapping
205
+ # (2) a context, to be used by translate(), with all of the relevant bindings.
206
+ def unwrap_tensor_args(
207
+ sig: DispatcherSignature, *, is_view_op: bool
208
+ ) -> Tuple[str, List[Binding]]:
209
+ context: List[Binding] = []
210
+ unwrapped_tensor_args: List[str] = []
211
+ for arg in sig.arguments():
212
+ if is_tensor_like(arg.argument):
213
+ # for tensor inputs, we want to unwrap them before passing them into the redispatch calls.
214
+ unwrapped_name = f"{arg.name}_"
215
+ # For most ops, the functionalization needs to sync any pending updates on the input tensors
216
+ # before calling the operator, since otherwise the operator will act on stale data.
217
+ # For view ops though, we can continue to defer syncing until the tensor is used by
218
+ # a non-view operator.
219
+ maybe_sync_input = (
220
+ "" if is_view_op else f"at::functionalization::impl::sync({arg.name});"
221
+ )
222
+ unwrapped_type, conversion_fn = get_owning_type(
223
+ arg.nctype.remove_const_ref().type
224
+ )
225
+ unwrapped_tensor_args.append(
226
+ f"""
227
+ {unwrapped_type.cpp_type()} {unwrapped_name};
228
+ if (at::functionalization::impl::isFunctionalTensor({arg.name})) {{
229
+ {maybe_sync_input}
230
+ {unwrapped_name} = at::functionalization::impl::from_functional_tensor({arg.name});
231
+ }} else {{
232
+ {unwrapped_name} = {conversion_fn(arg.name)};
233
+ }}"""
234
+ )
235
+ context.append(arg.with_name(unwrapped_name))
236
+ else:
237
+ # for non-tensor inputs, we want to pass them directly into the redispatch calls.
238
+ context.append(arg)
239
+ unwrap_tensor_args_str = "\n ".join(unwrapped_tensor_args)
240
+ return unwrap_tensor_args_str, context
241
+
242
+
243
+ # converts all tensor-like arguments to meta tensors, which are used to compute stride info. Returns:
244
+ # (1) a string containing all of the logic that does the conversions.
245
+ # (2) a context, to be used by translate(), with all of the relevant bindings.
246
+ def convert_to_meta_tensors(sig: DispatcherSignature) -> Tuple[str, List[Binding]]:
247
+ context: List[Binding] = []
248
+ unwrapped_tensor_args: List[str] = []
249
+ for arg in sig.arguments():
250
+ if is_tensor_like(arg.argument):
251
+ # for tensor inputs, we want to unwrap them before passing them into the redispatch calls.
252
+ a_ = arg.name
253
+ unwrapped_name = f"{arg.name}_meta"
254
+ unwrapped_tensor_args.append(f"auto {unwrapped_name} = to_meta({a_});")
255
+ context.append(arg.with_name(unwrapped_name))
256
+ else:
257
+ # for non-tensor inputs, we want to pass them directly into the redispatch calls.
258
+ context.append(arg)
259
+ unwrap_tensor_args_str = "\n ".join(unwrapped_tensor_args)
260
+ return unwrap_tensor_args_str, context
261
+
262
+
263
+ # The functionalization codegen currently expects view op schemas to have this form:
264
+ # foo(Tensor(a), ...) -> Tensor(a) (e.g. transpose)
265
+ # foo(Tensor(a!), ...) -> Tensor(a!) (e.g. transpose_)
266
+ def assert_view_op_properties(func: FunctionSchema) -> None:
267
+ def is_alias(a: Argument) -> bool:
268
+ return a.annotation is not None
269
+
270
+ args = func.arguments.flat_non_out
271
+ # The first argument is a tensor with an alias semantics (annotations)
272
+ assert len(args) > 0 and args[0].type == BaseType(
273
+ BaseTy.Tensor
274
+ ), f"""In the functionalization codegen, we expect the first argument of every view operator to be a tensor,
275
+ but found an argument of type {str(args[0].type)} for operator: {str(func.name)}."""
276
+ # No other arguments have aliasing semantics
277
+ assert is_alias(args[0]) and not any(
278
+ is_alias(a) for a in args[1:]
279
+ ), """In the functionalization codegen, we expect the first argument of every view operator to alias the output.
280
+ View operators with multiple aliasing inputs aren't supported yet. Found an operator that doesn't satisfy this constraint"""
281
+
282
+
283
+ # Generates the Functionalization kernel for:
284
+ # - ops that create aliases (e.g. transpose())
285
+ # - ops that are views AND mutations (e.g. transpose_())
286
+ def emit_view_functionalization_body(
287
+ g: NativeFunctionsViewGroup, *, view_inplace: bool
288
+ ) -> str:
289
+ if view_inplace:
290
+ # This op is both an inplace op AND a view op.
291
+ # See Note [Functionalization Pass - Inplace View Ops] for details.
292
+ # I currently have the view meta call into the out-of-place variant of the view, to avoid
293
+ # having to define an extra ~20 inplace {view}_inverse_ functions.
294
+ # Most view ops don't belong to a NativeFunctionsGroup at all, because we don't define out= variants for view ops.
295
+ # I'm assuming that every inplace-view op has a corresponding out-of-place view op,
296
+ # with the same name but the trailing underscore removed.
297
+ # This is currently asserted at parse time in gen.py (see error_check_native_functions).
298
+ assert g.view_inplace is not None
299
+ f = g.view_inplace
300
+ else:
301
+ f = g.view
302
+
303
+ assert g.view_copy is not None
304
+ with native_function_manager(f):
305
+ call_sig = DispatcherSignature.from_schema(g.view_copy.func)
306
+
307
+ # the "view_copy" op name that the functionalization kernels need to call
308
+ api_name = g.view_copy.func.name.unambiguous_name()
309
+ # Sometimes the functionalization pass needs to no-op (e.g. if it was passed non-functional tensors)
310
+ # "no-op"ing in this context is just redispatching to the original op.
311
+ noop_api_name = f.func.name.unambiguous_name()
312
+
313
+ dispatcher_sig = DispatcherSignature.from_schema(f.func)
314
+ assert_view_op_properties(f.func)
315
+ view_tensor_name = dispatcher_sig.arguments()[0].name
316
+
317
+ return_type = dispatcher_sig.returns_type().remove_const_ref().cpp_type()
318
+
319
+ unwrap_tensor_args_str, unwrapped_args_ctx = unwrap_tensor_args(
320
+ dispatcher_sig, is_view_op=True
321
+ )
322
+ view_redispatch_args = [
323
+ e.expr
324
+ for e in translate(unwrapped_args_ctx, call_sig.arguments(), method=False)
325
+ ]
326
+
327
+ forward_lambda = FunctionalizationLambda.from_func(g, is_reverse=False)
328
+ reverse_lambda = FunctionalizationLambda.from_func(g, is_reverse=True)
329
+
330
+ # The meta API call should use the same arguments, but convert all tensors to meta tensors first.
331
+ meta_conversion_str, meta_call_ctx = convert_to_meta_tensors(dispatcher_sig)
332
+ meta_call_args = [
333
+ e.expr for e in translate(meta_call_ctx, call_sig.arguments(), method=False)
334
+ ]
335
+
336
+ if "inplace_view" in f.tags:
337
+ # See Note [Functionalization Pass - Inplace View Ops] for more details
338
+ return f"""
339
+ {dispatcher_sig.defn(name=wrapper_name(f.func), is_redispatching_fn=True)} {{
340
+ if (!at::functionalization::impl::isFunctionalTensor({view_tensor_name})) {{
341
+ // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
342
+ {unwrap_tensor_args_str}
343
+ at::AutoDispatchSkipFunctionalize guard;
344
+ return at::_ops::{noop_api_name}::call({', '.join(view_redispatch_args)});
345
+ }}
346
+ auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
347
+ auto inverse_return_mode = (
348
+ reapply_views ? at::functionalization::InverseReturnMode::ViewOrScatterInverse
349
+ : at::functionalization::InverseReturnMode::NeverView
350
+ );
351
+ at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
352
+ {forward_lambda.decl()} {{
353
+ if (reapply_views) {{
354
+ return {forward_lambda.inner_call(reapply_views=True)}
355
+ }} else {{
356
+ return {forward_lambda.inner_call(reapply_views=False)}
357
+ }}
358
+ }},
359
+ {reverse_lambda.decl()} {{
360
+ return {reverse_lambda.inner_call()}
361
+ }}
362
+ );
363
+ auto compute_reference_meta =
364
+ {view_tensor_name}.key_set().has_backend(c10::BackendComponent::XLABit) ||
365
+ {view_tensor_name}.key_set().has_backend(c10::BackendComponent::LazyBit);
366
+ {return_type} reference_tensor_output;
367
+ if (compute_reference_meta) {{
368
+ {meta_conversion_str}
369
+ at::AutoDispatchSkipFunctionalize func_guard;
370
+ c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
371
+ reference_tensor_output = at::_ops::{noop_api_name}::call({', '.join(meta_call_args)});
372
+ }}
373
+ // This function adds the above view meta to the current tensor and replays them off the base,
374
+ // mutating the size/stride info of the current FunctionalTensorWrapper.
375
+ // Because of this, we need to make sure to run the reference shape function above,
376
+ // BEFORE doing this (otherwise we'll end up running the reference function using the wrong sizes/strides)
377
+ at::functionalization::impl::mutate_view_meta({view_tensor_name}, view_meta);
378
+ // See Note [Propagating strides in the functionalization pass]
379
+ // XLA/LTC don't implement the logic to propagate strides correctly, so we need to rely
380
+ // on a reference implementation here (instead of relying on the output from the forward lambda
381
+ // having the correct stride info)
382
+ if (compute_reference_meta) {{
383
+ at::functionalization::impl::set_sizes_strides_offset({view_tensor_name}, reference_tensor_output);
384
+ }}
385
+ return {view_tensor_name};
386
+ }}
387
+ """
388
+
389
+ else:
390
+ is_multi_output_view = isinstance(f.func.returns[0].type, ListType)
391
+ return f"""
392
+ {dispatcher_sig.defn(name=wrapper_name(f.func), is_redispatching_fn=True)} {{
393
+ {unwrap_tensor_args_str}
394
+ if (!at::functionalization::impl::isFunctionalTensor({view_tensor_name})) {{
395
+ // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
396
+ at::AutoDispatchSkipFunctionalize guard;
397
+ return at::_ops::{noop_api_name}::call({', '.join(view_redispatch_args)});
398
+ }}
399
+ auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
400
+ auto inverse_return_mode = (
401
+ reapply_views ? at::functionalization::InverseReturnMode::ViewOrScatterInverse
402
+ : at::functionalization::InverseReturnMode::NeverView
403
+ );
404
+ auto compute_reference_meta =
405
+ {view_tensor_name}.key_set().has_backend(c10::BackendComponent::XLABit) ||
406
+ {view_tensor_name}.key_set().has_backend(c10::BackendComponent::LazyBit);
407
+ {return_type} reference_tensor_output;
408
+ if (compute_reference_meta) {{
409
+ {meta_conversion_str}
410
+ at::AutoDispatchSkipFunctionalize func_guard;
411
+ c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
412
+ reference_tensor_output = at::_ops::{noop_api_name}::call({', '.join(meta_call_args)});
413
+ }}
414
+ {return_type} tmp_output;
415
+ {{
416
+ at::AutoDispatchSkipFunctionalize guard;
417
+ if (reapply_views) {{
418
+ tmp_output = at::_ops::{noop_api_name}::call({', '.join(view_redispatch_args)});
419
+ }} else {{
420
+ tmp_output = at::_ops::{api_name}::call({', '.join(view_redispatch_args)});
421
+ }}
422
+ }}
423
+ at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
424
+ {forward_lambda.decl()} {{
425
+ if (reapply_views) {{
426
+ return {forward_lambda.inner_call(reapply_views=True)}
427
+ }} else {{
428
+ return {forward_lambda.inner_call(reapply_views=False)}
429
+ }}
430
+ }},
431
+ {reverse_lambda.decl()} {{
432
+ return {reverse_lambda.inner_call()}
433
+ }},
434
+ /*is_multi_output=*/{str(is_multi_output_view).lower()}
435
+ );
436
+ auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, {view_tensor_name}, view_meta);
437
+ // See Note [Propagating strides in the functionalization pass]
438
+ if (compute_reference_meta) {{
439
+ at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output);
440
+ }}
441
+ return out;
442
+ }}
443
+ """
444
+
445
+
446
+ def maybe_create_output(f: NativeFunction, var_name: str) -> str:
447
+ if len(f.func.returns) == 0:
448
+ return ""
449
+ return_type = dispatcher.returns_type(f.func.returns).remove_const_ref().cpp_type()
450
+ return f"{return_type} {var_name} = "
451
+
452
+
453
+ # Given a NativeFunction, and a variable name corresponding to the output of redispatching on the function,
454
+ # this returns two lists of names, consisting of:
455
+ # - the names of returns corresponding to the original (mutable) inputs of the outer function
456
+ # - the names of returns corresponding to the (immutable) outputs of the inner redispatched function
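+ # Illustrative example (added for clarity): for an out= op such as
+ #   add.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
+ # the first list is ["out"] and the second is empty, whereas for the corresponding
+ # functional op the single return is non-aliased and lands in the second list.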
457
+ def get_mutable_redispatch_return_names(
458
+ f: NativeFunction, inner_return_var: str
459
+ ) -> Tuple[List[str], List[str]]:
460
+ aliased_returns = []
461
+ non_aliased_returns = []
462
+ for i, name in enumerate(f.func.aliased_return_names()):
463
+ if name is not None:
464
+ aliased_returns.append(name)
465
+ else:
466
+ non_aliased_returns.append(
467
+ inner_return_var
468
+ if len(f.func.returns) == 1
469
+ else f"std::get<{i}>({inner_return_var})"
470
+ )
471
+ return aliased_returns, non_aliased_returns
472
+
473
+
474
+ # When functionalization "no-op's" and redispatches on a mutable operator, we need to take care so that:
475
+ # - For fresh outputs, we return the result of the redispatch (without wrapping outputs)
476
+ # - For outputs that were aliased to inputs, we return the inputs directly (since some of them might have been wrapped)
477
+ def return_from_mutable_noop_redispatch(
478
+ f: NativeFunction, inner_return_var: str
479
+ ) -> str:
480
+ aliased, non_aliased = get_mutable_redispatch_return_names(f, inner_return_var)
481
+ # Just get all of the return names, and immediately return them
482
+ return return_str(f.func.returns, aliased + non_aliased)
483
+
484
+
485
+ def wrap_propagate_mutations_and_return(
486
+ f: NativeFunction, functional_op: NativeFunction, inner_return_var: str
487
+ ) -> str:
488
+ mutable_arg_names = f.func.arguments.mutable_arg_names()
489
+ (
490
+ aliased_outer_rets,
491
+ non_aliased_outer_rets,
492
+ ) = get_mutable_redispatch_return_names(f, inner_return_var)
493
+ _, non_aliased_inner_rets = get_mutable_redispatch_return_names(
494
+ functional_op, inner_return_var
495
+ )
496
+ # The outer function may have a mix of aliased and non-aliased outputs,
497
+ # But the inner functional op that we're transforming to should only have non-aliased outputs
498
+ assert len(mutable_arg_names) + len(non_aliased_outer_rets) == len(
499
+ non_aliased_inner_rets
500
+ )
501
+
502
+ # First, take all of the newly created outputs from the inner call and wrap them into functional tensors
503
+ updates = []
504
+ non_aliased_wrapped_ret_names = []
505
+ for i, inner_ret in enumerate(
506
+ non_aliased_inner_rets[: len(non_aliased_outer_rets)]
507
+ ):
508
+ ret_name = f"output_{i}"
509
+ updates.append(
510
+ f"""\
511
+ auto output_{i} = at::functionalization::impl::to_functional_tensor({inner_ret});"""
512
+ )
513
+ non_aliased_wrapped_ret_names.append(ret_name)
514
+
515
+ # Next, take all of the mutated outputs from the inner call corresponding to mutated inputs,
516
+ # and propagate the mutations
517
+ for outer_arg, inner_ret in zip(
518
+ mutable_arg_names, non_aliased_inner_rets[len(non_aliased_outer_rets) :]
519
+ ):
520
+ updates.append(
521
+ f"""\
522
+ at::functionalization::impl::propagate_xla_data({outer_arg}, {inner_ret});
523
+ at::functionalization::impl::replace_({outer_arg}, {inner_ret});
524
+ at::functionalization::impl::commit_update({outer_arg});
525
+ at::functionalization::impl::sync({outer_arg});"""
526
+ )
527
+
528
+ # Finally, we return:
529
+ # - Any mutable arguments that are also returned
530
+ # - Any immutable returns that were created wrapping the output from the inner call
531
+ returns_str = return_str(
532
+ f.func.returns, aliased_outer_rets + non_aliased_wrapped_ret_names
533
+ )
534
+ updates_str = "\n".join(updates)
535
+ return f"""\
536
+ {updates_str}
537
+ {returns_str}"""
538
+
539
+
540
+ # Generates the Functionalization kernel for:
541
+ # - mutation ops (inplace and out= ops)
542
+ @with_native_function_and
543
+ def emit_inplace_functionalization_body(
544
+ f: NativeFunction, g: NativeFunctionsGroup
545
+ ) -> str:
546
+ # mutation case
547
+ assert modifies_arguments(f)
548
+
549
+ dispatcher_sig = DispatcherSignature.from_schema(f.func)
550
+
551
+ unwrap_tensor_args_str, unwrapped_args_ctx = unwrap_tensor_args(
552
+ dispatcher_sig, is_view_op=False
553
+ )
554
+
555
+ mutated_names = [
556
+ a.name
557
+ for a in f.func.arguments.flat_all
558
+ if a.type.is_tensor_like() and a.annotation is not None
559
+ ]
560
+ non_mutated_names = [
561
+ a.name
562
+ for a in f.func.arguments.flat_all
563
+ if a.type.is_tensor_like() and a.annotation is None
564
+ ]
565
+ non_mutated_tensor_names = [
566
+ a.name
567
+ for a in f.func.arguments.flat_all
568
+ if a.type == BaseType(BaseTy.Tensor) and a.annotation is None
569
+ ]
570
+ # all mutable inputs must be functional tensors in order to participate in functionalization
571
+ check_all_mutated_args_are_functional = " && ".join(
572
+ ["true"]
573
+ + [
574
+ f"at::functionalization::impl::isFunctionalTensor({a})"
575
+ for a in mutated_names
576
+ ]
577
+ )
578
+ check_any_non_mutated_args_are_functional = " || ".join(
579
+ ["false"]
580
+ + [
581
+ f"at::functionalization::impl::isFunctionalTensor({a})"
582
+ for a in non_mutated_names
583
+ ]
584
+ )
585
+
586
+ check_any_non_mutated_tensors_are_xla = " || ".join(
587
+ ["false"]
588
+ + [
589
+ f"{a}.device().type() == c10::DeviceType::XLA"
590
+ for a in non_mutated_tensor_names
591
+ ]
592
+ )
593
+ # These are used in the cases where we don't functionalize and redispatch to the inplace op
594
+ # case 1: we hit an inplace op that doesn't have an out-of-place equivalent
595
+ # case 2: we hit an inplace op but our inputs are not functional tensors (in which case our kernel just no-ops)
596
+ inplace_exprs = [
597
+ e.expr
598
+ for e in translate(unwrapped_args_ctx, dispatcher_sig.arguments(), method=False)
599
+ ]
600
+
601
+ # call the out-of-place variant of the op
602
+ return_type = (
603
+ dispatcher.returns_type(g.functional.func.returns).remove_const_ref().cpp_type()
604
+ )
605
+ functional_sig = DispatcherSignature.from_schema(g.functional.func)
606
+ functional_exprs = [
607
+ e.expr
608
+ for e in translate(unwrapped_args_ctx, functional_sig.arguments(), method=False)
609
+ ]
610
+
611
+ if f.func.is_out_fn():
612
+ mutable_input_post_processing = "\n".join(
613
+ [
614
+ f"""
615
+ at::functionalization::impl::replace_(
616
+ {a.name}, {'std::get<' + str(i) + '>(tmp_output)' if len(f.func.returns) > 1 else 'tmp_output'});
617
+ at::functionalization::impl::commit_update({a.name});"""
618
+ for (i, a) in enumerate(f.func.arguments.out)
619
+ if a.annotation and a.annotation.is_write and a.type.is_tensor_like()
620
+ ]
621
+ )
622
+ else:
623
+ mutable_input_post_processing = "\n".join(
624
+ [
625
+ f"""
626
+ at::functionalization::impl::replace_({a.name}, tmp_output);
627
+ at::functionalization::impl::commit_update({a.name});"""
628
+ for a in f.func.arguments.flat_all
629
+ if a.annotation and a.annotation.is_write and a.type.is_tensor_like()
630
+ ]
631
+ )
632
+
633
+ meta_conversion_str, meta_call_ctx = convert_to_meta_tensors(dispatcher_sig)
634
+ # We don't want to run the inplace meta func for ops like .set_(), because:
635
+ # (1) they're unnecessary: inplace meta checks are only useful for ops like add_(),
636
+ # where broadcasting will work for the out-of-place case but should fail on the inplace call
637
+ # (2) They'll also fail without adding extra infra: we'd need to convert the input storage argument
638
+ # into a meta storage
639
+ any_storage_args = any(
640
+ a.type == BaseType(BaseTy.Storage) for a in f.func.arguments.flat_all
641
+ )
642
+
643
+ return f"""
644
+ {dispatcher_sig.defn(name=wrapper_name(f.func), is_redispatching_fn=True)} {{
645
+ if ({str(not any_storage_args and f.func.kind() == SchemaKind.inplace).lower()}) {{
646
+ // Before converting the mutable op to its functional variant, run meta tensors through the original op.
647
+ // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
648
+ // (We can only do this for inplace ops today though, because they technically all support meta tensors).
649
+ {meta_conversion_str}
650
+ at::AutoDispatchSkipFunctionalize func_guard;
651
+ c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
652
+ at::_ops::{f.func.name.unambiguous_name()}::call({', '.join(a.name for a in meta_call_ctx)});
653
+ }}
654
+ {unwrap_tensor_args_str}
655
+ if (!({check_all_mutated_args_are_functional})) {{
656
+ // We want to disable this check if there are any XLA tensors.
657
+ // cpu_tensor.copy_(xla_tensor) is valid code.
658
+ if (!({check_any_non_mutated_tensors_are_xla}) && ({check_any_non_mutated_args_are_functional})) {{
659
+ // case 1: trying to mutate a non functional tensor with a functional tensor is an error
660
+ TORCH_INTERNAL_ASSERT(false,
661
+ "mutating a non-functional tensor with a functional tensor is not allowed.",
662
+ " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
663
+ }} else {{
664
+ // case 2: arguments are not functional tensors, so we no-op and redispatch.
665
+ at::AutoDispatchSkipFunctionalize guard;
666
+ {maybe_create_output(f, 'tmp_output')}at::_ops::{f.func.name.unambiguous_name()}::call({', '.join(inplace_exprs)});
667
+ {return_from_mutable_noop_redispatch(f, 'tmp_output')}
668
+ }}
669
+ }} else {{
670
+ {return_type} tmp_output;
671
+ {{
672
+ at::AutoDispatchSkipFunctionalize guard;
673
+ tmp_output = at::_ops::{g.functional.func.name.unambiguous_name()}::call({', '.join(functional_exprs)});
674
+ }}
675
+ {wrap_propagate_mutations_and_return(f, g.functional, 'tmp_output')}
676
+ }}
677
+ }}"""
678
+
679
+
680
+ # The below functions generate RegisterFunctionalization.cpp
681
+ # These files provide the kernels that run the functionalization pass, which can be opted into
682
+ # per backend (e.g. XLA or Vulkan), or as a composable transform (functionalize() in functorch).
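+ # The registrations emitted by emit_registration_helper() below take the form
+ #   m.impl("<op name>", TORCH_FN(functionalization::<wrapper name>));
+ # (descriptive note added for clarity; the placeholders are filled in per operator).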
683
+
684
+
685
+ # See Note [Functionalization Pass: View Inverses].
686
+ def gen_functionalization_view_inverse_declaration(
687
+ selector: SelectiveBuilder, g: NativeFunctionsViewGroup
688
+ ) -> Optional[str]:
689
+ # For every (non-composite) view op, we need a corresponding "inverse view" function.
690
+ # This generates the declarations so we get a good compiler error when someone adds a new view.
691
+ @with_native_function
692
+ def emit_decl_helper(g: NativeFunctionsViewGroup) -> Optional[str]:
693
+ if g.view.has_composite_implicit_autograd_kernel:
694
+ return None
695
+ view_inverse_sig = ViewInverseSignature(g)
696
+ return view_inverse_sig.decl()
697
+
698
+ return emit_decl_helper(g)
699
+
700
+
701
+ def gen_functionalization_registration(
702
+ selector: SelectiveBuilder,
703
+ g: Union[NativeFunction, NativeFunctionsGroup, NativeFunctionsViewGroup],
704
+ composite_implicit_autograd_index: BackendIndex,
705
+ ) -> List[str]:
706
+ @with_native_function
707
+ def emit_registration_helper(f: NativeFunction) -> str:
708
+ assert not f.has_composite_implicit_autograd_kernel
709
+ registration_str = f"TORCH_FN(functionalization::{wrapper_name(f.func)})"
710
+ return f'm.impl("{f.func.name}", {registration_str});'
711
+
712
+ # Don't generate kernels in mobile build
713
+ if not selector.include_all_operators:
714
+ return []
715
+
716
+ if isinstance(g, NativeFunctionsViewGroup):
717
+ # functionalization needs to register kernels for view + view_inplace ops
718
+ # See Note [Functionalization <> torch.Tensor constructor]
719
+ if str(g.view.func.name) == "lift_fresh":
720
+ return []
721
+ view_str = []
722
+ if not g.view.has_composite_implicit_autograd_kernel:
723
+ view_str.append(emit_registration_helper(g.view))
724
+ if (
725
+ g.view_inplace is not None
726
+ and not g.view_inplace.has_composite_implicit_autograd_kernel
727
+ ):
728
+ assert g.view_inplace.is_view_op
729
+ view_str.append(emit_registration_helper(g.view_inplace))
730
+ return view_str
731
+
732
+ elif isinstance(g, NativeFunctionsGroup):
733
+ # Gets a hand-written functionalization kernel
734
+ if g.inplace is not None and str(g.inplace.func.name) == "set_.source_Tensor":
735
+ fns = []
736
+ else:
737
+ fns = list(g.functions())
738
+ else:
739
+ if str(g.func.name) in MUTABLE_OPS_NOT_USING_FUNCTIONALIZATION:
740
+ return []
741
+ fns = [g]
742
+
743
+ registrations = []
744
+ for f in fns:
745
+ if f.has_composite_implicit_autograd_kernel:
746
+ continue
747
+ if str(f.func.name) == "lift":
748
+ # See Note [Functionalization <> torch.Tensor constructor]
749
+ return []
750
+ if str(f.func.name) == "resize_":
751
+ # See Note [resize_ in Functionalization]
752
+ return []
753
+ if str(f.func.name.name) != "set_":
754
+ assert not f.is_view_op
755
+ # functionalization needs to generate and register kernels for inplace ops.
756
+ # We *also* need to directly register CompositeImplicitAutograd kernels
757
+ # so that they decompose properly before functionalization.
758
+ if modifies_arguments(f):
759
+ registrations.append(emit_registration_helper(f))
760
+ return registrations
761
+
762
+
763
+ def gen_functionalization_definition(
764
+ selector: SelectiveBuilder,
765
+ # Note: Ideally this code should never have to look at NativeFunction
766
+ # (and instead only need to operate on grouped NativeFunctions).
767
+ # The only reason currently is because we need to emit direct dispatch registrations
768
+ # For CompositeImplicitAutograd operators, which are potentially ungrouped.
769
+ g: Union[NativeFunction, NativeFunctionsGroup, NativeFunctionsViewGroup],
770
+ ) -> List[str]:
771
+ # Don't generate kernels in mobile build
772
+ if not selector.include_all_operators:
773
+ return []
774
+
775
+ if isinstance(g, NativeFunctionsViewGroup):
776
+ # Case 1: emit view -> view_copy kernels for the functionalization pass
777
+ view_defs = []
778
+ if not g.composite:
779
+ # invariant: NativeFunctionsViewGroup's always have a view_copy operator
780
+ # if the view is not composite (implicit autograd)
781
+ assert g.view_copy is not None
782
+ view_defs.append(emit_view_functionalization_body(g, view_inplace=False))
783
+ if g.view_inplace is not None:
784
+ view_defs.append(emit_view_functionalization_body(g, view_inplace=True))
785
+ return view_defs
786
+ elif isinstance(g, NativeFunction):
787
+ # Invariant: all mutable operators that we need to handle in functionalization
788
+ # should have been properly grouped up.
789
+ # TODO: The below ops all have "problematic" schemas that prevent them from
790
+ # getting functionalized. Instead of bending over backwards to get things to work,
791
+ # I think we should either:
792
+ # (1) fix their schemas (BC-breaking)
793
+ # (2) hand-write their functionalization kernels
794
+ if (
795
+ str(g.func.name) not in MUTABLE_OPS_NOT_USING_FUNCTIONALIZATION
796
+ and str(g.func.name.name) not in MUTABLE_OPS_NOT_USING_FUNCTIONALIZATION
797
+ ):
798
+ assert g.has_composite_implicit_autograd_kernel or not modifies_arguments(g)
799
+ return []
800
+ else:
801
+ # Case 2: emit inplace -> out-of-place kernels for the functionalization pass
802
+ mutation_defs = []
803
+ mutation_defs.append(emit_inplace_functionalization_body(g.out, g))
804
+ if g.inplace is not None:
805
+ mutation_defs.append(emit_inplace_functionalization_body(g.inplace, g))
806
+ if g.mutable is not None:
807
+ mutation_defs.append(emit_inplace_functionalization_body(g.mutable, g))
808
+ return mutation_defs
809
+ return []
llmeval-env/lib/python3.10/site-packages/torchgen/gen_lazy_tensor.py ADDED
@@ -0,0 +1,605 @@
1
+ import argparse
2
+ import os
3
+ import pathlib
4
+ import re
5
+ from collections import Counter, namedtuple
6
+ from typing import (
7
+ Any,
8
+ Callable,
9
+ Dict,
10
+ Iterable,
11
+ Iterator,
12
+ List,
13
+ Optional,
14
+ Sequence,
15
+ Tuple,
16
+ Type,
17
+ Union,
18
+ )
19
+
20
+ import yaml
21
+
22
+ import torchgen.dest as dest
23
+
24
+ from torchgen.api.lazy import setValueT
25
+ from torchgen.api.types import BaseCppType
26
+ from torchgen.dest.lazy_ir import GenLazyIR, GenLazyNativeFuncDefinition, GenTSLazyIR
27
+ from torchgen.gen import get_grouped_native_functions, parse_native_yaml
28
+
29
+ from torchgen.model import NativeFunction, NativeFunctionsGroup, OperatorName
30
+ from torchgen.selective_build.selector import SelectiveBuilder
31
+ from torchgen.utils import concatMap, FileManager, NamespaceHelper
32
+ from torchgen.yaml_utils import YamlLoader
33
+ from .gen_backend_stubs import (
34
+ error_on_missing_kernels,
35
+ gen_dispatcher_registrations,
36
+ gen_dispatchkey_nativefunc_headers,
37
+ parse_backend_yaml,
38
+ )
39
+
40
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
41
+ #
42
+ # Lazy Tensor Codegen
43
+ #
44
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
45
+ # Overview
46
+ # ~~~~~~~~
47
+ #
48
+ # This codegen script builds on existing data models and helpers used
49
+ # by all ATen backends, and adds new functionality specific to lazy
50
+ # tensor backends.
51
+ #
52
+ # Inputs:
53
+ # - <backend>_native_functions.yaml: controls which operators are
54
+ # supported by the backend.
55
+ #
56
+ # Outputs:
57
+ # (for all backends)
58
+ # <DispatchKey>Ir.h defines Lazy IR classes to be constructed during tracing
59
+ # - opt-in: also generate 'lowering' methods for the TorchScript backend only
60
+ # <DispatchKey>NativeFunctions.cpp defines implementations of native functions which perform lazy tracing
61
+ # - opt-in: 'full_codegen' section of backend yaml; 'supported' section omits these implementations
62
+ # <DispatchKey>NativeFunctions.h declares implementations of native functions for both 'supported' and 'full_codegen'
63
+ # ops
64
+ #
65
+ # Register<DispatchKey>.cpp registers all op implementations with the dispatcher
66
+ # RegisterAutograd<DispatchKey>.cpp registers all autograd implementations with the dispatcher
67
+ #
68
+ # Validation Helpers:
69
+ # - Shape Inference: errs if any ops in backend yaml require shape inference not provided by meta kernels or
70
+ # implementations in torch/csrc/lazy/core/shape_inference.*
71
+ # - native function impls: errs if any 'supported' ops do not have an implementation defined in the backend
72
+ # (non-codegen) implementation file
73
+ #
74
+ #
75
+ # About the Data Model
76
+ # ~~~~~~~~~~~~~~~~~~~~
77
+ #
78
+ # Modeled after ATen codegen, the first step is to parse yaml and build a data model for the operators
79
+ # we care about. In this case, the <backend>_native_functions yaml defines a subset of the core operators
80
+ # (defined in more detail in the main native_functions.yaml), which will be supported by your backend.
81
+ # Backends can list ops in two categories:
82
+ # - `supported` ops require hand-implementations but still get codegenned declarations and registrations
83
+ # - `full_codegen` ops get implementations (and IR classes) generated too
84
+ #
85
+ # Each native function is modeled as an object with a schema, and each schema has objects representing their
86
+ # arguments. Much of the codegen is manipulation of the arguments and their types. For example, lazy tensor
87
+ # backends need to transform 'at::Tensor' arguments into 'lazy::Value' objects, as well as replacing reference
88
+ # types (stringref) with actual string objects, and this is done by manipulating the data model objects.
89
+ # - see api/lazy.py for the lazy data model
90
+ #
91
+ # Once the data model is set up, the rest of this script processes a number of templates for output CPP file
92
+ # and fills in the template values using helpers in `dest/lazy_ir.py` and `dest/lazy_ts_lowering.py`. These
93
+ # helpers mostly iterate over functions and their arguments, outputting different c++ snippets.
94
+ #
95
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
96
+
97
+
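To make the inputs described above concrete, here is a minimal, hypothetical backend yaml and the keys this script (together with parse_backend_yaml from gen_backend_stubs) looks for; the backend name, namespace, and operator lists are illustrative placeholders, not taken from any real backend.

import yaml

# Hypothetical <backend>_native_functions.yaml. 'backend', 'cpp_namespace' and
# 'supported' are consumed by parse_backend_yaml; 'full_codegen', 'non_native'
# and 'ir_gen' are popped by parse_native_functions_keys below.
example_backend_yaml = """
backend: MyLazy
cpp_namespace: torch::my_lazy
supported:
  - nonzero        # hand-written kernel; codegen only declares and registers it
full_codegen:
  - add.Tensor     # IR class + lazy native function implementation are generated
  - mul.Tensor
"""

cfg = yaml.safe_load(example_backend_yaml)
full_codegen = cfg.pop("full_codegen", [])
non_native = cfg.pop("non_native", [])
ir_gen = cfg.pop("ir_gen", [])
print(full_codegen)  # ['add.Tensor', 'mul.Tensor']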
98
+ # Parses the external backend's yaml, and adds a new BackendIndex for the backend's dispatch key.
99
+ # Returns a Tuple of (backend_key, autograd_key, cpp_namespace, updated BackendIndex mapping, full_codegen)
100
+ ParsedExternalYaml = namedtuple(
101
+ "ParsedExternalYaml",
102
+ ["backend_key", "autograd_key", "cpp_namespace", "backend_indices", "full_codegen"],
103
+ )
104
+
105
+
106
+ def parse_native_functions_keys(
107
+ backend_yaml_path: str,
108
+ grouped_native_functions: Sequence[Union[NativeFunction, NativeFunctionsGroup]],
109
+ ) -> Tuple[List[OperatorName], List[Any], List[OperatorName]]:
110
+ native_functions_map: Dict[OperatorName, NativeFunction] = {
111
+ f.func.name: f
112
+ for f in concatMap(
113
+ lambda f: [f] if isinstance(f, NativeFunction) else list(f.functions()),
114
+ grouped_native_functions,
115
+ )
116
+ }
117
+
118
+ with open(backend_yaml_path) as f:
119
+ yaml_values = yaml.load(f, Loader=YamlLoader)
120
+ assert isinstance(yaml_values, dict)
121
+
122
+ full_codegen = yaml_values.pop("full_codegen", [])
123
+ non_native = yaml_values.pop("non_native", [])
124
+ ir_gen = yaml_values.pop("ir_gen", [])
125
+ assert isinstance(full_codegen, list)
126
+ assert isinstance(non_native, list)
127
+ assert isinstance(ir_gen, list)
128
+ full_codegen_opnames = [OperatorName.parse(name) for name in full_codegen]
129
+ ir_gen_opnames = [OperatorName.parse(name) for name in ir_gen]
130
+ return full_codegen_opnames, non_native, ir_gen_opnames
131
+
132
+
133
+ def validate_shape_inference_header(
134
+ shape_inference_hdr: str, expected_shape_infr_decls: List[str]
135
+ ) -> None:
136
+ try:
137
+ with open(shape_inference_hdr) as f:
138
+ shape_infr_decls = f.read()
139
+ shape_infr_decl_lines = set(shape_infr_decls.split("\n"))
140
+ except OSError as e:
141
+ raise AssertionError(
142
+ f"Unable to read from the specified shape_inference_hdr file: {shape_inference_hdr}"
143
+ ) from e
144
+
145
+ shape_infr_regex = r"compute_shape_(\w+)"
146
+ actual_shape_infr_name_counts = Counter(
147
+ re.findall(shape_infr_regex, shape_infr_decls)
148
+ )
149
+ # TODO(whc) add a check for shape inference functions that have meta kernels implemented and should be retired.
150
+
151
+ missing_decls = [
152
+ decl for decl in expected_shape_infr_decls if decl not in shape_infr_decl_lines
153
+ ]
154
+ if missing_decls:
155
+ raise Exception(
156
+ f"""Missing shape inference function.\n
157
+ Please declare this function in {shape_inference_hdr}:\n
158
+ and implement it in the corresponding shape_inference.cpp file.\n
159
+ {os.linesep.join(missing_decls)}"""
160
+ )
161
+
162
+
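A small sketch of how this check behaves, assuming validate_shape_inference_header is imported from torchgen.gen_lazy_tensor; the header contents and declarations below are made up for illustration.

import tempfile
from torchgen.gen_lazy_tensor import validate_shape_inference_header

# Write a throwaway header containing one compute_shape_* declaration.
decl = "TORCH_API std::vector<torch::lazy::Shape> compute_shape_my_op(const at::Tensor& self);"
with tempfile.NamedTemporaryFile("w", suffix=".h", delete=False) as f:
    f.write(decl + "\n")
    hdr = f.name

validate_shape_inference_header(hdr, [decl])  # passes: the declaration is found verbatim
# Passing a declaration that is not present raises, listing the missing signature:
# validate_shape_inference_header(hdr, ["TORCH_API void compute_shape_other();"])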
163
+ # Some helper functions for the codegen.
164
+ def get_ltc_helper_fns() -> str:
165
+ return """\
166
+ at::Tensor to_meta(const at::Tensor& tensor) {
167
+ // undefined tensors can't be converted to the meta device, since they don't have sizes/strides
168
+ if (!tensor.defined()) return tensor;
169
+ auto out = at::native::empty_strided_meta_symint(tensor.sym_sizes(), tensor.sym_strides(), \
170
+ /*dtype=*/c10::make_optional(tensor.scalar_type()), /*layout=*/c10::make_optional(tensor.layout()), \
171
+ /*device=*/c10::make_optional(c10::Device(c10::kMeta)), /*pin_memory=*/c10::nullopt);
172
+ // needs to handle wrapped numbers, so dtype promotion works properly.
173
+ if (tensor.unsafeGetTensorImpl()->is_wrapped_number()) {
174
+ out.unsafeGetTensorImpl()->set_wrapped_number(true);
175
+ }
176
+ return out;
177
+ }
178
+ c10::optional<at::Tensor> to_meta(const c10::optional<at::Tensor>& tensor) {
179
+ if (tensor.has_value()) {
180
+ return to_meta(*tensor);
181
+ }
182
+ return c10::nullopt;
183
+ }
184
+
185
+ std::vector<at::Tensor> to_meta(at::ITensorListRef t_list) {
186
+ std::vector<at::Tensor> outs;
187
+ outs.reserve(t_list.size());
188
+ for (const auto& tensor : t_list) {
189
+ outs.push_back(to_meta(tensor));
190
+ }
191
+ return outs;
192
+ }
193
+ """
194
+
195
+
196
+ class default_args:
197
+ node_base: str = "Node"
198
+ node_base_hdr: Optional[str] = None
199
+ shape_inference_hdr: str = "torch/csrc/lazy/core/shape_inference.h"
200
+ tensor_class: str = "torch::lazy::LazyTensor"
201
+ tensor_class_hdr: str = "torch/csrc/lazy/core/tensor.h"
202
+ lazy_ir_generator: Type[GenLazyIR] = GenLazyIR
203
+ native_func_definition_generator: Type[
204
+ GenLazyNativeFuncDefinition
205
+ ] = GenLazyNativeFuncDefinition
206
+ backend_name: str = "TorchScript"
207
+
208
+
209
+ def main() -> None:
210
+ parser = argparse.ArgumentParser(description="Generate Lazy Tensor backend files")
211
+ parser.add_argument(
212
+ "-s",
213
+ "--source-yaml",
214
+ "--source_yaml",
215
+ help="path to source yaml file containing operator external definitions",
216
+ )
217
+ parser.add_argument("-o", "--output-dir", "--output_dir", help="output directory")
218
+ parser.add_argument(
219
+ "--dry-run", "--dry_run", type=bool, default=False, help="do not write any files; only report what would be generated"
220
+ )
221
+ parser.add_argument(
222
+ "--impl-path",
223
+ "--impl_path",
224
+ type=str,
225
+ default=None,
226
+ help="path to the source C++ file containing kernel definitions",
227
+ )
228
+ parser.add_argument(
229
+ "--gen-ts-lowerings",
230
+ "--gen_ts_lowerings",
231
+ action="store_true",
232
+ help="Generate TorchScript lowerings in addition to Lazy IR and NativeFunctions",
233
+ )
234
+ parser.add_argument(
235
+ "--node-base",
236
+ "--node_base",
237
+ type=str,
238
+ default=default_args.node_base,
239
+ help="Name of backend specific custom Lazy IR Node base class",
240
+ )
241
+ parser.add_argument(
242
+ "--node-base-hdr",
243
+ "--node_base_hdr",
244
+ type=str,
245
+ default=default_args.node_base_hdr,
246
+ help="Path to header file defining custom Lazy IR Node base class",
247
+ )
248
+ parser.add_argument(
249
+ "--shape-inference-hdr",
250
+ "--shape_inference_hdr",
251
+ type=str,
252
+ default=default_args.shape_inference_hdr,
253
+ help="Path to header file defining custom Lazy shape inference functions",
254
+ )
255
+ parser.add_argument(
256
+ "--tensor-class",
257
+ "--tensor_class",
258
+ type=str,
259
+ default=default_args.tensor_class,
260
+ help="Name of backend specific custom Lazy Tensor class",
261
+ )
262
+ parser.add_argument(
263
+ "--tensor-class-hdr",
264
+ "--tensor_class_hdr",
265
+ type=str,
266
+ default=default_args.tensor_class_hdr,
267
+ help="Path to header file defining custom Lazy Tensor class",
268
+ )
269
+ parser.add_argument(
270
+ "--backend-name",
271
+ "--backend_name",
272
+ type=str,
273
+ default=default_args.backend_name,
274
+ help="Name of the backend to generate",
275
+ )
276
+ options = parser.parse_args()
277
+
278
+ # Assumes that this file lives at PYTORCH_ROOT/torchgen/gen_lazy_tensor.py
279
+ torch_root = pathlib.Path(__file__).parent.parent.parent.absolute()
280
+ aten_path = str(torch_root / "aten" / "src" / "ATen")
281
+ lazy_ir_generator: Type[GenLazyIR] = default_args.lazy_ir_generator
282
+ if options.gen_ts_lowerings:
283
+ lazy_ir_generator = GenTSLazyIR
284
+ native_func_definition_generator: Type[
285
+ GenLazyNativeFuncDefinition
286
+ ] = default_args.native_func_definition_generator
287
+
288
+ run_gen_lazy_tensor(
289
+ aten_path,
290
+ options.source_yaml,
291
+ options.output_dir,
292
+ options.dry_run,
293
+ options.impl_path,
294
+ options.node_base,
295
+ options.node_base_hdr,
296
+ options.tensor_class,
297
+ options.tensor_class_hdr,
298
+ options.shape_inference_hdr,
299
+ lazy_ir_generator,
300
+ native_func_definition_generator,
301
+ options.backend_name,
302
+ )
303
+
304
+
305
+ def run_gen_lazy_tensor(
306
+ aten_path: str,
307
+ source_yaml: str,
308
+ output_dir: str,
309
+ dry_run: bool,
310
+ impl_path: Optional[str],
311
+ node_base: str = default_args.node_base,
312
+ node_base_hdr: Optional[str] = default_args.node_base_hdr,
313
+ tensor_class: str = default_args.tensor_class,
314
+ tensor_class_hdr: str = default_args.tensor_class_hdr,
315
+ shape_inference_hdr: str = default_args.shape_inference_hdr,
316
+ lazy_ir_generator: Type[GenLazyIR] = default_args.lazy_ir_generator,
317
+ native_func_definition_generator: Type[
318
+ GenLazyNativeFuncDefinition
319
+ ] = default_args.native_func_definition_generator,
320
+ # build_in_tree is true for TS backend and affects include paths
321
+ build_in_tree: bool = False,
322
+ # per_operator_headers changes whether ATen/Functions.h or individual operator headers are used
323
+ # it must match how ATen was built
324
+ per_operator_headers: bool = False,
325
+ backend_name: str = default_args.backend_name,
326
+ gen_forced_fallback_code: bool = False,
327
+ use_lazy_shape: bool = True,
328
+ # the following arguments are temporary customization points for xla backend migration.
329
+ # do not rely on them otherwise, they should be removed once migration is complete
330
+ backend_namespace: str = "torch::lazy",
331
+ get_tensorlist: str = "GetTensorList",
332
+ get_tensor_or_wrap_number: str = "GetLtcTensorOrCreateForWrappedNumber",
333
+ try_get_tensor: str = "TryGetLtcTensor",
334
+ metrics_counter: str = 'TORCH_LAZY_FN_COUNTER("lazy::")',
335
+ create_tensor: str = "LazyTensor::Create",
336
+ create_from_first_tensor: bool = False,
337
+ create_aten_from_ltc_tensor: str = "torch::lazy::CreateAtenFromLtcTensor",
338
+ tuple_aten_from_ltc_tensors: str = "torch::lazy::TupleAtenFromLtcTensors",
339
+ lazy_value_class: str = "torch::lazy::Value",
340
+ lazy_tensor_ptr: str = "LazyTensorPtr",
341
+ get_device_fn: str = "torch::lazy::GetBackendDevice",
342
+ ) -> None:
343
+ lv_tokens = lazy_value_class.split("::")
344
+ lv_class = lv_tokens[-1]
345
+ lv_ns = "::".join(lv_tokens[:-1])
346
+ setValueT(BaseCppType(lv_ns, lv_class))
347
+ template_dir = os.path.join(aten_path, "templates")
348
+
349
+ def make_file_manager(install_dir: str) -> FileManager:
350
+ return FileManager(
351
+ install_dir=install_dir, template_dir=template_dir, dry_run=dry_run
352
+ )
353
+
354
+ fm = make_file_manager(output_dir)
355
+
356
+ native_yaml_path = os.path.join(aten_path, "native/native_functions.yaml")
357
+ tags_yaml_path = os.path.join(aten_path, "native/tags.yaml")
358
+ parsed_yaml = parse_native_yaml(native_yaml_path, tags_yaml_path)
359
+ native_functions, backend_indices = (
360
+ parsed_yaml.native_functions,
361
+ parsed_yaml.backend_indices,
362
+ )
363
+ grouped_native_functions = get_grouped_native_functions(native_functions)
364
+
365
+ def sort_native_function(f: Union[NativeFunctionsGroup, NativeFunction]) -> str:
366
+ """
367
+ We sort the native functions because of the note in concat_map_codegen.
368
+ TODO(alanwaketan): Remove this sorting hack once all ops are grouped properly.
369
+ """
370
+ func = f.functional.func if isinstance(f, NativeFunctionsGroup) else f.func
371
+ return str(func.name.name)
372
+
373
+ grouped_native_functions = sorted(
374
+ grouped_native_functions, key=sort_native_function
375
+ )
376
+
377
+ parsed_backend_yaml = parse_backend_yaml(
378
+ source_yaml, grouped_native_functions, backend_indices
379
+ )
380
+ backend_key = parsed_backend_yaml.backend_key
381
+ autograd_key = parsed_backend_yaml.autograd_key
382
+ cpp_namespace = parsed_backend_yaml.cpp_namespace
383
+ backend_indices = parsed_backend_yaml.backend_indices
384
+ # the following 3 keys are all processed differently
385
+ # for full_codegen, we generate IR, kernels, etc
386
+ # for ir_gen, we generate only IR
387
+ # non_native is used to register kernels not declared in
388
+ # native_functions.yaml
389
+ full_codegen, non_native, ir_gen = parse_native_functions_keys(
390
+ source_yaml, grouped_native_functions
391
+ )
392
+
393
+ def concat_map_codegen(
394
+ func: Callable[[NativeFunction], Sequence[str]],
395
+ xs: Iterable[Union[NativeFunctionsGroup, NativeFunction]],
396
+ ops_list: List[OperatorName] = full_codegen,
397
+ ) -> Iterator[str]:
398
+ """
399
+ We generate code for the functional variant, which is all we need for IR classes/lowerings/shape inference; we
400
+ only generate additional entries for the inplace variant for the native functions.
401
+ """
402
+
403
+ for x in xs:
404
+ fs = list(x.functions()) if isinstance(x, NativeFunctionsGroup) else [x]
405
+ for f in fs:
406
+ if f.func.name in ops_list:
407
+ yield from func(f)
408
+
409
+ selector = SelectiveBuilder.get_nop_selector()
410
+
411
+ assert backend_key is not None
412
+ class_name = backend_indices[backend_key].native_function_class_name()
413
+
414
+ if impl_path is not None:
415
+ error_on_missing_kernels(
416
+ native_functions,
417
+ backend_indices,
418
+ backend_key,
419
+ autograd_key,
420
+ class_name,
421
+ impl_path,
422
+ full_codegen,
423
+ )
424
+
425
+ """ Validate Shape Inference Definitions
426
+
427
+ Generated lazy native functions all perform shape inference, by first using a meta:: kernel
428
+ if available for that op, and otherwise using a 'compute_shape_{op}' function instead. The generator
429
+ knows the call signature for compute_shape_{op} because it matches the nativefunction (and meta::) signature,
430
+ so it just has to check whether the op is structured and generate a call for one or the other. It's up to the dev
431
+ to supply the missing compute_shape_{op} function, but the codegen at least warns you about this and provides
432
+ the expected signature which can be copy-pasted into shape_inference.h.
433
+
434
+ compute_shape_{op} functions are handwritten and should be replaced over time as ops get ported
435
+ to structured kernels.
436
+
437
+ See torch/csrc/lazy/core/shape_inference.cpp #READ THIS! for more information.
438
+ """
439
+ if shape_inference_hdr is not None:
440
+ expected_shape_infr_decls = list(
441
+ concat_map_codegen(
442
+ dest.GenLazyShapeInferenceDefinition(
443
+ backend_indices[backend_key], tensor_class
444
+ ),
445
+ grouped_native_functions,
446
+ )
447
+ )
448
+
449
+ validate_shape_inference_header(shape_inference_hdr, expected_shape_infr_decls)
450
+ assert class_name is not None
451
+
452
+ # Generate nativefunction declarations
453
+ # Note: eager registration is set to False for the lazy TS backend, as another LTC backend
454
+ # may want to register their own lazy kernels instead of registering the TS ones.
455
+ # The registration will lazily happen when init_ts_backend is called.
456
+ gen_dispatchkey_nativefunc_headers(
457
+ fm,
458
+ class_name,
459
+ cpp_namespace,
460
+ backend_indices,
461
+ grouped_native_functions,
462
+ backend_key,
463
+ autograd_key,
464
+ backend_name,
465
+ )
466
+
467
+ # Generate Dispatcher registrations which hook up the nativefunctions
468
+ for dispatch_key in (
469
+ [backend_key] if autograd_key is None else [backend_key, autograd_key]
470
+ ):
471
+ gen_dispatcher_registrations(
472
+ fm,
473
+ output_dir,
474
+ class_name,
475
+ backend_indices,
476
+ grouped_native_functions,
477
+ backend_key,
478
+ dispatch_key,
479
+ selector,
480
+ build_in_tree=build_in_tree,
481
+ per_operator_headers=per_operator_headers,
482
+ backend_name=backend_name,
483
+ eager_registration=False,
484
+ )
485
+
486
+ # Generate native function impls that build IR nodes
487
+ ns_helper = NamespaceHelper(cpp_namespace)
488
+ fm.write_with_template(
489
+ f"{backend_key}NativeFunctions.cpp",
490
+ "DispatchKeyNativeFunctions.cpp",
491
+ lambda: {
492
+ "includes": [
493
+ f"#include <{path}>"
494
+ for path in [
495
+ tensor_class_hdr,
496
+ shape_inference_hdr,
497
+ "ATen/Functions.h",
498
+ "ATen/native/TensorConversions.h",
499
+ "ATen/NativeFunctions.h",
500
+ "ATen/CompositeExplicitAutogradNonFunctionalFunctions.h",
501
+ "ATen/MetaFunctions.h",
502
+ "ATen/Operators.h",
503
+ "ATen/native/CPUFallback.h",
504
+ "torch/csrc/lazy/core/ir_builder.h",
505
+ "torch/csrc/lazy/core/lazy_graph_executor.h",
506
+ "torch/csrc/lazy/core/metrics.h",
507
+ "torch/csrc/lazy/core/shape.h",
508
+ f"{output_dir}/{backend_key}NativeFunctions.h",
509
+ f"{output_dir}/LazyIr.h",
510
+ ]
511
+ + (
512
+ ["torch/csrc/lazy/ts_backend/ts_eager_fallback.h"]
513
+ if gen_forced_fallback_code
514
+ else []
515
+ )
516
+ ],
517
+ "helper_fns": get_ltc_helper_fns(),
518
+ "native_functions_include": "",
519
+ "namespace_prologue": ns_helper.prologue,
520
+ "namespace_epilogue": ns_helper.epilogue,
521
+ "native_function_definitions": list(
522
+ concat_map_codegen(
523
+ native_func_definition_generator(
524
+ f"{backend_key}NativeFunctions",
525
+ backend_indices[backend_key],
526
+ tensor_class,
527
+ gen_forced_fallback_code,
528
+ backend_namespace,
529
+ get_tensorlist,
530
+ get_tensor_or_wrap_number,
531
+ try_get_tensor,
532
+ metrics_counter,
533
+ create_tensor,
534
+ create_from_first_tensor,
535
+ create_aten_from_ltc_tensor,
536
+ tuple_aten_from_ltc_tensors,
537
+ lazy_tensor_ptr,
538
+ get_device_fn,
539
+ ),
540
+ grouped_native_functions,
541
+ )
542
+ ),
543
+ },
544
+ )
545
+ # Generate IR node classes
546
+ lazy_ir_obj = lazy_ir_generator(
547
+ backend_indices[backend_key], backend_name, node_base, use_lazy_shape
548
+ )
549
+
550
+ fm.write_with_template(
551
+ "LazyIr.h",
552
+ "LazyIr.h",
553
+ lambda: {
554
+ "lazy_ir_sysinc": [
555
+ f"#include <{path}>"
556
+ for path in [
557
+ "ATen/core/Formatting.h",
558
+ "c10/core/ScalarType.h",
559
+ "c10/util/Optional.h",
560
+ "torch/csrc/lazy/core/hash.h",
561
+ "torch/csrc/lazy/core/ir.h",
562
+ "torch/csrc/lazy/core/shape.h",
563
+ "vector",
564
+ ]
565
+ ],
566
+ "lazy_ir_inc": [f'#include "{node_base_hdr}"']
567
+ if node_base_hdr is not None
568
+ else [],
569
+ "ir_declarations": list(
570
+ concat_map_codegen(
571
+ lazy_ir_obj, grouped_native_functions, full_codegen + ir_gen
572
+ )
573
+ ),
574
+ "namespace_prologue": ns_helper.prologue,
575
+ "namespace_epilogue": ns_helper.epilogue,
576
+ },
577
+ )
578
+
579
+ # Generate Non Native IR Node classes
580
+ fm.write_with_template(
581
+ "LazyNonNativeIr.h",
582
+ "LazyNonNativeIr.h",
583
+ lambda: {
584
+ "lazy_non_native_ir_inc": [
585
+ f"#include <{path}>"
586
+ for path in [
587
+ "torch/csrc/lazy/core/ir.h",
588
+ "torch/csrc/lazy/core/ir_builder.h",
589
+ "torch/csrc/lazy/core/internal_ops/ltc_ops.h",
590
+ "torch/csrc/lazy/core/shape_inference.h",
591
+ ]
592
+ + ([node_base_hdr] if node_base_hdr else [])
593
+ if path
594
+ ],
595
+ "non_native_ir_nodes": dest.generate_non_native_lazy_ir_nodes(
596
+ non_native, lazy_ir_obj
597
+ ),
598
+ "namespace_prologue": ns_helper.prologue,
599
+ "namespace_epilogue": ns_helper.epilogue,
600
+ },
601
+ )
602
+
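Out-of-tree lazy backends typically call run_gen_lazy_tensor directly from their own build scripts instead of going through main(); below is a minimal sketch, with every path and name a hypothetical placeholder.

run_gen_lazy_tensor(
    aten_path="/path/to/pytorch/aten/src/ATen",
    source_yaml="/path/to/my_backend/my_lazy_native_functions.yaml",
    output_dir="generated",
    dry_run=False,
    impl_path="/path/to/my_backend/my_lazy_native_functions.cpp",
    node_base="MyLazyNode",
    node_base_hdr="my_backend/csrc/my_lazy_node.h",
    tensor_class=default_args.tensor_class,
    tensor_class_hdr=default_args.tensor_class_hdr,
    shape_inference_hdr=default_args.shape_inference_hdr,
    build_in_tree=False,
    backend_name="MyLazy",
)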
603
+
604
+ if __name__ == "__main__":
605
+ main()
llmeval-env/lib/python3.10/site-packages/torchgen/gen_vmap_plumbing.py ADDED
@@ -0,0 +1,265 @@
1
+ import textwrap
2
+ from dataclasses import dataclass
3
+ from typing import List, Optional, Sequence, Tuple
4
+
5
+ from torchgen.api.translate import translate
6
+ from torchgen.api.types import DispatcherSignature
7
+ from torchgen.context import method_with_native_function
8
+ from torchgen.model import (
9
+ Argument,
10
+ BaseTy,
11
+ BaseType,
12
+ FunctionSchema,
13
+ ListType,
14
+ NativeFunction,
15
+ OptionalType,
16
+ Return,
17
+ SchemaKind,
18
+ Type,
19
+ )
20
+ from torchgen.utils import mapMaybe
21
+
22
+
23
+ def is_tensor(typ: Type) -> bool:
24
+ return isinstance(typ, BaseType) and typ.name == BaseTy.Tensor
25
+
26
+
27
+ def is_optional_tensor(typ: Type) -> bool:
28
+ return isinstance(typ, OptionalType) and is_tensor(typ.elem)
29
+
30
+
31
+ def is_tensor_list(typ: Type) -> bool:
32
+ return isinstance(typ, ListType) and is_tensor(typ.elem)
33
+
34
+
35
+ def unwrap_tensor(name: str, cur_level_var: str) -> List[str]:
36
+ result = f"""\
37
+ Tensor {name}_value;
38
+ optional<int64_t> {name}_bdim;
39
+ std::tie({name}_value, {name}_bdim) = unwrapTensorAtLevel({name}, {cur_level_var});"""
40
+ return textwrap.dedent(result).split("\n")
41
+
42
+
43
+ def unwrap_optional_tensor(name: str, cur_level_var: str) -> List[str]:
44
+ result = f"""\
45
+ optional<Tensor> {name}_value;
46
+ optional<int64_t> {name}_bdim;
47
+ if ({name}) {{
48
+ std::tie({name}_value, {name}_bdim) = unwrapTensorAtLevel({name}.value(), {cur_level_var});
49
+ }}"""
50
+ return textwrap.dedent(result).split("\n")
51
+
52
+
53
+ def gen_unwraps(
54
+ flat_arguments: Sequence[Argument], cur_level_var: str
55
+ ) -> Tuple[str, List[str]]:
56
+ arg_names = [a.name for a in flat_arguments]
57
+ arg_types = [a.type for a in flat_arguments]
58
+
59
+ tensors = [name for typ, name in zip(arg_types, arg_names) if is_tensor(typ)]
60
+ optional_tensors = [
61
+ name for typ, name in zip(arg_types, arg_names) if is_optional_tensor(typ)
62
+ ]
63
+
64
+ unwraps = []
65
+ for tensor in tensors:
66
+ unwraps += unwrap_tensor(tensor, cur_level_var)
67
+
68
+ for opt_tensor in optional_tensors:
69
+ unwraps += unwrap_optional_tensor(opt_tensor, cur_level_var)
70
+ unwrap_code = "\n".join(unwraps)
71
+
72
+ unwrapped_arg_list = []
73
+ for arg in arg_names:
74
+ if arg in tensors or arg in optional_tensors:
75
+ unwrapped_arg_list += [f"{arg}_value", f"{arg}_bdim"]
76
+ else:
77
+ unwrapped_arg_list.append(arg)
78
+ return unwrap_code, unwrapped_arg_list
79
+
80
+
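As a rough illustration of what gen_unwraps produces, the sketch below parses a made-up schema with one Tensor and one optional Tensor argument (assuming torchgen.model.FunctionSchema.parse accepts the same schema strings used in native_functions.yaml):

from torchgen.model import FunctionSchema

schema = FunctionSchema.parse("my_op(Tensor self, Tensor? weight) -> Tensor")
unwrap_code, unwrapped_args = gen_unwraps(schema.arguments.flat_all, "cur_level")
# unwrap_code holds the unwrapTensorAtLevel(...) C++ snippets for `self` and `weight`;
# unwrapped_args == ['self_value', 'self_bdim', 'weight_value', 'weight_bdim']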
81
+ def gen_case_where_all_bdims_are_none(
82
+ outer_sig: DispatcherSignature, schema: FunctionSchema, cur_level_var: str
83
+ ) -> str:
84
+ conditions = []
85
+ flat_args = schema.arguments.flat_all
86
+ for arg in flat_args:
87
+ if not arg.type.is_tensor_like():
88
+ continue
89
+ conditions.append(f"!isBatchedAtLevel({arg.name}, {cur_level_var})")
90
+
91
+ sig = DispatcherSignature.from_schema(schema)
92
+ translated_args = ", ".join(
93
+ e.expr for e in translate(outer_sig.arguments(), sig.arguments())
94
+ )
95
+ return f"""\
96
+ if ({' && '.join(conditions)}) {{
97
+ return at::_ops::{sig.func.name.unambiguous_name()}::call({translated_args});
98
+ }}"""
99
+
100
+
101
+ def gen_returns(
102
+ returns: Tuple[Return, ...], cur_level_var: str, results_var: str
103
+ ) -> str:
104
+ idx = 0
105
+ wrapped_returns = []
106
+ for ret in returns:
107
+ if is_tensor(ret.type):
108
+ wrapped_returns.append(
109
+ f"makeBatched(std::get<{idx}>({results_var}), std::get<{idx + 1}>({results_var}), {cur_level_var})"
110
+ )
111
+ idx += 2
112
+ elif is_tensor_list(ret.type):
113
+ wrapped_returns.append(
114
+ f"makeBatchedVector(std::get<{idx}>({results_var}), std::get<{idx+1}>({results_var}), {cur_level_var})"
115
+ )
116
+ idx += 2
117
+ else:
118
+ wrapped_returns.append(f"std::get<{idx}>({results_var})")
119
+ idx += 1
120
+ if len(wrapped_returns) == 1:
121
+ result = f"return {wrapped_returns[0]};"
122
+ else:
123
+ result = f'return std::make_tuple({", ".join(wrapped_returns)});'
124
+ return result
125
+
126
+
127
+ def accepts_at_least_one_tensor_input(schema: FunctionSchema) -> bool:
128
+ return any(a.type.is_tensor_like() for a in schema.arguments.flat_all)
129
+
130
+
131
+ def is_mutated_arg(argument: Argument) -> bool:
132
+ return argument.annotation is not None and argument.annotation.is_write
133
+
134
+
135
+ def gen_vmap_inplace_plumbing(native_function: NativeFunction) -> Optional[str]:
136
+ # Assumptions:
137
+ # - only one argument is being modified in-place
138
+ # - the argument that is being modified in-place is the first argument
139
+ # - all returns are either Tensor, tuple of Tensor, or TensorList
140
+ schema = native_function.func
141
+ sig = DispatcherSignature.from_schema(schema)
142
+ returns = schema.returns
143
+
144
+ # Check assumptions. If these are invalid we return None
145
+ # and punt the work to handle them to the future.
146
+ assert schema.kind() == SchemaKind.inplace
147
+ if not is_mutated_arg(schema.arguments.flat_all[0]):
148
+ return None
149
+ if not len([arg for arg in schema.arguments.flat_all if is_mutated_arg(arg)]) == 1:
150
+ return None
151
+
152
+ # Only support cases where all returns are Tensors or vector<Tensor>
153
+ if len(returns) == 0:
154
+ return None
155
+ if not all(is_tensor(ret.type) or is_tensor_list(ret.type) for ret in returns):
156
+ return None
157
+ if not accepts_at_least_one_tensor_input(schema):
158
+ return None
159
+
160
+ cur_level_var = "cur_level"
161
+
162
+ unwraps, unwrapped_arg_list = gen_unwraps(schema.arguments.flat_all, cur_level_var)
163
+ bdims_all_none_case = gen_case_where_all_bdims_are_none(sig, schema, cur_level_var)
164
+
165
+ return f"""\
166
+ template <typename batch_rule_t, batch_rule_t batch_rule>
167
+ {sig.decl(name=schema.name.unambiguous_name() + '_generated_plumbing')} {{
168
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
169
+ auto maybe_layer = maybeCurrentDynamicLayer();
170
+ vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
171
+ int64_t {cur_level_var} = maybe_layer->layerId();
172
+ {textwrap.indent(bdims_all_none_case, " ")}
173
+ {textwrap.indent(unwraps, " ")}
174
+ batch_rule({', '.join(unwrapped_arg_list)});
175
+ return {schema.arguments.flat_all[0].name};
176
+ }}"""
177
+
178
+
179
+ def gen_vmap_plumbing_no_returns(native_function: NativeFunction) -> str:
180
+ schema = native_function.func
181
+ sig = DispatcherSignature.from_schema(schema)
182
+ cur_level_var = "cur_level"
183
+
184
+ unwraps, unwrapped_arg_list = gen_unwraps(schema.arguments.flat_all, cur_level_var)
185
+ bdims_all_none_case = gen_case_where_all_bdims_are_none(sig, schema, cur_level_var)
186
+
187
+ return f"""\
188
+ template <typename batch_rule_t, batch_rule_t batch_rule>
189
+ {sig.decl(name=schema.name.unambiguous_name() + '_generated_plumbing')} {{
190
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
191
+ auto maybe_layer = maybeCurrentDynamicLayer();
192
+ vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
193
+ int64_t {cur_level_var} = maybe_layer->layerId();
194
+ {textwrap.indent(bdims_all_none_case, " ")}
195
+ {textwrap.indent(unwraps, " ")}
196
+ batch_rule({', '.join(unwrapped_arg_list)});
197
+ }}"""
198
+
199
+
200
+ def gen_vmap_plumbing(native_function: NativeFunction) -> Optional[str]:
201
+ schema = native_function.func
202
+ sig = DispatcherSignature.from_schema(schema)
203
+ returns = schema.returns
204
+
205
+ # Only support cases where all returns are Tensors or vector<Tensor>
206
+ if not accepts_at_least_one_tensor_input(schema):
207
+ return None
208
+ if len(returns) == 0:
209
+ return gen_vmap_plumbing_no_returns(native_function)
210
+ if not all(ret.type.is_tensor_like() for ret in returns):
211
+ return None
212
+ # in-place views need special handling
213
+ if "inplace_view" in native_function.tags:
214
+ return None
215
+
216
+ if schema.kind() == SchemaKind.inplace:
217
+ return gen_vmap_inplace_plumbing(native_function)
218
+
219
+ # Don't support these (mutable, out, scratch)
220
+ if schema.kind() != SchemaKind.functional:
221
+ return None
222
+
223
+ results_var = "results"
224
+ cur_level_var = "cur_level"
225
+
226
+ unwraps, unwrapped_arg_list = gen_unwraps(schema.arguments.flat_all, cur_level_var)
227
+ bdims_all_none_case = gen_case_where_all_bdims_are_none(sig, schema, cur_level_var)
228
+
229
+ wrapped_returns = gen_returns(returns, cur_level_var, results_var)
230
+ return f"""\
231
+ template <typename batch_rule_t, batch_rule_t batch_rule>
232
+ {sig.decl(name=schema.name.unambiguous_name() + '_generated_plumbing')} {{
233
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
234
+ auto maybe_layer = maybeCurrentDynamicLayer();
235
+ vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
236
+ int64_t {cur_level_var} = maybe_layer->layerId();
237
+ {textwrap.indent(bdims_all_none_case, " ")}
238
+ {textwrap.indent(unwraps, " ")}
239
+ auto {results_var} = batch_rule({', '.join(unwrapped_arg_list)});
240
+ {wrapped_returns}
241
+ }}"""
242
+
243
+
244
+ @dataclass(frozen=True)
245
+ class ComputeBatchRulePlumbing:
246
+ @method_with_native_function
247
+ def __call__(self, f: NativeFunction) -> Optional[str]:
248
+ opname = str(f.func.name)
249
+ result = gen_vmap_plumbing(f)
250
+ return result
251
+
252
+
253
+ def gen_all_vmap_plumbing(native_functions: Sequence[NativeFunction]) -> str:
254
+ body = "\n".join(list(mapMaybe(ComputeBatchRulePlumbing(), native_functions)))
255
+ return f"""
256
+ #pragma once
257
+ #include <ATen/Operators.h>
258
+ #include <ATen/functorch/PlumbingHelper.h>
259
+
260
+ namespace at {{ namespace functorch {{
261
+
262
+ {body}
263
+
264
+ }}}} // namespace at::functorch
265
+ """
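A hedged end-to-end sketch of driving this generator; the yaml paths and the output filename are placeholders, and parse_native_yaml is the same helper used by the other torchgen entry points.

from torchgen.gen import parse_native_yaml
from torchgen.gen_vmap_plumbing import gen_all_vmap_plumbing

parsed = parse_native_yaml(
    "aten/src/ATen/native/native_functions.yaml",
    "aten/src/ATen/native/tags.yaml",
)
header_text = gen_all_vmap_plumbing(parsed.native_functions)
with open("VmapGeneratedPlumbing.h", "w") as f:
    f.write(header_text)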
llmeval-env/lib/python3.10/site-packages/torchgen/local.py ADDED
@@ -0,0 +1,56 @@
1
+ import threading
2
+ from contextlib import contextmanager
3
+ from typing import Iterator, Optional
4
+
5
+ # Simple dynamic scoping implementation. The name "parametrize" comes
6
+ # from Racket.
7
+ #
8
+ # WARNING WARNING: LOOKING TO EDIT THIS FILE? Think carefully about
9
+ # why you need to add a toggle to the global behavior of code
10
+ # generation. The parameters here should really only be used
11
+ # for "temporary" situations, where we need to temporarily change
12
+ # the codegen in some cases because we cannot conveniently update
13
+ # all call sites, and are slated to be eliminated once all call
14
+ # sites are eliminated. If you don't have a plan for how to get there,
15
+ # DON'T add a new entry here.
16
+
17
+
18
+ class Locals(threading.local):
19
+ use_const_ref_for_mutable_tensors: Optional[bool] = None
20
+ use_ilistref_for_tensor_lists: Optional[bool] = None
21
+
22
+
23
+ _locals = Locals()
24
+
25
+
26
+ def use_const_ref_for_mutable_tensors() -> bool:
27
+ assert _locals.use_const_ref_for_mutable_tensors is not None, (
28
+ "need to initialize local.use_const_ref_for_mutable_tensors with "
29
+ "local.parametrize"
30
+ )
31
+ return _locals.use_const_ref_for_mutable_tensors
32
+
33
+
34
+ def use_ilistref_for_tensor_lists() -> bool:
35
+ assert _locals.use_ilistref_for_tensor_lists is not None, (
36
+ "need to initialize local.use_ilistref_for_tensor_lists with "
37
+ "local.parametrize"
38
+ )
39
+ return _locals.use_ilistref_for_tensor_lists
40
+
41
+
42
+ @contextmanager
43
+ def parametrize(
44
+ *, use_const_ref_for_mutable_tensors: bool, use_ilistref_for_tensor_lists: bool
45
+ ) -> Iterator[None]:
46
+ old_use_const_ref_for_mutable_tensors = _locals.use_const_ref_for_mutable_tensors
47
+ old_use_ilistref_for_tensor_lists = _locals.use_ilistref_for_tensor_lists
48
+ try:
49
+ _locals.use_const_ref_for_mutable_tensors = use_const_ref_for_mutable_tensors
50
+ _locals.use_ilistref_for_tensor_lists = use_ilistref_for_tensor_lists
51
+ yield
52
+ finally:
53
+ _locals.use_const_ref_for_mutable_tensors = (
54
+ old_use_const_ref_for_mutable_tensors
55
+ )
56
+ _locals.use_ilistref_for_tensor_lists = old_use_ilistref_for_tensor_lists
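A short usage sketch, grounded in the accessors above: the flags are only readable while parametrize is active on the current thread.

from torchgen import local

with local.parametrize(
    use_const_ref_for_mutable_tensors=False,
    use_ilistref_for_tensor_lists=True,
):
    assert local.use_ilistref_for_tensor_lists() is True
    assert local.use_const_ref_for_mutable_tensors() is False
# Outside the block the accessors assert again, since the thread-locals are
# restored to their previous (initially None) values.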
llmeval-env/lib/python3.10/site-packages/torchgen/model.py ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/torchgen/native_function_generation.py ADDED
@@ -0,0 +1,643 @@
1
+ from collections import defaultdict
2
+
3
+ from typing import Dict, List, Optional, Sequence, Tuple, Union
4
+
5
+ import torchgen.api.dispatcher as dispatcher
6
+ from torchgen.api.translate import translate
7
+ from torchgen.api.types import Binding, DispatcherSignature, Expr
8
+ from torchgen.context import with_native_function
9
+ from torchgen.model import (
10
+ Annotation,
11
+ Argument,
12
+ BackendIndex,
13
+ BackendMetadata,
14
+ BaseOperatorName,
15
+ BaseTy,
16
+ BaseType,
17
+ DEFAULT_KERNEL_NAMESPACE,
18
+ DeviceCheckType,
19
+ DispatchKey,
20
+ FunctionSchema,
21
+ NativeFunction,
22
+ NativeFunctionsGroup,
23
+ OperatorName,
24
+ Return,
25
+ SchemaKind,
26
+ Variant,
27
+ )
28
+ from torchgen.utils import concatMap
29
+
30
+ # See Note: [Out ops with functional variants that don't get grouped properly]
31
+ OUT_OPS_THAT_DONT_GET_GROUPED_PROPERLY = [
32
+ # This has a functional variant, but it's currently marked private.
33
+ # This function should be marked private as well (*_backward ops aren't exposed to python anyway).
34
+ "adaptive_avg_pool3d_backward.grad_input",
35
+ # There's a functional variant, _slow_conv2d_backward.output_mask, that isn't grouped properly.
36
+ # Maybe we can kill this operator in favor of convolution_backward?
37
+ "_slow_conv2d_backward.grad_input",
38
+ ]
39
+
40
+
41
+ # See Note: [Mutable ops that cannot get an out variant]
42
+ MUTABLE_OPS_THAT_CANNOT_GET_AN_OUT_VARIANT = [
43
+ # should be out=?
44
+ "_cummax_helper",
45
+ # should be out=?
46
+ "_cummin_helper",
47
+ ]
48
+
49
+ # All of these operators don't have any tensor like returns
50
+ FUNCTIONAL_OPS_THAT_CANNOT_GET_AN_OUT_VARIANT = [
51
+ "_assert_async", # no return
52
+ "_assert_async.msg", # no return
53
+ "_cslt_sparse_mm_search", # returns an int
54
+ "_assert_scalar", # no return
55
+ "_dimI", # returns an int
56
+ "_dimV", # returns an int
57
+ "_has_same_storage_numel", # returns a boolean
58
+ "_linalg_check_errors", # no return
59
+ "_local_scalar_dense", # returns a Scalar
60
+ "_nested_tensor_from_mask_left_aligned", # returns a boolean
61
+ "_nnz", # returns an int
62
+ "_use_cudnn_ctc_loss", # returns a boolean
63
+ "_use_cudnn_ctc_loss.Tensor", # returns a boolean
64
+ "_validate_compressed_sparse_indices", # no return
65
+ "allclose", # returns a boolean
66
+ "dense_dim", # returns an int
67
+ "equal", # returns a boolean
68
+ "is_coalesced", # returns a boolean
69
+ "is_pinned", # returns a boolean
70
+ "is_same_size", # returns a boolean
71
+ "is_set_to", # returns a boolean
72
+ "q_per_channel_axis", # returns an int
73
+ "q_scale", # returns a float
74
+ "q_zero_point", # returns an int
75
+ "qscheme", # returns a QScheme
76
+ "record_stream", # no return
77
+ "sparse_dim", # returns an int
78
+ "sym_constrain_range", # no return
79
+ "sym_constrain_range_for_size", # no return
80
+ "_nested_tensor_storage_offsets", # returns a vector of ints
81
+ "_chunk_grad_outputs_efficient_attention", # returns a bool
82
+ "_fused_sdp_choice", # returns an int
83
+ "_print", # no return
84
+ "_nested_get_ragged_idx", # returns an int
85
+ ]
86
+
87
+ INPLACE_OPS_THAT_DONT_GET_GROUPED_PROPERLY = [
88
+ # polygamma and polygamma.out both exist, but have a
89
+ # pre-self arg (while polygamma_ does not)
90
+ # We should either fix this schema so it can be grouped properly,
91
+ # or allow the codegen to generate new functional/out= NativeFunctions for this op
92
+ # (which would require changing its overload name to prevent overload ambiguity).
93
+ "polygamma_"
94
+ ]
95
+
96
+
97
+ # Groups "similar" NativeFunctions together
98
+ # example add.Tensor, add_.Tensor, add.out
99
+ # "similar" NativeFunctions are all expected to have an identical `signature()`,
100
+ # but have differing SchemaKinds.
101
+ def pre_group_native_functions(
102
+ native_functions: Sequence[NativeFunction],
103
+ ) -> Dict[FunctionSchema, Dict[SchemaKind, NativeFunction]]:
104
+ pre_grouped_native_functions: Dict[
105
+ FunctionSchema, Dict[SchemaKind, NativeFunction]
106
+ ] = defaultdict(dict)
107
+ for f in native_functions:
108
+ d = pre_grouped_native_functions[f.func.signature()]
109
+ assert f.func.kind() not in d
110
+ d[f.func.kind()] = f
111
+ return pre_grouped_native_functions
112
+
113
+
114
+ # Returns the out variant overload name given a base function overload name
115
+ def get_expected_out_variant_overload_name(overload_name: Optional[str]) -> str:
116
+ return "out" if not overload_name else f"{overload_name}_out"
117
+
118
+
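For example, this helper maps base overload names to out= overload names as follows (a tiny sketch):

assert get_expected_out_variant_overload_name(None) == "out"
assert get_expected_out_variant_overload_name("") == "out"
assert get_expected_out_variant_overload_name("Scalar") == "Scalar_out"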
119
+ # Helper function: given an inplace FunctionSchema, generate its corresponding out= variant
120
+ # Example before:
121
+ # _add_relu_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)
122
+ # Example after:
123
+ # _add_relu.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out)
124
+ def self_to_out_signature(func: FunctionSchema) -> FunctionSchema:
125
+ # Generating an out= schema from an inplace schema.
126
+ assert func.kind() == SchemaKind.inplace
127
+ assert func.arguments.self_arg is not None
128
+ # The new out= schema has:
129
+ # - a new out argument with the same type as "func" (but with a mutable annotation)
130
+ # - The returns (if any) now alias the out= argument instead of "func"
131
+ # - an "out" overload name
132
+ return FunctionSchema(
133
+ name=func.name.remove_inplace().with_overload(
134
+ get_expected_out_variant_overload_name(func.name.overload_name)
135
+ ),
136
+ arguments=func.arguments.remove_self_annotation().with_out_args(
137
+ [
138
+ Argument(
139
+ name="out",
140
+ type=func.arguments.self_arg.argument.type,
141
+ default=None,
142
+ annotation=func.arguments.self_arg.argument.annotation,
143
+ )
144
+ ]
145
+ ),
146
+ returns=func.returns,
147
+ )
148
+
149
+
150
+ # Helper function: given a functional FunctionSchema, generate its corresponding out= variant
151
+ # Example before:
152
+ # _to_copy(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None,
153
+ # bool? pin_memory=None, bool non_blocking=False, MemoryFormat? memory_format=None) -> Tensor
154
+ # Example after:
155
+ # _to_copy._out(Tensor self, *, bool non_blocking=False, MemoryFormat? memory_format=None,
156
+ # Tensor(a!) out) -> Tensor(a!)
157
+ def functional_to_out_signature(func: FunctionSchema) -> FunctionSchema:
158
+ # Generating an out= schema from a functional schema.
159
+ assert func.kind() == SchemaKind.functional
160
+
161
+ new_returns, new_out_args = generate_out_args_from_schema(func)
162
+ # The new out= schema has:
163
+ # - one or more new out argument(s) with the same type as returns (but with a mutable annotation)
164
+ # - The returns now alias the out= arguments
165
+ # - an "_out" overload name
166
+ return FunctionSchema(
167
+ name=func.name.with_overload(
168
+ get_expected_out_variant_overload_name(func.name.overload_name)
169
+ ),
170
+ arguments=func.arguments.signature().with_out_args(
171
+ new_out_args,
172
+ ),
173
+ returns=tuple(new_returns),
174
+ )
175
+
176
+
177
+ # Helper function: given a function schema, generate corresponding out arguments, also the updated return annotations.
178
+ def generate_out_args_from_schema(
179
+ func: FunctionSchema,
180
+ ) -> Tuple[List[Return], List[Argument]]:
181
+ # More of a sanity check - our existing restrictions on schemas should enforce that
182
+ # mutable schema kinds never return their mutable arguments.
183
+ assert not any(
184
+ r.annotation is not None and r.annotation.is_write for r in func.returns
185
+ )
186
+
187
+ tensorlike_rets = [r for r in func.returns if r.type.is_tensor_like()]
188
+ assert len(tensorlike_rets) > 0
189
+
190
+ used_annotations = concatMap(
191
+ lambda a: [] if a.annotation is None else a.annotation.alias_set,
192
+ func.arguments.flat_all,
193
+ )
194
+ valid_annotations = [
195
+ x for x in "abcdefghijklmnopqrstuvwxyz" if x not in used_annotations
196
+ ]
197
+
198
+ all_rets_are_tensors = all(r.type == BaseType(BaseTy.Tensor) for r in func.returns)
199
+
200
+ new_out_args: List[Argument] = []
201
+ # The end result of new_returns is that:
202
+ # - If every return is a plain tensor, then the new returns == the old returns, but with the out= alias annotations added.
203
+ # - Otherwise, none of the out arguments show up in the returns (and we're only left with non-tensor-like returns, if any).
204
+ new_returns: List[Return] = []
205
+ for i, r in enumerate(func.returns):
206
+ if r.type.is_tensor_like():
207
+ new_out = Argument(
208
+ name="out" if len(func.returns) == 1 else f"out{i}",
209
+ type=r.type,
210
+ default=None,
211
+ annotation=Annotation.parse(f"{valid_annotations[i]}!"),
212
+ )
213
+ new_out_args.append(new_out)
214
+ if all_rets_are_tensors:
215
+ # The convention for out= schemas is that they only return their out arguments
216
+ # if the return is a plain Tensor (or if it's a tuple of plain Tensors)
217
+ new_ret = Return(
218
+ name=None, type=new_out.type, annotation=new_out.annotation
219
+ )
220
+ new_returns.append(new_ret)
221
+ else:
222
+ new_returns.append(r)
223
+ return new_returns, new_out_args
224
+
225
+
226
+ # Helper function: given a mutable FunctionSchema, generate its corresponding out= variant
227
+ # Example before:
228
+ # _fused_moving_avg_obs_fq_helper(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> (Tensor output, Tensor mask) # noqa: B950
229
+ # Example after:
230
+ # _fused_moving_avg_obs_fq_helper._out(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False, *, Tensor(e!) out0, Tensor(f!) out1) -> (Tensor(e!), Tensor(f!)) # noqa: B950
231
+ def mutable_to_out_signature(func: FunctionSchema) -> FunctionSchema:
232
+ # Generating an out= schema from a mutable schema.
233
+ assert func.kind() == SchemaKind.mutable
234
+ # The new out= schema has:
235
+ # - Any non-aliased tensor-like returns are converted to mutable, aliased out= arguments
236
+ # (if the argument is a tensor then we also return it for method chaining,
237
+ # otherwise we return nothing)
238
+ # - an "out" overload name
239
+ #
240
+ # Note that:
241
+ # (1) This also means that we can *only* generate an out= variant from a mutable schema
242
+ # if the mutable schema has at least one tensor-like non-aliasing return.
243
+ # (2) The generated out= variant still has mutable positional arguments,
244
+ # but if necessary we could probably add another out= variant that also
245
+ # functionalizes the mutable arguments (a functional_out variant)
246
+
247
+ new_returns, new_out_args = generate_out_args_from_schema(func)
248
+
249
+ return FunctionSchema(
250
+ name=func.name.remove_inplace().with_overload(
251
+ get_expected_out_variant_overload_name(func.name.overload_name)
252
+ ),
253
+ arguments=func.arguments.with_out_args(new_out_args),
254
+ returns=tuple(new_returns),
255
+ )
256
+
257
+
258
+ # This function, given function of one SchemaKind, as well as a target SchemaKind,
259
+ # generates a new NativeFunction with the same properties, but using the target SchemaKind.
260
+ # We only actually generate functions for either functional or out= SchemaKinds.
261
+ # This function returns a tuple, with:
262
+ # - The generated NativeFunction
263
+ # - a dictionary of `BackendIndex` objects, describing which dispatch keys
264
+ # we will generate kernels for, for the new NativeFunction.
265
+ # Details are in the function, but we only generate composite kernels (in some cases) today.
266
+ def generate_function(
267
+ f: NativeFunction, k: SchemaKind
268
+ ) -> Tuple[NativeFunction, Dict[DispatchKey, Dict["OperatorName", "BackendMetadata"]]]:
269
+ from torchgen.api import cpp
270
+
271
+ if k == SchemaKind.functional:
272
+ assert f.func.kind() != SchemaKind.functional
273
+ # The new "functional" NativeFunction has:
274
+ # - any mutable arguments have been converted into (immutable) returns.
275
+ # (if a mutable argument was not also a return, it gets converted to one)
276
+ # - "_functional" appended to the base name, ONLY IF this op has a mutable variant.
277
+ # See Note [Overload Ambiguity With Functional Variants]
278
+ # The default grouping logic in signature() actually already does this,
279
+ # so we can piggy-back off it (but we still want return names)
280
+ func = f.func.signature(keep_return_names=True).with_name(
281
+ OperatorName(
282
+ name=BaseOperatorName(
283
+ base=f.func.name.name.base,
284
+ inplace=False,
285
+ dunder_method=f.func.name.name.dunder_method,
286
+ # See Note [Overload Ambiguity With Functional Variants]
287
+ functional_overload=f.func.kind() == SchemaKind.mutable,
288
+ ),
289
+ overload_name=f.func.name.overload_name,
290
+ )
291
+ )
292
+ elif k == SchemaKind.out:
293
+ # We generate out= ops mostly just so that we can pair up NativeFunctions into groups easily,
294
+ # but at least today, there is no good reason to actually use them.
295
+ # we'll generate a dispatcher entry for them, but won't actually register any kernels for them.
296
+ if f.func.kind() == SchemaKind.inplace:
297
+ func = self_to_out_signature(f.func)
298
+ elif f.func.kind() == SchemaKind.mutable:
299
+ func = mutable_to_out_signature(f.func)
300
+ elif f.func.kind() == SchemaKind.functional:
301
+ func = functional_to_out_signature(f.func)
302
+ else:
303
+ raise AssertionError(
304
+ "We only bother generating out= functions from either inplace or mutable or functional variants"
305
+ )
306
+ else:
307
+ raise AssertionError(
308
+ "We currently only generate either functional or out= NativeFunctions"
309
+ )
310
+
311
+ # Generated kernel naming convention for out: <op_name>_<overload_name>. The reason for this is to
312
+ # disambiguate operator with the same name but different overload name, e.g., `randn.names_out` and
313
+ # `randn.generator_with_names_out`.
314
+ kernel_name = (
315
+ func.name.unambiguous_name()
316
+ if func.kind() == SchemaKind.out
317
+ else cpp.name(func)
318
+ )
319
+ if f.func.has_symint():
320
+ kernel_name += "_symint"
321
+ backend_metadata = {
322
+ DispatchKey.CompositeExplicitAutograd: {
323
+ func.name: BackendMetadata(
324
+ kernel=kernel_name,
325
+ structured=False,
326
+ cpp_namespace=DEFAULT_KERNEL_NAMESPACE,
327
+ )
328
+ }
329
+ }
330
+ tags = {"generated"} | set(
331
+ f.tags & {"nondeterministic_seeded", "view_copy", "pt2_compliant_tag"}
332
+ )
333
+
334
+ return (
335
+ NativeFunction(
336
+ func=func,
337
+ use_const_ref_for_mutable_tensors=f.use_const_ref_for_mutable_tensors,
338
+ # These generated fns aren't meant to be user-friendly; don't generate methods.
339
+ variants={Variant.function},
340
+ structured=False,
341
+ structured_delegate=None,
342
+ structured_inherits=None,
343
+ precomputed=None,
344
+ autogen=[],
345
+ ufunc_inner_loop={},
346
+ manual_kernel_registration=False,
347
+ manual_cpp_binding=False,
348
+ python_module=None,
349
+ category_override=None,
350
+ device_guard=False,
351
+ device_check=DeviceCheckType.NoCheck,
352
+ loc=f.loc,
353
+ cpp_no_default_args=set(),
354
+ is_abstract=f.is_abstract,
355
+ has_composite_implicit_autograd_kernel=False,
356
+ has_composite_implicit_autograd_nested_tensor_kernel=False,
357
+ has_composite_explicit_autograd_kernel=True,
358
+ has_composite_explicit_autograd_non_functional_kernel=False,
359
+ # Every generated NativeFunction gets a "generated" tag, so it's easy to tell
360
+ # which NativeFunction objects did not come directly from native_functions.yaml.
361
+ tags=tags,
362
+ namespace=f.namespace,
363
+ ),
364
+ backend_metadata,
365
+ )
366
+
367
+
368
+ # This function is responsible for adding generated NativeFunctions which don't appear
369
+ # explicitly in the codegen.
370
+ # You can inspect the full list of NativeFunctions yourself with the torchgen package, by running
371
+ # torchgen.parse_native_yaml("aten/src/ATen/native/native_functions.yaml", "aten/src/ATen/native/tags.yaml")
372
+ # (Maybe we should make a friendly API for this)
373
+ #
374
+ # Note: this function *mutates* its two inputs,
375
+ # adding the new NativeFunctions / BackendMetadata to them
376
+ def add_generated_native_functions(
377
+ rs: List[NativeFunction],
378
+ indices: Dict[DispatchKey, Dict[OperatorName, BackendMetadata]],
379
+ ) -> None:
380
+ # The main code for generating new NativeFunctions
381
+ # First we group of NativeFunctions by schema kind,
382
+ # then we detect which ones are missing and generate them.
383
+ pre_grouped_native_functions = pre_group_native_functions(rs)
384
+ for d in pre_grouped_native_functions.values():
385
+ has_functional = SchemaKind.functional in d
386
+ has_inplace = SchemaKind.inplace in d
387
+ has_mutable = SchemaKind.mutable in d
388
+ has_out = SchemaKind.out in d
389
+
390
+ # We automatically generate a few native functions that don't exist in the yaml, for a few reasons:
391
+ # (1) If an operator has an inplace/out= variant but no functional variant, we can generate
392
+ # a simple functional variant that the functionalization pass can consume.
393
+ # (2) If an operator has an inplace or functional but no out= variant, we generate an out=
394
+ # variant, mostly so we can easily pair up functions into NativeFunctionsGroup,
395
+ # while maintaining the constraint that the out= variant is "required".
396
+ if has_mutable or has_inplace or has_out or has_functional:
397
+ # Don't bother generating function trios for native functions that bypass the dispatcher.
398
+ are_manual = all(f.manual_cpp_binding for f in d.values())
399
+ # Don't bother generating functional + out= variants for view operators
400
+ # set_ is technically an inplace_view, but for now it is treated
401
+ # as a normal inplace op in the codegen
402
+ has_view_ops = any(
403
+ f.is_view_op and str(f.func.name.name) != "set_" for f in d.values()
404
+ )
405
+ # Don't generate the other variants for CompositeImplicitAutograd operators.
406
+ # We could probably do this, but the main benefit of generating the function triplets
407
+ # is for transforms that need them, and transforms don't need to act directly
408
+ # on CompositeImplicitAutograd operators (since we let them decompose).
409
+ are_composite_implicit = all(
410
+ f.has_composite_implicit_autograd_kernel for f in d.values()
411
+ )
412
+ if are_manual or has_view_ops or are_composite_implicit:
413
+ continue
414
+ if has_out and len(d.values()) == 1:
415
+ # Note: [Out ops with functional variants that don't get grouped properly]
416
+ # In theory we could validly have an out= operator in native_functions.yaml
417
+ # that has no other variants.
418
+ # But today, all of the operators where that's the case actually do have
419
+ # functional variants, that we are just unable to pair up properly.
420
+ # I think banning this all together is probably safer
421
+ # (you can always add a functional variant yourself if you want to add a new out= operator).
422
+ #
423
+ # We should probably fix the existing cases; this check is to prevent us from adding more over time.
424
+ if (
425
+ str(d[SchemaKind.out].func.name)
426
+ not in OUT_OPS_THAT_DONT_GET_GROUPED_PROPERLY
427
+ ):
428
+ raise AssertionError(
429
+ f"Found an out= operator that we could not find any other variants of: {str(d[SchemaKind.out].func)}"
430
+ )
431
+ continue
432
+
433
+ # Some inplace ops that have problematic schemas (that we should fix), which prevent us
434
+ # from generating out= and functional variants
435
+ if (
436
+ has_inplace
437
+ and str(d[SchemaKind.inplace].func.name)
438
+ in INPLACE_OPS_THAT_DONT_GET_GROUPED_PROPERLY
439
+ ):
440
+ continue
441
+
442
+ base_fn = (
443
+ d[SchemaKind.inplace]
444
+ if has_inplace
445
+ else d[SchemaKind.mutable]
446
+ if has_mutable
447
+ else d[SchemaKind.out]
448
+ if has_out
449
+ else d[SchemaKind.functional]
450
+ )
451
+
452
+ # Note: [Mutable ops that cannot get an out variant]
453
+ # We can only generate an out= variant if either:
454
+ # - the original function has tensor-like returns (since we can convert them to out kwargs)
455
+ # - or it's inplace (since we can convert `self` to an out kwarg)
456
+ # There are only two functions that don't fit this criteria today though,
457
+ # and they both look like they should be fixed to be out= variants,
458
+ # so it feels safer to ban this schema altogether
459
+ base_fn_valid = base_fn.func.kind() == SchemaKind.inplace or any(
460
+ r.type.is_tensor_like() for r in base_fn.func.returns
461
+ )
462
+ # Note: [Loosen the assertion that all functional should have out variant]
463
+ # By design, all functional operators should have out= variants. The needs_out check
464
+ # loosens this requirement, changing it to only generate an out= variant if there's
465
+ # an `autogen` block in the native function; in the long run this check should be removed.
466
+ # FIXME: Remove this after figuring out CI job failures related to min, max, mean
467
+ needs_out = any("out" in str(op_name) for op_name in base_fn.autogen)
468
+ gets_out_variant = not has_out and base_fn_valid and needs_out
469
+ if not has_out and not base_fn_valid:
470
+ if (
471
+ str(base_fn.func.name)
472
+ not in MUTABLE_OPS_THAT_CANNOT_GET_AN_OUT_VARIANT
473
+ and str(base_fn.func.name)
474
+ not in FUNCTIONAL_OPS_THAT_CANNOT_GET_AN_OUT_VARIANT
475
+ ):
476
+ raise AssertionError(
477
+ f"""Found an operator that we could not generate an out= variant for: {str(base_fn.func)}.
478
+ This type of operator doesn't have a tensor-like return, making it difficult to generate a proper out= variant. If an
479
+ out= variant is not needed, please add the function name to the FUNCTIONAL_OPS_THAT_CANNOT_GET_AN_OUT_VARIANT list."""
480
+ )
481
+
482
+ # Generate an out= variant
483
+ if gets_out_variant:
484
+ fn, metadata = generate_function(base_fn, SchemaKind.out)
485
+ d[SchemaKind.out] = fn
486
+ BackendIndex.grow_index(indices, metadata)
487
+ rs.append(fn)
488
+
489
+ # Generate a functional variant, but only do it if the operator got an out= variant
490
+ # (Functional variants are only useful if we can group up the variants,
491
+ # which we can only do if they have an out= variant)
492
+ if not has_functional and (has_out or gets_out_variant):
493
+ fn, metadata = generate_function(base_fn, SchemaKind.functional)
494
+ d[SchemaKind.functional] = fn
495
+ BackendIndex.grow_index(indices, metadata)
496
+ rs.append(fn)
497
+
498
+
499
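The nested conditional that picks `base_fn` above follows a fixed precedence (inplace, then mutable, then out, then functional), and whether an out= variant actually gets generated hinges on the `needs_out`/`gets_out_variant` flags. A minimal sketch of that decision with made-up values (the operator and its `autogen` entry are hypothetical, not taken from native_functions.yaml):

# Hypothetical group: only a mutable variant exists, and its yaml entry lists
# "my_op.out" under `autogen`. All values below are illustrative.
has_functional, has_inplace, has_mutable, has_out = False, False, True, False

# base_fn precedence: inplace > mutable > out > functional -> the mutable variant here.
base_fn_valid = True   # assume the mutable variant's returns are tensor-like
needs_out = any("out" in name for name in ["my_op.out", "my_op_functional"])
gets_out_variant = not has_out and base_fn_valid and needs_out
print(gets_out_variant)  # True: an out= variant (and then a functional variant) gets generated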
+ def return_str(rets: Tuple[Return, ...], names: List[str]) -> str:
+     assert len(rets) == len(names)
+     if len(rets) == 0:
+         return ""
+     elif len(rets) == 1:
+         return f"return {names[0]};"
+     else:
+         return f"return {dispatcher.returns_type(rets).cpp_type()}({', '.join(names)});"
+
+
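`return_str` only decides the shape of the C++ return statement emitted into the generated kernels. A small self-contained sketch of the three cases (the tuple type string is hard-coded here purely for illustration; the real code derives it from `dispatcher.returns_type(rets).cpp_type()`):

def sketch_return_str(names):
    # Mirrors the three branches above, with the C++ tuple type hard-coded for illustration.
    if len(names) == 0:
        return ""
    if len(names) == 1:
        return f"return {names[0]};"
    return f"return ::std::tuple<at::Tensor,at::Tensor>({', '.join(names)});"

print(sketch_return_str([]))          # ""
print(sketch_return_str(["result"]))  # "return result;"
print(sketch_return_str(["a", "b"]))  # "return ::std::tuple<at::Tensor,at::Tensor>(a, b);"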
+ # Given a function, and the name of a variable corresponding to the output of that function,
+ # gather up all of the individual returns that are not aliased
+ def gather_nonaliased_inner_rets(func: FunctionSchema, out_var: str) -> List[str]:
+     aliased_rets = func.aliased_return_names()
+     non_aliased_names = []
+     is_out_var_a_tuple = len(func.returns) > 1
+     for i, r in enumerate(aliased_rets):
+         if r is None:
+             non_aliased_names.append(
+                 f"std::get<{i}>({out_var})" if is_out_var_a_tuple else out_var
+             )
+     return non_aliased_names
+
+
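As a quick illustration of `gather_nonaliased_inner_rets`: for an inner op whose first return aliases `self` and whose second return is a fresh tensor, only the non-aliased return survives, addressed through `std::get` because the output is a tuple. The values below are hypothetical stand-ins for what `aliased_return_names()` would report:

# Hypothetical data: aliased_return_names() == ["self", None] and two returns overall.
aliased_rets = ["self", None]
out_var = "tmp_output"
is_out_var_a_tuple = True

non_aliased = [
    f"std::get<{i}>({out_var})" if is_out_var_a_tuple else out_var
    for i, r in enumerate(aliased_rets)
    if r is None
]
print(non_aliased)  # ['std::get<1>(tmp_output)']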
+ # Generates functional kernels in terms of their inplace/mutable counterparts.
+ # We only do this for "generated" NativeFunctions
+ @with_native_function
+ def gen_composite_functional_kernel(g: NativeFunctionsGroup) -> Optional[str]:
+     # We should only be generating these for code-generated NativeFunctions
+     if "generated" not in g.functional.tags:
+         return None
+     # And we always write the kernel for a generated op in terms of a non-generated op.
+     if g.inplace is not None and "generated" not in g.inplace.tags:
+         target_f = g.inplace
+     elif g.mutable is not None and "generated" not in g.mutable.tags:
+         target_f = g.mutable
+     else:
+         # We should be guaranteed to have a valid inplace/mutable variant to call into.
+         # See Note: [Mutable Ops Not Using Functionalization]
+         raise AssertionError(str(g.functional.func))
+
+     sig = DispatcherSignature(g.functional.func)
+     target_sig = DispatcherSignature(target_f.func)
+
+     context: List[Union[Binding, Expr]] = []
+     clone_mutable_inputs = []
+     cloned_return_names = []
+     # We can't just directly pass all of the arguments from the functional op into the mutating op.
+     # We need to check which inputs to the mutating operator are mutable,
+     # and clone those inputs first.
+     for a_curr, a_tgt in zip(
+         dispatcher.jit_arguments(g.functional.func),
+         dispatcher.jit_arguments(target_f.func),
+     ):
+         if a_tgt.annotation is not None and a_tgt.annotation.is_write:
+             clone_mutable_inputs.append(
+                 f"auto {a_curr.name}_clone = clone_arg({a_curr.name});"
+             )
+             context.append(
+                 Expr(
+                     expr=f"{a_curr.name}_clone",
+                     type=dispatcher.argument_type(a_curr, binds=a_curr.name),
+                 )
+             )
+             # Invariant: mutable arguments on the inner mutable op are always returns on the functional op.
+             cloned_return_names.append(f"{a_curr.name}_clone")
+         else:
+             context.append(dispatcher.argument(a_curr))
+     exprs = ", ".join([e.expr for e in translate(context, target_sig.arguments())])
+
+     out_name = "output"
+     maybe_assign = f"auto {out_name} = " if len(target_f.func.returns) > 0 else ""
+     inner_return_names = gather_nonaliased_inner_rets(target_f.func, out_name)
+     ret_str = return_str(
+         g.functional.func.returns, inner_return_names + cloned_return_names
+     )
+
+     clone_mutable_inputs_str = "\n".join(clone_mutable_inputs)
+     return f"""
+ {sig.defn(name=sig.name() + ("_symint" if g.out.func.has_symint() else ""))} {{
+ {clone_mutable_inputs_str}
+ {maybe_assign}at::_ops::{target_f.func.name.unambiguous_name()}::call({exprs});
+ {ret_str}
+ }}
+ """
+
+
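To make the template above concrete: for a hypothetical generated functional op `my_op` whose non-generated counterpart is the in-place `my_op_`, the emitted kernel would look roughly like the string below (the signature and operator names are illustrative; `clone_arg` is the helper referenced in the template):

# Approximate C++ produced by gen_composite_functional_kernel for a hypothetical op:
# clone the mutable `self`, run the in-place op on the clone, return the clone.
EXAMPLE_GENERATED_FUNCTIONAL_KERNEL = r"""
at::Tensor my_op(const at::Tensor & self, const at::Tensor & other) {
auto self_clone = clone_arg(self);
auto output = at::_ops::my_op_::call(self_clone, other);
return self_clone;
}
"""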
+ # Generates out= kernels in terms of their functional counterparts.
+ # We only do this for "generated" NativeFunctions
+ @with_native_function
+ def gen_composite_out_kernel(g: NativeFunctionsGroup) -> Optional[str]:
+     # We should only be generating these for code-generated NativeFunctions
+     if "generated" not in g.out.tags:
+         return None
+     # And we always write the kernel for the out= op in terms of the functional.
+     # Note that the functional op might have also been generated, but we don't have to
+     # worry about cycles, because the generated functional kernels are always implemented
+     # in terms of non-generated kernels (see gen_composite_functional_kernel).
+
+     sig = DispatcherSignature(g.out.func)
+     target_sig = DispatcherSignature(g.functional.func)
+
+     exprs = ", ".join(
+         [e.expr for e in translate(sig.arguments(), target_sig.arguments())]
+     )
+
+     copy_outs = []
+     out_name = "tmp_output"
+     for i, out_arg in enumerate(g.out.func.arguments.out):
+         functional_return_name = (
+             out_name
+             if len(g.functional.func.returns) == 1
+             else f"std::get<{i}>({out_name})"
+         )
+         copy_outs.append(
+             f"""\
+ resize_out_helper({out_arg.name}, {functional_return_name});
+ copy_arg({out_arg.name}, {functional_return_name});"""
+         )
+
+     rets = []
+     # For each return arg in the calling (out=) operator:
+     # if it corresponds to an aliased input, return the input;
+     # otherwise, return the corresponding output from calling the functional operator.
+     for i, ret_name in enumerate(g.out.func.aliased_return_names()):
+         if ret_name is not None:
+             rets.append(ret_name)
+         else:
+             functional_return_name = (
+                 out_name
+                 if len(g.functional.func.returns) == 1
+                 else f"std::get<{i}>({out_name})"
+             )
+             rets.append(functional_return_name)
+
+     copy_outs_str = "\n".join(copy_outs)
+
+     # Kernel name needs to follow the naming convention defined in `generate_function()`
+     return f"""
+ {sig.defn(name=g.out.func.name.unambiguous_name() + ("_symint" if g.out.func.has_symint() else ""))} {{
+ auto {out_name} = at::_ops::{g.functional.func.name.unambiguous_name()}::call({exprs});
+ {copy_outs_str}
+ {return_str(g.out.func.returns, rets)}
+ }}
+ """
llmeval-env/lib/python3.10/site-packages/torchgen/packaged/ATen/native/native_functions.yaml ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/torchgen/packaged/ATen/native/tags.yaml ADDED
@@ -0,0 +1,65 @@
+ # This yaml file contains all the possible tags that can be defined in `tags` in `native_functions.yaml`
+
+ - tag: inplace_view
+   desc: |
+     This tag indicates if an operator *only* modifies the tensor metadata.
+ - tag: pt2_compliant_tag
+   desc: |
+     This tag indicates if the operator is guaranteed to
+     work with the PT2 compilation APIs (torch.compile,
+     torch.export, etc.). If you add this tag to an
+     operator, please use
+     `torch.testing._internal.optest.opcheck` to test that
+     the operator has been registered correctly and
+     works with torch.compile
+ - tag: view_copy
+   desc: |
+     This tag indicates operators that are *_copy* variants
+     of view/aliasing operators. If an operator has a view_copy tag,
+     then it should have the name {op}_copy, where {op} is a view operator.
+ - tag: dynamic_output_shape
+   desc: |
+     This tag indicates if an operator's output shape depends on input Tensor
+     data.
+ - tag: data_dependent_output
+   desc: |
+     Operator has a non-Tensor output whose value is dependent on the data
+     of Tensor inputs. Among other things, this implies that this operator
+     cannot be run with meta tensors (since data is not available), nor
+     can it be symbolically traced.
+ - tag: generated
+   desc: |
+     This tag indicates that the operator doesn't have an explicit entry in
+     native_functions.yaml, and instead was generated automatically by the codegen.
+ - tag: nondeterministic_seeded
+   desc: |
+     This tag indicates if an operator is nondeterministically seeded
+     (i.e., is random) such that the operator intentionally produces
+     different results when run twice on the same inputs, but this randomness
+     is controlled by a Generator which, if reseeded, would give you the
+     same result.
+ - tag: nondeterministic_bitwise
+   desc: |
+     This tag indicates if an operator doesn't guarantee bitwise equivalence
+     across different runs of the operator with identical inputs.
+ - tag: needs_fixed_stride_order
+   desc: |
+     This tag indicates that the operator should be passed Tensors following
+     the same stride permutation as observed in eager when compiled in inductor.
+
+ # NOTE [Core ATen Ops]
+ - tag: core
+   desc: |
+     Core aten ops are a subset of aten ops that remain after the aten-to-aten decomposition
+     and functionalization passes. Core aten ops are fully functional and adhere to single static
+     assignment (SSA): this implies there will be no `inplace` or `_out` variants in this opset.
+     This opset is designed to serve as the functional IR to interface with compiler backends.
+     In contrast to primTorch, the core aten opset doesn't decompose ops into explicit
+     type promotion and broadcasting ops.
+     Core aten ops are also effectively the opset produced by torchdynamo.export(aten_graph=True),
+     and thus can be used as an opset for export purposes.
+ - tag: pointwise
+   desc: |
+     Pointwise operators are operators where each element of the output is computed only by accessing
+     the corresponding element of all the broadcasted inputs. The output shape will be the broadcasted
+     shape of the inputs.
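These tags are consumed by the codegen, but they also surface at runtime on operator overloads, which is a convenient way to check how a given op is tagged. A small sketch (assumes a PyTorch build recent enough to expose `torch.Tag` and the `.tags` attribute; which ops carry which tags is defined by native_functions.yaml):

import torch

# Inspect the tags attached to an operator overload at runtime.
overload = torch.ops.aten.sin.default
print(overload.tags)  # typically includes torch.Tag.core and torch.Tag.pointwise

# Tags can also be used to filter a set of ops, e.g. keep only pointwise ones.
candidates = [torch.ops.aten.sin.default, torch.ops.aten.nonzero.default]
pointwise_only = [op for op in candidates if torch.Tag.pointwise in op.tags]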