applied-ai-018 committed
Commit 0ee13d9 (verified)
Parent: db4ff34

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full change set.
Files changed (50):
  1. env-llmeval/lib/python3.10/site-packages/pyarrow_hotfix/__about__.py +5 -0
  2. env-llmeval/lib/python3.10/site-packages/pyarrow_hotfix/__init__.py +110 -0
  3. env-llmeval/lib/python3.10/site-packages/pyarrow_hotfix/__pycache__/__about__.cpython-310.pyc +0 -0
  4. env-llmeval/lib/python3.10/site-packages/pyarrow_hotfix/__pycache__/__init__.cpython-310.pyc +0 -0
  5. env-llmeval/lib/python3.10/site-packages/torchgen/__pycache__/__init__.cpython-310.pyc +0 -0
  6. env-llmeval/lib/python3.10/site-packages/torchgen/__pycache__/code_template.cpython-310.pyc +0 -0
  7. env-llmeval/lib/python3.10/site-packages/torchgen/__pycache__/context.cpython-310.pyc +0 -0
  8. env-llmeval/lib/python3.10/site-packages/torchgen/__pycache__/gen.cpython-310.pyc +0 -0
  9. env-llmeval/lib/python3.10/site-packages/torchgen/__pycache__/gen_backend_stubs.cpython-310.pyc +0 -0
  10. env-llmeval/lib/python3.10/site-packages/torchgen/__pycache__/gen_executorch.cpython-310.pyc +0 -0
  11. env-llmeval/lib/python3.10/site-packages/torchgen/__pycache__/gen_functionalization_type.cpython-310.pyc +0 -0
  12. env-llmeval/lib/python3.10/site-packages/torchgen/__pycache__/gen_vmap_plumbing.cpython-310.pyc +0 -0
  13. env-llmeval/lib/python3.10/site-packages/torchgen/__pycache__/local.cpython-310.pyc +0 -0
  14. env-llmeval/lib/python3.10/site-packages/torchgen/__pycache__/yaml_utils.cpython-310.pyc +0 -0
  15. env-llmeval/lib/python3.10/site-packages/torchgen/dest/__init__.py +19 -0
  16. env-llmeval/lib/python3.10/site-packages/torchgen/dest/__pycache__/__init__.cpython-310.pyc +0 -0
  17. env-llmeval/lib/python3.10/site-packages/torchgen/dest/__pycache__/lazy_ir.cpython-310.pyc +0 -0
  18. env-llmeval/lib/python3.10/site-packages/torchgen/dest/__pycache__/lazy_ts_lowering.cpython-310.pyc +0 -0
  19. env-llmeval/lib/python3.10/site-packages/torchgen/dest/__pycache__/native_functions.cpython-310.pyc +0 -0
  20. env-llmeval/lib/python3.10/site-packages/torchgen/dest/__pycache__/register_dispatch_key.cpython-310.pyc +0 -0
  21. env-llmeval/lib/python3.10/site-packages/torchgen/dest/__pycache__/ufunc.cpython-310.pyc +0 -0
  22. env-llmeval/lib/python3.10/site-packages/torchgen/dest/lazy_ir.py +707 -0
  23. env-llmeval/lib/python3.10/site-packages/torchgen/dest/lazy_ts_lowering.py +48 -0
  24. env-llmeval/lib/python3.10/site-packages/torchgen/dest/native_functions.py +64 -0
  25. env-llmeval/lib/python3.10/site-packages/torchgen/dest/register_dispatch_key.py +989 -0
  26. env-llmeval/lib/python3.10/site-packages/torchgen/dest/ufunc.py +545 -0
  27. env-llmeval/lib/python3.10/site-packages/torchgen/executorch/__pycache__/__init__.cpython-310.pyc +0 -0
  28. env-llmeval/lib/python3.10/site-packages/torchgen/selective_build/__init__.py +0 -0
  29. env-llmeval/lib/python3.10/site-packages/torchgen/selective_build/__pycache__/__init__.cpython-310.pyc +0 -0
  30. env-llmeval/lib/python3.10/site-packages/torchgen/selective_build/__pycache__/operator.cpython-310.pyc +0 -0
  31. env-llmeval/lib/python3.10/site-packages/torchgen/selective_build/__pycache__/selector.cpython-310.pyc +0 -0
  32. env-llmeval/lib/python3.10/site-packages/torchgen/selective_build/operator.py +170 -0
  33. env-llmeval/lib/python3.10/site-packages/torchgen/selective_build/selector.py +347 -0
  34. env-llmeval/lib/python3.10/site-packages/torchgen/static_runtime/__init__.py +0 -0
  35. env-llmeval/lib/python3.10/site-packages/torchgen/static_runtime/__pycache__/__init__.cpython-310.pyc +0 -0
  36. env-llmeval/lib/python3.10/site-packages/torchgen/static_runtime/__pycache__/config.cpython-310.pyc +0 -0
  37. env-llmeval/lib/python3.10/site-packages/torchgen/static_runtime/__pycache__/gen_static_runtime_ops.cpython-310.pyc +0 -0
  38. env-llmeval/lib/python3.10/site-packages/torchgen/static_runtime/__pycache__/generator.cpython-310.pyc +0 -0
  39. env-llmeval/lib/python3.10/site-packages/torchgen/static_runtime/config.py +388 -0
  40. env-llmeval/lib/python3.10/site-packages/torchgen/static_runtime/gen_static_runtime_ops.py +228 -0
  41. env-llmeval/lib/python3.10/site-packages/torchgen/static_runtime/generator.py +796 -0
  42. env-llmeval/lib/python3.10/site-packages/tzdata/__init__.py +6 -0
  43. env-llmeval/lib/python3.10/site-packages/tzdata/__pycache__/__init__.cpython-310.pyc +0 -0
  44. env-llmeval/lib/python3.10/site-packages/tzdata/zoneinfo/CST6CDT +0 -0
  45. env-llmeval/lib/python3.10/site-packages/tzdata/zoneinfo/Cuba +0 -0
  46. env-llmeval/lib/python3.10/site-packages/tzdata/zoneinfo/Egypt +0 -0
  47. env-llmeval/lib/python3.10/site-packages/tzdata/zoneinfo/Eire +0 -0
  48. env-llmeval/lib/python3.10/site-packages/tzdata/zoneinfo/Factory +0 -0
  49. env-llmeval/lib/python3.10/site-packages/tzdata/zoneinfo/GMT-0 +0 -0
  50. env-llmeval/lib/python3.10/site-packages/tzdata/zoneinfo/GMT0 +0 -0
env-llmeval/lib/python3.10/site-packages/pyarrow_hotfix/__about__.py ADDED
@@ -0,0 +1,5 @@
+ # SPDX-FileCopyrightText: 2023-present Antoine Pitrou <[email protected]>
+ #
+ # SPDX-License-Identifier: Apache-2.0
+
+ __version__ = "0.6"
env-llmeval/lib/python3.10/site-packages/pyarrow_hotfix/__init__.py ADDED
@@ -0,0 +1,110 @@
+ # SPDX-FileCopyrightText: 2023-present Antoine Pitrou <[email protected]>
+ #
+ # SPDX-License-Identifier: Apache-2.0
+
+
+ _ERROR_MSG = """\
+ Disallowed deserialization of 'arrow.py_extension_type':
+ storage_type = {storage_type}
+ serialized = {serialized}
+ pickle disassembly:\n{pickle_disassembly}
+
+ Reading of untrusted Parquet or Feather files with a PyExtensionType column
+ allows arbitrary code execution.
+ If you trust this file, you can enable reading the extension type by one of:
+
+ - upgrading to pyarrow >= 14.0.1, and call `pa.PyExtensionType.set_auto_load(True)`
+ - disable this error by running `import pyarrow_hotfix; pyarrow_hotfix.uninstall()`
+
+ We strongly recommend updating your Parquet/Feather files to use extension types
+ derived from `pyarrow.ExtensionType` instead, and register this type explicitly.
+ See https://arrow.apache.org/docs/dev/python/extending_types.html#defining-extension-types-user-defined-types
+ for more details.
+ """
+
+ try:
+     _import_error = ModuleNotFoundError
+ except NameError:
+     _import_error = ImportError  # ModuleNotFoundError unavailable in py3.5
+
+
+ def install():
+     import atexit
+     try:
+         import pyarrow as pa
+     except _import_error:
+         # Not installed; nothing to do here.
+         return
+
+     if not hasattr(pa, "ExtensionType"):
+         # Unsupported PyArrow version?
+         return
+
+     if getattr(pa, "_hotfix_installed", False):
+         return
+
+     class ForbiddenExtensionType(pa.ExtensionType):
+         def __arrow_ext_serialize__(self):
+             return b""
+
+         @classmethod
+         def __arrow_ext_deserialize__(cls, storage_type, serialized):
+             import io
+             import pickletools
+             out = io.StringIO()
+             pickletools.dis(serialized, out)
+             raise RuntimeError(
+                 _ERROR_MSG.format(
+                     storage_type=storage_type,
+                     serialized=serialized,
+                     pickle_disassembly=out.getvalue(),
+                 )
+             )
+
+     if hasattr(pa, "unregister_extension_type"):
+         # 0.15.0 <= PyArrow
+         pa.unregister_extension_type("arrow.py_extension_type")
+         pa.register_extension_type(ForbiddenExtensionType(pa.null(),
+                                                           "arrow.py_extension_type"))
+     elif hasattr(pa.lib, "_unregister_py_extension_type"):
+         # 0.14.1 <= PyArrow < 0.15.0
+         pa.lib._unregister_py_extension_type()
+         atexit.unregister(pa.lib._unregister_py_extension_type)
+     else:
+         # PyArrow 0.14.0
+         del pa.lib._extension_types_initializer
+
+     pa._hotfix_installed = True
+
+
+ def uninstall():
+     import atexit
+     try:
+         import pyarrow as pa
+     except _import_error:
+         # Not installed; nothing to do here.
+         return
+
+     if not hasattr(pa, "ExtensionType"):
+         # Unsupported PyArrow version?
+         return
+
+     if not getattr(pa, "_hotfix_installed", False):
+         return
+
+     if hasattr(pa, "unregister_extension_type"):
+         # 0.15.0 <= PyArrow
+         pa.unregister_extension_type("arrow.py_extension_type")
+         pa.lib._register_py_extension_type()
+     elif hasattr(pa.lib, "_register_py_extension_type"):
+         # 0.14.1 <= PyArrow < 0.15.0
+         pa.lib._register_py_extension_type()
+         atexit.register(pa.lib._unregister_py_extension_type)
+     elif hasattr(pa.lib, "_ExtensionTypesInitializer"):
+         # PyArrow 0.14.0
+         pa.lib._extension_types_initializer = pa.lib._ExtensionTypesInitializer()
+
+     pa._hotfix_installed = False
+
+
+ install()
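Note on usage: the module installs itself on import (the bare install() call at the end), so simply importing pyarrow_hotfix is enough to block 'arrow.py_extension_type' deserialization, and uninstall() reverts the protection for trusted files, exactly as the error message above describes. The sketch below shows both paths plus the recommended ExtensionType-based replacement; the UuidType class and the "example.uuid" name are illustrative, not part of this commit.

    import pyarrow as pa
    import pyarrow_hotfix  # install() runs at import time and blocks PyExtensionType loading

    # Only for files you actually trust:
    pyarrow_hotfix.uninstall()

    # Recommended long-term fix from the error message: derive from pa.ExtensionType
    # and register the type explicitly (illustrative example, not a real Arrow type).
    class UuidType(pa.ExtensionType):
        def __init__(self):
            super().__init__(pa.binary(16), "example.uuid")

        def __arrow_ext_serialize__(self):
            return b""  # no parameters to serialize

        @classmethod
        def __arrow_ext_deserialize__(cls, storage_type, serialized):
            return cls()

    pa.register_extension_type(UuidType())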
env-llmeval/lib/python3.10/site-packages/pyarrow_hotfix/__pycache__/__about__.cpython-310.pyc ADDED
Binary file (199 Bytes).
 
env-llmeval/lib/python3.10/site-packages/pyarrow_hotfix/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.97 kB).
 
env-llmeval/lib/python3.10/site-packages/torchgen/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (529 Bytes).
 
env-llmeval/lib/python3.10/site-packages/torchgen/__pycache__/code_template.cpython-310.pyc ADDED
Binary file (3.05 kB).
 
env-llmeval/lib/python3.10/site-packages/torchgen/__pycache__/context.cpython-310.pyc ADDED
Binary file (3.89 kB).
 
env-llmeval/lib/python3.10/site-packages/torchgen/__pycache__/gen.cpython-310.pyc ADDED
Binary file (64.8 kB).
 
env-llmeval/lib/python3.10/site-packages/torchgen/__pycache__/gen_backend_stubs.cpython-310.pyc ADDED
Binary file (15.2 kB).
 
env-llmeval/lib/python3.10/site-packages/torchgen/__pycache__/gen_executorch.cpython-310.pyc ADDED
Binary file (27.5 kB).
 
env-llmeval/lib/python3.10/site-packages/torchgen/__pycache__/gen_functionalization_type.cpython-310.pyc ADDED
Binary file (22.2 kB).
 
env-llmeval/lib/python3.10/site-packages/torchgen/__pycache__/gen_vmap_plumbing.cpython-310.pyc ADDED
Binary file (8.76 kB).
 
env-llmeval/lib/python3.10/site-packages/torchgen/__pycache__/local.cpython-310.pyc ADDED
Binary file (1.36 kB).
 
env-llmeval/lib/python3.10/site-packages/torchgen/__pycache__/yaml_utils.cpython-310.pyc ADDED
Binary file (1.03 kB).
 
env-llmeval/lib/python3.10/site-packages/torchgen/dest/__init__.py ADDED
@@ -0,0 +1,19 @@
+ from .lazy_ir import (
+     generate_non_native_lazy_ir_nodes as generate_non_native_lazy_ir_nodes,
+     GenLazyIR as GenLazyIR,
+     GenLazyNativeFuncDefinition as GenLazyNativeFuncDefinition,
+     GenLazyShapeInferenceDefinition as GenLazyShapeInferenceDefinition,
+ )
+ from .native_functions import (
+     compute_native_function_declaration as compute_native_function_declaration,
+ )
+ from .register_dispatch_key import (
+     gen_registration_headers as gen_registration_headers,
+     gen_registration_helpers as gen_registration_helpers,
+     RegisterDispatchKey as RegisterDispatchKey,
+ )
+ from .ufunc import (
+     compute_ufunc_cpu as compute_ufunc_cpu,
+     compute_ufunc_cpu_kernel as compute_ufunc_cpu_kernel,
+     compute_ufunc_cuda as compute_ufunc_cuda,
+ )
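Since this __init__ only re-exports the generator entry points, downstream codegen scripts can import everything from torchgen.dest directly. A minimal sketch, assuming torchgen is importable in the current environment; the names below are exactly the ones re-exported above:

    from torchgen.dest import (
        GenLazyIR,
        GenLazyNativeFuncDefinition,
        RegisterDispatchKey,
        compute_native_function_declaration,
        compute_ufunc_cpu,
    )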
env-llmeval/lib/python3.10/site-packages/torchgen/dest/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (665 Bytes).
 
env-llmeval/lib/python3.10/site-packages/torchgen/dest/__pycache__/lazy_ir.cpython-310.pyc ADDED
Binary file (23.4 kB).
 
env-llmeval/lib/python3.10/site-packages/torchgen/dest/__pycache__/lazy_ts_lowering.cpython-310.pyc ADDED
Binary file (2.18 kB).
 
env-llmeval/lib/python3.10/site-packages/torchgen/dest/__pycache__/native_functions.cpython-310.pyc ADDED
Binary file (2.23 kB).
 
env-llmeval/lib/python3.10/site-packages/torchgen/dest/__pycache__/register_dispatch_key.cpython-310.pyc ADDED
Binary file (23.9 kB).
 
env-llmeval/lib/python3.10/site-packages/torchgen/dest/__pycache__/ufunc.cpython-310.pyc ADDED
Binary file (14 kB).
 
env-llmeval/lib/python3.10/site-packages/torchgen/dest/lazy_ir.py ADDED
@@ -0,0 +1,707 @@
1
+ import itertools
2
+ from abc import ABC
3
+ from dataclasses import dataclass
4
+ from typing import Any, Dict, List, Optional, Tuple, Union
5
+
6
+ import torchgen.api.dispatcher as dispatcher
7
+ from torchgen.api.lazy import (
8
+ getValueT,
9
+ isValueType,
10
+ LazyArgument,
11
+ LazyIrProperties,
12
+ LazyIrSchema,
13
+ tensorListValueT,
14
+ )
15
+ from torchgen.api.translate import translate
16
+ from torchgen.api.types import (
17
+ BaseCType,
18
+ Binding,
19
+ deviceT,
20
+ DispatcherSignature,
21
+ kernel_signature,
22
+ NativeSignature,
23
+ OptionalCType,
24
+ VectorCType,
25
+ )
26
+ from torchgen.context import method_with_native_function
27
+ from torchgen.dest.lazy_ts_lowering import ts_lowering_body
28
+ from torchgen.model import (
29
+ Argument,
30
+ BackendIndex,
31
+ BackendMetadata,
32
+ BaseTy,
33
+ BaseType,
34
+ FunctionSchema,
35
+ ListType,
36
+ NativeFunction,
37
+ NativeFunctionsGroup,
38
+ )
39
+
40
+
41
+ def node_ctor_arg_rvalue_string(arg: LazyArgument) -> str:
42
+ """
43
+ Given a LazyArgument,
44
+ generate a c++ string for materializing an rvalue of that arg for passing into
45
+ a lazy Node constructor.
46
+ """
47
+
48
+ # TODO: Matching on CType seems wrong; should be matching on Type
49
+ if isValueType(arg.lazy_type):
50
+ if isinstance(arg.lazy_type, BaseCType):
51
+ if arg.is_wrapped_scalar:
52
+ return f"node_{arg.name}"
53
+ elif arg.lazy_type.type is tensorListValueT:
54
+ return f"lazy_{arg.name}_tensorlist"
55
+ elif arg.is_symint_or_list:
56
+ return f"GetSymIntValue({arg.name})"
57
+ return f"lazy_{arg.name}->GetIrValue()"
58
+ elif isinstance(arg.lazy_type, OptionalCType):
59
+ if arg.is_symint_or_list:
60
+ # TODO: I don't understand when you should put lazy_ in the name
61
+ # or not
62
+ return f"{arg.name} ? c10::make_optional(GetSymIntValue(*{arg.name})) : c10::nullopt"
63
+ elif arg.is_wrapped_scalar:
64
+ return f"node_{arg.name}"
65
+ return (
66
+ f"lazy_{arg.name} ? "
67
+ f"c10::make_optional(lazy_{arg.name}->GetIrValue()) : "
68
+ "c10::nullopt"
69
+ )
70
+ else:
71
+ raise AssertionError(
72
+ f"TODO not sure if there are other valid types to handle here ({arg.lazy_type})"
73
+ )
74
+ else:
75
+ # NB: this is here because right now we aren't treating SymInt[] as a
76
+ # value type; when we do this needs to move above
77
+ # NB: we cannot test arg.lazy_type as we've already specified it is an
78
+ # int64_t and so we cannot distinguish between SymInt and int64_t
79
+ if isinstance(arg.orig_type, ListType) and arg.orig_type.elem == BaseType(
80
+ BaseTy.SymInt
81
+ ):
82
+ if arg.symint:
83
+ return f"GetSymIntArrayRefValue({arg.name})"
84
+ else:
85
+ return f"std::vector<int64_t>({arg.name}.begin(), {arg.name}.end())"
86
+ elif isinstance(arg.lazy_type, VectorCType) and isinstance(
87
+ arg.lazy_type.elem, BaseCType
88
+ ):
89
+ return f"std::vector<{arg.lazy_type.elem.type}>({arg.name}.begin(), {arg.name}.end())"
90
+ elif (
91
+ isinstance(arg.lazy_type, OptionalCType)
92
+ and isinstance(arg.lazy_type.elem, VectorCType)
93
+ and isinstance(arg.lazy_type.elem.elem, BaseCType)
94
+ ):
95
+ return f"torch::lazy::ToOptionalVector<{arg.lazy_type.elem.elem.type}>({arg.name})"
96
+ else:
97
+ return f"{arg.name}"
98
+
99
+
100
+ def node_ctor_inputs(schema: LazyIrSchema) -> str:
101
+ """
102
+ Produce a formatted string with the arguments as passed into the constructor of a node class.
103
+ """
104
+ node_ctor_values = [
105
+ node_ctor_arg_rvalue_string(arg) for arg in schema.filtered_args()
106
+ ]
107
+ return ", ".join(node_ctor_values)
108
+
109
+
110
+ def gen_fallback_code(
111
+ schema: LazyIrSchema,
112
+ sig: Union[DispatcherSignature, NativeSignature],
113
+ overload_name: str,
114
+ ) -> str:
115
+ """
116
+ Generate code that falls back to eager conditioned on a predicate
117
+ """
118
+ dispatcher_sig = DispatcherSignature.from_schema(schema.func)
119
+ exprs = translate(sig.arguments(), dispatcher_sig.arguments())
120
+ fallback_args = ",\n ".join([a.expr for a in exprs])
121
+ if len(overload_name):
122
+ aten_op_str = f"ATEN_OP2({schema.aten_name}, {overload_name})"
123
+ else:
124
+ aten_op_str = f"ATEN_OP({schema.aten_name})"
125
+ return f"""
126
+ if (force_eager_fallback({aten_symbol(schema)})) {{
127
+ return at::native::call_fallback_fn_symint<&ltc_eager_fallback, {aten_op_str}>::call(
128
+ {fallback_args}
129
+ );
130
+ }}
131
+ """
132
+
133
+
134
+ def aten_symbol(schema: LazyIrSchema) -> str:
135
+ missing_interned_strings = {
136
+ "sigmoid_backward",
137
+ }
138
+ if schema.aten_name in missing_interned_strings:
139
+ return f'c10::Symbol::fromQualString("aten::{schema.aten_name}")'
140
+
141
+ if not schema.aten_name.startswith("at::"):
142
+ return f"at::aten::{schema.aten_name}"
143
+ else:
144
+ return schema.aten_name
145
+
146
+
147
+ # converts all tensor-like arguments to meta tensors. Returns:
148
+ # (1) a string containing all of the logic that does the conversions.
149
+ # (2) a context, to be used by translate(), with all of the relevant bindings.
150
+ def convert_to_meta_tensors(sig: DispatcherSignature) -> Tuple[str, List[Binding]]:
151
+ context: List[Binding] = []
152
+ unwrapped_tensor_args: List[str] = []
153
+ for arg in sig.arguments():
154
+ if isinstance(arg.argument, Argument) and arg.argument.type.is_tensor_like():
155
+ unwrapped_name = f"{arg.name}_meta"
156
+ unwrapped_tensor_args.append(
157
+ f"auto {unwrapped_name} = to_meta({arg.name});"
158
+ )
159
+ context.append(arg.with_name(unwrapped_name))
160
+ else:
161
+ context.append(arg)
162
+ unwrap_tensor_args_str = "\n ".join(unwrapped_tensor_args)
163
+ return unwrap_tensor_args_str, context
164
+
165
+
166
+ @dataclass(frozen=True)
167
+ class GenLazyIR(ABC):
168
+ backend_index: BackendIndex
169
+ backend_name: str
170
+ node_base: str
171
+ use_lazy_shape: bool
172
+
173
+ @method_with_native_function
174
+ def __call__(self, f: Union[NativeFunctionsGroup, NativeFunction]) -> List[str]:
175
+ func = f.functional.func if isinstance(f, NativeFunctionsGroup) else f.func
176
+ metadata = self.backend_index.get_kernel(
177
+ f.functional if isinstance(f, NativeFunctionsGroup) else f
178
+ )
179
+ schema = LazyIrSchema(
180
+ func, symint=metadata is not None and metadata.supports_symint()
181
+ )
182
+ return self.gen(schema)
183
+
184
+ # there is no lowering functionality generated unless this IR base class is subclassed and
185
+ # implemented as a backend-specific node
186
+ def lowering_function(self, schema: LazyIrSchema) -> str:
187
+ return ""
188
+
189
+ def create_function(self, schema: LazyIrSchema, node_ctor_args: str) -> str:
190
+ return ""
191
+
192
+ def can_be_reused_function(self, schema: LazyIrSchema, node_ctor_args: str) -> str:
193
+ return f"""bool CanBeReused({node_ctor_args}) const {{
194
+ return false;
195
+ }}"""
196
+
197
+ def node_base_ctor_call(self, schema: LazyIrSchema) -> str:
198
+ value_args = schema.filtered_args(values=True, scalars=False)
199
+ # backends can customize the way the node base class constructor is called,
200
+ # as long as all of its arguments can be generated from information available from the schema
201
+ base_ctor_value_args_list = []
202
+ for arg in value_args:
203
+ if isinstance(arg.lazy_type, (BaseCType, VectorCType)):
204
+ base_ctor_value_args_list.append(f"{arg.name}")
205
+ elif isinstance(arg.lazy_type, OptionalCType):
206
+ base_ctor_value_args_list.append(f"{arg.name}.value_or(kNullValue)")
207
+ else:
208
+ raise AssertionError(
209
+ f"Unsupported type ({arg.lazy_type}) - add support if necessary"
210
+ )
211
+ base_ctor_value_args = ", ".join(base_ctor_value_args_list)
212
+
213
+ scalar_args = schema.filtered_args(values=False, scalars=True)
214
+
215
+ # Shape construction.
216
+ # Conditionally build shape depending on specified shape property
217
+ if schema.properties.ShapePrecompute:
218
+ shape_ctor_arg = "std::move(shapes),"
219
+ elif schema.properties.ShapeCompute:
220
+ shape_args = [a.name for a in value_args]
221
+ shape_args.extend(a.name for a in scalar_args)
222
+ shape_ctor_arg = f"compute_shape_{schema.name}({', '.join(shape_args)}),"
223
+ elif schema.properties.ShapeCache:
224
+ shape_args = [f"operand({i})" for i in range(len(value_args))]
225
+ shape_args.extend(a.name for a in scalar_args)
226
+ shape_ctor_arg = f"[&](){{ return compute_shape_{schema.name}({', '.join(shape_args)})[0]; }},"
227
+ else:
228
+ shape_ctor_arg = ""
229
+
230
+ scalar_hashes = ", ".join(f"{a.name}" for a in scalar_args)
231
+
232
+ return f"""{self.node_base}(
233
+ {schema.node_name}::ClassOpKind(),
234
+ OpList{{{base_ctor_value_args}}},
235
+ {shape_ctor_arg}
236
+ /* num_outputs */ {len(schema.returns)},
237
+ torch::lazy::MHash({scalar_hashes}))"""
238
+
239
+ def gen(self, schema: LazyIrSchema) -> List[str]:
240
+ opkind = schema.opkind or aten_symbol(schema)
241
+
242
+ # for now, we just want one IR class decl and soon after also the method defs
243
+ # and we use the functional version not out/inplace.
244
+ all_args = schema.filtered_args()
245
+ value_args = schema.filtered_args(values=True, scalars=False)
246
+ scalar_args = schema.filtered_args(values=False, scalars=True)
247
+
248
+ ctor_args = [f"const {i.lazy_type.cpp_type()}& {i.name}" for i in all_args]
249
+ reuse_ctor_args = ", ".join(ctor_args)
250
+ if self.use_lazy_shape and schema.properties.ShapePrecompute:
251
+ ctor_args.append("std::vector<torch::lazy::Shape>&& shapes")
252
+ node_ctor_args = ", ".join(ctor_args)
253
+
254
+ scalar_initializers = ",\n ".join(
255
+ [
256
+ # This code is just special casing the mapping from string_view -> strings
257
+ f"{a.name}({a.name}.has_value() ? c10::make_optional(std::string(*{a.name})) : c10::nullopt)"
258
+ if a.lazy_type.cpp_type() == "c10::optional<c10::string_view>"
259
+ else f"{a.name}({a.name})"
260
+ for a in scalar_args
261
+ ]
262
+ )
263
+ if len(scalar_initializers):
264
+ scalar_initializers = f",\n {scalar_initializers}"
265
+ scalar_decls = "\n ".join(
266
+ [
267
+ f"std::string {a.name};"
268
+ if a.lazy_type.cpp_type() == "c10::string_view"
269
+ else f"c10::optional<std::string> {a.name};"
270
+ if a.lazy_type.cpp_type() == "c10::optional<c10::string_view>"
271
+ else f"{a.lazy_type.cpp_type()} {a.name};"
272
+ for a in scalar_args
273
+ ]
274
+ )
275
+ optional_values = [
276
+ arg.name
277
+ for arg in schema.filtered_args(values=True, scalars=False)
278
+ if isinstance(arg.lazy_type, OptionalCType)
279
+ ]
280
+ has_optional_decls = "\n ".join(
281
+ [f"bool has_{value}: 1;" for value in optional_values]
282
+ )
283
+ has_optional_defs = "\n ".join(
284
+ [f"has_{value} = !!{value};" for value in optional_values]
285
+ )
286
+ members_to_string = []
287
+ for arg in scalar_args:
288
+ if isinstance(arg.lazy_type, OptionalCType):
289
+ value = f"{arg.name}.value()"
290
+ if arg.is_generator:
291
+ value = '"torch.Generator()"'
292
+ members_to_string.append(
293
+ f"""if ({arg.name}.has_value()) {{
294
+ ss << ", {arg.name}=" << {value};
295
+ }} else {{
296
+ ss << ", {arg.name}=null";
297
+ }}"""
298
+ )
299
+ else:
300
+ members_to_string.append(f'ss << ", {arg.name}=" << {arg.name};')
301
+ members_to_string_str = "\n ".join(members_to_string)
302
+
303
+ return [
304
+ f"""\
305
+ class {schema.node_name} : public {self.node_base} {{
306
+ public:
307
+ static torch::lazy::OpKind ClassOpKind() {{
308
+ return torch::lazy::OpKind({opkind});
309
+ }}
310
+
311
+ {schema.node_name}({node_ctor_args})
312
+ : {self.node_base_ctor_call(schema)}{scalar_initializers}
313
+ {{
314
+ {has_optional_defs}
315
+ }}
316
+
317
+ std::string ToString() const override {{
318
+ std::stringstream ss;
319
+ ss << {self.node_base}::ToString();
320
+ {members_to_string_str}
321
+ return ss.str();
322
+ }}
323
+
324
+ {self.create_function(schema, reuse_ctor_args)}
325
+
326
+ {self.can_be_reused_function(schema, reuse_ctor_args)}
327
+
328
+ {self.lowering_function(schema)}
329
+
330
+ {scalar_decls}
331
+ {has_optional_decls}
332
+
333
+ }};
334
+
335
+ """,
336
+ ]
337
+
338
+
339
+ @dataclass(frozen=True)
340
+ class GenTSLazyIR(GenLazyIR):
341
+ def lowering_function(self, schema: LazyIrSchema) -> str:
342
+ signature = """
343
+ torch::lazy::TSOpVector Lower(
344
+ std::shared_ptr<torch::jit::GraphFunction> function,
345
+ torch::lazy::TSLoweringContext* loctx) const override"""
346
+
347
+ if schema.properties.LowerDeclOnly:
348
+ return f"{signature};"
349
+ elif schema.properties.Lower:
350
+ return f"""{signature} {{
351
+ {ts_lowering_body(schema)}
352
+ }}
353
+ """
354
+ else:
355
+ return ""
356
+
357
+ def create_function(self, schema: LazyIrSchema, node_ctor_args: str) -> str:
358
+ signature = f"static NodePtr Create({node_ctor_args})"
359
+ if schema.properties.CreateFnDeclOnly:
360
+ return f"{signature};"
361
+ elif not schema.properties.CreateFn:
362
+ return ""
363
+ return f"""{signature} {{
364
+ return ReuseOrMakeNode<{schema.node_name}>(data);
365
+ }}"""
366
+
367
+ def can_be_reused_function(self, schema: LazyIrSchema, node_ctor_args: str) -> str:
368
+ signature = f"bool CanBeReused({node_ctor_args}) const"
369
+ if schema.properties.CanBeReusedDeclOnly:
370
+ return f"{signature};"
371
+ elif not schema.properties.CanBeReused:
372
+ return ""
373
+ value_comparison = []
374
+ for arg in itertools.chain(schema.positional_values, schema.keyword_values):
375
+ if isinstance(arg.lazy_type, OptionalCType):
376
+ value_comparison.append(
377
+ f"nullable_operand(i++) == {arg.name}.value_or(kNullValue)"
378
+ )
379
+ else:
380
+ value_comparison.append(f"operand(i++) == {arg.name}")
381
+ for arg in itertools.chain(schema.positional_scalars, schema.keyword_scalars):
382
+ if isinstance(arg.lazy_type, OptionalCType):
383
+ value_comparison.append(
384
+ f"((!this->{arg.name}&&!{arg.name}) || (this->{arg.name}&&{arg.name} && *(this->{arg.name}) == *{arg.name}))"
385
+ )
386
+ else:
387
+ value_comparison.append(f"this->{arg.name} == {arg.name}")
388
+ value_comparison_str = " &&\n ".join(value_comparison)
389
+
390
+ return f"""{signature} {{
391
+ size_t i = 0;
392
+ return ({value_comparison_str});
393
+ }}"""
394
+
395
+
396
+ @dataclass(frozen=True)
397
+ class GenLazyNativeFuncDefinition:
398
+ class_method_name: str
399
+ backend_index: BackendIndex
400
+ tensor_class: str
401
+ gen_forced_fallback_code: bool
402
+ backend_namespace: str
403
+ get_tensorlist: str
404
+ get_tensor_or_wrap_number: str
405
+ try_get_tensor: str
406
+ metrics_counter: str
407
+ create_tensor: str
408
+ create_from_first_tensor: bool
409
+ create_aten_from_ltc_tensor: str
410
+ tuple_aten_from_ltc_tensors: str
411
+ lazy_tensor_ptr: str
412
+ get_device_fn: str
413
+
414
+ def lazy_tensor_decls(self, func: NativeFunction, schema: LazyIrSchema) -> str:
415
+ value_args = schema.filtered_args(values=True, scalars=False)
416
+ # Generates lazy_{name} variables for LazyTensors wrapping input tensors
417
+ lazy_tensor_decls: List[str] = []
418
+ for arg in value_args:
419
+ if arg.is_wrapped_scalar:
420
+ if isinstance(arg.lazy_type, OptionalCType):
421
+ lazy_tensor_decls.append(
422
+ f"""auto node_{arg.name} = {arg.name} ?
423
+ c10::make_optional(torch::lazy::LazyGraphExecutor::Get()->
424
+ GetIrValueForScalarFromCodegen(*{arg.name}, *common_device)):
425
+ c10::nullopt;"""
426
+ )
427
+ else:
428
+ lazy_tensor_decls.append(
429
+ f"""auto node_{arg.name} = torch::lazy::LazyGraphExecutor::Get()->
430
+ GetIrValueForScalarFromCodegen({arg.name}, *common_device);"""
431
+ )
432
+ elif arg.is_symint_or_list:
433
+ continue # values are extracted in isValueType
434
+ elif isinstance(arg.lazy_type, BaseCType):
435
+ if arg.lazy_type.type is tensorListValueT:
436
+ lazy_tensor_decls.append(
437
+ f"auto lazy_{arg.name}_tensorlist = "
438
+ f"{self.backend_namespace}::{self.get_tensorlist}({arg.name});"
439
+ )
440
+ else:
441
+ lazy_tensor_decls.append(
442
+ f"{self.lazy_tensor_ptr} lazy_{arg.name} = "
443
+ f"{self.backend_namespace}::{self.get_tensor_or_wrap_number}({arg.name}, *common_device);"
444
+ )
445
+ elif isinstance(arg.lazy_type, OptionalCType):
446
+ assert arg.lazy_type.elem == BaseCType(getValueT()), arg.lazy_type.elem
447
+ # TODO(alanwaketan): Maybe we want to apply GetLtcTensorOrCreateForWrappedNumber here, but hold it
448
+ # until we encounter a real world example.
449
+ lazy_tensor_decls.append(
450
+ f"{self.lazy_tensor_ptr} lazy_{arg.name} = "
451
+ f"{self.backend_namespace}::{self.try_get_tensor}({arg.name}.value_or(at::Tensor()));"
452
+ )
453
+ else:
454
+ raise AssertionError(
455
+ f"TODO not sure if there are other valid types to handle here ({arg.lazy_type})"
456
+ )
457
+ return ("\n ").join(lazy_tensor_decls)
458
+
459
+ def force_eager_fallback(
460
+ self,
461
+ func: NativeFunction,
462
+ schema: LazyIrSchema,
463
+ metadata: BackendMetadata,
464
+ sig: Union[DispatcherSignature, NativeSignature],
465
+ ) -> str:
466
+ if self.gen_forced_fallback_code:
467
+ return gen_fallback_code(
468
+ schema, sig, overload_name=func.func.name.overload_name
469
+ )
470
+ return ""
471
+
472
+ def metrics(self, func: NativeFunction, schema: LazyIrSchema) -> str:
473
+ return f"{self.metrics_counter};"
474
+
475
+ def get_device(self, func: NativeFunction, schema: LazyIrSchema) -> str:
476
+ value_args = schema.filtered_args(values=True, scalars=False)
477
+ scalar_args = schema.filtered_args(values=False, scalars=True)
478
+ value_types_names = [f"{a.name}" for a in value_args if not a.is_wrapped_scalar]
479
+ optional_device = OptionalCType(BaseCType(deviceT))
480
+ optional_devices = [
481
+ a.name for a in scalar_args if a.lazy_type == optional_device
482
+ ]
483
+ assert (
484
+ len(value_types_names) > 0 or len(optional_devices) > 0
485
+ ), "Expected at least one Value or Device type"
486
+ get_device_str = (
487
+ f"{self.get_device_fn}({', '.join(value_types_names + optional_devices)})"
488
+ )
489
+ return f"""auto common_device = {get_device_str};
490
+ TORCH_INTERNAL_ASSERT(common_device);
491
+ """
492
+
493
+ def shape_inference(self, func: NativeFunction, schema: LazyIrSchema) -> str:
494
+ metadata = self.backend_index.get_kernel(func)
495
+ assert metadata is not None
496
+ all_args = schema.filtered_args()
497
+ returns_length = len(schema.returns)
498
+ # call the meta kernel if it exists, to compute output shape/dtype for our IR
499
+ # Note [Generated LTC Shape Functions]
500
+ # LTC uses meta tensors from core to do shape inference when possible, and otherwise
501
+ # we generate a shape function declaration that needs to be manually implemented.
502
+ # How do we detect which ops are eligible to use meta tensors?
503
+ # In general we should be able to use meta tensors not just on structured operators,
504
+ # but also on composite operators that are implemented in terms of structured kernels.
505
+ # We don't currently have a way of knowing at codegen time which ops are implemented that way.
506
+ # This is the case for all view and view_copy operators however, so we're going to
507
+ # use them specifically for all of the view_copy ops (instead of manually writing shape rules for all of them).
508
+ is_view_copy_op = "view_copy" in func.tags
509
+ is_structured = func.structured or func.structured_delegate is not None
510
+ if is_structured or is_view_copy_op:
511
+ meta_out = """
512
+ std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};"""
513
+ if returns_length > 1:
514
+
515
+ def this_shape(i: int) -> str:
516
+ return f"torch::lazy::Shape(std::get<{i}>(out_meta).scalar_type(), std::get<{i}>(out_meta).sizes().vec())"
517
+
518
+ shapes_str = ",".join([this_shape(i) for i in range(returns_length)])
519
+ meta_out = "std::vector<torch::lazy::Shape> shapes{" + shapes_str + "};"
520
+
521
+ # Convert tensor args to the meta device and call it.
522
+ # (We can't pass in the input tensors directly, because they are "functional wrappers".
523
+ # If any of the meta kernels call a tensor op and redispatch, we don't want to hit the functionalize kernels.)
524
+ # Even at::meta:: functions might redispatch, e.g. if they call into view ops.
525
+ dispatcher_sig = DispatcherSignature.from_schema(func.func)
526
+ meta_conversion_str, meta_call_ctx = convert_to_meta_tensors(dispatcher_sig)
527
+ meta_call_args = [
528
+ e.expr
529
+ for e in translate(
530
+ meta_call_ctx, dispatcher_sig.arguments(), method=False
531
+ )
532
+ ]
533
+ if is_view_copy_op:
534
+ # view_copy ops always have a CompositeExplicitAutogradNonFunctional kernel
535
+ assert func.has_composite_explicit_autograd_non_functional_kernel
536
+ dispatch_ns = "compositeexplicitautogradnonfunctional"
537
+ else:
538
+ dispatch_ns = "meta"
539
+ aten_name = schema.aten_name
540
+ # TODO: this is trolling
541
+ if func.func.has_symint() and metadata.supports_symint():
542
+ aten_name += "_symint"
543
+ shape_str = f"""\
544
+ {meta_conversion_str}
545
+ auto out_meta = at::{dispatch_ns}::{aten_name}({', '.join(meta_call_args)});
546
+ {meta_out}"""
547
+ else:
548
+ shape_sig = ComputeShapeSignature(
549
+ metadata.kernel, func, symint=metadata.supports_symint()
550
+ )
551
+ shape_str = f"""
552
+ auto shapes = {shape_sig.shape_call};"""
553
+
554
+ shape_str += f"""
555
+ TORCH_INTERNAL_ASSERT(shapes.size() == {returns_length});"""
556
+
557
+ # Calculating which dimensions are symbolic
558
+ func_schema_str = "aten::" + str(func.func)
559
+ shape_str += f"""
560
+ if(torch::lazy::symbolicShapeEnabled()){{
561
+ std::vector<torch::jit::IValue> inputs = {{ {', '.join(str(a.name) for a in all_args)} }};
562
+ const char* schema_str = "{func_schema_str}";
563
+ applySymbolicShapesOnLT(schema_str, inputs, shapes);
564
+ }}
565
+ """
566
+ return shape_str
567
+
568
+ def build_ir_node(self, func: NativeFunction, schema: LazyIrSchema) -> str:
569
+ node_ctor_input_str = node_ctor_inputs(schema)
570
+ return f"""torch::lazy::NodePtr node = torch::lazy::ReuseNode<{schema.node_name}>({node_ctor_input_str});
571
+ if (!node) {{
572
+ {self.shape_inference(func, schema)}
573
+ node = torch::lazy::MakeNode<{schema.node_name}>({node_ctor_input_str}, std::move(shapes));
574
+ CacheNode(node);
575
+ }}
576
+ """
577
+
578
+ def create_lazy_tensor(self, first_tensor_name: Optional[str] = None) -> str:
579
+ # xla uses an instance method for tensor creation, for the time being
580
+ if self.create_from_first_tensor:
581
+ # TODO(whc) remove this if XLA switches to using static method for creation
582
+ assert (
583
+ first_tensor_name is not None
584
+ ), "Requires first tensor to create lazy tensor"
585
+ return f"{first_tensor_name}.{self.create_tensor}"
586
+ return f"{self.backend_namespace}::{self.create_tensor}"
587
+
588
+ def return_aten_tensor(self, func: NativeFunction, schema: LazyIrSchema) -> str:
589
+ returns_length = len(schema.returns)
590
+ value_args = schema.filtered_args(values=True, scalars=False)
591
+ value_types_names = [f"{a.name}" for a in value_args if not a.is_wrapped_scalar]
592
+ first_tensor_name = value_types_names[0] if len(value_types_names) > 0 else None
593
+ bridge_str = f"""auto result = {self.create_aten_from_ltc_tensor}(
594
+ {self.create_lazy_tensor(first_tensor_name)}(std::move(node), *common_device));"""
595
+
596
+ if returns_length > 1:
597
+ assert (
598
+ len(value_types_names) > 0
599
+ ), "Code below assumes there is at least one tensor arg"
600
+ bridge_str = f"""std::vector<{self.lazy_tensor_ptr}> lazy_tensors;
601
+ for (int i = 0; i < {returns_length}; i++) {{
602
+ lazy_tensors.push_back({self.create_lazy_tensor(first_tensor_name)}({getValueT()}(node, i), *common_device));
603
+ }}
604
+ auto result = {self.tuple_aten_from_ltc_tensors}<{returns_length}>(lazy_tensors);"""
605
+
606
+ if schema.name.name.inplace or func.func.is_out_fn():
607
+ assert returns_length == 1, (
608
+ "We assumed there was no such case where an op is an in-place variant "
609
+ f"and has tuple outputs, but got tuple of len {returns_length}."
610
+ )
611
+ bridge_str = f"""lazy_{first_tensor_name}->SetInPlaceIrValue(node);
612
+ auto& result = {first_tensor_name};"""
613
+
614
+ bridge_str += """
615
+ return result;"""
616
+ return bridge_str
617
+
618
+ @method_with_native_function
619
+ def __call__(self, func: NativeFunction) -> List[str]:
620
+ sig = kernel_signature(func, self.backend_index)
621
+ metadata = self.backend_index.get_kernel(func)
622
+ assert metadata is not None
623
+ schema = LazyIrSchema(func.func, symint=metadata.supports_symint())
624
+ return [
625
+ f"""\
626
+ {sig.decl(name=f"{self.class_method_name}::{metadata.kernel}")} {{
627
+ {self.force_eager_fallback(func, schema, metadata, sig)}
628
+ {self.metrics(func, schema)}
629
+ {self.get_device(func, schema)}
630
+ {self.lazy_tensor_decls(func, schema)}
631
+ {self.build_ir_node(func, schema)}
632
+ {self.return_aten_tensor(func, schema)}
633
+ }}\n
634
+ """
635
+ ]
636
+
637
+
638
+ class ComputeShapeSignature:
639
+ """
640
+ Here we use the base name as the suffix of the signature to avoid generating for in-place variants.
641
+ """
642
+
643
+ def __init__(self, kernel_name: str, f: NativeFunction, *, symint: bool):
644
+ self.__schema = LazyIrSchema(f.func, symint=symint)
645
+ self.__dispatch_args = ", ".join(
646
+ [a.decl() for a in dispatcher.arguments(f.func, symint=symint)]
647
+ )
648
+ self.__call_args = ", ".join(
649
+ [f"{arg.name}" for arg in self.__schema.filtered_args(generator=True)]
650
+ )
651
+ self.__kernel_name = kernel_name
652
+
653
+ def __decl_suffix(self) -> str:
654
+ return f"{self.__kernel_name}({self.__dispatch_args})"
655
+
656
+ def __call_suffix(self) -> str:
657
+ return f"{self.__kernel_name}({self.__call_args})"
658
+
659
+ @property
660
+ def shape_decl(self) -> str:
661
+ return f"TORCH_API std::vector<torch::lazy::Shape> compute_shape_{self.__decl_suffix()}"
662
+
663
+ @property
664
+ def shape_call(self) -> str:
665
+ return f"torch::lazy::compute_shape_{self.__call_suffix()}"
666
+
667
+
668
+ @dataclass(frozen=True)
669
+ class GenLazyShapeInferenceDefinition:
670
+ backend_index: BackendIndex
671
+ tensor_class: str
672
+
673
+ @method_with_native_function
674
+ def __call__(self, f: NativeFunction) -> List[str]:
675
+ sig = kernel_signature(f, self.backend_index)
676
+ metadata = self.backend_index.get_kernel(f)
677
+ assert metadata is not None
678
+
679
+ # See Note [Generated LTC Shape Functions]
680
+ is_view_copy_op = "view_copy" in f.tags
681
+ is_structured = f.structured or f.structured_delegate is not None
682
+ if is_structured or is_view_copy_op:
683
+ return []
684
+ else:
685
+ shape_sig = ComputeShapeSignature(
686
+ metadata.kernel, f, symint=metadata.supports_symint()
687
+ )
688
+ return ["\n".join([f"{shape_sig.shape_decl};"])]
689
+
690
+
691
+ def generate_non_native_lazy_ir_nodes(
692
+ non_native: List[Dict[str, Any]], gen_lazy_ir: GenLazyIR
693
+ ) -> List[str]:
694
+ """Generate the non-native lazy IR node classes"""
695
+ nodes = []
696
+ for op in non_native:
697
+ # Set default properties for Non-Native IRs
698
+ properties = LazyIrProperties("ShapeCache", "CanBeReused", "LowerDeclOnly")
699
+ for p in op.get("properties", []):
700
+ setattr(properties, p, True)
701
+
702
+ # non-native is assumed to want symint bindings if you wrote symint
703
+ schema = LazyIrSchema(FunctionSchema.parse(op["func"]), properties, symint=True)
704
+ schema.opkind = op.get("opkind")
705
+ nodes.append(gen_lazy_ir.gen(schema)[0])
706
+
707
+ return nodes
env-llmeval/lib/python3.10/site-packages/torchgen/dest/lazy_ts_lowering.py ADDED
@@ -0,0 +1,48 @@
+ from torchgen.api.lazy import LazyArgument, LazyIrSchema
+ from torchgen.api.types import OptionalCType
+
+
+ def ts_lowering_body(schema: LazyIrSchema) -> str:
+     # for now, we just want one IR class decl and soon after also the method defs
+     # and we use the functional version not out/inplace.
+     emplace_arguments = []
+
+     def get_value(arg: LazyArgument) -> str:
+         if isinstance(arg.lazy_type, OptionalCType):
+             return f"has_{arg.name} ? loctx->GetOutputOp(operand(i++)) : nullptr"
+         return "loctx->GetOutputOp(operand(i++))"
+
+     for arg in schema.positional_args:
+         if arg.is_lazy_value:
+             emplace_arguments.append(get_value(arg))
+             continue
+         emplace_arguments.append(f'"{arg.name}", {arg.name}')
+
+     emplace_arguments_str = "\n ".join(
+         [f"arguments.emplace_back({a});" for a in emplace_arguments]
+     )
+     emplace_kwarg_values = [
+         f'"{arg.name}", {get_value(arg)}' for arg in schema.keyword_values
+     ]
+     emplace_kwarg_scalars = [
+         f'"{arg.name}", {arg.name}' for arg in schema.keyword_scalars
+     ]
+     emplace_kwarguments = "\n ".join(
+         [
+             f"kwarguments.emplace_back({a});"
+             for a in emplace_kwarg_values + emplace_kwarg_scalars
+         ]
+     )
+     return f"""\
+ std::vector<torch::jit::NamedValue> arguments;
+ std::vector<torch::jit::NamedValue> kwarguments;
+ arguments.reserve({len(emplace_arguments)});
+ kwarguments.reserve({len(emplace_kwarg_values + emplace_kwarg_scalars)});
+ size_t i = 0;
+ {emplace_arguments_str}
+ {emplace_kwarguments}
+ torch::lazy::TSOpVector {schema.aten_name}_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
+ TORCH_CHECK_EQ({schema.aten_name}_out.size(), {len(schema.returns)});
+
+ return {schema.aten_name}_out;
+ """
env-llmeval/lib/python3.10/site-packages/torchgen/dest/native_functions.py ADDED
@@ -0,0 +1,64 @@
+ from typing import List, Optional, Union
+
+ import torchgen.api.meta as meta
+ import torchgen.api.structured as structured
+ from torchgen.api.types import kernel_signature
+
+ from torchgen.context import with_native_function_and_index
+ from torchgen.model import BackendIndex, NativeFunction, NativeFunctionsGroup
+ from torchgen.utils import mapMaybe
+
+
+ @with_native_function_and_index
+ def gen_unstructured(f: NativeFunction, backend_index: BackendIndex) -> Optional[str]:
+     sig = kernel_signature(f, backend_index)
+     metadata = backend_index.get_kernel(f)
+     if metadata is None:
+         return None
+     if "legacy::" in metadata.kernel:
+         return None
+     else:
+         prefix = "static" if backend_index.external else "TORCH_API"
+         return f"{prefix} {sig.decl(name=metadata.kernel)};"
+
+
+ @with_native_function_and_index
+ def gen_structured(g: NativeFunctionsGroup, backend_index: BackendIndex) -> List[str]:
+     meta_name = meta.name(g)
+     out_args = structured.impl_arguments(g)
+     metadata = backend_index.get_kernel(g)
+     if metadata is None:
+         return []
+     prefix = "" if backend_index.external else "TORCH_API "
+     return [
+         f"""\
+ struct {prefix}structured_{metadata.kernel} : public at::meta::structured_{meta_name} {{
+ void impl({', '.join(a.decl() for a in out_args)});
+ }};
+ """
+     ]
+
+
+ # Generates NativeFunctions.h, a list of forward declarations of all
+ # actual kernel definitions we keep in aten/src/ATen/native/
+ @with_native_function_and_index
+ def compute_native_function_declaration(
+     g: Union[NativeFunctionsGroup, NativeFunction], backend_index: BackendIndex
+ ) -> List[str]:
+     metadata = backend_index.get_kernel(g)
+     if isinstance(g, NativeFunctionsGroup):
+         if metadata is not None and metadata.structured:
+             if backend_index.external:
+                 # Structured hasn't been tested with external backends yet.
+                 raise AssertionError(
+                     "Structured external backend functions are not implemented yet."
+                 )
+             else:
+                 return gen_structured(g, backend_index)
+         else:
+             return list(
+                 mapMaybe(lambda f: gen_unstructured(f, backend_index), g.functions())
+             )
+     else:
+         x = gen_unstructured(g, backend_index)
+         return [] if x is None else [x]
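For readers unfamiliar with it, mapMaybe (used in both branches above) maps a function over the group's operators and keeps only the non-None results, which is how gen_unstructured quietly skips operators with no kernel metadata or with legacy:: kernels. A self-contained sketch of the same idea, not the torchgen.utils implementation itself:

    from typing import Callable, Iterable, Iterator, Optional, TypeVar

    T = TypeVar("T")
    R = TypeVar("R")

    def map_maybe(fn: Callable[[T], Optional[R]], xs: Iterable[T]) -> Iterator[R]:
        # Apply fn to each element and drop the None results.
        for x in xs:
            result = fn(x)
            if result is not None:
                yield result

    # Usage mirroring compute_native_function_declaration above (hypothetical objects):
    # decls = list(map_maybe(lambda f: gen_unstructured(f, backend_index), g.functions()))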
env-llmeval/lib/python3.10/site-packages/torchgen/dest/register_dispatch_key.py ADDED
@@ -0,0 +1,989 @@
1
+ import itertools
2
+ import textwrap
3
+ from dataclasses import dataclass
4
+ from typing import List, Literal, Optional, Tuple, Union
5
+
6
+ import torchgen.api.cpp as cpp
7
+ import torchgen.api.meta as meta
8
+ import torchgen.api.structured as structured
9
+ from torchgen.api.translate import translate
10
+ from torchgen.api.types import (
11
+ BaseCType,
12
+ Binding,
13
+ ConstRefCType,
14
+ CppSignature,
15
+ CppSignatureGroup,
16
+ DispatcherSignature,
17
+ Expr,
18
+ kernel_signature,
19
+ MutRefCType,
20
+ NamedCType,
21
+ NativeSignature,
22
+ tensorT,
23
+ )
24
+
25
+ from torchgen.context import method_with_native_function, native_function_manager
26
+ from torchgen.model import (
27
+ Argument,
28
+ BackendIndex,
29
+ DeviceCheckType,
30
+ DispatchKey,
31
+ gets_generated_out_inplace_wrapper,
32
+ is_cuda_dispatch_key,
33
+ NativeFunction,
34
+ NativeFunctionsGroup,
35
+ SchemaKind,
36
+ TensorOptionsArguments,
37
+ )
38
+ from torchgen.selective_build.selector import SelectiveBuilder
39
+ from torchgen.utils import assert_never, mapMaybe, Target
40
+
41
+
42
+ def gen_registration_headers(
43
+ backend_index: BackendIndex,
44
+ per_operator_headers: bool,
45
+ rocm: bool,
46
+ ) -> List[str]:
47
+ if per_operator_headers:
48
+ headers = ["#include <ATen/ops/as_strided_native.h>"]
49
+ else:
50
+ headers = ["#include <ATen/NativeFunctions.h>"]
51
+
52
+ if backend_index.dispatch_key in (DispatchKey.CPU, DispatchKey.Meta):
53
+ headers.append("#include <ATen/EmptyTensor.h>")
54
+ elif backend_index.dispatch_key == DispatchKey.CUDA:
55
+ if rocm:
56
+ headers.append("#include <ATen/hip/EmptyTensor.h>")
57
+ else:
58
+ headers.append("#include <ATen/cuda/EmptyTensor.h>")
59
+ elif backend_index.dispatch_key == DispatchKey.MPS:
60
+ headers.append("#include <ATen/mps/EmptyTensor.h>")
61
+ elif per_operator_headers:
62
+ headers += [
63
+ "#include <ATen/ops/empty.h>",
64
+ "#include <ATen/ops/empty_strided.h>",
65
+ "#include <ATen/ops/_copy_from_and_resize.h>",
66
+ "#include <ATen/ops/_copy_from.h>",
67
+ ]
68
+ else:
69
+ headers.append("#include <ATen/Functions.h>")
70
+
71
+ return headers
72
+
73
+
74
+ def gen_empty_impl_names(
75
+ backend_index: BackendIndex,
76
+ ) -> Tuple[Optional[str], Optional[str]]:
77
+ empty_impl = None
78
+ empty_strided_impl = None
79
+
80
+ if backend_index.dispatch_key in (
81
+ DispatchKey.Meta,
82
+ DispatchKey.CPU,
83
+ DispatchKey.CUDA,
84
+ DispatchKey.MPS,
85
+ ):
86
+ dispatch = str(backend_index.dispatch_key).lower()
87
+ empty_impl = f"at::detail::empty_{dispatch}"
88
+ empty_strided_impl = f"at::detail::empty_strided_{dispatch}"
89
+ elif backend_index.dispatch_key in (
90
+ DispatchKey.CompositeExplicitAutogradNonFunctional,
91
+ DispatchKey.QuantizedCPU,
92
+ DispatchKey.QuantizedCUDA,
93
+ ):
94
+ empty_impl = "at::empty"
95
+ empty_strided_impl = "at::empty_strided"
96
+
97
+ return empty_impl, empty_strided_impl
98
+
99
+
100
+ def gen_create_out_helper(backend_index: BackendIndex) -> List[str]:
101
+ if backend_index.dispatch_key == DispatchKey.Meta:
102
+ empty_options = "options.device(at::kMeta)"
103
+ else:
104
+ empty_options = "options"
105
+
106
+ empty_impl, empty_strided_impl = gen_empty_impl_names(backend_index)
107
+ if empty_impl is None:
108
+ return []
109
+
110
+ return [
111
+ f"""
112
+ Tensor create_out(IntArrayRef sizes, IntArrayRef strides, const TensorOptions &options) {{
113
+ if (strides.empty()) {{
114
+ return {empty_impl}(sizes, {empty_options});
115
+ }} else {{
116
+ return {empty_strided_impl}(sizes, strides, {empty_options});
117
+ }}
118
+ }}
119
+ """
120
+ ]
121
+
122
+
123
+ def gen_maybe_create_proxy_helper(backend_index: BackendIndex) -> List[str]:
124
+ _, empty_strided_impl = gen_empty_impl_names(backend_index)
125
+ return (
126
+ []
127
+ if empty_strided_impl is None
128
+ else [
129
+ f"""
130
+ c10::optional<Tensor> maybe_create_proxy(const Tensor &out, IntArrayRef sizes, IntArrayRef strides, const TensorOptions &options) {{
131
+ if (out.strides() != strides) {{
132
+ return {empty_strided_impl}(sizes, strides, options);
133
+ }}
134
+ return c10::nullopt;
135
+ }}
136
+ """
137
+ ]
138
+ )
139
+
140
+
141
+ def gen_resize_out_helper(backend_index: BackendIndex) -> List[str]:
142
+ if backend_index.dispatch_key == DispatchKey.CompositeExplicitAutogradNonFunctional:
143
+ # The function isn't used by this key (since only functional ops have a kernel for this key),
144
+ # so we need to not include it to avoid a defined-but-not-used error.
145
+ return []
146
+ return [
147
+ """
148
+ void resize_out(const Tensor &out, IntArrayRef sizes, IntArrayRef strides, const TensorOptions &options) {
149
+ TORCH_CHECK(options.dtype() == out.dtype(),
150
+ "Expected out tensor to have dtype ", options.dtype(), ", but got ", out.dtype(), " instead");
151
+ TORCH_CHECK(options.device() == out.device(),
152
+ "Expected out tensor to have device ", options.device(), ", but got ", out.device(), " instead");
153
+ const bool resized = at::native::resize_output(out, sizes);
154
+ // Only restride if a resize occurred; otherwise we ignore the (advisory)
155
+ // strides from the meta function and directly use the output tensor's
156
+ // preexisting strides
157
+ if (resized) {
158
+ if (!strides.empty()) {
159
+ TORCH_INTERNAL_ASSERT(!options.memory_format_opt().has_value());
160
+ // TODO: avoid the redispatch here
161
+ out.as_strided_(sizes, strides);
162
+ } else if (options.memory_format_opt().has_value()) {
163
+ out.unsafeGetTensorImpl()->empty_tensor_restride(*options.memory_format_opt());
164
+ }
165
+ }
166
+ }
167
+ """
168
+ ]
169
+
170
+
171
+ def gen_check_inplace_helper(backend_index: BackendIndex) -> List[str]:
172
+ return [
173
+ """
174
+ void check_inplace(const Tensor &self, IntArrayRef sizes, const TensorOptions &options) {
175
+ // These checks are needed on those operators that:
176
+ // 1) don't use 'TensorIterator' (e.g. 'addmm' and 'baddbmm')
177
+ // 2) have particular typing rules (e.g. 'cumsum' and 'cumprod')
178
+ // For other operators (e.g. 'add'), 'TensorIterator' already checks
179
+ // these things separately.
180
+ TORCH_CHECK(options.dtype() == self.dtype(),
181
+ "Bad in-place call: ",
182
+ "input tensor dtype ", self.dtype(), " and output tensor dtype ", options.dtype(), " should match");
183
+ TORCH_CHECK(options.device() == self.device(),
184
+ "Bad in-place call: ",
185
+ "input tensor device ", self.device(), " and output tensor device ", options.device(), " should match");
186
+ TORCH_CHECK(sizes == self.sizes(),
187
+ "Bad in-place call: ",
188
+ "input tensor size ", self.sizes(), " and output tensor size ", sizes, " should match");
189
+ }
190
+ """
191
+ ]
192
+
193
+
194
+ def gen_registration_helpers(backend_index: BackendIndex) -> List[str]:
195
+ return [
196
+ *gen_create_out_helper(backend_index),
197
+ *gen_resize_out_helper(backend_index),
198
+ *gen_check_inplace_helper(backend_index),
199
+ *gen_maybe_create_proxy_helper(backend_index),
200
+ ]
201
+
202
+
203
+ # Generates Register{dispatch}.cpp (e.g., RegisterCPU.cpp).
204
+ #
205
+ # - The primary function of this file is to register all of the
206
+ # implementations for the given dispatch key to the dispatcher,
207
+ # so they are available for use in PyTorch. If dispatch is
208
+ # None, we generate schema (def) registrations and catchall
209
+ # registrations.
210
+ # - The secondary function of this file is to generate a wrapper
211
+ # around functions. In CPUType these wrappers do nothing
212
+ # (and should be removed), but in other cases they handle
213
+ # DeviceGuard. A small extra benefit of wrappers is they
214
+ # are not overloaded, so they can be used in the registration
215
+ # API without having to disambiguate which overload you want
216
+ # (as would be the case if you directly registered native::
217
+ # functions).
218
+ # - The tertiary function of this file is to generate *static*
219
+ # cpp API bindings which can be used to bypass dispatcher
220
+ # directly to kernels, but with user-friendly cpp-style API
221
+ @dataclass(frozen=True)
222
+ class RegisterDispatchKey:
223
+ backend_index: BackendIndex
224
+
225
+ target: Literal[
226
+ Target.ANONYMOUS_DEFINITION,
227
+ Target.NAMESPACED_DEFINITION,
228
+ Target.NAMESPACED_DECLARATION,
229
+ Target.REGISTRATION,
230
+ ]
231
+
232
+ # Selector object to determine which operators to generate
233
+ # registration code for.
234
+ selector: SelectiveBuilder
235
+
236
+ # Whether or not we are actually code-genning for ROCm
237
+ rocm: bool
238
+
239
+ # Whether or not to generate symint registrations or not. External users
240
+ # of codegen who don't care about symints can set this to false to get
241
+ # non-SymInt codegen
242
+ symint: bool
243
+
244
+ # The class that all unstructured native functions live under. This is used to improve
245
+ # compiler error messages when a kernel writer adds a native function with the wrong signature.
246
+ # This is only used in unstructured kernels, since structured kernels already live in a class.
247
+ # Finally, this field is currently Optional because it is only used by external backends.
248
+ # It would be nice if we could add the same logic to in-tree kernels too, but that requires updating
249
+ # all of the existing kernel signatures scattered across aten/src/ATen/native.
250
+ class_method_name: Optional[str]
251
+
252
+ # Only set to true in lightweight dispatch. If lightweight dispatch is enabled we are registering
253
+ # operators into the JIT op registry, so we need to avoid generating code to register into the dispatcher.
254
+ skip_dispatcher_op_registration: bool
255
+
256
+ @staticmethod
257
+ def gen_device_check(
258
+ type: DeviceCheckType, args: List[Argument], method_name: str
259
+ ) -> str:
260
+ if type == DeviceCheckType.NoCheck:
261
+ return " // No device check\n"
262
+
263
+ device_check = "c10::optional<Device> common_device = nullopt;\n"
264
+ device_check += "(void)common_device; // Suppress unused variable warning\n"
265
+ for arg in args:
266
+ # Only tensor like arguments are eligible
267
+ if arg.type.is_tensor_like():
268
+ device_check += f"""
269
+ c10::impl::check_and_update_common_device(common_device, {arg.name}, "{method_name}", "{arg.name}");"""
270
+ return device_check
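+ #
+ # For a hypothetical wrapper named "wrapper_CPU_foo" over tensor arguments
+ # (self, other), the string assembled above expands to roughly:
+ #   c10::optional<Device> common_device = nullopt;
+ #   (void)common_device; // Suppress unused variable warning
+ #   c10::impl::check_and_update_common_device(common_device, self, "wrapper_CPU_foo", "self");
+ #   c10::impl::check_and_update_common_device(common_device, other, "wrapper_CPU_foo", "other");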
271
+
272
+ @method_with_native_function
273
+ def __call__(self, f: Union[NativeFunctionsGroup, NativeFunction]) -> List[str]:
274
+ if isinstance(f, NativeFunctionsGroup):
275
+ g: NativeFunctionsGroup = f
276
+ # Note: We call gen_structured() if the operator is marked structured, regardless of the backend.
277
+ # gen_structured() has special logic to handle auto-generated kernels.
278
+ if g.structured:
279
+ return self.gen_structured(g)
280
+ else:
281
+ return list(
282
+ mapMaybe(lambda f: self.gen_unstructured(f, g), g.functions())
283
+ )
284
+ elif isinstance(f, NativeFunction):
285
+ r = self.gen_unstructured(f)
286
+ return [] if r is None else [r]
287
+ else:
288
+ assert_never(f)
289
+
290
+ def wrapper_kernel_sig(
291
+ self, f: NativeFunction
292
+ ) -> Union[NativeSignature, DispatcherSignature]:
293
+ # The prefix is just to ensure uniqueness. The Dispatcher API doesn't guarantee unique kernel names.
294
+ return DispatcherSignature.from_schema(
295
+ f.func,
296
+ prefix=f"wrapper_{self.backend_index.dispatch_key}_{f.func.name.overload_name}_",
297
+ symint=self.symint,
298
+ )
299
+
300
+ def gen_out_inplace_wrapper(
301
+ self, f: NativeFunction, g: Optional[NativeFunctionsGroup]
302
+ ) -> Optional[str]:
303
+ if g is None:
304
+ return None
305
+ k = f.func.kind()
306
+ if k is SchemaKind.inplace:
307
+ copy_op = "at::_copy_from"
308
+ elif k is SchemaKind.out:
309
+ copy_op = "at::_copy_from_and_resize"
310
+ else:
311
+ raise AssertionError("gen_out_inplace_wrapper called on a functional op")
312
+
313
+ sig = self.wrapper_kernel_sig(f)
314
+ name = sig.name()
315
+
316
+ func_res = f"{name}_tmp"
317
+ return_names = cpp.return_names(f)
318
+ if len(return_names) > 1:
319
+ updates = "\n ".join(
320
+ f"{copy_op}(std::get<{i}>({func_res}), {ret_name});"
321
+ for i, ret_name in enumerate(return_names)
322
+ )
323
+ returns = f'{sig.returns_type().cpp_type()}({", ".join(return_names)})'
324
+ elif len(return_names) == 1:
325
+ ret_name = return_names[0]
326
+ updates = f"{copy_op}({func_res}, {ret_name});"
327
+ returns = ret_name
328
+ else:
329
+ assert len(f.func.arguments.out) == 1
330
+ returns = ""
331
+ out_arg = f.func.arguments.out[0]
332
+ if out_arg.type.is_list_like():
333
+ updates = f"""\
334
+ for (int64_t i = 0; i < {func_res}.size(); ++i) {{
335
+ {copy_op}({func_res}[i], {out_arg.name}[i]);
336
+ }}"""
337
+ else:
338
+ updates = f"{copy_op}({func_res}, {out_arg.name});"
339
+
340
+ functional_sig = self.wrapper_kernel_sig(g.functional)
341
+ wrapper_name = sig.name()
342
+
343
+ return f"""\
344
+ {sig.defn(name=wrapper_name)} {{
345
+ auto {func_res} = {functional_sig.name()}({", ".join(e.expr for e in translate(sig.arguments(), functional_sig.arguments()))});
346
+ {updates}
347
+ return {returns};
348
+ }}
349
+ """
350
+
351
+ def gen_structured(self, g: NativeFunctionsGroup) -> List[str]:
352
+ metadata = self.backend_index.get_kernel(g)
353
+ if self.backend_index.dispatch_key == DispatchKey.Meta:
354
+ assert not self.backend_index.has_kernel(g.out), (
355
+ "Do not explicitly specify Meta dispatch key on structured "
356
+ "functions, they will be automatically generated for you"
357
+ )
358
+ elif (
359
+ self.backend_index.dispatch_key
360
+ == DispatchKey.CompositeExplicitAutogradNonFunctional
361
+ ):
362
+ assert not self.backend_index.has_kernel(g.out), (
363
+ "Do not explicitly specify CompositeExplicitAutograd dispatch key on structured "
364
+ "functions, they will be automatically generated for you"
365
+ )
366
+ elif metadata is None or not metadata.structured:
367
+ return list(mapMaybe(lambda f: self.gen_unstructured(f, g), g.functions()))
368
+ structured_gen = StructuredRegisterDispatchKey(
369
+ self.backend_index,
370
+ self.target,
371
+ self.selector,
372
+ self.rocm,
373
+ self.symint,
374
+ self.class_method_name,
375
+ self.skip_dispatcher_op_registration,
376
+ g,
377
+ )
378
+ return list(mapMaybe(structured_gen.gen_one, g.functions()))
379
+
380
+ def gen_unstructured(
381
+ self, f: NativeFunction, g: Optional[NativeFunctionsGroup] = None
382
+ ) -> Optional[str]:
383
+ with native_function_manager(f):
384
+ inplace_meta = False
385
+ gets_out_inplace_wrapper = False
386
+ if not self.backend_index.has_kernel(f):
387
+ if (
388
+ self.backend_index.dispatch_key == DispatchKey.Meta
389
+ and f.func.kind() is SchemaKind.inplace
390
+ and
391
+ # Defer to composites for meta implementation
392
+ not f.has_composite_kernel
393
+ and
394
+ # Inplace list operations are not supported
395
+ len(f.func.returns) == 1
396
+ ):
397
+ inplace_meta = True
398
+ elif (
399
+ not self.backend_index.use_out_as_primary
400
+ and g is not None
401
+ and gets_generated_out_inplace_wrapper(f, g, self.backend_index)
402
+ ):
403
+ # We want to generate inplace/out wrappers for ops that don't have a kernel for the backend.
404
+ gets_out_inplace_wrapper = True
405
+ else:
406
+ return None
407
+ if f.manual_kernel_registration:
408
+ return None
409
+
410
+ if (
411
+ self.target is Target.REGISTRATION
412
+ and not self.selector.is_native_function_selected(f)
413
+ ):
414
+ return None
415
+
416
+ sig = self.wrapper_kernel_sig(f)
417
+
418
+ name = sig.name()
419
+ returns_type = sig.returns_type().cpp_type()
420
+ args = sig.arguments()
421
+ args_str = ", ".join(a.defn() for a in args)
422
+
423
+ # See Note [Direct dispatch bindings]
424
+ cpp_sig_group = CppSignatureGroup.from_native_function(
425
+ f, method=False, fallback_binding=False
426
+ )
427
+
428
+ # TODO: dedupe this with the structured codegen
429
+ if self.target is Target.NAMESPACED_DECLARATION:
430
+ result = ""
431
+ for cpp_sig in cpp_sig_group.signatures(symint=self.symint):
432
+ result += f"TORCH_API {cpp_sig.decl()};\n"
433
+ return result
434
+ elif self.target is Target.NAMESPACED_DEFINITION:
435
+
436
+ def generate_defn(cpp_sig: CppSignature) -> str:
437
+ return f"""
438
+ {cpp_sig.defn()} {{
439
+ return {sig.name()}({', '.join(e.expr for e in translate(cpp_sig.arguments(), sig.arguments()))});
440
+ }}
441
+ """
442
+
443
+ result = ""
444
+ for cpp_sig in cpp_sig_group.signatures(symint=self.symint):
445
+ result += generate_defn(cpp_sig)
446
+ return result
447
+
448
+ elif self.target is Target.ANONYMOUS_DEFINITION:
449
+ # short circuit for inplace_meta
450
+ if inplace_meta:
451
+ assert f.func.arguments.self_arg is not None
452
+ self_arg_name = f.func.arguments.self_arg.argument.name
453
+ # TODO: handle in place on tensor list
454
+ return f"""
455
+ {returns_type} {name}({args_str}) {{
456
+ TORCH_CHECK_NOT_IMPLEMENTED({self_arg_name}.is_meta(),
457
+ "Cannot inplace into non-meta tensor with meta tensor argument");
458
+ return {self_arg_name};
459
+ }}
460
+ """
461
+
462
+ # short circuit for generated inplace/out wrappers
463
+ if gets_out_inplace_wrapper:
464
+ return self.gen_out_inplace_wrapper(f, g)
465
+
466
+ metadata = self.backend_index.get_kernel(f)
467
+ if metadata is None:
468
+ return None
469
+ if self.class_method_name is None:
470
+ impl_name = f"{metadata.cpp_namespace}::{metadata.kernel}"
471
+ else:
472
+ impl_name = f"{metadata.cpp_namespace}::{self.class_method_name}::{metadata.kernel}"
473
+
474
+ kernel_sig = kernel_signature(f, self.backend_index)
475
+
476
+ args_exprs_str = ", ".join(
477
+ e.expr
478
+ for e in translate(
479
+ sig.arguments(), kernel_sig.arguments(), method=False
480
+ )
481
+ )
482
+
483
+ device_check = " // No device check\n"
484
+ # Backends that require device guards presumably also require device checks.
485
+ if self.backend_index.device_guard:
486
+ device_check_args = itertools.chain(
487
+ f.func.arguments.out, f.func.arguments.flat_positional
488
+ )
489
+ device_check = RegisterDispatchKey.gen_device_check(
490
+ f.device_check, list(device_check_args), name
491
+ )
492
+
493
+ device_guard = "// DeviceGuard omitted" # default
494
+ if f.device_guard and self.backend_index.device_guard:
495
+ has_tensor_options = any(
496
+ isinstance(a, TensorOptionsArguments)
497
+ for a in f.func.arguments.non_out
498
+ )
499
+ if has_tensor_options:
500
+ # kernel is creating a tensor
501
+ device_guard = """
502
+ const DeviceGuard device_guard(device_or_default(device));"""
503
+
504
+ # CUDA requires special handling
505
+ if is_cuda_dispatch_key(self.backend_index.dispatch_key):
506
+ device_guard = (
507
+ f"globalContext().lazyInitCUDA();\n{device_guard}"
508
+ )
509
+ else:
510
+ # kernel is operating on existing tensors
511
+
512
+ # There is a precedence order for which argument we use to do the
513
+ # device guard; the candidate list below reflects that order.
514
+ self_arg = (
515
+ [f.func.arguments.self_arg.argument]
516
+ if f.func.arguments.self_arg is not None
517
+ else []
518
+ )
519
+ candidate_args = itertools.chain(
520
+ self_arg,
521
+ f.func.arguments.out,
522
+ f.func.arguments.flat_positional,
523
+ )
524
+
525
+ # Only tensor like arguments are eligible
526
+ device_of = next(
527
+ (
528
+ f"{a.name}"
529
+ for a in candidate_args
530
+ if a.type.is_tensor_like()
531
+ ),
532
+ None,
533
+ )
534
+ if device_of is not None:
535
+ device_guard = f"const OptionalDeviceGuard device_guard(device_of({device_of}));"
536
+
537
+ return f"""\
538
+ namespace {{
539
+
540
+ {returns_type} {name}({args_str}) {{
541
+ {device_check}
542
+
543
+ {device_guard}
544
+ return {impl_name}({args_exprs_str});
545
+ }}
546
+
547
+ }} // anonymous namespace
548
+ """
549
+
550
+ elif self.target is Target.REGISTRATION:
551
+ if f.manual_kernel_registration or self.skip_dispatcher_op_registration:
552
+ return None
553
+ else:
554
+ payload = f"TORCH_FN({name})"
555
+ return f'm.impl("{f.func.name}",\n{payload});\n'
556
+ else:
557
+ assert_never(self.target)
558
+
559
+
560
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
561
+ #
562
+ # STRUCTURED
563
+ #
564
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
565
+
566
+
567
+ @dataclass(frozen=True)
568
+ class StructuredRegisterDispatchKey(RegisterDispatchKey):
569
+ g: NativeFunctionsGroup
570
+
571
+ def gen_class_set_output_functions(
572
+ self, k: SchemaKind, parent_class: str, generate_super: bool
573
+ ) -> str:
574
+ if generate_super:
575
+ set_output_super = f"{parent_class}::set_output_raw_strided(output_idx, sizes, strides, options, names);"
576
+ else:
577
+ set_output_super = ""
578
+
579
+ def gen_set_output_function(name: str, maybe_create_proxy: bool) -> str:
580
+ return f"""
581
+ void set_output_{name}(
582
+ int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
583
+ TensorOptions options, DimnameList names
584
+ ) override {{
585
+ {textwrap.indent(self.gen_class_set_output_body(k, maybe_create_proxy), " ")}
586
+ if (!names.empty()) {{
587
+ namedinference::propagate_names(outputs_[output_idx], names);
588
+ }}
589
+ // super must happen after, so that downstream can use maybe_get_output
590
+ // to retrieve the output
591
+ {textwrap.indent(set_output_super, " ")}
592
+ }}
593
+ """
594
+
595
+ return f"""
596
+ {gen_set_output_function("strided", maybe_create_proxy=True)}
597
+ {gen_set_output_function("raw_strided", maybe_create_proxy=False)}
598
+ """
599
+
600
+ def gen_class_set_output_body(self, k: SchemaKind, maybe_create_proxy: bool) -> str:
601
+ if self.backend_index.dispatch_key in [
602
+ DispatchKey.CUDA,
603
+ DispatchKey.MPS,
604
+ DispatchKey.CompositeExplicitAutogradNonFunctional,
605
+ ]:
606
+ maybe_set_guard = """
607
+ auto current_device = guard_.current_device();
608
+ if (C10_UNLIKELY(current_device.has_value())) {
609
+ TORCH_INTERNAL_ASSERT(*current_device == options.device(),
610
+ "structured kernels don't support multi-device outputs");
611
+ } else {
612
+ guard_.reset_device(options.device());
613
+ }
614
+ """
615
+ maybe_set_guard_line = maybe_set_guard + "\n"
616
+ else:
617
+ maybe_set_guard_line = maybe_set_guard = ""
618
+
619
+ if maybe_create_proxy:
620
+ create_proxy = """
621
+ auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
622
+ if (C10_UNLIKELY(maybe_proxy.has_value())) {
623
+ proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
624
+ }
625
+ """
626
+ else:
627
+ create_proxy = ""
628
+
629
+ if k is SchemaKind.functional:
630
+ assert self.backend_index.dispatch_key in (
631
+ DispatchKey.Meta,
632
+ DispatchKey.CPU,
633
+ DispatchKey.CUDA,
634
+ DispatchKey.MPS,
635
+ DispatchKey.CompositeExplicitAutogradNonFunctional,
636
+ )
637
+ return f"""{maybe_set_guard_line}
638
+ outputs_[output_idx] = create_out(sizes, strides, options);"""
639
+ elif k is SchemaKind.inplace:
640
+ return f"""{maybe_set_guard_line}
641
+ const auto& out = outputs_[output_idx].get();
642
+ check_inplace(out, sizes, options);
643
+ {create_proxy}"""
644
+ elif k is SchemaKind.out:
645
+ return f"""{maybe_set_guard_line}
646
+ const auto& out = outputs_[output_idx].get();
647
+ resize_out(out, sizes, strides, options);
648
+ {create_proxy}"""
649
+ elif k is SchemaKind.mutable or k is SchemaKind.scratch:
650
+ raise AssertionError(
651
+ f"{k} structured operators are currently not supported"
652
+ )
653
+ else:
654
+ assert_never(k)
655
+
656
+ # returns the definition of a ctor, as well as how to construct
657
+ # an instance of this class into a variable named op
658
+ def gen_class_ctor(self, k: SchemaKind, class_name: str, returns: int) -> str:
659
+ if k is SchemaKind.functional:
660
+ return ""
661
+ elif k is SchemaKind.inplace:
662
+ # TODO: Make sure out argument is guaranteed to be self
663
+ return f"{class_name}(Tensor& self) : outputs_{{std::ref(self)}} {{}}"
664
+ elif k is SchemaKind.out:
665
+ out_args = ", ".join(f"Tensor& out{i}" for i in range(returns))
666
+ out_refs = ", ".join(f"std::ref(out{i})" for i in range(returns))
667
+ return f"{class_name}({out_args}) : outputs_{{ {out_refs} }} {{}}"
668
+ elif k is SchemaKind.mutable or k is SchemaKind.scratch:
669
+ raise AssertionError(
670
+ f"{k} structured operators are currently not supported"
671
+ )
672
+ else:
673
+ assert_never(k)
674
+
675
+ def gen_class(
676
+ self,
677
+ f: NativeFunction,
678
+ k: SchemaKind,
679
+ *,
680
+ class_name: str,
681
+ parent_class: str,
682
+ generate_super: bool,
683
+ ) -> str:
684
+ if k is SchemaKind.functional:
685
+ output_type = "Tensor"
686
+ output_value = "outputs_[output_idx]"
687
+ proxy_field = ""
688
+ elif k is SchemaKind.inplace:
689
+ output_type = "std::reference_wrapper<Tensor>"
690
+ output_value = "proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get()"
691
+ proxy_field = f"std::array<c10::optional<Tensor>, {len(f.func.returns)}> proxy_outputs_;"
692
+ elif k is SchemaKind.out:
693
+ output_type = "std::reference_wrapper<Tensor>"
694
+ output_value = "proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get()"
695
+ proxy_field = f"std::array<c10::optional<Tensor>, {len(f.func.returns)}> proxy_outputs_;"
696
+
697
+ if self.backend_index.dispatch_key == DispatchKey.CUDA:
698
+ if self.rocm:
699
+ guard_field = "c10::hip::OptionalHIPGuardMasqueradingAsCUDA guard_;"
700
+ else:
701
+ guard_field = "c10::cuda::OptionalCUDAGuard guard_;"
702
+ elif (
703
+ self.backend_index.dispatch_key
704
+ == DispatchKey.CompositeExplicitAutogradNonFunctional
705
+ ):
706
+ guard_field = "c10::OptionalDeviceGuard guard_;"
707
+ elif self.backend_index.dispatch_key == DispatchKey.MPS:
708
+ # TODO: Move to OptionalMPSGuard.
709
+ guard_field = "c10::OptionalDeviceGuard guard_;"
710
+ else:
711
+ guard_field = ""
712
+
713
+ indent = " " * 4
714
+ class_ctor_str = self.gen_class_ctor(k, class_name, len(f.func.returns))
715
+ lines = (
716
+ f"struct {class_name} final : public {parent_class} {{",
717
+ f"{textwrap.indent(class_ctor_str, indent)}",
718
+ f"{textwrap.indent(self.gen_class_set_output_functions(k, parent_class, generate_super), indent)}",
719
+ " const Tensor& maybe_get_output(int64_t output_idx) override {",
720
+ f" return {output_value};\n",
721
+ " }",
722
+ f" std::array<{output_type}, {len(f.func.returns)}> outputs_;",
723
+ f"{textwrap.indent(proxy_field, indent)}",
724
+ f"{textwrap.indent(guard_field, indent)}",
725
+ "};",
726
+ )
727
+ return "\n".join(line for line in lines if line)
728
+
729
+ @method_with_native_function
730
+ def gen_one(self, f: NativeFunction) -> Optional[str]:
731
+ assert not f.manual_kernel_registration
732
+
733
+ if (
734
+ self.target is Target.REGISTRATION
735
+ and not self.selector.is_native_function_selected(f)
736
+ ):
737
+ return None
738
+
739
+ # TODO: Now, there is something interesting going on here. In the code below,
740
+ # we generate CompositeExplicitAutogradNonFunctional implementations of functional and inplace
741
+ # based on the out implementation. But in fact, out is definable by
742
+ # functional too (just not very efficiently), and this is honestly the
743
+ # MORE likely situation for a backend implementor. How do we pick?
744
+ # Well, taking a page from Haskell type classes and default methods,
745
+ # we could conceivably register a circular definition (out in terms
746
+ # of functional, and functional in terms of out) and just require
747
+ # someone to implement one or the other. We'd have to do a little bit
748
+ # of work to not register one of these "weak" definitions unless there
749
+ # is a strong definition somewhere in the DAG! So it's not implemented yet.
750
+ if (
751
+ self.backend_index.dispatch_key
752
+ == DispatchKey.CompositeExplicitAutogradNonFunctional
753
+ and f.func.kind() is SchemaKind.out
754
+ ):
755
+ # Never generate a default implementation for out, that's what you
756
+ # have to define as a backend implementor
757
+ return None
758
+
759
+ # Note [Direct dispatch bindings]
760
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
761
+ # Signature of the non-dispatched function we'll expose in a header
762
+ # (e.g., at::cpu::add). We don't generate methods (TODO: do this
763
+ # when CPUTensor class is a thing); nor do we generate fallback
764
+ # bindings for manual_cpp_binding functions.
765
+ cpp_sig_group = CppSignatureGroup.from_native_function(
766
+ f, method=False, fallback_binding=False
767
+ )
768
+
769
+ # Signature of the wrapper function we'll register to the dispatcher
770
+ kern = self.backend_index.get_kernel(f)
771
+ sig = NativeSignature(
772
+ f.func,
773
+ prefix=f"wrapper_{self.backend_index.dispatch_key}_",
774
+ symint=kern is not None and kern.supports_symint(),
775
+ )
776
+
777
+ if self.target is Target.NAMESPACED_DECLARATION:
778
+ result = ""
779
+ for cpp_sig in cpp_sig_group.signatures(symint=self.symint):
780
+ result += f"TORCH_API {cpp_sig.decl()};\n"
781
+ return result
782
+
783
+ elif self.target is Target.NAMESPACED_DEFINITION:
784
+
785
+ def generate_defn(cpp_sig: CppSignature) -> str:
786
+ return f"""
787
+ {cpp_sig.defn()} {{
788
+ return {sig.name()}({', '.join(e.expr for e in translate(cpp_sig.arguments(), sig.arguments()))});
789
+ }}
790
+ """
791
+
792
+ result = ""
793
+ for cpp_sig in cpp_sig_group.signatures(symint=self.symint):
794
+ result += generate_defn(cpp_sig)
795
+ return result
796
+
797
+ elif self.target is Target.ANONYMOUS_DEFINITION:
798
+ k = f.func.kind()
799
+
800
+ # Construct the body of the wrapper function with signature sig
801
+ sig_body = []
802
+ # We'll use context to keep track of any variables we've brought
803
+ # into scope while generating code
804
+ context: List[Union[Binding, Expr]] = list(sig.arguments())
805
+
806
+ # Initialize the class corresponding to this structured
807
+ # operator, feeding it the output argument(s) if they are known
808
+ if self.backend_index.dispatch_key is DispatchKey.Meta:
809
+ class_name = f"structured_{meta.name(self.g)}_meta_{k.name}"
810
+ parent_class = f"at::meta::structured_{meta.name(self.g)}"
811
+ elif (
812
+ self.backend_index.dispatch_key
813
+ is DispatchKey.CompositeExplicitAutogradNonFunctional
814
+ ):
815
+ # TODO: dedup this branch
816
+ class_name = f"structured_{meta.name(self.g)}_default_backend_{k.name}"
817
+ parent_class = f"at::meta::structured_{meta.name(self.g)}"
818
+ else:
819
+ metadata = self.backend_index.get_kernel(self.g)
820
+ assert metadata is not None
821
+ class_name = f"structured_{metadata.kernel}_{k.name}"
822
+ parent_class = f"{metadata.cpp_namespace}::structured_{metadata.kernel}"
823
+
824
+ if self.backend_index.device_guard:
825
+ device_check_args = itertools.chain(
826
+ f.func.arguments.out, f.func.arguments.flat_positional
827
+ )
828
+ sig_body.append(
829
+ RegisterDispatchKey.gen_device_check(
830
+ f.device_check, list(device_check_args), sig.name()
831
+ )
832
+ )
833
+
834
+ if k is SchemaKind.functional:
835
+ sig_body.append(f"{class_name} op;")
836
+ elif k is SchemaKind.inplace:
837
+ sig_body.append(f"{class_name} op(self);")
838
+ elif k is SchemaKind.out:
839
+ out_args_str = ", ".join(a.name for a in f.func.arguments.out)
840
+ sig_body.append(f"{class_name} op({out_args_str});")
841
+
842
+ # Translate the input native arguments into structured
843
+ # arguments for the meta call
844
+ meta_exprs = ", ".join(
845
+ e.expr
846
+ for e in translate(
847
+ context, structured.meta_arguments(self.g), method=False
848
+ )
849
+ )
850
+
851
+ if self.g.out.precomputed:
852
+ # If this function group has precomputed elements, the meta function
853
+ # returns a struct containing them which must be saved so that it
854
+ # can be unpacked when generating code to call the impl.
855
+ sig_body.append(f"auto precompute = op.meta({meta_exprs});")
856
+
857
+ # Put all of the contents of the precompute struct into the context
858
+ # so that translate will be able to return the correct args for the
859
+ # call to the impl.
860
+ precomputed_values = [
861
+ *self.g.out.precomputed.replace.values(),
862
+ self.g.out.precomputed.add,
863
+ ]
864
+ for precomputed_elems in precomputed_values:
865
+ for arg in precomputed_elems:
866
+ context.append(
867
+ Expr(
868
+ expr=f"precompute.{arg.name}",
869
+ type=structured.argument_type(arg, binds=arg.name),
870
+ )
871
+ )
872
+
873
+ # Add a use of the precompute struct so FB internal compilers don't
874
+ # complain that there is an unused variable.
875
+ sig_body.append("(void)precompute;")
876
+ else:
877
+ sig_body.append(f"op.meta({meta_exprs});")
878
+
879
+ # After running meta, op.outputs_ is guaranteed to be valid;
880
+ # add it to the context
881
+ out_args = structured.out_arguments(self.g)
882
+ for i, out_arg in enumerate(out_args):
883
+ assert ConstRefCType(BaseCType(tensorT)) == out_arg.nctype.type
884
+
885
+ if k is SchemaKind.out:
886
+ expr = f"op.maybe_get_output({i})"
887
+ else:
888
+ expr = f"op.outputs_[{i}]"
889
+
890
+ context.append(
891
+ Expr(
892
+ expr=expr,
893
+ # TODO: Stop hardcoding that the output type is a Tensor. Note
894
+ # that for the codegen here this is fine because outputs_ is
895
+ # hardcoded to be tensor already
896
+ type=NamedCType(
897
+ out_arg.nctype.name, MutRefCType(BaseCType(tensorT))
898
+ ),
899
+ )
900
+ )
901
+
902
+ # With the expanded context, do the impl call (if not a meta
903
+ # function)
904
+ if (
905
+ self.backend_index.dispatch_key
906
+ == DispatchKey.CompositeExplicitAutogradNonFunctional
907
+ ):
908
+ # TODO: https://github.com/pytorch/pytorch/issues/53023
909
+ out_sig_group = CppSignatureGroup.from_native_function(
910
+ self.g.out, method=False, fallback_binding=f.manual_cpp_binding
911
+ )
912
+ out_sig = out_sig_group.most_faithful_signature()
913
+ api_name = out_sig.name()
914
+ out_exprs = ", ".join(
915
+ e.expr
916
+ for e in translate(context, out_sig.arguments(), method=False)
917
+ )
918
+ # TODO: I think this means structured won't work with method
919
+ # only functions (but maybe you're saved by faithful? iunno.)
920
+ # NB: Originally I wrote this as an at::redispatch call, but
921
+ # I got in trouble because that meant I needed a DispatchKeySet
922
+ # in the wrapper function, which meant I needed a DispatchKeySet
923
+ # in the DispatchKeyFunctions declarations, but the defined API
924
+ # there does NOT permit a dispatch key set. I think you can
925
+ # probably unwind this by calling some function to do the TLS
926
+ # fetch and get the DispatchKeySet when you don't have it, but
927
+ # I didn't do it for this version
928
+ sig_body.append(f"at::{api_name}({out_exprs});")
929
+ elif self.backend_index.dispatch_key != DispatchKey.Meta:
930
+ impl_exprs = ", ".join(
931
+ e.expr
932
+ for e in translate(
933
+ context, structured.impl_arguments(self.g), method=False
934
+ )
935
+ )
936
+ sig_body.append(f"op.impl({impl_exprs});")
937
+
938
+ # Go over each output, and check if there is a proxy created for it.
939
+ # If so, copy it over to the original output.
940
+ if k is SchemaKind.out or k is SchemaKind.inplace:
941
+ for i in range(len(f.func.returns)):
942
+ sig_body.append(
943
+ f"if (op.proxy_outputs_[{i}].has_value()) op.outputs_[{i}].get().copy_(*op.proxy_outputs_[{i}]);"
944
+ )
945
+
946
+ # Destructively return the final tensors
947
+ # TODO: Do this in translate instead
948
+ if k is SchemaKind.functional:
949
+ if len(f.func.returns) == 1:
950
+ ret_expr = "std::move(op.outputs_[0])" # small optimization
951
+ else:
952
+ moved = ", ".join(
953
+ f"std::move(op.outputs_[{i}])"
954
+ for i in range(len(f.func.returns))
955
+ )
956
+ ret_expr = f"std::make_tuple({moved})"
957
+ elif k is SchemaKind.inplace:
958
+ ret_expr = "self"
959
+ elif k is SchemaKind.out:
960
+ if len(f.func.returns) == 1:
961
+ ret_expr = f.func.arguments.out[0].name
962
+ else:
963
+ refs = ", ".join(a.name for a in f.func.arguments.out)
964
+ ret_expr = f"std::forward_as_tuple({refs})"
965
+ sig_body.append(f"return {ret_expr};")
966
+
967
+ sig_body_str = "\n".join(sig_body)
968
+
969
+ # For an overview of what this template code looks like, see
970
+ # https://github.com/pytorch/rfcs/pull/9
971
+ return f"""\
972
+ {self.gen_class(
973
+ f, k,
974
+ class_name=class_name,
975
+ parent_class=parent_class,
976
+ generate_super=self.g.out.structured_inherits is not None
977
+ )}
978
+
979
+ {sig.defn()} {{
980
+ {sig_body_str}
981
+ }}
982
+ """
983
+
984
+ elif self.target is Target.REGISTRATION:
985
+ return f'm.impl("{f.func.name}", TORCH_FN({sig.name()}));'
986
+ else:
987
+ assert_never(self.target)
988
+ # Silence mypy's "Missing return statement" error
989
+ return None
env-llmeval/lib/python3.10/site-packages/torchgen/dest/ufunc.py ADDED
@@ -0,0 +1,545 @@
1
+ from dataclasses import dataclass
2
+ from typing import Dict, List, Optional, Sequence, Tuple, Union
3
+
4
+ import torchgen.api.ufunc as ufunc
5
+ from torchgen.api.translate import translate
6
+ from torchgen.api.types import (
7
+ BaseCType,
8
+ Binding,
9
+ CType,
10
+ Expr,
11
+ NamedCType,
12
+ opmath_t,
13
+ scalar_t,
14
+ StructuredImplSignature,
15
+ VectorizedCType,
16
+ )
17
+ from torchgen.api.ufunc import UfunctorBindings
18
+ from torchgen.context import with_native_function
19
+ from torchgen.model import (
20
+ Argument,
21
+ BaseTy,
22
+ BaseType,
23
+ DispatchKey,
24
+ NativeFunctionsGroup,
25
+ ScalarType,
26
+ UfuncKey,
27
+ )
28
+ from torchgen.utils import OrderedSet
29
+
30
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
31
+ #
32
+ # CUDA STUFF
33
+ #
34
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
35
+
36
+ # NB: not bothering to generate dispatch stub forward declaration in header,
37
+ # we can just paste it wherever necessary
38
+
39
+ # TODO: use BackendIndex
40
+ # dispatch_key: DispatchKey # only CPU/CUDA right now
41
+
42
+
43
+ # Represents functors for implementing CUDA ufuncs.
44
+ # Functors are templated by scalar_t because when USERS instantiate functors
45
+ # they are templated. A functor looks something like this:
46
+ #
47
+ # template <typename scalar_t>
48
+ # struct CUDAFunctorOnSelf_add {
49
+ # using opmath_t = at::opmath_type<scalar_t>;
50
+ # opmath_t other_;
51
+ # opmath_t alpha_;
52
+ # CUDAFunctorOnSelf_add(opmath_t other, opmath_t alpha)
53
+ # : other_(other), alpha_(alpha) {}
54
+ # __device__ scalar_t operator()(scalar_t self) {
55
+ # return ufunc::add(static_cast<opmath_t>(self), other_, alpha_);
56
+ # }
57
+ # };
58
+ #
59
+ @dataclass(frozen=True)
60
+ class UfunctorSignature:
61
+ g: NativeFunctionsGroup
62
+ scalar_tensor_idx: Optional[int]
63
+ name: str
64
+
65
+ def arguments(self) -> UfunctorBindings:
66
+ return ufunc.ufunctor_arguments(
67
+ self.g, scalar_tensor_idx=self.scalar_tensor_idx, scalar_t=scalar_t
68
+ )
69
+
70
+ def fields(self) -> List[Binding]:
71
+ # fields are renamed to have a trailing underscore, as is conventional
72
+ return [b.rename(f"{b.name}_") for b in self.arguments().ctor]
73
+
74
+ def returns_type(self) -> CType:
75
+ # TODO: don't hardcode; return type will be inferred based on tags on
76
+ # the native function
77
+ return BaseCType(scalar_t)
78
+
79
+ def decl_fields(self) -> str:
80
+ return "\n".join(f"{f.type} {f.name};" for f in self.fields())
81
+
82
+ def inline_defn_ctor(self) -> str:
83
+ args_str = ", ".join(a.decl() for a in self.arguments().ctor)
84
+ # NB: hypothetically could do this with translate but the
85
+ # transition here is very regular
86
+ init_str = ", ".join(f"{a.name}_({a.name})" for a in self.arguments().ctor)
87
+ return f"{self.name}({args_str}) : {init_str} {{}}"
88
+
89
+ def decl_apply(self) -> str:
90
+ args_str = ", ".join(a.decl() for a in self.arguments().apply)
91
+ return f"{self.returns_type().cpp_type()} operator()({args_str}) const"
92
+
93
+
94
+ @dataclass(frozen=True)
95
+ class UfuncSignature:
96
+ g: NativeFunctionsGroup
97
+ name: str
98
+ compute_t: CType
99
+
100
+ def arguments(self) -> List[Binding]:
101
+ return ufunc.ufunc_arguments(self.g, compute_t=self.compute_t)
102
+
103
+ def call(self, ctx: Sequence[Union[Binding, Expr]]) -> str:
104
+ return f"{self.name}({', '.join(a.expr for a in translate(ctx, self.arguments()))})"
105
+
106
+
107
+ # steps:
108
+ # 1. take the functional signature
109
+ # 2. use api.ufunc to convert it to template signature. this establishes
110
+ # the type of the template function
111
+ # 3. use api.ufunc (II) to generate a split struct / operator() signature.
112
+ # this establishes the context in which we call the template signature
113
+ #
114
+ # StructuredImplSignature context
115
+ # ~> functor constructor sig
116
+ #
117
+ # Functor constructor context
118
+ # ~> functor fields sig
119
+ #
120
+ # Functor apply context (functor fields + functor apply sig)
121
+ # ~> template sig
122
+ #
123
+
124
+
125
+ def eligible_for_binary_scalar_specialization(g: NativeFunctionsGroup) -> bool:
126
+ num_tensors = sum(
127
+ 1 for a in g.functional.func.arguments.flat_non_out if a.type.is_tensor_like()
128
+ )
129
+ return num_tensors == 2
130
+
131
+
132
+ def compute_ufunc_cuda_functors(
133
+ g: NativeFunctionsGroup,
134
+ ) -> Tuple[Dict[ScalarType, Dict[UfuncKey, UfunctorSignature]], str]:
135
+ # First, build the functors.
136
+ ufunctor_sigs: Dict[ScalarType, Dict[UfuncKey, UfunctorSignature]] = {}
137
+ ufunctors: List[str] = []
138
+ loops = g.out.ufunc_inner_loop
139
+ scalar_tensor_idx_lookup = {
140
+ UfuncKey.CUDAFunctorOnSelf: 1,
141
+ UfuncKey.CUDAFunctorOnOther: 0,
142
+ UfuncKey.CUDAFunctor: None,
143
+ }
144
+ if eligible_for_binary_scalar_specialization(g):
145
+ keys = [
146
+ UfuncKey.CUDAFunctorOnSelf,
147
+ UfuncKey.CUDAFunctorOnOther,
148
+ UfuncKey.CUDAFunctor,
149
+ ]
150
+ else:
151
+ keys = [UfuncKey.CUDAFunctor]
152
+ for k in [UfuncKey.CUDAFunctorOnSelf, UfuncKey.CUDAFunctorOnOther]:
153
+ assert k not in loops, f"cannot use {k} on non-binary function"
154
+ for k in keys:
155
+ # If the key was directly defined, skip functor codegen; we assume the
156
+ # user has already done it for us
157
+ if k in loops:
158
+ ufunctor_sig = UfunctorSignature(
159
+ g, scalar_tensor_idx=scalar_tensor_idx_lookup[k], name=loops[k].name
160
+ )
161
+ for dtype in loops[k].supported_dtypes:
162
+ ufunctor_sigs.setdefault(dtype, {})[k] = ufunctor_sig
163
+ continue
164
+
165
+ # Note [ScalarOnly and Generic must match names for CUDA]
166
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
167
+ # Otherwise, look in ANY of the generic entries. For simplicity of
168
+ # codegen, if both ScalarOnly and Generic are defined, the ufunc name
169
+ # must match (if they didn't match, we'd have to generate distinct
170
+ # functors per dtype, which is awful, so we're not going to do it unless
171
+ # someone really forces us to)
172
+ ufunc_name = None
173
+ supported_dtypes: OrderedSet[ScalarType] = OrderedSet()
174
+ for lk in [UfuncKey.ScalarOnly, UfuncKey.Generic]:
175
+ if lk not in loops:
176
+ continue
177
+ if ufunc_name is None:
178
+ ufunc_name = loops[lk].name
179
+ else:
180
+ # See Note [ScalarOnly and Generic must match names for CUDA]
181
+ assert (
182
+ ufunc_name == loops[lk].name
183
+ ), "ScalarOnly and Generic must have same ufunc name"
184
+ supported_dtypes |= loops[lk].supported_dtypes
185
+ assert ufunc_name is not None
186
+
187
+ name = f"{k}_{ufunc_name}"
188
+ ufunctor_sig = UfunctorSignature(
189
+ g, scalar_tensor_idx=scalar_tensor_idx_lookup[k], name=name
190
+ )
191
+ for dtype in supported_dtypes:
192
+ ufunctor_sigs.setdefault(dtype, {})[k] = ufunctor_sig
193
+
194
+ ufunc_sig = UfuncSignature(
195
+ g, name=f"ufunc::{ufunc_name}", compute_t=BaseCType(opmath_t)
196
+ )
197
+ apply_ctx = ufunctor_sig.fields() + ufunctor_sig.arguments().apply
198
+ ufunctors.append(
199
+ f"""
200
+ template <typename scalar_t>
201
+ struct {ufunctor_sig.name} {{
202
+ using opmath_t = at::opmath_type<scalar_t>;
203
+ {ufunctor_sig.decl_fields()}
204
+ {ufunctor_sig.inline_defn_ctor()}
205
+ __device__ {ufunctor_sig.decl_apply()} {{
206
+ return {ufunc_sig.call(apply_ctx)};
207
+ }}
208
+ }};
209
+ """
210
+ )
211
+
212
+ return ufunctor_sigs, "\n".join(ufunctors)
213
+
214
+
215
+ @dataclass(frozen=True)
216
+ class BinaryScalarSpecializationConfig:
217
+ scalar_idx: int
218
+ ctor_tensor: str
219
+ ufunc_key: UfuncKey
220
+
221
+
222
+ BinaryScalarSpecializationConfigs = [
223
+ BinaryScalarSpecializationConfig(
224
+ scalar_idx=0,
225
+ ctor_tensor="self",
226
+ ufunc_key=UfuncKey.CUDAFunctorOnOther,
227
+ ),
228
+ BinaryScalarSpecializationConfig(
229
+ scalar_idx=1,
230
+ ctor_tensor="other",
231
+ ufunc_key=UfuncKey.CUDAFunctorOnSelf,
232
+ ),
233
+ ]
234
+
235
+
236
+ def compute_ufunc_cuda_dtype_body(
237
+ g: NativeFunctionsGroup,
238
+ dtype: ScalarType,
239
+ inner_loops: Dict[UfuncKey, UfunctorSignature],
240
+ parent_ctx: Sequence[Binding],
241
+ ) -> str:
242
+ body = "using opmath_t = at::opmath_type<scalar_t>;"
243
+ body += "if (false) {}\n" # for ease of codegen
244
+ for config in BinaryScalarSpecializationConfigs:
245
+ if config.ufunc_key not in inner_loops:
246
+ continue
247
+ ufunctor_sig = inner_loops[config.ufunc_key]
248
+ scalar_idx = config.scalar_idx + 1
249
+ # Make a copy and at the same time widen the type (not permissible
250
+ # without copy; we don't want to mutate the input argument anyway)
251
+ ctx: List[Union[Expr, Binding]] = list(parent_ctx)
252
+ ctx.append(
253
+ Expr(
254
+ expr=f"iter.scalar_value<opmath_t>({scalar_idx})",
255
+ type=NamedCType(config.ctor_tensor, BaseCType(opmath_t)),
256
+ )
257
+ )
258
+ ufunctor_ctor_exprs_str = ", ".join(
259
+ a.expr for a in translate(ctx, ufunctor_sig.arguments().ctor)
260
+ )
261
+
262
+ # NB: ufunctor must be allocated before iter.remove_operand is called,
263
+ # as it relies on iter
264
+ body += f"""\
265
+ else if (iter.is_cpu_scalar({scalar_idx})) {{
266
+ {ufunctor_sig.name}<scalar_t> ufunctor({ufunctor_ctor_exprs_str});
267
+ iter.remove_operand({scalar_idx});
268
+ gpu_kernel(iter, ufunctor);
269
+ }}"""
270
+
271
+ ufunctor_sig = inner_loops[UfuncKey.CUDAFunctor]
272
+ ufunctor_ctor_exprs_str = ", ".join(
273
+ a.expr for a in translate(parent_ctx, ufunctor_sig.arguments().ctor)
274
+ )
275
+ body += f"""
276
+ else {{
277
+ gpu_kernel(iter, {ufunctor_sig.name}<scalar_t>({ufunctor_ctor_exprs_str}));
278
+ }}
279
+ """
280
+ return body
281
+
282
+
283
+ @with_native_function
284
+ def compute_ufunc_cuda(g: NativeFunctionsGroup) -> str:
285
+ # First, build the functors, indexing them by dtype
286
+ ufunctor_sigs, ufunctors = compute_ufunc_cuda_functors(g)
287
+
288
+ # Next, build the conditionals
289
+ sig = StructuredImplSignature(g, ufunc.kernel_name(g, DispatchKey.CUDA))
290
+ dtype_cases = []
291
+ for dtype, inner_ufunc_sigs in ufunctor_sigs.items():
292
+ dtype_cases.append(
293
+ f"""
294
+ AT_DISPATCH_CASE(at::ScalarType::{dtype},
295
+ [&]() {{
296
+ {compute_ufunc_cuda_dtype_body(g, dtype, inner_ufunc_sigs, sig.arguments())}
297
+ }}
298
+ )
299
+ """
300
+ )
301
+
302
+ dtype_cases_str = "\n".join(dtype_cases)
303
+
304
+ stub_sig = StubSignature(g)
305
+
306
+ return f"""
307
+ {ufunctors}
308
+
309
+ {stub_sig.type_defn()};
310
+ {stub_sig.dispatch_decl()};
311
+
312
+ {stub_sig.kernel_defn()} {{
313
+ AT_DISPATCH_SWITCH(iter.common_dtype(), "{sig.name}",
314
+ {dtype_cases_str}
315
+ );
316
+ }}
317
+ REGISTER_DISPATCH({stub_sig.name}, &{stub_sig.kernel_name});
318
+
319
+ {sig.defn()} {{
320
+ {stub_sig.direct_call(sig.arguments())};
321
+ }}
322
+ """
323
+
324
+
325
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
326
+ #
327
+ # CPU STUFF
328
+ #
329
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
330
+
331
+
332
+ @dataclass(frozen=True)
333
+ class StubSignature:
334
+ g: NativeFunctionsGroup
335
+
336
+ @property
337
+ def name(self) -> str:
338
+ return f"{str(self.g.functional.func.name.name)}_stub"
339
+
340
+ @property
341
+ def kernel_name(self) -> str:
342
+ return f"{str(self.g.functional.func.name.name)}_kernel"
343
+
344
+ @property
345
+ def type_name(self) -> str:
346
+ return f"{str(self.g.functional.func.name.name)}_fn"
347
+
348
+ def arguments(self) -> List[Binding]:
349
+ return ufunc.stub_arguments(self.g)
350
+
351
+ def type(self) -> str:
352
+ cpp_args = self.arguments()
353
+ return f"void(*)(TensorIteratorBase&, {', '.join(a.type for a in cpp_args)})"
354
+
355
+ def dispatch_decl(self) -> str:
356
+ return f"DECLARE_DISPATCH({self.type_name}, {self.name})"
357
+
358
+ def dispatch_defn(self) -> str:
359
+ return f"DEFINE_DISPATCH({self.name})"
360
+
361
+ def kernel_defn(self) -> str:
362
+ return f"void {self.kernel_name}(TensorIteratorBase& iter, {', '.join(a.defn() for a in self.arguments())})"
363
+
364
+ def type_defn(self) -> str:
365
+ return f"using {self.type_name} = {self.type()}"
366
+
367
+ # must be called from context where this is TensorIteratorBase*
368
+ def call(self, ctx: Sequence[Binding]) -> str:
369
+ return f"{self.name}(device_type(), *this, {', '.join(a.expr for a in translate(ctx, self.arguments()))})"
370
+
371
+ # used in CUDA to skip the unnecessary dynamic dispatch
372
+ def direct_call(self, ctx: Sequence[Binding]) -> str:
373
+ return f"{self.kernel_name}(*this, {', '.join(a.expr for a in translate(ctx, self.arguments()))})"
374
+
375
+
376
+ @with_native_function
377
+ def compute_ufunc_cpu(g: NativeFunctionsGroup) -> str:
378
+ stub_sig = StubSignature(g)
379
+ sig = StructuredImplSignature(g, ufunc.kernel_name(g, DispatchKey.CPU))
380
+
381
+ return f"""
382
+ {stub_sig.type_defn()};
383
+ {stub_sig.dispatch_decl()};
384
+ {stub_sig.dispatch_defn()};
385
+
386
+ {sig.defn()} {{
387
+ {stub_sig.call(sig.arguments())};
388
+ }}
389
+ """
390
+
391
+
392
+ def compute_ufunc_cpu_dtype_body(
393
+ g: NativeFunctionsGroup,
394
+ dtype: ScalarType,
395
+ inner_loops: Dict[UfuncKey, UfuncSignature],
396
+ parent_ctx: Sequence[Binding],
397
+ ) -> str:
398
+ assert UfuncKey.CPUScalar in inner_loops, f"{dtype}, {inner_loops.keys()}"
399
+ assert inner_loops.keys() <= {UfuncKey.CPUScalar, UfuncKey.CPUVector}
400
+ scalar_loop = inner_loops[UfuncKey.CPUScalar]
401
+ vec_loop = None
402
+ if UfuncKey.CPUVector in inner_loops:
403
+ vec_loop = inner_loops[UfuncKey.CPUVector]
404
+
405
+ # NB: We DON'T use translate here, because translate is
406
+ # incapable of CSE'ing the scalar accesses in case it is also
407
+ # used by Vectorized; also, the unpacking here is very simple
408
+ # and only affects Scalar; everything else is implicitly captured
409
+ # by the lambda
410
+
411
+ # Setup scalar in scope
412
+ body = []
413
+ ctx = []
414
+ for b in parent_ctx:
415
+ if isinstance(b.argument, Argument) and b.argument.type != BaseType(
416
+ BaseTy.Scalar
417
+ ):
418
+ continue
419
+ body.append(f"auto _s_{b.name} = {b.name}.to<scalar_t>();")
420
+ ctx.append(Expr(f"_s_{b.name}", NamedCType(b.nctype.name, BaseCType(scalar_t))))
421
+ if vec_loop is not None:
422
+ for b in parent_ctx:
423
+ if isinstance(b.argument, Argument) and b.argument.type != BaseType(
424
+ BaseTy.Scalar
425
+ ):
426
+ continue
427
+ body.append(
428
+ f"auto _v_{b.name} = at::vec::Vectorized<scalar_t>(_s_{b.name});"
429
+ )
430
+ ctx.append(
431
+ Expr(
432
+ f"_v_{b.name}",
433
+ NamedCType(b.nctype.name, VectorizedCType(BaseCType(scalar_t))),
434
+ )
435
+ )
436
+
437
+ # Setup lambda signature
438
+ # NB: simplified version of ufunctor_arguments
439
+ scalar_bindings = []
440
+ vec_bindings = []
441
+ for a in g.functional.func.arguments.flat_non_out:
442
+ if not a.type.is_tensor_like():
443
+ continue
444
+ assert a.type == BaseType(BaseTy.Tensor)
445
+ scalar_bindings.append(
446
+ Binding(
447
+ name=a.name,
448
+ nctype=NamedCType(a.name, BaseCType(scalar_t)),
449
+ argument=a,
450
+ )
451
+ )
452
+ if vec_loop is not None:
453
+ vec_bindings.append(
454
+ Binding(
455
+ name=a.name,
456
+ nctype=NamedCType(a.name, VectorizedCType(BaseCType(scalar_t))),
457
+ argument=a,
458
+ )
459
+ )
460
+
461
+ def with_ctx(b: Sequence[Binding]) -> List[Union[Expr, Binding]]:
462
+ r: List[Union[Expr, Binding]] = []
463
+ r.extend(ctx)
464
+ r.extend(b)
465
+ return r
466
+
467
+ body_str = "\n".join(body)
468
+ if vec_loop is not None:
469
+ return f"""
470
+ {body_str}
471
+ cpu_kernel_vec(iter,
472
+ [=]({', '.join(b.decl() for b in scalar_bindings)}) {{ return {scalar_loop.call(with_ctx(scalar_bindings))}; }},
473
+ [=]({', '.join(b.decl() for b in vec_bindings)}) {{ return {vec_loop.call(with_ctx(vec_bindings))}; }}
474
+ );
475
+ """
476
+ else:
477
+ return f"""
478
+ {body_str}
479
+ cpu_kernel(iter,
480
+ [=]({', '.join(b.decl() for b in scalar_bindings)}) {{ return {scalar_loop.call(with_ctx(scalar_bindings))}; }}
481
+ );
482
+ """
483
+
484
+
485
+ @with_native_function
486
+ def compute_ufunc_cpu_kernel(g: NativeFunctionsGroup) -> str:
487
+ stub_sig = StubSignature(g)
488
+
489
+ # Reindex the ufunc by dtypes; processing generic/scalaronly as well
490
+ loops = g.out.ufunc_inner_loop
491
+ ufunc_sigs: Dict[ScalarType, Dict[UfuncKey, UfuncSignature]] = {}
492
+ for k in [UfuncKey.CPUScalar, UfuncKey.CPUVector]:
493
+ lks = []
494
+ # ORDER MATTERS: this specifies overriding precedence
495
+ if k in loops: # should happen rarely
496
+ lks.append(k)
497
+ if UfuncKey.ScalarOnly in loops and k is UfuncKey.CPUScalar:
498
+ lks.append(UfuncKey.ScalarOnly)
499
+ if UfuncKey.Generic in loops:
500
+ lks.append(UfuncKey.Generic)
501
+ # TODO: don't hardcode ufunc:: namespace here, should be centralized somehow
502
+ for lk in lks:
503
+ for dtype in loops[lk].supported_dtypes:
504
+ compute_t: CType
505
+ if k is UfuncKey.CPUScalar:
506
+ compute_t = BaseCType(scalar_t)
507
+ elif k is UfuncKey.CPUVector:
508
+ compute_t = VectorizedCType(BaseCType(scalar_t))
509
+ else:
510
+ raise AssertionError()
511
+ inner_ufunc_sigs = ufunc_sigs.setdefault(dtype, {})
512
+ if k not in inner_ufunc_sigs:
513
+ inner_ufunc_sigs[k] = UfuncSignature(
514
+ g, name=f"ufunc::{loops[lk].name}", compute_t=compute_t
515
+ )
516
+
517
+ # Build the conditionals
518
+ dtype_cases = []
519
+ for dtype, inner_ufunc_sigs in ufunc_sigs.items():
520
+ dtype_cases.append(
521
+ f"""
522
+ AT_DISPATCH_CASE(at::ScalarType::{dtype},
523
+ [&]() {{
524
+ {compute_ufunc_cpu_dtype_body(g, dtype, inner_ufunc_sigs, stub_sig.arguments())}
525
+ }}
526
+ )
527
+ """
528
+ )
529
+
530
+ dtype_cases_str = "\n".join(dtype_cases)
531
+ return f"""
532
+ namespace {{
533
+
534
+ {stub_sig.kernel_defn()} {{
535
+ AT_DISPATCH_SWITCH(iter.common_dtype(), "{stub_sig.name}",
536
+ {dtype_cases_str}
537
+ );
538
+ }}
539
+
540
+ }} // anonymous namespace
541
+
542
+ {stub_sig.type_defn()};
543
+ {stub_sig.dispatch_decl()};
544
+ REGISTER_DISPATCH({stub_sig.name}, &{stub_sig.kernel_name});
545
+ """
env-llmeval/lib/python3.10/site-packages/torchgen/executorch/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (184 Bytes).
 
env-llmeval/lib/python3.10/site-packages/torchgen/selective_build/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/torchgen/selective_build/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (189 Bytes).
 
env-llmeval/lib/python3.10/site-packages/torchgen/selective_build/__pycache__/operator.cpython-310.pyc ADDED
Binary file (3.27 kB).
 
env-llmeval/lib/python3.10/site-packages/torchgen/selective_build/__pycache__/selector.cpython-310.pyc ADDED
Binary file (7.99 kB).
 
env-llmeval/lib/python3.10/site-packages/torchgen/selective_build/operator.py ADDED
@@ -0,0 +1,170 @@
1
+ from dataclasses import dataclass
2
+ from typing import Dict, Optional, Tuple
3
+
4
+
5
+ # This class holds information about a single operator used to determine
6
+ # the outcome of a selective/custom PyTorch build that doesn't include
7
+ # registration code for all the supported operators. This is done to
8
+ # reduce the size of the generated binary so that it can be deployed in
9
+ # situations where binary size comes at a premium.
10
+ #
11
+ @dataclass(frozen=True)
12
+ class SelectiveBuildOperator:
13
+ # The name of the operator. This includes the aten::, etc... prefix
14
+ # The operator name may or may not have the overload name. If this
15
+ # operator name does not specify an overload name, the way to determine
16
+ # if this entry refers to the family of operators with this base name
17
+ # or just the operator with this name is to look at the value of the
18
+ # 'include_all_overloads' flag in this class.
19
+ name: str
20
+
21
+ # True if this is a root operator (i.e. called directly from a
22
+ # TorchScript model, etc...). An operator is considered to be a
23
+ # root operator if it is called directly from any one of the models
24
+ # that this instance of the pytorch library was built for. Hence, it
25
+ # may not be a root operator in all of the models that are used in
26
+ # this instance of the pytorch library.
27
+ is_root_operator: bool
28
+
29
+ # Is this operator used for on-device training? If True, then we need to
30
+ # use the information to generate code in VariableType_N.cpp for registration
31
+ # of training related operators. Again, this is True if this operator
32
+ # is used for training in one or more models used by this instance of the
33
+ # pytorch library.
34
+ is_used_for_training: bool
35
+
36
+ # If True, it indicates that this operator instance (object) refers to an
37
+ # operator without the overload name and should apply to all overloads
38
+ # which have this operator name as the base name. This flag is applicable
39
+ # only for objects that have operator names without a DOT (period) character
40
+ # in them.
41
+ #
42
+ # Note: This flag is a temporary workaround to grandfather in the current
43
+ # static selective (custom) build mechanism, which largely ignores overload
44
+ # names when determining whether to select operators for registration
45
+ # purposes.
46
+ include_all_overloads: bool
47
+
48
+ # Debug Information at the operator level
49
+ _debug_info: Optional[Tuple[str, ...]]
50
+
51
+ @staticmethod
52
+ def from_yaml_dict(
53
+ op_name: str, op_info: Dict[str, object]
54
+ ) -> "SelectiveBuildOperator":
55
+ allowed_keys = {
56
+ "name",
57
+ "is_root_operator",
58
+ "is_used_for_training",
59
+ "include_all_overloads",
60
+ "debug_info",
61
+ }
62
+
63
+ if len(set(op_info.keys()) - allowed_keys) > 0:
64
+ raise Exception(
65
+ "Got unexpected top level keys: {}".format(
66
+ ",".join(set(op_info.keys()) - allowed_keys),
67
+ )
68
+ )
69
+
70
+ if "name" in op_info:
71
+ assert op_name == op_info["name"]
72
+
73
+ is_root_operator = op_info.get("is_root_operator", True)
74
+ assert isinstance(is_root_operator, bool)
75
+
76
+ is_used_for_training = op_info.get("is_used_for_training", True)
77
+ assert isinstance(is_used_for_training, bool)
78
+
79
+ include_all_overloads = op_info.get("include_all_overloads", True)
80
+ assert isinstance(include_all_overloads, bool)
81
+
82
+ debug_info: Optional[Tuple[str, ...]] = None
83
+ if "debug_info" in op_info:
84
+ di_list = op_info["debug_info"]
85
+ assert isinstance(di_list, list)
86
+ debug_info = tuple(str(x) for x in di_list)
87
+
88
+ return SelectiveBuildOperator(
89
+ name=op_name,
90
+ is_root_operator=is_root_operator,
91
+ is_used_for_training=is_used_for_training,
92
+ include_all_overloads=include_all_overloads,
93
+ _debug_info=debug_info,
94
+ )
95
+
96
+ @staticmethod
97
+ def from_legacy_operator_name_without_overload(
98
+ name: str,
99
+ ) -> "SelectiveBuildOperator":
100
+ return SelectiveBuildOperator(
101
+ name=name,
102
+ is_root_operator=True,
103
+ is_used_for_training=True,
104
+ include_all_overloads=True,
105
+ _debug_info=None,
106
+ )
107
+
108
+ def to_dict(self) -> Dict[str, object]:
109
+ ret: Dict[str, object] = {
110
+ "is_root_operator": self.is_root_operator,
111
+ "is_used_for_training": self.is_used_for_training,
112
+ "include_all_overloads": self.include_all_overloads,
113
+ }
114
+ if self._debug_info is not None:
115
+ ret["debug_info"] = self._debug_info
116
+
117
+ return ret
118
+
119
+
120
+ def merge_debug_info(
121
+ lhs: Optional[Tuple[str, ...]],
122
+ rhs: Optional[Tuple[str, ...]],
123
+ ) -> Optional[Tuple[str, ...]]:
124
+ # Ensure that when merging, each entry shows up just once.
125
+ if lhs is None and rhs is None:
126
+ return None
127
+
128
+ return tuple(set((lhs or ()) + (rhs or ())))
129
+
130
+
131
+ def combine_operators(
132
+ lhs: "SelectiveBuildOperator", rhs: "SelectiveBuildOperator"
133
+ ) -> "SelectiveBuildOperator":
134
+ if str(lhs.name) != str(rhs.name):
135
+ raise Exception(
136
+ f"Expected both arguments to have the same name, but got '{str(lhs.name)}' and '{str(rhs.name)}' instead"
137
+ )
138
+
139
+ return SelectiveBuildOperator(
140
+ name=lhs.name,
141
+ # Consider this operator to be a root operator if it is a
142
+ # root operator in any of the models used in this instance of
143
+ # the pytorch library.
144
+ is_root_operator=lhs.is_root_operator or rhs.is_root_operator,
145
+ # Consider this operator to be a training operator if it is
146
+ # an operator used for training in any of the models used
147
+ # in this instance of the pytorch library.
148
+ is_used_for_training=lhs.is_used_for_training or rhs.is_used_for_training,
149
+ include_all_overloads=lhs.include_all_overloads or rhs.include_all_overloads,
150
+ _debug_info=merge_debug_info(lhs._debug_info, rhs._debug_info),
151
+ )
152
+
153
+
154
+ def merge_operator_dicts(
155
+ lhs: Dict[str, SelectiveBuildOperator],
156
+ rhs: Dict[str, SelectiveBuildOperator],
157
+ ) -> Dict[str, SelectiveBuildOperator]:
158
+ operators: Dict[str, SelectiveBuildOperator] = {}
159
+ for op_name, op in list(lhs.items()) + list(rhs.items()):
160
+ new_op = op
161
+ if op_name in operators:
162
+ new_op = combine_operators(operators[op_name], op)
163
+
164
+ operators[op_name] = new_op
165
+
166
+ return operators
167
+
168
+
169
+ def strip_operator_overload_name(op_name: str) -> str:
170
+ return op_name.split(".")[0]
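+ # For example:
+ #   strip_operator_overload_name("aten::add.out") == "aten::add"
+ #   strip_operator_overload_name("aten::add") == "aten::add"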
env-llmeval/lib/python3.10/site-packages/torchgen/selective_build/selector.py ADDED
@@ -0,0 +1,347 @@
+ from collections import defaultdict
+ from collections.abc import Iterable
+ from dataclasses import dataclass
+ from typing import Dict, List, Optional, Set, Tuple
+
+ import yaml
+
+ from torchgen.model import NativeFunction
+ from torchgen.selective_build.operator import (
+     merge_debug_info,
+     merge_operator_dicts,
+     SelectiveBuildOperator,
+     strip_operator_overload_name,
+ )
+
+
+ # A SelectiveBuilder holds information extracted from the selective build
+ # YAML specification.
+ #
+ # It includes information about the build's selectivity, the debug_info
+ # associated with this selective build (opaque string), and the set of
+ # operators that should be included in the build.
+ #
+ @dataclass(frozen=True)
+ class SelectiveBuilder:
+     # If true, then the build is not selective, and includes all
+     # operators.
+     include_all_operators: bool
+
+     # Debug Information at the selective/custom build level.
+     _debug_info: Optional[Tuple[str, ...]]
+
+     # A dictionary of operator -> operator metadata.
+     operators: Dict[str, SelectiveBuildOperator]
+
+     # A dictionary of selected kernel tags and dtypes. Typically a
+     # PyTorch Operator Kernel (function) may have many code paths
+     # that are specialized for many many Tensor dtypes, so it's not
+     # one per kernel function, but there could be many per kernel
+     # function. The tag isn't a kernel function name, but some fragment
+     # of the kernel function implementation itself.
+     kernel_metadata: Dict[str, List[str]]
+
+     # ExecuTorch only. A dictionary of kernel tag -> list of (list of input
+     # dtypes for tensor-like input args).
+     # This is from selective.yaml
+     et_kernel_metadata: Dict[str, List[str]]
+
+     # A set of all the custom torch bind classes used by the selected models
+     # Stored as a set internally to remove duplicates proactively, but written
+     # as a list to yamls
+     custom_classes: Set[str]
+
+     # A set of all the build features used by the selected models
+     # Stored as a set internally to remove duplicates proactively, but written
+     # as a list to yamls
+     build_features: Set[str]
+
+     # If true, then fragments for all dtypes for all kernel functions
+     # are included as well as all custom classes. This is typically set when any one of the
+     # operator lists is generated from a mechanism other than
+     # tracing based selective build.
+     include_all_non_op_selectives: bool
+
+     @staticmethod
+     def get_nop_selector() -> "SelectiveBuilder":
+         return SelectiveBuilder.from_yaml_dict({"include_all_operators": True})
+
+     @staticmethod
+     def from_yaml_dict(data: Dict[str, object]) -> "SelectiveBuilder":
+         valid_top_level_keys = {
+             "include_all_non_op_selectives",
+             "include_all_operators",
+             "debug_info",
+             "operators",
+             "kernel_metadata",
+             "et_kernel_metadata",
+             "custom_classes",
+             "build_features",
+         }
+         top_level_keys = set(data.keys())
+         if len(top_level_keys - valid_top_level_keys) > 0:
+             raise Exception(
+                 "Got unexpected top level keys: {}".format(
+                     ",".join(top_level_keys - valid_top_level_keys),
+                 )
+             )
+         include_all_operators = data.get("include_all_operators", False)
+         assert isinstance(include_all_operators, bool)
+
+         debug_info = None
+         if "debug_info" in data:
+             di_list = data["debug_info"]
+             assert isinstance(di_list, list)
+
+             debug_info = tuple(str(x) for x in di_list)
+
+         operators = {}
+         operators_dict = data.get("operators", {})
+         assert isinstance(operators_dict, dict)
+
+         for k, v in operators_dict.items():
+             operators[k] = SelectiveBuildOperator.from_yaml_dict(k, v)
+
+         kernel_metadata = {}
+         kernel_metadata_dict = data.get("kernel_metadata", {})
+         assert isinstance(kernel_metadata_dict, dict)
+
+         for k, v in kernel_metadata_dict.items():
+             kernel_metadata[str(k)] = [str(dtype) for dtype in v]
+
+         et_kernel_metadata = data.get("et_kernel_metadata", {})
+         assert isinstance(et_kernel_metadata, dict)
+
+         custom_classes = data.get("custom_classes", [])
+         assert isinstance(custom_classes, Iterable)
+         custom_classes = set(custom_classes)
+
+         build_features = data.get("build_features", [])
+         assert isinstance(build_features, Iterable)
+         build_features = set(build_features)
+
+         include_all_non_op_selectives = data.get("include_all_non_op_selectives", False)
+         assert isinstance(include_all_non_op_selectives, bool)
+
+         return SelectiveBuilder(
+             include_all_operators,
+             debug_info,
+             operators,
+             kernel_metadata,
+             et_kernel_metadata,
+             custom_classes,  # type: ignore[arg-type]
+             build_features,  # type: ignore[arg-type]
+             include_all_non_op_selectives,
+         )
+
+     @staticmethod
+     def from_yaml_str(config_contents: str) -> "SelectiveBuilder":
+         contents = yaml.safe_load(config_contents)
+         return SelectiveBuilder.from_yaml_dict(contents)
+
+     @staticmethod
+     def from_yaml_path(config_path: str) -> "SelectiveBuilder":
+         with open(config_path) as f:
+             contents = yaml.safe_load(f)
+         return SelectiveBuilder.from_yaml_dict(contents)
+
+     @staticmethod
+     def from_legacy_op_registration_allow_list(
+         allow_list: Set[str], is_root_operator: bool, is_used_for_training: bool
+     ) -> "SelectiveBuilder":
+         operators = {}
+         for op in allow_list:
+             operators[op] = {
+                 "name": op,
+                 "is_root_operator": is_root_operator,
+                 "is_used_for_training": is_used_for_training,
+                 "include_all_overloads": True,
+             }
+         return SelectiveBuilder.from_yaml_dict(
+             {
+                 "operators": operators,
+                 "include_all_non_op_selectives": True,
+             }
+         )
+
+     def is_operator_selected(self, name: str) -> bool:
+         if self.include_all_operators:
+             return True
+
+         if name in self.operators:
+             return True
+         name = strip_operator_overload_name(name)
+         return name in self.operators and self.operators[name].include_all_overloads
+
+     def is_native_function_selected(self, func: NativeFunction) -> bool:
+         op_name = op_name_from_native_function(func)
+         return self.is_operator_selected(op_name)
+
+     def is_operator_selected_for_training(self, name: str) -> bool:
+         if not self.is_operator_selected(name):
+             return False
+         if self.include_all_operators:
+             return True
+
+         not_training_op = SelectiveBuildOperator(
+             name="",
+             is_root_operator=False,
+             is_used_for_training=False,
+             include_all_overloads=False,
+             _debug_info=None,
+         )
+         op = not_training_op
+         if name in self.operators:
+             op = self.operators[name]
+
+         name = strip_operator_overload_name(name)
+         base_op = not_training_op
+         if name in self.operators:
+             base_op = self.operators[name]
+
+         return op.is_used_for_training or (
+             base_op.include_all_overloads and base_op.is_used_for_training
+         )
+
+     def is_native_function_selected_for_training(self, func: NativeFunction) -> bool:
+         op_name = op_name_from_native_function(func)
+         return self.is_operator_selected_for_training(op_name)
+
+     def is_root_operator(self, name: str) -> bool:
+         if not self.is_operator_selected(name):
+             return False
+         if self.include_all_operators:
+             return True
+
+         if name in self.operators:
+             op: SelectiveBuildOperator = self.operators[name]
+             return op.is_root_operator
+         name = strip_operator_overload_name(name)
+         if name not in self.operators:
+             return False
+         base_op: SelectiveBuildOperator = self.operators[name]
+         return base_op.include_all_overloads and base_op.is_root_operator
+
+     def is_kernel_dtype_selected(self, kernel_tag: str, dtype: str) -> bool:
+         if self.include_all_operators or self.include_all_non_op_selectives:
+             return True
+
+         return (
+             kernel_tag in self.kernel_metadata
+             and dtype in self.kernel_metadata[kernel_tag]
+         )
+
+     def et_get_selected_kernels(self, op_name: str, kernel_key: List[str]) -> List[str]:
+         """
+         Return a list of kernel keys that cover the used ops
+         """
+         # If no kernel metadata, either it's implied by include_all_operators=True or the op is not used.
+         if op_name not in self.et_kernel_metadata:
+             return kernel_key if self.include_all_operators else []
+         # Otherwise, only return the specific kernel keys.
+
+         result_set = set()
+
+         for model_kernel_keys in self.et_kernel_metadata[op_name]:
+             key_found = False
+             for key in kernel_key:
+                 # Don't compare the version for now
+                 if (
+                     key != "default"
+                     and key.split("/")[1] == model_kernel_keys.split("/")[1]
+                 ):
+                     result_set.add(key)
+                     key_found = True
+                     break
+             if not key_found:
+                 if "default" not in kernel_key:
+                     raise Exception("Missing kernel for the model")
+                 else:
+                     result_set.add("default")
+
+         return list(result_set)
+
+     def to_dict(self) -> Dict[str, object]:
+         ret: Dict[str, object] = {
+             "include_all_non_op_selectives": self.include_all_non_op_selectives,
+             "include_all_operators": self.include_all_operators,
+         }
+         operators = {}
+         for op_name, op in self.operators.items():
+             operators[op_name] = op.to_dict()
+         ret["operators"] = operators
+
+         if self._debug_info is not None:
+             ret["debug_info"] = sorted(self._debug_info)
+
+         ret["kernel_metadata"] = {
+             k: sorted(v) for (k, v) in self.kernel_metadata.items()
+         }
+
+         ret["et_kernel_metadata"] = self.et_kernel_metadata
+
+         ret["custom_classes"] = sorted(self.custom_classes)
+
+         ret["build_features"] = sorted(self.build_features)
+
+         return ret
+
+
+ def merge_kernel_metadata(
+     lhs: Dict[str, List[str]],
+     rhs: Dict[str, List[str]],
+ ) -> Dict[str, List[str]]:
+     kernel_metadata: Dict[str, List[str]] = {}
+     for tag_name, dtypes in list(lhs.items()) + list(rhs.items()):
+         dtypes_copy = set(dtypes)
+         if tag_name in kernel_metadata:
+             dtypes_copy |= set(kernel_metadata[tag_name])
+
+         kernel_metadata[tag_name] = list(dtypes_copy)
+
+     return kernel_metadata
+
+
+ def merge_et_kernel_metadata(
+     lhs: Dict[str, List[str]],
+     rhs: Dict[str, List[str]],
+ ) -> Dict[str, List[str]]:
+     merge_et_kernel_metadata: Dict[str, Set[str]] = defaultdict(set)
+     for op in list(lhs.keys()) + list(rhs.keys()):
+         merge_et_kernel_metadata[op].update(lhs.get(op, []))
+         merge_et_kernel_metadata[op].update(rhs.get(op, []))
+
+     return {op: sorted(val) for op, val in merge_et_kernel_metadata.items()}
+
+
+ def combine_selective_builders(
+     lhs: SelectiveBuilder, rhs: SelectiveBuilder
+ ) -> SelectiveBuilder:
+     include_all_operators = lhs.include_all_operators or rhs.include_all_operators
+     debug_info = merge_debug_info(lhs._debug_info, rhs._debug_info)
+     operators = merge_operator_dicts(lhs.operators, rhs.operators)
+     kernel_metadata = merge_kernel_metadata(lhs.kernel_metadata, rhs.kernel_metadata)
+     et_kernel_metadata = merge_et_kernel_metadata(
+         lhs.et_kernel_metadata, rhs.et_kernel_metadata
+     )
+     include_all_non_op_selectives = (
+         lhs.include_all_non_op_selectives or rhs.include_all_non_op_selectives
+     )
+     custom_classes = lhs.custom_classes.union(rhs.custom_classes)
+     build_features = lhs.build_features.union(rhs.build_features)
+     return SelectiveBuilder(
+         include_all_operators,
+         debug_info,
+         operators,
+         kernel_metadata,
+         et_kernel_metadata,
+         custom_classes,
+         build_features,
+         include_all_non_op_selectives,
+     )
+
+
+ def op_name_from_native_function(f: NativeFunction) -> str:
+     # This was originally read from the 'operator_name_with_overload' field in the
+     # declaration dict, which was the part before the first '(' in 'schema_string'.
+     return f"{f.namespace}::{f.func.name}"
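A minimal usage sketch for SelectiveBuilder: the YAML below is invented for illustration, uses only the top-level keys accepted by from_yaml_dict, and gives the operator entry the same shape that from_legacy_op_registration_allow_list produces.

from torchgen.selective_build.selector import SelectiveBuilder

config = """
include_all_operators: false
include_all_non_op_selectives: false
operators:
  aten::add.Tensor:
    name: aten::add.Tensor
    is_root_operator: true
    is_used_for_training: false
    include_all_overloads: false
kernel_metadata:
  add_kernel:
    - float
    - int
"""

selector = SelectiveBuilder.from_yaml_str(config)
# With both include_all_* flags false, queries consult the explicit entries only.
assert selector.is_operator_selected("aten::add.Tensor")
assert not selector.is_operator_selected("aten::mul.Tensor")
assert selector.is_kernel_dtype_selected("add_kernel", "float")
assert not selector.is_kernel_dtype_selected("add_kernel", "double")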
env-llmeval/lib/python3.10/site-packages/torchgen/static_runtime/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/torchgen/static_runtime/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (188 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/torchgen/static_runtime/__pycache__/config.cpython-310.pyc ADDED
Binary file (7.73 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torchgen/static_runtime/__pycache__/gen_static_runtime_ops.cpython-310.pyc ADDED
Binary file (7.21 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torchgen/static_runtime/__pycache__/generator.cpython-310.pyc ADDED
Binary file (19.6 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torchgen/static_runtime/config.py ADDED
@@ -0,0 +1,388 @@
1
+ from typing import Dict, Union
2
+
3
+ from torchgen.model import NativeFunctionsGroup, NativeFunctionsViewGroup
4
+
5
+
6
+ def func_name_base_str(g: Union[NativeFunctionsGroup, NativeFunctionsViewGroup]) -> str:
7
+ if isinstance(g, NativeFunctionsGroup):
8
+ return str(g.functional.func.name.name.base)
9
+ else:
10
+ return str(g.view.root_name)
11
+
12
+
13
+ is_hand_written_ops_ = frozenset(
14
+ (
15
+ "abs",
16
+ "add",
17
+ "addmm",
18
+ "all",
19
+ "any",
20
+ "argmin",
21
+ "bmm",
22
+ "clamp",
23
+ "clamp_min",
24
+ "cumsum",
25
+ "div",
26
+ "fmod",
27
+ "index_select",
28
+ "leaky_relu",
29
+ "linear",
30
+ "log",
31
+ "matmul",
32
+ "mul",
33
+ "narrow_copy",
34
+ "nonzero",
35
+ "pow",
36
+ "remainder",
37
+ "sigmoid",
38
+ "sign",
39
+ "sub",
40
+ "tanh",
41
+ "detach",
42
+ "expand_as",
43
+ "flatten",
44
+ "narrow",
45
+ "reshape_as",
46
+ "select",
47
+ "slice",
48
+ "softmax",
49
+ "split",
50
+ "squeeze",
51
+ "transpose",
52
+ "view",
53
+ "where",
54
+ )
55
+ )
56
+
57
+
58
+ def is_hand_written(g: Union[NativeFunctionsGroup, NativeFunctionsViewGroup]) -> bool:
59
+ name_base = func_name_base_str(g)
60
+ return name_base in is_hand_written_ops_
61
+
62
+
63
+ def override_test_values(arg_map: Dict[str, str], op_name: str, index: int) -> None:
64
+ assert index == 0 or index == 1
65
+ if op_name == "addr":
66
+ if index == 0:
67
+ arg_map["self"] = "at::rand({6, 6})"
68
+ arg_map["vec1"] = "at::rand({6})"
69
+ arg_map["vec2"] = "at::rand({6})"
70
+ else:
71
+ arg_map["self"] = "at::rand({22, 22})"
72
+ arg_map["vec1"] = "at::rand({22})"
73
+ arg_map["vec2"] = "at::rand({22})"
74
+ return
75
+ if op_name == "mv":
76
+ if index == 0:
77
+ arg_map["self"] = "at::rand({6, 6})"
78
+ arg_map["vec"] = "at::rand({6})"
79
+ else:
80
+ arg_map["self"] = "at::rand({22, 22})"
81
+ arg_map["vec"] = "at::rand({22})"
82
+ return
83
+ if op_name == "addbmm":
84
+ if index == 0:
85
+ arg_map["self"] = "at::rand({6, 6})"
86
+ else:
87
+ arg_map["self"] = "at::rand({22, 22})"
88
+ return
89
+ if op_name == "cross":
90
+ if index == 0:
91
+ arg_map["self"] = "at::rand({3, 3, 3})"
92
+ arg_map["other"] = "at::rand({3, 3, 3})"
93
+ else:
94
+ arg_map["self"] = "at::rand({22, 3, 22})"
95
+ arg_map["other"] = "at::rand({22, 3, 22})"
96
+ return
97
+ if op_name == "take":
98
+ if index == 0:
99
+ arg_map["index"] = "at::randint(0, 216, {20}, torch::kInt64)"
100
+ else:
101
+ arg_map["index"] = "at::randint(0, 1000, {100}, torch::kInt64)"
102
+ return
103
+ if op_name == "take_along_dim":
104
+ if index == 0:
105
+ arg_map["indices"] = "at::argsort(self0, 1, true)"
106
+ else:
107
+ arg_map["indices"] = "at::argsort(self1, 1, true)"
108
+ return
109
+ if op_name == "masked_select":
110
+ if index == 0:
111
+ arg_map["mask"] = "at::randn({6, 6, 6}) > 0.5"
112
+ else:
113
+ arg_map["mask"] = "at::rand({22, 22, 22}) > 0.5"
114
+ return
115
+ if op_name == "orgqr":
116
+ if index == 0:
117
+ arg_map["input2"] = "at::rand({6, 6})"
118
+ else:
119
+ arg_map["input2"] = "at::rand({22, 22})"
120
+ return
121
+ if op_name == "ormqr":
122
+ if index == 0:
123
+ arg_map["input2"] = "at::rand({6, 6})"
124
+ else:
125
+ arg_map["input2"] = "at::rand({22, 22})"
126
+ return
127
+ if op_name == "quantile":
128
+ if index == 0:
129
+ arg_map["q"] = "at::rand({6})"
130
+ arg_map["interpolation"] = '"linear"'
131
+ else:
132
+ arg_map["q"] = "at::rand({22})"
133
+ arg_map["interpolation"] = '"linear"'
134
+ return
135
+ if op_name == "nanquantile":
136
+ if index == 0:
137
+ arg_map["q"] = "at::rand({6})"
138
+ arg_map["interpolation"] = '"linear"'
139
+ else:
140
+ arg_map["q"] = "at::rand({22})"
141
+ arg_map["interpolation"] = '"linear"'
142
+ return
143
+ if op_name == "multi_margin_loss":
144
+ if index == 0:
145
+ arg_map["self"] = "at::rand({6, 6})"
146
+ arg_map["target"] = "at::randint(6, {6}, torch::kInt64)"
147
+ arg_map["weight"] = "at::rand({6})"
148
+ else:
149
+ arg_map["self"] = "at::rand({22, 22})"
150
+ arg_map["target"] = "at::randint(22, {22}, torch::kInt64)"
151
+ arg_map["weight"] = "at::rand({22})"
152
+ return
153
+ if op_name == "multilabel_margin_loss":
154
+ if index == 0:
155
+ arg_map["self"] = "at::rand({6, 6})"
156
+ arg_map["target"] = "at::randint(6, {6, 6}, torch::kInt64)"
157
+ else:
158
+ arg_map["self"] = "at::rand({22, 22})"
159
+ arg_map["target"] = "at::randint(22, {22, 22}, torch::kInt64)"
160
+ return
161
+ if op_name == "nll_loss":
162
+ if index == 0:
163
+ arg_map["self"] = "at::rand({6, 6})"
164
+ arg_map["target"] = "at::randint(6, {6}, torch::kInt64)"
165
+ arg_map["weight"] = "at::rand({6})"
166
+ else:
167
+ arg_map["self"] = "at::rand({22, 22})"
168
+ arg_map["target"] = "at::randint(22, {22}, torch::kInt64)"
169
+ arg_map["weight"] = "at::rand({22})"
170
+ return
171
+ if op_name == "nll_loss2d":
172
+ if index == 0:
173
+ arg_map["self"] = "at::rand({6, 6, 6, 6})"
174
+ arg_map["target"] = "at::randint(6, {6, 6, 6}, torch::kInt64)"
175
+ arg_map["weight"] = "at::rand({6})"
176
+ else:
177
+ arg_map["self"] = "at::rand({22, 22, 22, 22})"
178
+ arg_map["target"] = "at::randint(22, {22, 22, 22}, torch::kInt64)"
179
+ arg_map["weight"] = "at::rand({22})"
180
+ return
181
+ if op_name in (
182
+ "fft_fft",
183
+ "fft_ifft",
184
+ "fft_rfft",
185
+ "fft_irfft",
186
+ "fft_hfft",
187
+ "fft_ihfft",
188
+ ):
189
+ arg_map["norm"] = '"forward"'
190
+ return
191
+ if op_name == "linalg_tensorinv":
192
+ if index == 0:
193
+ arg_map["self"] = "at::rand({6, 6, 6, 6})"
194
+ arg_map["ind"] = "2"
195
+ else:
196
+ arg_map["self"] = "at::rand({22, 22, 22, 22})"
197
+ arg_map["ind"] = "2"
198
+ return
199
+ if op_name == "addmv":
200
+ if index == 0:
201
+ arg_map["self"] = "at::rand({2})"
202
+ arg_map["mat"] = "at::rand({2, 2})"
203
+ arg_map["vec"] = "at::rand({2})"
204
+ else:
205
+ arg_map["self"] = "at::rand({35})"
206
+ arg_map["mat"] = "at::rand({35, 35})"
207
+ arg_map["vec"] = "at::rand({35})"
208
+ return
209
+ if op_name == "acosh":
210
+ if index == 0:
211
+ arg_map["self"] = "at::rand({2, 2, 2}) + at::ones({2, 2, 2})"
212
+ else:
213
+ arg_map["self"] = "at::rand({5, 5, 5}) + at::ones({5, 5, 5})"
214
+ return
215
+ if op_name == "adaptive_max_pool2d_backward":
216
+ if index == 0:
217
+ arg_map["grad_output"] = "at::rand({2, 2, 2}, at::kFloat)"
218
+ arg_map["self"] = "at::rand({2, 2, 2}, at::kFloat)"
219
+ arg_map["indices"] = "at::randint(0, 1, {2, 2, 2}, at::kLong)"
220
+ else:
221
+ arg_map["grad_output"] = "at::rand({3, 3, 3}, at::kFloat)"
222
+ arg_map["self"] = "at::rand({3, 3, 3}, at::kFloat)"
223
+ arg_map["indices"] = "at::randint(0, 1, {3, 3, 3}, at::kLong)"
224
+ return
225
+ if op_name == "adaptive_max_pool3d_backward":
226
+ if index == 0:
227
+ arg_map["grad_output"] = "at::rand({2, 2, 2, 2}, at::kFloat)"
228
+ arg_map["self"] = "at::rand({2, 2, 2, 2}, at::kFloat)"
229
+ arg_map["indices"] = "at::randint(0, 1, {2, 2, 2, 2}, at::kLong)"
230
+ else:
231
+ arg_map["grad_output"] = "at::rand({3, 3, 3, 3}, at::kFloat)"
232
+ arg_map["self"] = "at::rand({3, 3, 3, 3}, at::kFloat)"
233
+ arg_map["indices"] = "at::randint(0, 1, {3, 3, 3, 3}, at::kLong)"
234
+ return
235
+ if op_name == "bitwise_left_shift":
236
+ if index == 0:
237
+ arg_map["self"] = "at::randint(1, 1 << 4, {6, 6, 6}, at::kInt)"
238
+ arg_map["other"] = "at::randint(1, 26, {6, 6, 6}, at::kInt)"
239
+ else:
240
+ arg_map["self"] = "at::randint(1, 1 << 4, {22, 22, 22}, at::kInt)"
241
+ arg_map["other"] = "at::randint(1, 26, {22, 22, 22}, at::kInt)"
242
+ return
243
+ if op_name == "bitwise_right_shift":
244
+ if index == 0:
245
+ arg_map["self"] = "at::randint(1 << 21, 1 << 30, {6, 6, 6}, at::kInt)"
246
+ arg_map["other"] = "at::randint(1, 22, {6, 6, 6}, at::kInt)"
247
+ else:
248
+ arg_map["self"] = "at::randint(1 << 21, 1 << 30, {22, 22, 22}, at::kInt)"
249
+ arg_map["other"] = "at::randint(1, 22, {22, 22, 22}, at::kInt)"
250
+ return
251
+ if op_name == "gather":
252
+ if index == 0:
253
+ arg_map["self"] = "at::randint(1, 100, {2,2,2}, at::kInt)"
254
+ arg_map["dim"] = "1"
255
+ arg_map["index"] = "at::randint(0, 1, {2,2,2}, torch::kInt64)"
256
+ arg_map["sparse_grad"] = "false"
257
+ else:
258
+ arg_map["self"] = "at::randint(1, 100, {5,5,5}, at::kInt)"
259
+ arg_map["dim"] = "1"
260
+ arg_map["index"] = "at::randint(0, 4, {5,5,5}, torch::kInt64)"
261
+ arg_map["sparse_grad"] = "false"
262
+ return
263
+ if op_name == "gelu":
264
+ if index == 0:
265
+ arg_map["self"] = "at::rand({6, 6, 6})"
266
+ arg_map["approximate"] = '"tanh"'
267
+ else:
268
+ arg_map["self"] = "at::rand({22, 22, 22})"
269
+ arg_map["approximate"] = '"tanh"'
270
+ return
271
+ if op_name == "gelu_backward":
272
+ if index == 0:
273
+ arg_map["grad_output"] = "at::rand({6, 6, 6})"
274
+ arg_map["self"] = "at::rand({6, 6, 6})"
275
+ arg_map["approximate"] = '"tanh"'
276
+ else:
277
+ arg_map["grad_output"] = "at::rand({22, 22, 22})"
278
+ arg_map["self"] = "at::rand({22, 22, 22})"
279
+ arg_map["approximate"] = '"tanh"'
280
+ return
281
+ if op_name == "index_add":
282
+ if index == 0:
283
+ arg_map["self"] = "at::rand({2})"
284
+ arg_map["dim"] = "0"
285
+ arg_map["index"] = "at::randint(0, 1, {2}, at::kInt)"
286
+ arg_map["source"] = "at::rand({2})"
287
+ arg_map["alpha"] = "2"
288
+ else:
289
+ arg_map["self"] = "at::rand({16})"
290
+ arg_map["dim"] = "0"
291
+ arg_map["index"] = "at::randint(0, 10, {16}, at::kInt)"
292
+ arg_map["source"] = "at::rand({16})"
293
+ arg_map["alpha"] = "2"
294
+ return
295
+ if op_name == "index_copy":
296
+ if index == 0:
297
+ arg_map["self"] = "at::rand({2})"
298
+ arg_map["dim"] = "0"
299
+ arg_map["index"] = "at::randint(0, 1, {2}, at::kLong)"
300
+ arg_map["source"] = "at::rand({2})"
301
+ else:
302
+ arg_map["self"] = "at::rand({32})"
303
+ arg_map["dim"] = "0"
304
+ arg_map["index"] = "at::randint(0, 10, {32}, at::kLong)"
305
+ arg_map["source"] = "at::rand({32})"
306
+ return
307
+ if op_name == "linalg_cross":
308
+ if index == 0:
309
+ arg_map["self"] = "at::rand({6, 3, 6})"
310
+ arg_map["other"] = "at::rand({6, 3, 6})"
311
+ arg_map["dim"] = "1"
312
+ else:
313
+ arg_map["self"] = "at::rand({22, 3, 22})"
314
+ arg_map["other"] = "at::rand({22, 3, 22})"
315
+ arg_map["dim"] = "1"
316
+ return
317
+ if op_name == "nll_loss_backward":
318
+ if index == 0:
319
+ arg_map["grad_output"] = "at::rand({})"
320
+ arg_map["self"] = "at::rand({6})"
321
+ arg_map["target"] = "at::randint(0, 5, {6}, torch::kInt64)"
322
+ arg_map["weight"] = "at::rand({6})"
323
+ arg_map["reduction"] = "1"
324
+ arg_map["ignore_index"] = "1"
325
+ arg_map["total_weight"] = "at::rand({})"
326
+ else:
327
+ arg_map["grad_output"] = "at::rand({})"
328
+ arg_map["self"] = "at::rand({36})"
329
+ arg_map["target"] = "at::randint(0, 11, {36}, torch::kInt64)"
330
+ arg_map["weight"] = "at::rand({36})"
331
+ arg_map["reduction"] = "1"
332
+ arg_map["ignore_index"] = "1"
333
+ arg_map["total_weight"] = "at::rand({})"
334
+ return
335
+ if op_name in ["scatter", "scatter_add", "_scatter_reduce"]:
336
+ if index == 0:
337
+ arg_map["self"] = "at::randint(1, 100, {2,2,2}, torch::kInt64)"
338
+ arg_map["index"] = "at::randint(0, 1, {2,2,2}, torch::kInt64)"
339
+ arg_map["src"] = "at::randint(1, 100, {2,2,2}, torch::kInt64)"
340
+ else:
341
+ arg_map["self"] = "at::randint(1, 100, {5,5,5}, torch::kInt64)"
342
+ arg_map["index"] = "at::randint(0, 1, {5,5,5}, torch::kInt64)"
343
+ arg_map["src"] = "at::randint(1, 100, {5,5,5}, torch::kInt64)"
344
+ if "reduce" in arg_map:
345
+ arg_map["reduce"] = '"sum"' if op_name == "_scatter_reduce" else '"add"'
346
+ return
347
+ if op_name == "scatter_reduce":
348
+ arg_map["reduce"] = '"mean"'
349
+ if index == 0:
350
+ arg_map["index"] = "at::randint(6, {6, 6, 6}, torch::kInt64)"
351
+ else:
352
+ arg_map["index"] = "at::randint(22, {22, 22, 22}, torch::kInt64)"
353
+ return
354
+ if op_name == "special_zeta":
355
+ if index == 0:
356
+ arg_map["self"] = "at::rand({2,2,2}, at::kDouble) + at::ones({2,2,2})"
357
+ arg_map["other"] = "at::rand({2,2,2}, at::kDouble) + at::ones({2,2,2})"
358
+ else:
359
+ arg_map["self"] = "at::rand({5,5,5}, at::kDouble) + at::ones({5,5,5})"
360
+ arg_map["other"] = "at::rand({5,5,5}, at::kDouble) + at::ones({5,5,5})"
361
+ return
362
+ if op_name == "_convert_indices_from_csr_to_coo":
363
+ if index == 0:
364
+ arg_map["crow_indices"] = "torch::tensor({1}, torch::kInt32)"
365
+ arg_map["col_indices"] = "torch::tensor({0, 1, 0}, torch::kInt32)"
366
+ arg_map["out_int32"] = "false"
367
+ else:
368
+ arg_map["crow_indices"] = "torch::tensor({0}, torch::kInt32)"
369
+ arg_map[
370
+ "col_indices"
371
+ ] = "torch::tensor({0, 1, 0, 2, 1, 2, 0, 1, 0, 2, 1, 2}, torch::kInt32)"
372
+ arg_map["out_int32"] = "false"
373
+ return
374
+ if op_name == "_convert_indices_from_coo_to_csr":
375
+ if index == 0:
376
+ arg_map["self"] = "at::randint(0, 3, {2}, at::kInt)"
377
+ arg_map["size"] = "10"
378
+ arg_map["out_int32"] = "false"
379
+ else:
380
+ arg_map["self"] = "at::randint(0, 3, {12}, at::kInt)"
381
+ arg_map["size"] = "24"
382
+ arg_map["out_int32"] = "false"
383
+ return
384
+ if op_name in ("diagonal", "linalg_diagonal"):
385
+ arg_map["offset"] = "0"
386
+ arg_map["dim0"] = "1"
387
+ arg_map["dim1"] = "2"
388
+ return
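override_test_values mutates a dict of C++ expression strings in place, replacing default randomly generated inputs for ops whose defaults would be invalid. A small sketch, where the starting expressions are invented stand-ins for what a test generator might emit:

from torchgen.static_runtime.config import override_test_values

# Default expressions a generator might have produced for aten::gather.
arg_map = {
    "self": "at::rand({6, 6, 6})",
    "dim": "1",
    "index": "at::rand({6, 6, 6})",   # floating-point indices would be invalid
    "sparse_grad": "false",
}
override_test_values(arg_map, "gather", 0)
# The gather branch above swaps in integer tensors within a known-safe range.
print(arg_map["index"])  # at::randint(0, 1, {2,2,2}, torch::kInt64)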
env-llmeval/lib/python3.10/site-packages/torchgen/static_runtime/gen_static_runtime_ops.py ADDED
@@ -0,0 +1,228 @@
1
+ import argparse
2
+ import itertools
3
+ import os
4
+ from typing import Sequence, TypeVar, Union
5
+
6
+ from libfb.py.log import set_simple_logging # type: ignore[import]
7
+
8
+ from torchgen import gen
9
+ from torchgen.context import native_function_manager
10
+ from torchgen.model import DispatchKey, NativeFunctionsGroup, NativeFunctionsViewGroup
11
+ from torchgen.static_runtime import config, generator
12
+
13
+ # Given a list of `grouped_native_functions` sorted by their op names, return a list of
14
+ # lists each of which groups ops that share the base name. For example, `mean` and
15
+ # `mean.dim` are grouped together by this function.
16
+
17
+ NativeGroupT = TypeVar(
18
+ "NativeGroupT",
19
+ bound=Union[NativeFunctionsGroup, NativeFunctionsViewGroup],
20
+ )
21
+
22
+
23
+ def group_functions_by_op_name(
24
+ grouped_native_functions: Sequence[NativeGroupT],
25
+ ) -> Sequence[Sequence[NativeGroupT]]:
26
+ if not grouped_native_functions:
27
+ return []
28
+ groups = []
29
+
30
+ def is_supported(g: Union[NativeFunctionsGroup, NativeFunctionsViewGroup]) -> bool:
31
+ with native_function_manager(g):
32
+ return generator.is_supported(g)
33
+
34
+ eligible_ops = (g for g in grouped_native_functions if is_supported(g))
35
+ groups = [
36
+ list(group)
37
+ for k, group in (
38
+ itertools.groupby(
39
+ eligible_ops,
40
+ key=config.func_name_base_str,
41
+ )
42
+ )
43
+ ]
44
+
45
+ return groups
46
+
47
+
48
+ def clang_format(cpp_file_path: str) -> None:
49
+ import subprocess
50
+
51
+ subprocess.check_call(["clang-format", "-i", cpp_file_path])
52
+
53
+
54
+ def write_cpp(cpp_ops: Sequence[str], file_path: str) -> None:
55
+ code = "\n".join(cpp_ops)
56
+ generated = f"""// @lint-ignore-every CLANGTIDY HOWTOEVEN
57
+ // AUTO-GENERATED FROM: torchgen/static_runtime/gen_static_runtime_ops.py
58
+ #include <torch/csrc/jit/runtime/static/ops.h>
59
+
60
+ #include <ATen/CPUFunctions.h>
61
+ #include <ATen/InferSize.h>
62
+ #include <ATen/NativeFunctions.h>
63
+ #include <ATen/Parallel.h>
64
+ #include <ATen/ScalarOps.h>
65
+ #include <ATen/TensorUtils.h>
66
+ #include <ATen/cpu/vec/functional.h>
67
+ #include <ATen/cpu/vec/vec.h>
68
+ #include <ATen/native/EmbeddingBag.h>
69
+ #include <ATen/native/Fill.h>
70
+ #include <ATen/native/IndexingUtils.h>
71
+ #include <ATen/native/NonSymbolicBC.h>
72
+ #include <ATen/native/Resize.h>
73
+ #include <ATen/native/SharedReduceOps.h>
74
+ #include <ATen/native/TensorAdvancedIndexing.h>
75
+ #include <ATen/native/cpu/SerialStackImpl.h>
76
+ #include <ATen/native/layer_norm.h>
77
+ #include <ATen/native/quantized/cpu/fbgemm_utils.h>
78
+ #include <ATen/native/quantized/cpu/qembeddingbag.h>
79
+ #include <ATen/native/quantized/cpu/qembeddingbag_prepack.h>
80
+ #include <ATen/quantized/QTensorImpl.h>
81
+ #include <ATen/quantized/Quantizer.h>
82
+ #include <c10/core/ScalarType.h>
83
+ #include <c10/core/WrapDimMinimal.h>
84
+ #include <c10/util/irange.h>
85
+ #include <torch/csrc/jit/ir/ir.h>
86
+ #include <torch/csrc/jit/runtime/static/impl.h>
87
+ #include <torch/csrc/jit/runtime/static/te_wrapper.h>
88
+ #include <torch/csrc/jit/runtime/vararg_functions.h>
89
+ #include <torch/csrc/jit/tensorexpr/ir.h>
90
+ #include <torch/csrc/jit/tensorexpr/ir_simplifier.h>
91
+ #include <torch/csrc/jit/tensorexpr/llvm_codegen.h>
92
+ #include <torch/csrc/jit/tensorexpr/loopnest.h>
93
+
94
+ namespace torch {{
95
+ namespace jit {{
96
+
97
+ {code}
98
+
99
+ }} // namespace jit
100
+ }} // namespace torch
101
+ """
102
+ with open(file_path, "w") as f:
103
+ f.write(generated)
104
+ clang_format(file_path)
105
+
106
+
107
+ def write_test_cpp(cpp_ops: Sequence[str], file_path: str) -> None:
108
+ code = "\n".join(cpp_ops)
109
+ generated = f"""// @lint-ignore-every CLANGTIDY HOWTOEVEN
110
+ // AUTO-GENERATED FROM: torchgen/static_runtime/gen_static_runtime_ops.py
111
+ #include <gtest/gtest.h>
112
+ #include <torch/csrc/jit/runtime/static/impl.h>
113
+ #include <torch/torch.h>
114
+
115
+ #include "test_utils.h"
116
+
117
+ using namespace caffe2;
118
+ using namespace torch;
119
+ using namespace torch::jit;
120
+ using namespace torch::jit::test;
121
+ using c10::IValue;
122
+
123
+ {code}
124
+
125
+ """
126
+ with open(file_path, "w") as f:
127
+ f.write(generated)
128
+ clang_format(file_path)
129
+
130
+
131
+ def main() -> None:
132
+ parser = argparse.ArgumentParser(description="Generate ATen source files")
133
+ parser.add_argument(
134
+ "-s",
135
+ "--source-path",
136
+ help="path to source directory for ATen",
137
+ default="caffe2/aten/src/ATen",
138
+ )
139
+ parser.add_argument(
140
+ "-p",
141
+ "--generated-ops-cpp-path",
142
+ help="path to the generated op dispatcher .cpp file",
143
+ default="caffe2/torch/csrc/jit/runtime/static/generated_ops.cpp",
144
+ )
145
+ parser.add_argument(
146
+ "-t",
147
+ "--generated-ops-test-cpp-path",
148
+ help="path to the generated op dispatcher test .cc file",
149
+ default="caffe2/benchmarks/static_runtime/test_generated_ops.cc",
150
+ )
151
+ options = parser.parse_args()
152
+ native_yaml_path = os.path.join(options.source_path, "native/native_functions.yaml")
153
+ tags_yaml_path = os.path.join(options.source_path, "native/tags.yaml")
154
+ parsed_yaml = gen.parse_native_yaml(native_yaml_path, tags_yaml_path)
155
+ native_functions, backend_indices = (
156
+ parsed_yaml.native_functions,
157
+ parsed_yaml.backend_indices,
158
+ )
159
+
160
+ op_generator = generator.GenOpDispatcher()
161
+ test_case_generator = generator.GenOpTestCase()
162
+
163
+ native_functions_groups = [
164
+ g
165
+ for g in gen.get_grouped_native_functions(native_functions)
166
+ if isinstance(g, NativeFunctionsGroup)
167
+ ]
168
+
169
+ supported_functions_groups = group_functions_by_op_name(native_functions_groups)
170
+
171
+ out_variant_op_result = [
172
+ op_generator.out_variant(groups, backend_indices[DispatchKey.CPU])
173
+ for groups in supported_functions_groups
174
+ ]
175
+ out_variant_test_result = [
176
+ test_case_generator.out_variant(groups) for groups in supported_functions_groups
177
+ ]
178
+
179
+ native_functions_view_groups = [
180
+ g
181
+ for g in gen.get_grouped_by_view_native_functions(native_functions)
182
+ if isinstance(g, NativeFunctionsViewGroup)
183
+ ]
184
+
185
+ supported_functions_view_groups = group_functions_by_op_name(
186
+ native_functions_view_groups
187
+ )
188
+
189
+ view_op_result = [
190
+ op_generator.view(groups, backend_indices[DispatchKey.CPU])
191
+ for groups in supported_functions_view_groups
192
+ ]
193
+ view_test_result = [
194
+ test_case_generator.view(groups) for groups in supported_functions_view_groups
195
+ ]
196
+
197
+ op_result = out_variant_op_result + ["\n\n"] + view_op_result
198
+ test_result = out_variant_test_result + ["\n\n"] + view_test_result
199
+
200
+ write_cpp(op_result, options.generated_ops_cpp_path)
201
+ write_test_cpp(test_result, options.generated_ops_test_cpp_path)
202
+
203
+ print(
204
+ "\ntotal grouped native ops: %d"
205
+ % len(gen.get_grouped_native_functions(native_functions))
206
+ )
207
+
208
+ print("grouped native ops with out variant: %d" % len(native_functions_groups))
209
+ supported_functions_num = sum(
210
+ [len(groups) for groups in supported_functions_groups]
211
+ )
212
+ print("generated functions groups with out variant: %d" % supported_functions_num)
213
+
214
+ print("\nview grouped native ops: %d" % len(native_functions_view_groups))
215
+ supported_view_functions_num = sum(
216
+ [len(groups) for groups in supported_functions_view_groups]
217
+ )
218
+ print("generated functions view groups: %d" % supported_view_functions_num)
219
+
220
+ print(
221
+ "\noverall generated : %d"
222
+ % (supported_functions_num + supported_view_functions_num)
223
+ )
224
+
225
+
226
+ if __name__ == "__main__":
227
+ set_simple_logging(escape_newlines=False)
228
+ main()
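group_functions_by_op_name above leans on itertools.groupby, which only merges adjacent items with equal keys; that is why the input must already be sorted by op name. A standalone illustration with plain strings:

import itertools

# Overload names that share a base (everything before the first ".").
names = ["mean", "mean.dim", "mul", "mul.Scalar"]
groups = [
    list(group)
    for _, group in itertools.groupby(names, key=lambda n: n.split(".")[0])
]
print(groups)  # [['mean', 'mean.dim'], ['mul', 'mul.Scalar']]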
env-llmeval/lib/python3.10/site-packages/torchgen/static_runtime/generator.py ADDED
@@ -0,0 +1,796 @@
1
+ import json
2
+ import logging
3
+
4
+ import math
5
+ from typing import Dict, List, Optional, Sequence, Tuple, Union
6
+
7
+ import torchgen.api.cpp as cpp
8
+ from torchgen.context import native_function_manager
9
+ from torchgen.model import (
10
+ Argument,
11
+ BackendIndex,
12
+ BaseTy,
13
+ BaseType,
14
+ FunctionSchema,
15
+ NativeFunctionsGroup,
16
+ NativeFunctionsViewGroup,
17
+ OptionalType,
18
+ SelfArgument,
19
+ TensorOptionsArguments,
20
+ Type,
21
+ )
22
+ from torchgen.static_runtime import config
23
+
24
+ logger: logging.Logger = logging.getLogger()
25
+
26
+
27
+ def has_alias(
28
+ arguments: Sequence[Union[Argument, SelfArgument, TensorOptionsArguments]]
29
+ ) -> bool:
30
+ for arg in arguments:
31
+ annotation = getattr(arg, "annotation", None)
32
+ if not annotation:
33
+ continue
34
+ alias_set = getattr(annotation, "alias_set", ())
35
+ if alias_set:
36
+ return True
37
+ return False
38
+
39
+
40
+ BLOCKED_OPS = frozenset(
41
+ (
42
+ # non cpu ops
43
+ "sparse_sampled_addmm",
44
+ "hspmm",
45
+ "linalg_svdvals",
46
+ # sparse ops
47
+ "sspaddmm",
48
+ "coalesce",
49
+ "_indices",
50
+ "indices",
51
+ "_values",
52
+ "values",
53
+ "crow_indices",
54
+ "col_indices",
55
+ # deprecated ops
56
+ "floor_divide",
57
+ "ger",
58
+ # buggy ops
59
+ "conj_physical", # P495807361
60
+ "binary_cross_entropy", # P496394764
61
+ "arccosh",
62
+ # uncommon ops
63
+ "cholesky",
64
+ "lu_solve",
65
+ "linalg_cholesky",
66
+ "linalg_householder_product",
67
+ "linalg_ldl_solve",
68
+ "_compute_linear_combination",
69
+ # training related ops
70
+ "_make_dual",
71
+ # cannot call directly
72
+ "_fw_primal",
73
+ # no documentation
74
+ "_index_reduce",
75
+ # TODO: these ones got added recently and need manual inspection
76
+ "_new_zeros_with_same_feature_meta",
77
+ "_conj_physical",
78
+ "binary_cross_entropy_with_logits",
79
+ "bincount",
80
+ "conv_tbc",
81
+ "copy",
82
+ "_copy_from",
83
+ "_copy_from_and_resize",
84
+ "count_nonzero",
85
+ "cudnn_affine_grid_generator",
86
+ "cudnn_affine_grid_generator_backward",
87
+ "cudnn_grid_sampler",
88
+ "diag_embed",
89
+ "embedding",
90
+ "embedding_dense_backward",
91
+ "_embedding_bag_dense_backward",
92
+ "_embedding_bag_per_sample_weights_backward",
93
+ "grid_sampler_2d",
94
+ "_grid_sampler_2d_cpu_fallback",
95
+ "grid_sampler_3d",
96
+ "isnan",
97
+ "mkldnn_linear",
98
+ "median",
99
+ "nanmedian",
100
+ "_sparse_sparse_matmul",
101
+ "batch_norm_backward_elemt",
102
+ "_euclidean_dist",
103
+ "pixel_shuffle",
104
+ "pixel_unshuffle",
105
+ "channel_shuffle",
106
+ "_reshape_nested_backward",
107
+ "relu",
108
+ "prelu",
109
+ "celu",
110
+ "slice_scatter",
111
+ "select_scatter",
112
+ "diagonal_scatter",
113
+ "sum",
114
+ "_mkldnn_transpose",
115
+ "_nested_tensor_from_mask",
116
+ "_nested_from_padded",
117
+ "_nested_tensor_size",
118
+ "_nested_from_padded_and_nested_example",
119
+ "_standard_gamma_grad",
120
+ "_dirichlet_grad",
121
+ "native_norm",
122
+ "_sparse_softmax",
123
+ "_sparse_softmax_backward_data",
124
+ "_sparse_log_softmax",
125
+ "_sparse_log_softmax_backward_data",
126
+ "zero",
127
+ "_sparse_addmm",
128
+ "sparse_mask",
129
+ "_sparse_mask_projection",
130
+ "_to_dense",
131
+ "_coalesce",
132
+ "_coalesced",
133
+ "copy_sparse_to_sparse",
134
+ "to_sparse",
135
+ "to_sparse_csr",
136
+ "to_sparse_csc",
137
+ "to_mkldnn",
138
+ "quantize_per_tensor_dynamic",
139
+ "quantize_per_channel",
140
+ "q_per_channel_scales",
141
+ "q_per_channel_zero_points",
142
+ "int_repr",
143
+ "_make_per_channel_quantized_tensor",
144
+ "set",
145
+ "lift",
146
+ "lift_fresh",
147
+ "lift_fresh_copy",
148
+ "masked_scatter",
149
+ "_masked_softmax",
150
+ "_masked_softmax_backward",
151
+ "put",
152
+ "index_reduce",
153
+ "trace",
154
+ "_cholesky_solve_helper",
155
+ "dist",
156
+ "max",
157
+ "_torch_cuda_cu_linker_symbol_op",
158
+ "glu_jvp",
159
+ "glu_backward_jvp",
160
+ "hardswish_backward",
161
+ "rrelu_with_noise_backward",
162
+ "mkldnn_adaptive_avg_pool2d_backward",
163
+ "_adaptive_avg_pool2d_backward",
164
+ "_adaptive_avg_pool3d_backward",
165
+ "isinf",
166
+ "linalg_lu_solve",
167
+ "linalg_vecdot",
168
+ "linalg_matrix_exp",
169
+ "linalg_eigvalsh",
170
+ "_test_warn_in_autograd",
171
+ "_test_autograd_multiple_dispatch_view",
172
+ "_test_autograd_multiple_dispatch_view_copy",
173
+ "_segment_reduce",
174
+ "_segment_reduce_backward",
175
+ "_fw_primal_copy",
176
+ "_make_dual_copy",
177
+ "view_as_real_copy",
178
+ "view_as_complex_copy",
179
+ "_conj_copy",
180
+ "_neg_view_copy",
181
+ "diagonal_copy",
182
+ "detach_copy",
183
+ "squeeze_copy",
184
+ "t_copy",
185
+ "unsqueeze_copy",
186
+ "_indices_copy",
187
+ "_values_copy",
188
+ "indices_copy",
189
+ "values_copy",
190
+ "crow_indices_copy",
191
+ "col_indices_copy",
192
+ "ccol_indices",
193
+ "ccol_indices_copy",
194
+ "row_indices",
195
+ "row_indices_copy",
196
+ "unfold_copy",
197
+ "alias_copy",
198
+ "_triton_multi_head_attention",
199
+ "special_airy_ai",
200
+ "special_bessel_j0",
201
+ "special_bessel_j1",
202
+ "special_bessel_y0",
203
+ "special_bessel_y1",
204
+ "special_chebyshev_polynomial_t",
205
+ "special_chebyshev_polynomial_u",
206
+ "special_chebyshev_polynomial_v",
207
+ "special_chebyshev_polynomial_w",
208
+ "special_hermite_polynomial_h",
209
+ "special_hermite_polynomial_he",
210
+ "special_laguerre_polynomial_l",
211
+ "special_legendre_polynomial_p",
212
+ "special_modified_bessel_i0",
213
+ "special_modified_bessel_i1",
214
+ "special_modified_bessel_k0",
215
+ "special_modified_bessel_k1",
216
+ "special_scaled_modified_bessel_k0",
217
+ "special_scaled_modified_bessel_k1",
218
+ "special_shifted_chebyshev_polynomial_t",
219
+ "special_shifted_chebyshev_polynomial_u",
220
+ "special_shifted_chebyshev_polynomial_v",
221
+ "special_shifted_chebyshev_polynomial_w",
222
+ "special_spherical_bessel_j0",
223
+ "_foobar",
224
+ "_nested_tensor_strides",
225
+ )
226
+ )
227
+
228
+
229
+ def is_supported(g: Union[NativeFunctionsGroup, NativeFunctionsViewGroup]) -> bool:
230
+ base_op_name = ""
231
+ func = None
232
+ if isinstance(g, NativeFunctionsViewGroup):
233
+ base_op_name = g.view.root_name
234
+ func = g.view.func
235
+ else:
236
+ base_op_name = g.out.func.name.name.base
237
+ func = g.out.func
238
+ if config.is_hand_written(g):
239
+ logger.info("HAND WRITTEN: %s", base_op_name)
240
+ return False
241
+ if base_op_name in BLOCKED_OPS:
242
+ logger.info("BLOCKED: %s", base_op_name)
243
+ return False
244
+ for arg in func.schema_order_arguments():
245
+ maybe_method = ivalue_type_conversion_method(arg.type)
246
+ if not maybe_method:
247
+ # Type conversion is not supported yet.
248
+ logger.info("NOT SUPPORTED TYPE CONVERTING: %s", func)
249
+ return False
250
+
251
+ if isinstance(g, NativeFunctionsViewGroup):
252
+ # TODO: stop doing type tests by converting to C++ and then testing
253
+ # the string, just test the dang thing directly
254
+ if "at::Tensor" != cpp.returns_type(func.returns, symint=False).cpp_type():
255
+ # Returns a non-Tensor value.
256
+ logger.info("NON-TENSOR RET TYPE: %s", str(func))
257
+ return False
258
+ return True
259
+
260
+ # For out variant ops, we need to check the arguments of its functional func.
261
+ for arg in g.functional.func.schema_order_arguments():
262
+ maybe_method = ivalue_type_conversion_method(arg.type)
263
+ if not maybe_method:
264
+ # Type conversion is not supported yet.
265
+ logger.info("NOT SUPPORTED TYPE CONVERTING: %s", g.functional.func)
266
+ return False
267
+
268
+ if not g.structured:
269
+ # In case of unstructured op, we check if it has out variant implementation.
270
+ # The out variant implementation satisfies the minimum requirement that it has the output tensor as the last
271
+ # parameter.
272
+ if (
273
+ not hasattr(g, "out")
274
+ or not str(func).endswith("Tensor(a!) out) -> Tensor(a!)")
275
+ or not str(func.name).endswith(".out")
276
+ ):
277
+ return False
278
+ # TODO: stop type testing by converting to C++
279
+ if "at::Tensor &" != cpp.returns_type(func.returns, symint=False).cpp_type():
280
+ logger.info("NON_TENSOR RET TYPE: %s", func)
281
+ return False
282
+ if has_alias(func.arguments.non_out):
283
+ # This op may create an alias of inputs.
284
+ logger.info("INPUTS ALIAS: %s", base_op_name)
285
+ return False
286
+ return True
287
+
288
+
289
+ def ivalue_type_conversion_method(
290
+ arg_type: Union[BaseType, OptionalType, Type]
291
+ ) -> Optional[Tuple[bool, str]]:
292
+ """
293
+ Return the method call expression of `c10::ivalue' to convert its contained value to
294
+ the expected value of `arg_type` type. For example, for `arg_type` == BaseTy.Tensor,
295
+ this function returns ".toTensor()", so that it can be appended to the ivalue's
296
+ variable name to get the value of the expected type.
297
+ """
298
+ type_conversion_methods = {
299
+ BaseTy.Tensor: ((True, "toTensor()"), (False, "toOptional<at::Tensor>()")),
300
+ BaseTy.int: ((False, "toInt()"), (False, "toOptional<int64_t>()")),
301
+ BaseTy.bool: ((False, "toBool()"), (False, "toOptional<bool>()")),
302
+ BaseTy.Scalar: ((False, "toScalar()"), (False, "toOptional<at::Scalar>()")),
303
+ BaseTy.ScalarType: (
304
+ (False, "toScalarType()"),
305
+ (False, "toOptional<at::ScalarType>()"),
306
+ ),
307
+ BaseTy.str: (
308
+ (False, "toStringView()"),
309
+ (False, "toOptional<c10::string_view>()"),
310
+ ),
311
+ }
312
+
313
+ base_ty_object = None
314
+ if isinstance(arg_type, BaseType):
315
+ base_ty_object = arg_type.name
316
+ elif isinstance(arg_type, OptionalType):
317
+ if not isinstance(arg_type.elem, BaseType):
318
+ # ListType is currently unsupported.
319
+ return None
320
+ base_ty_object = arg_type.elem.name
321
+ else:
322
+ return None
323
+
324
+ if base_ty_object not in type_conversion_methods:
325
+ return None
326
+ methods = type_conversion_methods[base_ty_object]
327
+ if isinstance(arg_type, BaseType):
328
+ return methods[0]
329
+ return methods[1]
330
+
331
+
332
+ should_use_int_tensor_ops_ = frozenset(
333
+ (
334
+ "bitwise_not",
335
+ "bitwise_and",
336
+ "bitwise_or",
337
+ "bitwise_xor",
338
+ "bitwise_left_shift",
339
+ "bitwise_right_shift",
340
+ "gcd",
341
+ "lcm",
342
+ "scatter",
343
+ "gather",
344
+ "_convert_indices_from_coo_to_csr",
345
+ "_convert_indices_from_csr_to_coo",
346
+ )
347
+ )
348
+ should_use_complex_tensor_ops_ = frozenset(("view_as_real", "imag", "_conj"))
349
+
350
+
351
+ def should_use_int_tensor(op_name: str) -> bool:
352
+ return op_name in should_use_int_tensor_ops_
353
+
354
+
355
+ def should_use_complex_tensor(op_name: str) -> bool:
356
+ return op_name in should_use_complex_tensor_ops_
357
+
358
+
359
+ test_tensor_dim_ops_1_ = frozenset(
360
+ (
361
+ "addmv",
362
+ "index_add",
363
+ "_convert_indices_from_coo_to_csr",
364
+ "_convert_indices_from_csr_to_coo",
365
+ "nll_loss_backward",
366
+ "dot",
367
+ "vdot",
368
+ "outer",
369
+ "ger",
370
+ )
371
+ )
372
+ test_tensor_dim_ops_2_ = frozenset(
373
+ ("addmm", "mm", "nuclear_norm", "diag", "_addmm_activation", "matrix_H", "t")
374
+ )
375
+
376
+
377
+ def test_tensor_dim(op_name: str) -> int:
378
+ if op_name in test_tensor_dim_ops_1_:
379
+ return 1
380
+ if op_name in test_tensor_dim_ops_2_:
381
+ return 2
382
+ return 3
383
+
384
+
385
+ test_tensor_shapes_string = '{"view_as_complex": "{2, 2}"}'
386
+ test_tensor_shape_json: Dict[str, str] = json.loads(test_tensor_shapes_string)
387
+
388
+
389
+ def test_tensor_shape(op_name: str) -> str:
390
+ if op_name in test_tensor_shape_json:
391
+ return test_tensor_shape_json[op_name]
392
+ else:
393
+ return ""
394
+
395
+
396
+ def test_value_expression(
397
+ arg_type: Union[BaseType, OptionalType, Type], index: int, op_name: str
398
+ ) -> str:
399
+ tensor_size_ex = test_tensor_shape(op_name)
400
+ if tensor_size_ex == "":
401
+ num_tensors = 16 if index == 0 else 64
402
+ num_dim = test_tensor_dim(op_name)
403
+ size_per_dim = math.ceil(num_tensors / float(num_dim))
404
+ size_per_dim += size_per_dim % 2
405
+ tensor_size_ex = "{%s}" % (",".join([f"{size_per_dim}"] * num_dim))
406
+ if should_use_int_tensor(op_name):
407
+ tensor_expression = f"at::randint(1, 100, {tensor_size_ex}, at::kInt)"
408
+ elif should_use_complex_tensor(op_name):
409
+ tensor_expression = f"at::randn({tensor_size_ex}, at::kComplexFloat)"
410
+ else:
411
+ tensor_expression = f"at::rand({tensor_size_ex})"
412
+
413
+ value_expressions = {
414
+ BaseTy.Tensor: tensor_expression,
415
+ BaseTy.int: "1",
416
+ BaseTy.bool: "false",
417
+ BaseTy.Scalar: "2",
418
+ BaseTy.ScalarType: "at::ScalarType::Float",
419
+ BaseTy.str: '"floor"',
420
+ }
421
+
422
+ base_ty_object = None
423
+ if isinstance(arg_type, BaseType):
424
+ base_ty_object = arg_type.name
425
+ else:
426
+ assert isinstance(arg_type, OptionalType) and isinstance(
427
+ arg_type.elem, BaseType
428
+ )
429
+ base_ty_object = arg_type.elem.name
430
+ assert base_ty_object in value_expressions, "not expected type"
431
+ value_expression = value_expressions[base_ty_object]
432
+ return value_expression
433
+
434
+
435
+ def generate_test_value_definitions(schema: FunctionSchema, index: int) -> str:
436
+ assert not schema.is_out_fn()
437
+ schema_name = schema.name.name.base
438
+ arg_map = {}
439
+ for arg in schema.schema_order_arguments():
440
+ test_value_exp = test_value_expression(arg.type, index, schema_name)
441
+ arg_map[arg.name] = test_value_exp
442
+ config.override_test_values(arg_map, schema_name, index)
443
+ arg_populations = []
444
+ for arg_name, arg_value in arg_map.items():
445
+ arg_populations.append(f"auto {arg_name}{index} = {arg_value}")
446
+ return ";\n ".join(arg_populations) + ";"
447
+
448
+
449
+ def generate_test_value_names(schema: FunctionSchema, index: int) -> str:
450
+ assert not schema.is_out_fn()
451
+ return ",".join(f"{arg.name}{index}" for arg in schema.schema_order_arguments())
452
+
453
+
454
+ generate_test_ir_arguments_base_ty_to_type_str_ = {
455
+ BaseTy.Tensor: "Tensor",
456
+ BaseTy.int: "int",
457
+ BaseTy.float: "float",
458
+ BaseTy.str: "str",
459
+ BaseTy.Scalar: "int",
460
+ BaseTy.ScalarType: "int",
461
+ BaseTy.bool: "bool",
462
+ }
463
+
464
+
465
+ def generate_test_ir_arguments(
466
+ schema: FunctionSchema,
467
+ ) -> List[Tuple[str, Optional[str]]]:
468
+ def ir_argument(arg: Argument) -> Tuple[str, Optional[str]]:
469
+ t = arg.type
470
+ add_optional = False
471
+ if isinstance(t, OptionalType):
472
+ t = t.elem
473
+ add_optional = True
474
+ assert isinstance(t, BaseType)
475
+ type_str = None
476
+ if t.name in generate_test_ir_arguments_base_ty_to_type_str_:
477
+ type_str = generate_test_ir_arguments_base_ty_to_type_str_[t.name]
478
+ if type_str and add_optional:
479
+ type_str = f"{type_str}?"
480
+ return ("%" + arg.name, type_str)
481
+
482
+ return [ir_argument(arg) for arg in schema.schema_order_arguments()]
483
+
484
+
485
+ def generate_arg_extraction(schema: FunctionSchema) -> str:
486
+ arg_populations = []
487
+ for i, arg in enumerate(schema.schema_order_arguments()):
488
+ maybe_method = ivalue_type_conversion_method(arg.type)
489
+ assert maybe_method
490
+ is_reference, type_conversion_method = maybe_method
491
+ reference = "&" if is_reference else ""
492
+ arg_populations.append(
493
+ f"const auto{reference} {arg.name} = p_node->Input({i}).{type_conversion_method}"
494
+ )
495
+ return ";\n ".join(arg_populations) + ";"
496
+
497
+
498
+ def get_kernel_name(g: NativeFunctionsGroup, backend_index: BackendIndex) -> str:
499
+ kernel = backend_index.get_kernel(g.functional)
500
+ if g.structured or kernel is None:
501
+ return cpp.name(g.functional.func)
502
+ return kernel.kernel
503
+
504
+
505
+ def get_out_kernel_name(g: NativeFunctionsGroup, backend_index: BackendIndex) -> str:
506
+ kernel = backend_index.get_kernel(g.out)
507
+ if g.structured or kernel is None:
508
+ return cpp.name(g.out.func)
509
+ return kernel.kernel
510
+
511
+
512
+ def generate_non_out_variant_call(
513
+ g: NativeFunctionsGroup, backend_index: BackendIndex
514
+ ) -> str:
515
+ schema = g.functional.func
516
+ assert not schema.is_out_fn()
517
+ kernel_name = get_kernel_name(g, backend_index)
518
+ arg_names = (arg.name for arg in schema.schema_order_arguments())
519
+ namespace_name = "cpu" if g.structured else "native"
520
+ return f'at::{namespace_name}::{kernel_name}({",".join(arg_names)})'
521
+
522
+
523
+ def generate_call_to_view_ops(
524
+ g: NativeFunctionsViewGroup, backend_index: BackendIndex
525
+ ) -> str:
526
+ schema = g.view.func
527
+ kernel_name = cpp.name(schema)
528
+ kernel = backend_index.get_kernel(g.view)
529
+ if kernel:
530
+ kernel_name = kernel.kernel
531
+ arg_names = (arg.name for arg in schema.schema_order_arguments())
532
+ namespace_name = "native"
533
+ return f'at::{namespace_name}::{kernel_name}({",".join(arg_names)})'
534
+
535
+
536
+ def generate_out_variant_call(
537
+ g: NativeFunctionsGroup, backend_index: BackendIndex
538
+ ) -> str:
539
+ schema = g.out.func
540
+ assert schema.is_out_fn()
541
+ arg_names = []
542
+ kernel_name = get_out_kernel_name(g, backend_index)
543
+ if g.structured:
544
+ # structured op starts with the output tensor argument.
545
+ arg_names = [out_arg.name for out_arg in schema.arguments.out]
546
+ else:
547
+ arg_names = []
548
+ for arg in schema.arguments.non_out:
549
+ if isinstance(arg, SelfArgument):
550
+ arg_names.append(arg.argument.name)
551
+ else:
552
+ assert isinstance(arg, Argument)
553
+ arg_names.append(arg.name)
554
+ if not g.structured:
555
+ assert len(schema.arguments.out) == 1
556
+ arg_names.append(schema.arguments.out[0].name)
557
+ cpp_arg_names = ",".join(arg_names)
558
+ namespace_name = "cpu" if g.structured else "native"
559
+ return f"at::{namespace_name}::{kernel_name}({cpp_arg_names})"
560
+
561
+
562
+ no_memory_resize_ops = frozenset(
563
+ (
564
+ "isin.Scalar_Tensor",
565
+ "index_add",
566
+ "dot",
567
+ "vdot",
568
+ "nuclear_norm",
569
+ "histc",
570
+ "l1_loss",
571
+ "multi_margin_loss",
572
+ "multilabel_margin_loss",
573
+ "nll_loss",
574
+ "nll_loss2d",
575
+ "prod",
576
+ )
577
+ )
578
+
579
+
580
+ def should_check_resize(schema: FunctionSchema) -> bool:
581
+ schema_str = str(schema)
582
+ type_variant_op_name = schema_str[: schema_str.find("(")]
583
+ return type_variant_op_name not in no_memory_resize_ops
584
+
585
+
586
+ def op_name_from_group(g: NativeFunctionsGroup) -> str:
587
+ return g.functional.func.name.name.base
588
+
589
+
590
+ class GenOpDispatcher:
591
+ def out_variant(
592
+ self, groups: Sequence[NativeFunctionsGroup], backend_index: BackendIndex
593
+ ) -> str:
594
+ if not groups:
595
+ return ""
596
+ generated_type_variants = []
597
+ for g in groups:
598
+ with native_function_manager(g):
599
+ assert is_supported(g)
600
+ assert isinstance(g, NativeFunctionsGroup)
601
+ generated_type_variant = self.out_variant_op_generator(g, backend_index)
602
+ generated_type_variants.append(generated_type_variant)
603
+ op_name = op_name_from_group(groups[0])
604
+ body = "\n".join(generated_type_variants)
605
+ generated = f"""
606
+ REGISTER_OPERATOR_FUNCTOR(
607
+ aten::{op_name},
608
+ aten_{op_name},
609
+ [](Node* n) -> SROperator {{
610
+ {body}
611
+ LogAndDumpSchema(n);
612
+ return nullptr;
613
+ }});
614
+ """
615
+ return generated
616
+
617
+ def view(
618
+ self, groups: Sequence[NativeFunctionsViewGroup], backend_index: BackendIndex
619
+ ) -> str:
620
+ if not groups:
621
+ return ""
622
+ generated_type_variants = []
623
+ for g in groups:
624
+ with native_function_manager(g):
625
+ assert is_supported(g)
626
+ assert isinstance(g, NativeFunctionsViewGroup)
627
+ generated_type_variant = self.view_op_generator(g, backend_index)
628
+ generated_type_variants.append(generated_type_variant)
629
+ op_name = config.func_name_base_str(groups[0])
630
+ body = "\n".join(generated_type_variants)
631
+ generated = f"""
632
+ REGISTER_NATIVE_OPERATOR_FUNCTOR(
633
+ aten::{op_name},
634
+ aten_{op_name},
635
+ [](Node* n) -> SROperator {{
636
+ {body}
637
+ LogAndDumpSchema(n);
638
+ return nullptr;
639
+ }});
640
+ """
641
+ return generated
642
+
643
+    def out_variant_op_generator(
+        self, g: NativeFunctionsGroup, backend_index: BackendIndex
+    ) -> str:
+        functional = g.functional
+        schema = str(functional.func)
+        populated_argument = generate_arg_extraction(g.functional.func)
+        functional_variant_call = generate_non_out_variant_call(g, backend_index)
+        assert len(g.out.func.arguments.out) == 1
+        out_variable_name = str(g.out.func.arguments.out[0].name)
+        out_variant_call = generate_out_variant_call(g, backend_index)
+        generated = f"""
+  if (n->matches(torch::schema("aten::{schema}"))) {{
+    return [](ProcessedNode* p_node) {{
+      {populated_argument}
+      if (p_node->Output(0).isNone()) {{
+        p_node->Output(0) = {functional_variant_call};
+        return;
+      }}
+      auto& {out_variable_name} = p_node->Output(0).toTensor();
+      fastResizeToZero({out_variable_name});
+      {out_variant_call};
+    }};
+  }}"""
+        return generated
+
+    def view_op_generator(
+        self, g: NativeFunctionsViewGroup, backend_index: BackendIndex
+    ) -> str:
+        schema = str(g.view.func)
+        populated_argument = generate_arg_extraction(g.view.func)
+        functional_variant_call = generate_call_to_view_ops(g, backend_index)
+        generated = f"""
+  if (n->matches(torch::schema("aten::{schema}"))) {{
+    return [](ProcessedNode* p_node) {{
+      {populated_argument}
+      p_node->Output(0) = {functional_variant_call};
+    }};
+  }}"""
+        return generated
+
+
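+# Descriptive note (added comment): generates Static Runtime gtest cases
+# (TEST(StaticRuntime, autogen_*)) that run each supported op through
+# testStaticRuntime with generated sample inputs.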
+class GenOpTestCase:
+    def out_variant(self, groups: Sequence[NativeFunctionsGroup]) -> str:
+        if not groups:
+            return ""
+        generated_type_variants = []
+        for g in groups:
+            with native_function_manager(g):
+                assert is_supported(g)
+                assert isinstance(g, NativeFunctionsGroup)
+                generated_type_variant = self.out_variant_op_test_case_generator(g)
+                generated_type_variants.append(generated_type_variant)
+        return "\n".join(generated_type_variants)
+
+    def view(self, groups: Sequence[NativeFunctionsViewGroup]) -> str:
+        if not groups:
+            return ""
+        generated_type_variants = []
+        for g in groups:
+            with native_function_manager(g):
+                assert is_supported(g)
+                assert isinstance(g, NativeFunctionsViewGroup)
+                generated_type_variant = self.view_op_test_case_generator(g)
+                generated_type_variants.append(generated_type_variant)
+        return "\n".join(generated_type_variants)
+
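+    # Descriptive note (added comment): builds a TEST body that calls
+    # testStaticRuntime twice, first with one set of generated inputs and then
+    # with a second set (args2), passing check_resize per should_check_resize().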
+    def out_variant_op_test_case_generator(self, g: NativeFunctionsGroup) -> str:
+        schema = g.functional.func
+        schema_str = str(schema)
+        assert schema_str.find("(") > 0
+        type_variant_op_name = schema_str[: schema_str.find("(")].replace(".", "_")
+        op_name = op_name_from_group(g)
+        assert type_variant_op_name.startswith(op_name)
+
+        arg_types = generate_test_ir_arguments(schema)
+        arg_declarations = ", ".join(
+            (
+                arg_name if arg_type is None else f"{arg_name}: {arg_type}"
+                for arg_name, arg_type in arg_types
+            )
+        )
+        arg_names = ", ".join((arg_name for arg_name, _ in arg_types))
+        assert (
+            len(schema.returns) == 1
+            and isinstance(schema.returns[0].type, BaseType)
+            and schema.returns[0].type.name is BaseTy.Tensor
+        )
+        test_value_definitions = generate_test_value_definitions(schema, 0)
+        test_value_names = generate_test_value_names(schema, 0)
+        test_value_definitions2 = generate_test_value_definitions(schema, 1)
+        test_value_names2 = generate_test_value_names(schema, 1)
+        check_resize = "true" if should_check_resize(schema) else "false"
+        generated = f"""
+TEST(StaticRuntime, autogen_{type_variant_op_name}) {{
+  const std::string script = R"IR(
+    graph({arg_declarations}):
+        %bias: None = prim::Constant()
+        %ret = aten::{op_name}({arg_names})
+        %cloned = aten::clone(%ret, %bias)
+        return (%cloned)
+  )IR";
+
+  {test_value_definitions}
+  std::vector<IValue> args{{{test_value_names}}};
+  testStaticRuntime(script, args, {{}}, /*use_allclose=*/false, /*use_equalnan=*/false, /*check_resize=*/{check_resize});
+
+  {test_value_definitions2}
+  std::vector<IValue> args2{{{test_value_names2}}};
+  testStaticRuntime(script, args, args2, /*use_allclose=*/false, /*use_equalnan=*/false, /*check_resize=*/{check_resize});
+
+}}
+"""
+        return generated
+
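+    # Descriptive note (added comment): same shape as the out-variant test
+    # above, but view ops have no out variant, so only a single
+    # testStaticRuntime call with one set of generated inputs is emitted.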
+    def view_op_test_case_generator(self, g: NativeFunctionsViewGroup) -> str:
+        schema = g.view.func
+        schema_str = str(schema)
+        assert schema_str.find("(") > 0
+        type_variant_op_name = schema_str[: schema_str.find("(")].replace(".", "_")
+        op_name = g.view.root_name
+        assert type_variant_op_name.startswith(op_name)
+
+        arg_types = generate_test_ir_arguments(schema)
+        arg_declarations = ", ".join(
+            (
+                arg_name if arg_type is None else f"{arg_name}: {arg_type}"
+                for arg_name, arg_type in arg_types
+            )
+        )
+        arg_names = ", ".join((arg_name for arg_name, _ in arg_types))
+        assert (
+            len(schema.returns) == 1
+            and isinstance(schema.returns[0].type, BaseType)
+            and schema.returns[0].type.name is BaseTy.Tensor
+        )
+        test_value_definitions = generate_test_value_definitions(schema, 0)
+        test_value_names = generate_test_value_names(schema, 0)
+        generated = f"""
+TEST(StaticRuntime, autogen_{type_variant_op_name}) {{
+  const std::string script = R"IR(
+    graph({arg_declarations}):
+        %bias: None = prim::Constant()
+        %ret = aten::{op_name}({arg_names})
+        %cloned = aten::clone(%ret, %bias)
+        return (%cloned)
+  )IR";
+
+  {test_value_definitions}
+  std::vector<IValue> args{{{test_value_names}}};
+  testStaticRuntime(script, args);
+}}
+"""
+
+        return generated
env-llmeval/lib/python3.10/site-packages/tzdata/__init__.py ADDED
@@ -0,0 +1,6 @@
+# IANA versions like 2020a are not valid PEP 440 identifiers; the recommended
+# way to translate the version is to use YYYY.n where `n` is a 0-based index.
+__version__ = "2024.1"
+
+# This exposes the original IANA version number.
+IANA_VERSION = "2024a"
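For illustration only (not part of the tzdata package): the translation described in the comment above can be sketched as the helper below. The function name `iana_to_pep440` and the 1-based letter suffix are assumptions chosen so that the result reproduces the `IANA_VERSION`/`__version__` pair shipped in this file.

    import string

    def iana_to_pep440(iana_version: str) -> str:
        # Split "2024a" into the year ("2024") and the release letter ("a").
        year, letter = iana_version[:4], iana_version[4:]
        # Assumed mapping: "a" -> 1, "b" -> 2, ... so that 2024a -> 2024.1.
        n = string.ascii_lowercase.index(letter) + 1
        return f"{year}.{n}"

    # Example: matches the values defined in this file.
    assert iana_to_pep440("2024a") == "2024.1"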
env-llmeval/lib/python3.10/site-packages/tzdata/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (220 Bytes).

env-llmeval/lib/python3.10/site-packages/tzdata/zoneinfo/CST6CDT ADDED
Binary file (951 Bytes).

env-llmeval/lib/python3.10/site-packages/tzdata/zoneinfo/Cuba ADDED
Binary file (1.12 kB).

env-llmeval/lib/python3.10/site-packages/tzdata/zoneinfo/Egypt ADDED
Binary file (1.31 kB).

env-llmeval/lib/python3.10/site-packages/tzdata/zoneinfo/Eire ADDED
Binary file (1.5 kB).

env-llmeval/lib/python3.10/site-packages/tzdata/zoneinfo/Factory ADDED
Binary file (113 Bytes).

env-llmeval/lib/python3.10/site-packages/tzdata/zoneinfo/GMT-0 ADDED
Binary file (111 Bytes).

env-llmeval/lib/python3.10/site-packages/tzdata/zoneinfo/GMT0 ADDED
Binary file (111 Bytes).