applied-ai-018 committed
Commit 8c369de · verified · 1 parent: ab59c26

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. ckpts/universal/global_step120/zero/15.input_layernorm.weight/fp32.pt +3 -0
  2. ckpts/universal/global_step120/zero/21.mlp.dense_h_to_4h_swiglu.weight/fp32.pt +3 -0
  3. ckpts/universal/global_step120/zero/4.mlp.dense_h_to_4h.weight/fp32.pt +3 -0
  4. venv/lib/python3.10/site-packages/tabledata-1.3.3.dist-info/INSTALLER +1 -0
  5. venv/lib/python3.10/site-packages/tabledata-1.3.3.dist-info/LICENSE +21 -0
  6. venv/lib/python3.10/site-packages/tabledata-1.3.3.dist-info/METADATA +102 -0
  7. venv/lib/python3.10/site-packages/tabledata-1.3.3.dist-info/RECORD +29 -0
  8. venv/lib/python3.10/site-packages/tabledata-1.3.3.dist-info/WHEEL +5 -0
  9. venv/lib/python3.10/site-packages/tabledata-1.3.3.dist-info/top_level.txt +1 -0
  10. venv/lib/python3.10/site-packages/torchgen/__pycache__/__init__.cpython-310.pyc +0 -0
  11. venv/lib/python3.10/site-packages/torchgen/__pycache__/code_template.cpython-310.pyc +0 -0
  12. venv/lib/python3.10/site-packages/torchgen/__pycache__/context.cpython-310.pyc +0 -0
  13. venv/lib/python3.10/site-packages/torchgen/__pycache__/gen.cpython-310.pyc +0 -0
  14. venv/lib/python3.10/site-packages/torchgen/__pycache__/gen_aoti_c_shim.cpython-310.pyc +0 -0
  15. venv/lib/python3.10/site-packages/torchgen/__pycache__/gen_backend_stubs.cpython-310.pyc +0 -0
  16. venv/lib/python3.10/site-packages/torchgen/__pycache__/gen_executorch.cpython-310.pyc +0 -0
  17. venv/lib/python3.10/site-packages/torchgen/__pycache__/gen_functionalization_type.cpython-310.pyc +0 -0
  18. venv/lib/python3.10/site-packages/torchgen/__pycache__/gen_lazy_tensor.cpython-310.pyc +0 -0
  19. venv/lib/python3.10/site-packages/torchgen/__pycache__/gen_vmap_plumbing.cpython-310.pyc +0 -0
  20. venv/lib/python3.10/site-packages/torchgen/__pycache__/local.cpython-310.pyc +0 -0
  21. venv/lib/python3.10/site-packages/torchgen/__pycache__/model.cpython-310.pyc +0 -0
  22. venv/lib/python3.10/site-packages/torchgen/__pycache__/native_function_generation.cpython-310.pyc +0 -0
  23. venv/lib/python3.10/site-packages/torchgen/__pycache__/utils.cpython-310.pyc +0 -0
  24. venv/lib/python3.10/site-packages/torchgen/__pycache__/yaml_utils.cpython-310.pyc +0 -0
  25. venv/lib/python3.10/site-packages/torchgen/executorch/__init__.py +0 -0
  26. venv/lib/python3.10/site-packages/torchgen/executorch/__pycache__/__init__.cpython-310.pyc +0 -0
  27. venv/lib/python3.10/site-packages/torchgen/executorch/__pycache__/model.cpython-310.pyc +0 -0
  28. venv/lib/python3.10/site-packages/torchgen/executorch/__pycache__/parse.cpython-310.pyc +0 -0
  29. venv/lib/python3.10/site-packages/torchgen/executorch/api/__init__.py +0 -0
  30. venv/lib/python3.10/site-packages/torchgen/executorch/api/__pycache__/__init__.cpython-310.pyc +0 -0
  31. venv/lib/python3.10/site-packages/torchgen/executorch/api/__pycache__/custom_ops.cpython-310.pyc +0 -0
  32. venv/lib/python3.10/site-packages/torchgen/executorch/api/__pycache__/et_cpp.cpython-310.pyc +0 -0
  33. venv/lib/python3.10/site-packages/torchgen/executorch/api/__pycache__/unboxing.cpython-310.pyc +0 -0
  34. venv/lib/python3.10/site-packages/torchgen/executorch/api/custom_ops.py +142 -0
  35. venv/lib/python3.10/site-packages/torchgen/executorch/api/et_cpp.py +368 -0
  36. venv/lib/python3.10/site-packages/torchgen/executorch/api/types/__init__.py +2 -0
  37. venv/lib/python3.10/site-packages/torchgen/executorch/api/types/__pycache__/__init__.cpython-310.pyc +0 -0
  38. venv/lib/python3.10/site-packages/torchgen/executorch/api/types/__pycache__/signatures.cpython-310.pyc +0 -0
  39. venv/lib/python3.10/site-packages/torchgen/executorch/api/types/__pycache__/types.cpython-310.pyc +0 -0
  40. venv/lib/python3.10/site-packages/torchgen/executorch/api/types/signatures.py +73 -0
  41. venv/lib/python3.10/site-packages/torchgen/executorch/api/types/types.py +81 -0
  42. venv/lib/python3.10/site-packages/torchgen/executorch/api/unboxing.py +213 -0
  43. venv/lib/python3.10/site-packages/torchgen/executorch/model.py +220 -0
  44. venv/lib/python3.10/site-packages/torchgen/executorch/parse.py +151 -0
  45. venv/lib/python3.10/site-packages/torchgen/operator_versions/__init__.py +0 -0
  46. venv/lib/python3.10/site-packages/torchgen/operator_versions/__pycache__/__init__.cpython-310.pyc +0 -0
  47. venv/lib/python3.10/site-packages/torchgen/operator_versions/__pycache__/gen_mobile_upgraders.cpython-310.pyc +0 -0
  48. venv/lib/python3.10/site-packages/torchgen/operator_versions/__pycache__/gen_mobile_upgraders_constant.cpython-310.pyc +0 -0
  49. venv/lib/python3.10/site-packages/torchgen/operator_versions/gen_mobile_upgraders.py +392 -0
  50. venv/lib/python3.10/site-packages/torchgen/operator_versions/gen_mobile_upgraders_constant.py +7 -0
ckpts/universal/global_step120/zero/15.input_layernorm.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4b11171abcbb32326f56c2eb0db8091bd8a2367cfd1c556f7349591e73dd84e4
+size 9293
ckpts/universal/global_step120/zero/21.mlp.dense_h_to_4h_swiglu.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eef756aaf0216b5a75ad77c8a12da70b98e5d9850cec810be54f27a0b7527fed
+size 33555533
ckpts/universal/global_step120/zero/4.mlp.dense_h_to_4h.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e2e19a993f23ee14741479fea4b62fd90d8220342f7f5da3acc8ceafc0942c98
+size 33555533
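The three checkpoint files above are tracked with Git LFS, so the diff adds only a small pointer stub (spec version, sha256 oid, byte size) rather than the fp32 tensor data itself. A minimal sketch of reading such a pointer; the helper is hypothetical and not part of this commit, assuming only the three-line format shown above:

    # Hypothetical helper: parse a Git LFS pointer file of the form shown above.
    def parse_lfs_pointer(text: str) -> dict:
        fields = {}
        for line in text.strip().splitlines():
            key, _, value = line.partition(" ")
            fields[key] = value  # e.g. {"version": ..., "oid": "sha256:...", "size": "9293"}
        return fields

    pointer = parse_lfs_pointer(
        "version https://git-lfs.github.com/spec/v1\n"
        "oid sha256:4b11171abcbb32326f56c2eb0db8091bd8a2367cfd1c556f7349591e73dd84e4\n"
        "size 9293\n"
    )
    assert pointer["size"] == "9293"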
venv/lib/python3.10/site-packages/tabledata-1.3.3.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
+pip
venv/lib/python3.10/site-packages/tabledata-1.3.3.dist-info/LICENSE ADDED
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2017 Tsuyoshi Hombashi
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
venv/lib/python3.10/site-packages/tabledata-1.3.3.dist-info/METADATA ADDED
@@ -0,0 +1,102 @@
+Metadata-Version: 2.1
+Name: tabledata
+Version: 1.3.3
+Summary: tabledata is a Python library to represent tabular data. Used for pytablewriter/pytablereader/SimpleSQLite/etc.
+Home-page: https://github.com/thombashi/tabledata
+Author: Tsuyoshi Hombashi
+Author-email: [email protected]
+License: MIT License
+Project-URL: Changlog, https://github.com/thombashi/tabledata/releases
+Project-URL: Documentation, https://tabledata.rtfd.io/
+Project-URL: Source, https://github.com/thombashi/tabledata
+Project-URL: Tracker, https://github.com/thombashi/tabledata/issues
+Keywords: table
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: Information Technology
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3 :: Only
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Topic :: Software Development :: Libraries
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Requires-Python: >=3.7
+Description-Content-Type: text/x-rst
+License-File: LICENSE
+Requires-Dist: DataProperty <2,>=1.0.1
+Requires-Dist: typepy <2,>=1.2.0
+Provides-Extra: logging
+Requires-Dist: loguru <1,>=0.4.1 ; extra == 'logging'
+Provides-Extra: test
+Requires-Dist: pytablewriter >=0.46 ; extra == 'test'
+Requires-Dist: pytest ; extra == 'test'
+
+.. contents:: **tabledata**
+   :backlinks: top
+   :depth: 2
+
+Summary
+---------
+`tabledata <https://github.com/thombashi/tabledata>`__ is a Python library to represent tabular data. Used for pytablewriter/pytablereader/SimpleSQLite/etc.
+
+.. image:: https://badge.fury.io/py/tabledata.svg
+    :target: https://badge.fury.io/py/tabledata
+    :alt: PyPI package version
+
+.. image:: https://img.shields.io/pypi/pyversions/tabledata.svg
+    :target: https://pypi.org/project/tabledata
+    :alt: Supported Python versions
+
+.. image:: https://img.shields.io/pypi/implementation/tabledata.svg
+    :target: https://pypi.org/project/tabledata
+    :alt: Supported Python implementations
+
+.. image:: https://github.com/thombashi/tabledata/actions/workflows/ci.yml/badge.svg
+    :target: https://github.com/thombashi/tabledata/actions/workflows/ci.yml
+    :alt: Linux/macOS/Windows CI status
+
+.. image:: https://coveralls.io/repos/github/thombashi/tabledata/badge.svg?branch=master
+    :target: https://coveralls.io/github/thombashi/tabledata?branch=master
+    :alt: Test coverage
+
+Installation
+============
+
+Install from PyPI
+------------------------------
+::
+
+    pip install tabledata
+
+Install from PPA (for Ubuntu)
+------------------------------
+::
+
+    sudo add-apt-repository ppa:thombashi/ppa
+    sudo apt update
+    sudo apt install python3-tabledata
+
+
+Dependencies
+============
+- Python 3.7+
+- `Mandatory Python package dependencies (automatically installed) <https://github.com/thombashi/tabledata/network/dependencies>`__
+
+Optional Python packages
+------------------------------------------------
+- `loguru <https://github.com/Delgan/loguru>`__
+    - Used for logging if the package installed
+- `pandas <https://pandas.pydata.org/>`__
+    - required to get table data as a pandas data frame
+
+Documentation
+===============
+https://tabledata.rtfd.io/
+
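The METADATA above describes the tabledata package vendored into this venv. A minimal usage sketch, assuming tabledata 1.3.3 is installed as recorded here; per the optional dependencies above, pandas is only needed for the dataframe conversion:

    from tabledata import TableData

    # TableData(table_name, headers, rows), per the package's documented constructor.
    data = TableData("sample", ["attr_a", "attr_b"], [[1, 2], [3, 4]])
    print(data.headers)         # e.g. ['attr_a', 'attr_b']
    # df = data.as_dataframe()  # requires the optional pandas dependency noted above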
venv/lib/python3.10/site-packages/tabledata-1.3.3.dist-info/RECORD ADDED
@@ -0,0 +1,29 @@
+tabledata-1.3.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+tabledata-1.3.3.dist-info/LICENSE,sha256=vrvfBSShR_iaYV__U9eb3JDLx2MVUPtLclzT873NJPY,1074
+tabledata-1.3.3.dist-info/METADATA,sha256=IKxSJeg1Qrr6dSTCJdvnBIiKl6IKCa4aAIC_B4Ngwfg,3657
+tabledata-1.3.3.dist-info/RECORD,,
+tabledata-1.3.3.dist-info/WHEEL,sha256=yQN5g4mg4AybRjkgi-9yy4iQEFibGQmlz78Pik5Or-A,92
+tabledata-1.3.3.dist-info/top_level.txt,sha256=wPYCjph2PxB5odPJWPADX_65iL1gAIjMQFlAyZi80iI,10
+tabledata/__init__.py,sha256=OkkMA83NWJOKsmUru4qWiUXrwTxF5jDhHXl_dR2zQBQ,683
+tabledata/__pycache__/__init__.cpython-310.pyc,,
+tabledata/__pycache__/__version__.cpython-310.pyc,,
+tabledata/__pycache__/_common.cpython-310.pyc,,
+tabledata/__pycache__/_constant.cpython-310.pyc,,
+tabledata/__pycache__/_converter.cpython-310.pyc,,
+tabledata/__pycache__/_core.cpython-310.pyc,,
+tabledata/__pycache__/error.cpython-310.pyc,,
+tabledata/__pycache__/normalizer.cpython-310.pyc,,
+tabledata/__version__.py,sha256=JC4TkyHfH-eP9nAvfI04H3gEbgfItYa1jLE09ARSNSc,201
+tabledata/_common.py,sha256=eB3xHflvbF5p5hz1f5D9xNHQCujy6Uk91NLPTy5fFHY,274
+tabledata/_constant.py,sha256=I763_Fx-9IT_ZQTTncxi04WsXd6tK78z2VBYZ3up5Aw,154
+tabledata/_converter.py,sha256=0H61eirjQw_rs0h1N_APtCthRRFbYkKZVUHK-5-0GAE,895
+tabledata/_core.py,sha256=4y0sLRCEcvjJvqi_pUlhz5qjIass_pZu5FcnK_kpr7U,14530
+tabledata/_logger/__init__.py,sha256=7rkhAj6PGbUI3fouTa7GEzjRelUFj0_UPfzkZ_Yk71g,55
+tabledata/_logger/__pycache__/__init__.cpython-310.pyc,,
+tabledata/_logger/__pycache__/_logger.cpython-310.pyc,,
+tabledata/_logger/__pycache__/_null_logger.cpython-310.pyc,,
+tabledata/_logger/_logger.py,sha256=3HreG22mzHGZvexAGZpjkU4A995ZZmGJmiIkPcrkA4o,783
+tabledata/_logger/_null_logger.py,sha256=QJuaErUIV_x6NjQ9qNX9eNSi_GB_9CrO7lKeXYZnuaw,1088
+tabledata/error.py,sha256=UGGJm3_9oLQi9GBWZz4cqp1dnzc5Kbu37c6CsiWozME,526
+tabledata/normalizer.py,sha256=lVz4agT8Bm97rvKUUUhP3OT1pGDsMczB5rAlx316XoY,6465
+tabledata/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
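Each RECORD row above has the form `path,sha256=<digest>,size`, where the digest is the unpadded URL-safe base64 encoding of the file's SHA-256, per the wheel RECORD format (PEP 376/PEP 627). A short sketch of recomputing one entry's hash field:

    import base64
    import hashlib

    def record_hash(data: bytes) -> str:
        # Unpadded URL-safe base64 of the SHA-256 digest, as used in RECORD rows.
        digest = hashlib.sha256(data).digest()
        return "sha256=" + base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")

    # record_hash(open("tabledata/__init__.py", "rb").read())
    # should reproduce "sha256=OkkMA83NWJOKsmUru4qWiUXrwTxF5jDhHXl_dR2zQBQ" from the row above.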
venv/lib/python3.10/site-packages/tabledata-1.3.3.dist-info/WHEEL ADDED
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.41.2)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
venv/lib/python3.10/site-packages/tabledata-1.3.3.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
+tabledata
venv/lib/python3.10/site-packages/torchgen/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (532 Bytes)
venv/lib/python3.10/site-packages/torchgen/__pycache__/code_template.cpython-310.pyc ADDED
Binary file (3.05 kB)
venv/lib/python3.10/site-packages/torchgen/__pycache__/context.cpython-310.pyc ADDED
Binary file (3.89 kB)
venv/lib/python3.10/site-packages/torchgen/__pycache__/gen.cpython-310.pyc ADDED
Binary file (66 kB)
venv/lib/python3.10/site-packages/torchgen/__pycache__/gen_aoti_c_shim.cpython-310.pyc ADDED
Binary file (9.98 kB)
venv/lib/python3.10/site-packages/torchgen/__pycache__/gen_backend_stubs.cpython-310.pyc ADDED
Binary file (15.2 kB)
venv/lib/python3.10/site-packages/torchgen/__pycache__/gen_executorch.cpython-310.pyc ADDED
Binary file (27.9 kB)
venv/lib/python3.10/site-packages/torchgen/__pycache__/gen_functionalization_type.cpython-310.pyc ADDED
Binary file (22.7 kB)
venv/lib/python3.10/site-packages/torchgen/__pycache__/gen_lazy_tensor.cpython-310.pyc ADDED
Binary file (14 kB)
venv/lib/python3.10/site-packages/torchgen/__pycache__/gen_vmap_plumbing.cpython-310.pyc ADDED
Binary file (8.76 kB)
venv/lib/python3.10/site-packages/torchgen/__pycache__/local.cpython-310.pyc ADDED
Binary file (1.36 kB)
venv/lib/python3.10/site-packages/torchgen/__pycache__/model.cpython-310.pyc ADDED
Binary file (65.1 kB)
venv/lib/python3.10/site-packages/torchgen/__pycache__/native_function_generation.cpython-310.pyc ADDED
Binary file (12.6 kB)
venv/lib/python3.10/site-packages/torchgen/__pycache__/utils.cpython-310.pyc ADDED
Binary file (14.9 kB)
venv/lib/python3.10/site-packages/torchgen/__pycache__/yaml_utils.cpython-310.pyc ADDED
Binary file (1.03 kB)
venv/lib/python3.10/site-packages/torchgen/executorch/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/torchgen/executorch/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (187 Bytes)
venv/lib/python3.10/site-packages/torchgen/executorch/__pycache__/model.cpython-310.pyc ADDED
Binary file (7.33 kB)
venv/lib/python3.10/site-packages/torchgen/executorch/__pycache__/parse.cpython-310.pyc ADDED
Binary file (4.3 kB)
venv/lib/python3.10/site-packages/torchgen/executorch/api/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/torchgen/executorch/api/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (191 Bytes)
venv/lib/python3.10/site-packages/torchgen/executorch/api/__pycache__/custom_ops.cpython-310.pyc ADDED
Binary file (4.25 kB)
venv/lib/python3.10/site-packages/torchgen/executorch/api/__pycache__/et_cpp.cpython-310.pyc ADDED
Binary file (7.45 kB)
venv/lib/python3.10/site-packages/torchgen/executorch/api/__pycache__/unboxing.cpython-310.pyc ADDED
Binary file (6.38 kB)
venv/lib/python3.10/site-packages/torchgen/executorch/api/custom_ops.py ADDED
@@ -0,0 +1,142 @@
+from collections import defaultdict
+
+from dataclasses import dataclass
+from typing import Dict, List, Optional, Sequence, Tuple
+
+from torchgen import dest
+
+# disable import sorting to avoid circular dependency.
+from torchgen.api.types import DispatcherSignature  # isort:skip
+from torchgen.context import method_with_native_function
+from torchgen.executorch.model import ETKernelIndex
+from torchgen.model import BaseTy, BaseType, DispatchKey, NativeFunction, Variant
+from torchgen.selective_build.selector import SelectiveBuilder
+from torchgen.utils import concatMap, Target
+
+
+# Generates RegisterKernelStub.cpp, which provides placeholder kernels for custom operators. This will be used at
+# model authoring side.
+@dataclass(frozen=True)
+class ComputeNativeFunctionStub:
+    @method_with_native_function
+    def __call__(self, f: NativeFunction) -> Optional[str]:
+        if Variant.function not in f.variants:
+            return None
+
+        sig = DispatcherSignature.from_schema(
+            f.func, prefix=f"wrapper_CPU_{f.func.name.overload_name}_", symint=False
+        )
+        assert sig is not None
+        if len(f.func.returns) == 0:
+            ret_name = ""
+        elif len(f.func.returns) == 1:
+            if f.func.arguments.out:
+                ret_name = f.func.arguments.out[0].name
+            else:
+                ret_name = next(
+                    (
+                        a.name
+                        for a in f.func.arguments.flat_non_out
+                        if a.type == f.func.returns[0].type
+                    ),
+                    "",
+                )
+            if not ret_name:
+                # if return type is tensor
+                if f.func.returns[0].type == BaseType(BaseTy.Tensor):
+                    # Returns an empty tensor
+                    ret_name = "at::Tensor()"
+                else:
+                    raise Exception(f"Can't handle this return type {f.func}")
+        elif len(f.func.arguments.out) == len(f.func.returns):
+            # Returns a tuple of out arguments
+            tensor_type = "at::Tensor &"
+            comma = ", "
+            ret_name = f"""::std::tuple<{comma.join([tensor_type] * len(f.func.returns))}>(
+                {comma.join([r.name for r in f.func.arguments.out])}
+            )"""
+        else:
+            assert all(
+                a.type == BaseType(BaseTy.Tensor) for a in f.func.returns
+            ), f"Only support tensor returns but got {f.func.returns}"
+            # Returns a tuple of empty tensors
+            tensor_type = "at::Tensor"
+            comma = ", "
+            ret_name = f"""::std::tuple<{comma.join([tensor_type] * len(f.func.returns))}>(
+                {comma.join(["at::Tensor()" for _ in f.func.returns])}
+            )"""
+        ret_str = f"return {ret_name};" if len(f.func.returns) > 0 else ""
+        return f"""
+{sig.defn()} {{
+    {ret_str}
+}}
+    """
+
+
+def gen_custom_ops_registration(
+    *,
+    native_functions: Sequence[NativeFunction],
+    selector: SelectiveBuilder,
+    kernel_index: ETKernelIndex,
+    rocm: bool,
+) -> Tuple[str, str]:
+    """
+    Generate custom ops registration code for dest.RegisterDispatchKey.
+
+    :param native_functions: a sequence of `NativeFunction`
+    :param selector: for selective build.
+    :param kernel_index: kernels for all the ops.
+    :param rocm: bool for dest.RegisterDispatchKey.
+    :return: generated C++ code to register custom operators into PyTorch
+    """
+
+    # convert kernel index to BackendIndex. This is because we can't handle ETKernelIndex yet.
+    # TODO larryliu: evaluate if this code is still needed. If yes let it handle ETKernelIndex.
+
+    dispatch_key = DispatchKey.CPU
+    backend_index = kernel_index._to_backend_index()
+    static_init_dispatch_registrations = ""
+    ns_grouped_native_functions: Dict[str, List[NativeFunction]] = defaultdict(list)
+    for native_function in native_functions:
+        ns_grouped_native_functions[native_function.namespace].append(native_function)
+
+    for namespace, functions in ns_grouped_native_functions.items():
+        if len(functions) == 0:
+            continue
+        dispatch_registrations_body = "\n".join(
+            list(
+                concatMap(
+                    dest.RegisterDispatchKey(
+                        backend_index,
+                        Target.REGISTRATION,
+                        selector,
+                        rocm=rocm,
+                        symint=False,
+                        class_method_name=None,
+                        skip_dispatcher_op_registration=False,
+                    ),
+                    functions,
+                )
+            )
+        )
+        static_init_dispatch_registrations += f"""
+TORCH_LIBRARY_IMPL({namespace}, {dispatch_key}, m) {{
+{dispatch_registrations_body}
+}};"""
+    anonymous_definition = "\n".join(
+        list(
+            concatMap(
+                dest.RegisterDispatchKey(
+                    backend_index,
+                    Target.ANONYMOUS_DEFINITION,
+                    selector,
+                    rocm=rocm,
+                    symint=False,
+                    class_method_name=None,
+                    skip_dispatcher_op_registration=False,
+                ),
+                native_functions,
+            )
+        )
+    )
+    return anonymous_definition, static_init_dispatch_registrations
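For orientation, ComputeNativeFunctionStub above emits one C++ placeholder per function-variant operator. A hedged illustration of roughly what it would produce for a schema like `custom::foo(Tensor self) -> Tensor` (the operator is made up, and the exact wrapper name is derived by DispatcherSignature from the `wrapper_CPU_<overload>_` prefix):

    # Roughly the stub text produced for the hypothetical "custom::foo(Tensor self) -> Tensor":
    # the single Tensor return is satisfied by returning the argument whose type matches.
    expected_stub = """
    at::Tensor wrapper_CPU__foo(const at::Tensor & self) {
        return self;
    }
    """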
venv/lib/python3.10/site-packages/torchgen/executorch/api/et_cpp.py ADDED
@@ -0,0 +1,368 @@
+from typing import List, Optional, Sequence, Set, Union
+
+from torchgen import local
+from torchgen.api.types import (
+    ArgName,
+    ArrayCType,
+    BaseCType,
+    Binding,
+    ConstRefCType,
+    CType,
+    MutRefCType,
+    NamedCType,
+    SpecialArgName,
+    TupleCType,
+    VectorCType,
+    voidT,
+)
+from torchgen.model import (
+    Argument,
+    Arguments,
+    BaseTy,
+    BaseType,
+    ListType,
+    NativeFunction,
+    OptionalType,
+    Return,
+    SelfArgument,
+    TensorOptionsArguments,
+    Type,
+)
+from torchgen.utils import assert_never
+from .types import (
+    ArrayRefCType,
+    BaseTypeToCppMapping,
+    OptionalCType,
+    scalarT,
+    tensorListT,
+    tensorT,
+)
+
+"""
+This file describes the translation of JIT schema to the public C++ API, which is what people use when they call
+functions like at::add. It also serves as a native function API, which is the signature of kernels,
+since in Executorch CppSignature is the same as NativeSignature.
+
+Difference between this file and torchgen.api.cpp.py:
+
+- Executorch doesn't support TensorOptions, however in this file we still keep the logic here to be compatible with
+  torchgen.api.cpp, so that we can do stuff like ATen mode (running ATen kernels in Executorch).
+
+- Executorch doesn't support Dimname.
+
+- Executorch runtime doesn't support SymInt, will treat it as int.
+"""
+
+
+# Translation of "value types" in JIT schema to C++ API type. Value
+# types look the same no matter if they are argument types or return
+# types. Returns None if the type in question is not a value type.
+def valuetype_type(
+    t: Type,
+    *,
+    binds: ArgName,
+    remove_non_owning_ref_types: bool = False,
+) -> Optional[NamedCType]:
+    if isinstance(t, BaseType):
+        if t.name == BaseTy.Tensor or t.name == BaseTy.Scalar:
+            return None
+        # For SymInt we simply treat it as int.
+        elif str(t) == "SymInt":
+            return NamedCType(binds, BaseCType(BaseTypeToCppMapping[BaseTy.int]))
+        if remove_non_owning_ref_types:
+            if t.name == BaseTy.str:
+                raise AssertionError(
+                    "string ref->value conversion: not implemented yet"
+                )
+        # All other BaseType currently map directly to BaseCppTypes.
+        return NamedCType(binds, BaseCType(BaseTypeToCppMapping[t.name]))
+    elif isinstance(t, OptionalType):
+        elem = valuetype_type(t.elem, binds=binds)
+        if elem is None:
+            return None
+        return NamedCType(binds, OptionalCType(elem.type))
+    elif isinstance(t, ListType):
+        if str(t.elem) == "bool":
+            assert t.size is not None
+            return NamedCType(
+                binds, ArrayCType(BaseCType(BaseTypeToCppMapping[BaseTy.bool]), t.size)
+            )
+        else:
+            return None
+    else:
+        raise AssertionError(f"unrecognized type {repr(t)}")
+
+
+# Translation of types occurring in JIT arguments to a C++ argument type.
+# If remove_non_owning_ref_types is set, we'll guarantee that the outputed CType is not a non-owning reference type.
+# For example, we'll return std::vector<int> instead of IntArrayRef.
+# See Note [translation from C++ reference to value types]
+def argumenttype_type(
+    t: Type,
+    *,
+    mutable: bool,
+    binds: ArgName,
+    remove_non_owning_ref_types: bool = False,
+) -> NamedCType:
+    # If it's a value type, do the value type translation
+    r = valuetype_type(
+        t,
+        binds=binds,
+        remove_non_owning_ref_types=remove_non_owning_ref_types,
+    )
+    if r is not None:
+        return r
+    if isinstance(t, BaseType):
+        if t.name == BaseTy.Tensor:
+            if mutable and not local.use_const_ref_for_mutable_tensors():
+                return NamedCType(binds, MutRefCType(BaseCType(tensorT)))
+            else:
+                return NamedCType(binds, ConstRefCType(BaseCType(tensorT)))
+        elif t.name == BaseTy.Scalar:
+            return NamedCType(binds, ConstRefCType(BaseCType(scalarT)))
+        else:
+            raise AssertionError(f"base type should have been value type {t}")
+    elif isinstance(t, OptionalType):
+        if str(t.elem) == "Tensor":
+            if mutable and not local.use_const_ref_for_mutable_tensors():
+                return NamedCType(
+                    binds, MutRefCType(BaseCType(tensorT))
+                )  # TODO: fix this discrepancy
+            else:
+                return NamedCType(
+                    binds, ConstRefCType(OptionalCType(BaseCType(tensorT)))
+                )
+        elif str(t.elem) == "Scalar":
+            return NamedCType(binds, ConstRefCType(OptionalCType(BaseCType(scalarT))))
+        elem = argumenttype_type(t.elem, mutable=mutable, binds=binds)
+        return NamedCType(binds, OptionalCType(elem.type))
+    elif isinstance(t, ListType):
+        # TODO: keeping these special cases for Tensor[] and Tensor?[] so that we can hookup with ATen kernels.
+        if str(t.elem) == "Tensor":
+            return NamedCType(binds, BaseCType(tensorListT))
+        elif str(t.elem) == "Dimname":
+            raise NotImplementedError("Executorch doesn't support Dimname")
+        elif str(t.elem) == "Tensor?":
+            return NamedCType(binds, ArrayRefCType(OptionalCType(BaseCType(tensorT))))
+        elem = argumenttype_type(t.elem, mutable=mutable, binds=binds)
+        return NamedCType(binds, ArrayRefCType(elem.type))
+    else:
+        raise AssertionError(f"unrecognized type {repr(t)}")
+
+
+# Translate a JIT argument into its C++ type
+def argument_type(a: Argument, *, binds: ArgName) -> NamedCType:
+    return argumenttype_type(a.type, mutable=a.is_write, binds=binds)
+
+
+# Translation of a (non-multi) return type from JIT to C++
+# N.B: returntype_type returns a CType, not a NamedCType.
+# This is mostly because of the mismatch between return types and return names.
+# e.g. a function with a return type of 'void' has 0 return names,
+# and a function with a return type of 'std::tuple' has >1 return name.
+def returntype_type(t: Type, *, mutable: bool) -> CType:
+    # placeholder is ignored
+    r = valuetype_type(t, binds="__placeholder__")
+    if r is not None:
+        return r.type
+
+    if isinstance(t, BaseType):
+        if t.name == BaseTy.Tensor:
+            if mutable:
+                if local.use_const_ref_for_mutable_tensors():
+                    return ConstRefCType(BaseCType(tensorT))
+                else:
+                    return MutRefCType(BaseCType(tensorT))
+            else:
+                # Note [Tensor Copy Returns]
+                # Currently, we use "Argument.is_write" to determine
+                # whether or not Tensor return types should be copies or references.
+                # If that ever changes, take a look at other locations of this note!
+                return BaseCType(tensorT)
+        elif t.name == BaseTy.Scalar:
+            return BaseCType(scalarT)
+    elif isinstance(t, ListType):
+        assert (
+            not mutable
+        ), "Native functions should never return a mutable tensor list. They should return void."
+        elem = returntype_type(t.elem, mutable=False)
+        assert t.size is None, f"fixed size list returns not supported: {t}"
+        return VectorCType(elem)
+
+    raise AssertionError(f"unrecognized return type {t}")
+
+
+# Translation of a single return to its C++ type
+def return_type(r: Return) -> CType:
+    return returntype_type(r.type, mutable=r.is_write)
+
+
+# Translation of a full (possibly multi) return from JIT to its C++ type
+def returns_type(rs: Sequence[Return]) -> CType:
+    if len(rs) == 0:
+        return BaseCType(voidT)
+    elif len(rs) == 1:
+        return return_type(rs[0])
+    else:
+        return TupleCType([return_type(r) for r in rs])
+
+
+def return_names(f: NativeFunction, *, fallback_name: str = "result") -> Sequence[str]:
+    returns: List[str] = []
+    for i, r in enumerate(f.func.returns):
+        # If we have an inplace function, the return argument is
+        # implicitly named self.
+        # TODO: Consider incorporating this into the data model
+        if f.func.name.name.inplace:
+            assert i == 0, "illegal inplace function with multiple returns"
+            name = "self"
+        # If we are out function, the name is the name of the
+        # corresponding output function (r.name will get recorded
+        # in field_name later.)
+        elif f.func.is_out_fn():
+            name = f.func.arguments.out[i].name
+        # If the return argument is explicitly named...
+        elif r.name:
+            name_conflict = any(
+                r.name == a.name for a in f.func.schema_order_arguments()
+            )
+            if name_conflict and not f.func.is_out_fn():
+                name = f"{r.name}_return"
+            else:
+                name = r.name
+        # If there is no explicit name and no fallback name was passed in, we just name the output result,
+        # unless it's a multi-return, in which case it's result0,
+        # result1, etc (zero-indexed)
+        else:
+            name = fallback_name if len(f.func.returns) == 1 else f"{fallback_name}{i}"
+        returns.append(name)
+    return returns
+
+
+JIT_TO_CPP_DEFAULT = {
+    "False": "false",
+    "True": "true",
+    "None": "torch::executorch::nullopt",  # UGH this one is type directed
+    "[]": "{}",
+    "contiguous_format": "torch::executorch::MemoryFormat::Contiguous",
+    "long": "torch::executorch::kLong",
+}
+
+
+# Convert a JIT default into C++ expression representing the default
+def default_expr(d: str, t: Type) -> str:
+    if d == "None" and str(t) == "Tensor?":
+        return "{}"
+    if isinstance(t, BaseType) and t.name is BaseTy.str:
+        # Schema allows single quotes but C++ needs double
+        if len(d) >= 2 and d[0] == "'" and d[-1] == "'":
+            s = ""
+            i = 1
+            while i + 1 < len(d):
+                if d[i] != "\\":
+                    if d[i] == '"':
+                        s += '\\"'
+                    else:
+                        s += d[i]
+                    i += 1
+                else:
+                    if d[i + 1] == "'":
+                        s += "'"
+                    else:
+                        s += d[i : i + 2]
+                    i += 2
+
+            return f'"{s}"'
+
+    if isinstance(t, OptionalType):
+        if d == "None":
+            return "torch::executor::nullopt"
+
+        return default_expr(d, t.elem)
+
+    if isinstance(t, ListType):
+        if d.startswith("[") and d.endswith("]"):
+            return "{" + d[1:-1] + "}"
+        elif t.size is None:
+            # NOTE: Sized lists can have scalar defaults
+            raise ValueError(f"Expected a list default '[...]' but found: '{d}'")
+
+    return JIT_TO_CPP_DEFAULT.get(d, d)
+
+
+# Convert an argument into its C++ API form
+
+
+def argument(
+    a: Union[Argument, TensorOptionsArguments, SelfArgument],
+    *,
+    cpp_no_default_args: Set[str],
+    method: bool,
+    faithful: bool,
+    has_tensor_options: bool,
+) -> List[Binding]:
+    def sub_argument(
+        a: Union[Argument, TensorOptionsArguments, SelfArgument]
+    ) -> List[Binding]:
+        return argument(
+            a,
+            cpp_no_default_args=cpp_no_default_args,
+            method=method,
+            faithful=faithful,
+            has_tensor_options=has_tensor_options,
+        )
+
+    if isinstance(a, Argument):
+        binds: ArgName
+        if a.name == "memory_format" and has_tensor_options:
+            binds = SpecialArgName.possibly_redundant_memory_format
+        else:
+            binds = a.name
+        default: Optional[str] = None
+        if a.name not in cpp_no_default_args and a.default is not None:
+            default = default_expr(a.default, a.type)
+        return [
+            Binding(
+                nctype=argument_type(a, binds=binds),
+                name=a.name,
+                default=default,
+                argument=a,
+            )
+        ]
+    elif isinstance(a, TensorOptionsArguments):
+        raise NotImplementedError("Need to implement type resolution for TensorOptions")
+    elif isinstance(a, SelfArgument):
+        if method:
+            # Caller is responsible for installing implicit this in context!
+            return []
+        else:
+            return sub_argument(a.argument)
+    else:
+        assert_never(a)
+
+
+def arguments(
+    arguments: Arguments,
+    *,
+    faithful: bool,
+    method: bool,
+    cpp_no_default_args: Set[str],
+) -> List[Binding]:
+    args: List[Union[Argument, TensorOptionsArguments, SelfArgument]] = []
+    if faithful:
+        args.extend(arguments.non_out)
+        args.extend(arguments.out)
+    else:
+        args.extend(arguments.out)
+        args.extend(arguments.non_out)
+    return [
+        r.no_default() if faithful else r
+        for a in args
+        for r in argument(
+            a,
+            faithful=faithful,
+            method=method,
+            has_tensor_options=arguments.tensor_options is not None,
+            cpp_no_default_args=cpp_no_default_args,
+        )
+    ]
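The translation helpers above are pure functions over the torchgen model, so their behavior is easy to probe directly. A small sketch exercising default_expr with the mappings defined in the file, assuming this module is importable as torchgen.executorch.api.et_cpp:

    from torchgen.executorch.api.et_cpp import default_expr
    from torchgen.model import BaseTy, BaseType, OptionalType

    print(default_expr("True", BaseType(BaseTy.bool)))               # true
    print(default_expr("'nearest'", BaseType(BaseTy.str)))           # "nearest"
    print(default_expr("None", OptionalType(BaseType(BaseTy.int))))  # torch::executor::nullopt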
venv/lib/python3.10/site-packages/torchgen/executorch/api/types/__init__.py ADDED
@@ -0,0 +1,2 @@
+from .types import *
+from .signatures import *  # isort:skip
venv/lib/python3.10/site-packages/torchgen/executorch/api/types/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (241 Bytes)
venv/lib/python3.10/site-packages/torchgen/executorch/api/types/__pycache__/signatures.cpython-310.pyc ADDED
Binary file (3.05 kB)
venv/lib/python3.10/site-packages/torchgen/executorch/api/types/__pycache__/types.cpython-310.pyc ADDED
Binary file (2.61 kB)
venv/lib/python3.10/site-packages/torchgen/executorch/api/types/signatures.py ADDED
@@ -0,0 +1,73 @@
+from dataclasses import dataclass
+from typing import List, Optional, Set
+
+import torchgen.api.cpp as aten_cpp
+
+from torchgen.api.types import Binding, CType
+from torchgen.model import FunctionSchema, NativeFunction
+
+from .types import contextArg
+
+
+@dataclass(frozen=True)
+class ExecutorchCppSignature:
+    """
+    This signature is merely a CppSignature with Executorch types (optionally
+    contains KernelRuntimeContext as well). The inline definition of
+    CppSignature is generated in Functions.h and it's used by unboxing
+    functions.
+    """
+
+    # The schema this signature is derived from
+    func: FunctionSchema
+
+    # The set of C++ arguments which should not have defaults applied to them
+    cpp_no_default_args: Set[str]
+
+    # Allows you to prepend an arbitrary prefix to the signature name.
+    # This is useful for parts of the codegen that generate wrappers around kernels,
+    # and need to avoid naming collisions.
+    prefix: str = ""
+
+    def arguments(self, *, include_context: bool = True) -> List[Binding]:
+        return ([contextArg] if include_context else []) + et_cpp.arguments(
+            self.func.arguments,
+            faithful=True,  # always faithful, out argument at the end
+            method=False,  # method not supported
+            cpp_no_default_args=self.cpp_no_default_args,
+        )
+
+    def name(self) -> str:
+        return self.prefix + aten_cpp.name(
+            self.func,
+            faithful_name_for_out_overloads=True,
+        )
+
+    def decl(self, name: Optional[str] = None, *, include_context: bool = True) -> str:
+        args_str = ", ".join(
+            a.decl() for a in self.arguments(include_context=include_context)
+        )
+        if name is None:
+            name = self.name()
+        return f"{self.returns_type().cpp_type()} {name}({args_str})"
+
+    def defn(self, name: Optional[str] = None) -> str:
+        args = [a.defn() for a in self.arguments()]
+        args_str = ", ".join(args)
+        if name is None:
+            name = self.name()
+        return f"{self.returns_type().cpp_type()} {name}({args_str})"
+
+    def returns_type(self) -> CType:
+        return et_cpp.returns_type(self.func.returns)
+
+    @staticmethod
+    def from_native_function(
+        f: NativeFunction, *, prefix: str = ""
+    ) -> "ExecutorchCppSignature":
+        return ExecutorchCppSignature(
+            func=f.func, prefix=prefix, cpp_no_default_args=f.cpp_no_default_args
+        )
+
+
+from torchgen.executorch.api import et_cpp
venv/lib/python3.10/site-packages/torchgen/executorch/api/types/types.py ADDED
@@ -0,0 +1,81 @@
+from dataclasses import dataclass
+from typing import Dict
+
+from torchgen.api.types import (
+    BaseCppType,
+    BaseCType,
+    Binding,
+    boolT,
+    CType,
+    doubleT,
+    Expr,
+    longT,
+    MutRefCType,
+    NamedCType,
+)
+from torchgen.model import BaseTy
+
+halfT = BaseCppType("torch::executor", "Half")
+bfloat16T = BaseCppType("torch::executor", "BFloat16")
+stringT = BaseCppType("torch::executor", "string_view")
+scalarTypeT = BaseCppType("torch::executor", "ScalarType")
+tensorT = BaseCppType("torch::executor", "Tensor")
+tensorListT = BaseCppType("torch::executor", "TensorList")
+scalarT = BaseCppType("torch::executor", "Scalar")
+memoryFormatT = BaseCppType("torch::executor", "MemoryFormat")
+intArrayRefT = BaseCppType("torch::executor", "IntArrayRef")
+optionalT = BaseCppType("torch::executor", "optional")
+contextT = BaseCppType("torch::executor", "KernelRuntimeContext")
+
+contextExpr = Expr(
+    expr="context",
+    type=NamedCType(name="context", type=MutRefCType(BaseCType(contextT))),
+)
+
+contextArg = Binding(
+    name="context",
+    nctype=contextExpr.type,
+    argument=None,  # type: ignore[arg-type]
+    default=None,
+)
+
+BaseTypeToCppMapping: Dict[BaseTy, BaseCppType] = {
+    BaseTy.int: longT,
+    BaseTy.float: doubleT,
+    BaseTy.bool: boolT,
+    BaseTy.str: stringT,
+    BaseTy.ScalarType: scalarTypeT,
+    BaseTy.Tensor: tensorT,
+    BaseTy.Scalar: scalarT,
+    BaseTy.MemoryFormat: memoryFormatT,
+}
+
+
+@dataclass(frozen=True)
+class OptionalCType(CType):
+    elem: "CType"
+
+    def cpp_type(self, *, strip_ref: bool = False) -> str:
+        # Do not pass `strip_ref` recursively.
+        return f"torch::executor::optional<{self.elem.cpp_type()}>"
+
+    def cpp_type_registration_declarations(self) -> str:
+        return f"torch::executor::optional<{self.elem.cpp_type_registration_declarations()}>"
+
+    def remove_const_ref(self) -> "CType":
+        return OptionalCType(self.elem.remove_const_ref())
+
+
+@dataclass(frozen=True)
+class ArrayRefCType(CType):
+    elem: "CType"
+
+    def cpp_type(self, *, strip_ref: bool = False) -> str:
+        # Do not pass `strip_ref` recursively.
+        return f"torch::executor::ArrayRef<{self.elem.cpp_type()}>"
+
+    def cpp_type_registration_declarations(self) -> str:
+        return f"torch::executor::ArrayRef<{self.elem.cpp_type_registration_declarations()}>"
+
+    def remove_const_ref(self) -> "CType":
+        return ArrayRefCType(self.elem.remove_const_ref())
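Since these CTypes only format strings, the containers above compose mechanically. A quick sketch of the rendered C++ types, assuming torchgen is importable:

    from torchgen.api.types import BaseCType
    from torchgen.executorch.api.types import ArrayRefCType, OptionalCType, tensorT

    print(OptionalCType(BaseCType(tensorT)).cpp_type())
    # torch::executor::optional<torch::executor::Tensor>
    print(ArrayRefCType(OptionalCType(BaseCType(tensorT))).cpp_type())
    # torch::executor::ArrayRef<torch::executor::optional<torch::executor::Tensor>>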
venv/lib/python3.10/site-packages/torchgen/executorch/api/unboxing.py ADDED
@@ -0,0 +1,213 @@
+from dataclasses import dataclass
+from typing import Callable, List, Sequence, Tuple
+
+from torchgen.api.types import Binding, CType, NamedCType
+from torchgen.model import (
+    Argument,
+    BaseTy,
+    BaseType,
+    ListType,
+    NativeFunction,
+    OptionalType,
+    Type,
+)
+
+connector = "\n\t"
+
+
+# Return unboxing function name for a NativeFunction
+def name(f: NativeFunction) -> str:
+    return f.func.name.unambiguous_name()
+
+
+@dataclass(frozen=True)
+class Unboxing:
+    """
+    Takes a sequence of Bindings and unbox EValues to these Bindings. Return generated code that performs correct unboxing.
+    A sample generated code:
+    // aten::mul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+    void mul_out(EValue** stack) {
+        EValue& self = *stack[0];
+        EValue& other = *stack[1];
+        EValue& out = *stack[2];
+        const torch::executor::Tensor & self_base = self.to<torch::executor::Tensor>();
+        const torch::executor::Tensor & other_base = other.to<torch::executor::Tensor>();
+        torch::executor::Tensor & out_base = out.to<torch::executor::Tensor>();
+
+        EXECUTORCH_SCOPE_PROF("native_call_mul.out");
+        torch::executor::mul_outf(self_base, other_base, out_base);
+
+
+    }
+    """
+
+    # this is a callable that converts a JIT argument, into its C++ type.
+    # Translates (type, mutability, binds) to NamedCType. E.g., torchgen.api.cpp.argumenttype_type.
+    argument_type_gen: Callable[
+        ...,
+        NamedCType,
+    ]
+
+    # Convert all the arguments in a NativeFunction to C++ code
+    def convert_arguments(
+        self, args: Sequence[Binding]
+    ) -> Tuple[List[Binding], List[str]]:
+        code_list = [f"EValue& {args[i].name} = *stack[{i}];" for i in range(len(args))]
+        binding_list = []
+        for arg in args:
+            # expecting only Argument
+            if not isinstance(arg.argument, Argument):
+                raise Exception(
+                    f"Unexpected argument type, expecting `Argument` but got {arg}"
+                )
+            argument: Argument = arg.argument
+            unboxed_name, _, code, decl = self.argumenttype_evalue_convert(
+                argument.type, argument.name, mutable=argument.is_write
+            )
+            code_list.extend(decl)
+            code_list.extend(code)
+            binding_list.append(arg.with_name(unboxed_name))
+        return binding_list, code_list
+
+    def argumenttype_evalue_convert(
+        self, t: Type, arg_name: str, *, mutable: bool = False
+    ) -> Tuple[str, CType, List[str], List[str]]:
+        """
+        Takes in the type, name and mutability corresponding to an argument, and generates a tuple of:
+        (1) the C++ code necessary to unbox the argument
+        (2) A Binding corresponding to the newly created unboxed variable, including variable name and its CType
+        :param t: a `Type` of an argument
+        :param arg_name: argument name
+        :param mutable: boolean for whether this argument type is mutable
+        :return: unboxed result
+        """
+        ctype = self.argument_type_gen(t, mutable=mutable, binds=arg_name).type
+
+        if isinstance(t, BaseType):
+            out_name = f"{arg_name}_base"
+            code, decl = self._gen_code_base_type(
+                arg_name=arg_name, out_name=out_name, ctype=ctype
+            )
+        elif isinstance(t, OptionalType):
+            out_name = f"{arg_name}_opt_out"
+            code, decl = self._gen_code_optional_type(
+                arg_name=arg_name, out_name=out_name, t=t, ctype=ctype
+            )
+        elif isinstance(t, ListType):
+            out_name = f"{arg_name}_list_out"
+            code, decl = self._gen_code_list_type(
+                arg_name=arg_name, out_name=out_name, t=t, ctype=ctype
+            )
+        else:
+            raise Exception(f"Cannot handle type {t}. arg_name: {arg_name}")
+        return out_name, ctype, code, decl
+
+    def _gen_code_base_type(
+        self, arg_name: str, out_name: str, ctype: CType
+    ) -> Tuple[List[str], List[str]]:
+        return [
+            f"{ctype.cpp_type()} {out_name} = {arg_name}.to<{ctype.cpp_type(strip_ref=True)}>();"
+        ], []
+
+    def _gen_code_optional_type(
+        self, arg_name: str, out_name: str, t: OptionalType, ctype: CType
+    ) -> Tuple[List[str], List[str]]:
+        in_name = f"{arg_name}_opt_in"
+        res_name, base_type, res_code, decl = self.argumenttype_evalue_convert(
+            t.elem, in_name
+        )
+        return (
+            f"""
+    {ctype.cpp_type(strip_ref=True)} {out_name} = {arg_name}.toOptional<{base_type.cpp_type(strip_ref=True)}>();
+            """.split(
+                "\n"
+            ),
+            decl,
+        )
+
+    def _gen_code_list_type(
+        self, arg_name: str, out_name: str, t: ListType, ctype: CType
+    ) -> Tuple[List[str], List[str]]:
+        in_name = f"{arg_name}_list_in"
+        elem_name = f"{arg_name}_elem"
+        code = []
+        res_name, res_ctype, res_code, decl = self.argumenttype_evalue_convert(
+            t.elem, elem_name
+        )
+
+        if isinstance(t.elem, BaseType) and t.elem.name == BaseTy.Tensor:
+            code.extend(
+                f"""
+    {ctype.cpp_type(strip_ref=True)} {out_name} = {arg_name}.toTensorList();
+                """.split(
+                    "\n"
+                )
+            )
+        elif isinstance(t.elem, BaseType) and (
+            t.elem.name == BaseTy.int or t.elem.name == BaseTy.SymInt
+        ):
+            code.extend(
+                f"""
+    {ctype.cpp_type(strip_ref=True)} {out_name} = {arg_name}.toIntList();
+                """.split(
+                    "\n"
+                )
+            )
+        elif isinstance(t.elem, BaseType) and t.elem.name == BaseTy.float:
+            code.extend(
+                f"""
+    {ctype.cpp_type(strip_ref=True)} {out_name} = {arg_name}.toDoubleList();
+                """.split(
+                    "\n"
+                )
+            )
+        elif isinstance(t.elem, BaseType) and t.elem.name == BaseTy.bool:
+            # handle list type with size, e.g., bool[4]
+            code.extend(
+                f"""
+    {ctype.cpp_type(strip_ref=True)} {out_name} = {arg_name}.toBoolList();
+                """.split(
+                    "\n"
+                )
+            )
+        # pytorch codegen:
+        # we have to use c10::List for optional element. e.g., Tensor?[] -> c10::List<c10::optional<at::Tensor>>
+        elif (
+            isinstance(t.elem, OptionalType)
+            and isinstance(t.elem.elem, BaseType)
+            and t.elem.elem.name == BaseTy.Tensor
+        ):
+            code.extend(
+                f"""
+#ifdef USE_ATEN_LIB
+at::ArrayRef<c10::optional<at::Tensor>> {in_name} = {arg_name}.toListOptionalTensor();
+c10::List<c10::optional<at::Tensor>> {out_name};
+for (auto {elem_name}: {in_name}) {{
+    {out_name}.push_back({elem_name});
+}}
+#else
+torch::executor::ArrayRef<torch::executor::optional<torch::executor::Tensor>> {out_name} = {arg_name}.toListOptionalTensor();
+#endif
+                """.split(
+                    "\n"
+                )
+            )
+        else:
+            # use ArrayRef as default.
+            vec_name = arg_name + "_vec"
+            # need to bring vector instantiation out of scope so that ArrayRef has valid data
+            decl.append(
+                f"std::vector<{res_ctype.cpp_type(strip_ref=True)}> {vec_name};"
+            )
+            code.extend(
+                f"""
+    for (EValue {elem_name}: {in_name}) {{
+        {connector.join(res_code)}
+        {vec_name}.push_back({res_name});
+    }}
+    {ctype.cpp_type(strip_ref=True)} {out_name}({vec_name});
+                """.split(
+                    "\n"
+                )
+            )
+        return code, decl
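The Unboxing class is parameterized over a type-translation callable, which for Executorch is et_cpp.argumenttype_type from the file added earlier in this commit. A minimal sketch of converting a single Tensor argument, matching the mul.out sample in the docstring (assuming torchgen is importable):

    from torchgen.executorch.api import et_cpp
    from torchgen.executorch.api.unboxing import Unboxing
    from torchgen.model import BaseTy, BaseType

    unboxer = Unboxing(argument_type_gen=et_cpp.argumenttype_type)
    out_name, ctype, code, decl = unboxer.argumenttype_evalue_convert(
        BaseType(BaseTy.Tensor), "self", mutable=False
    )
    # out_name == "self_base"; code holds a line like:
    #   const torch::executor::Tensor & self_base = self.to<torch::executor::Tensor>();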
venv/lib/python3.10/site-packages/torchgen/executorch/model.py ADDED
@@ -0,0 +1,220 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Represents all kernels used by an Executorch model.
2
+ # It maintains a Dict[OperatorName, Dict[ETKernelKey, BackendMetadata]] structure.
3
+
4
+ import itertools
5
+ from collections import defaultdict, namedtuple
6
+ from dataclasses import dataclass
7
+ from enum import IntEnum
8
+ from typing import Dict, List, Tuple, Union
9
+
10
+ from torchgen.model import (
11
+ BackendIndex,
12
+ BackendMetadata,
13
+ DispatchKey,
14
+ NativeFunction,
15
+ NativeFunctionsGroup,
16
+ OperatorName,
17
+ )
18
+ from torchgen.utils import assert_never
19
+
20
+ KERNEL_KEY_VERSION = 1
21
+
22
+
23
+ # TODO: Duplicated Subset from codegen.tool.gen_oplist, remove declaration in codegen
24
+ class ScalarType(IntEnum):
25
+ Byte = 0
26
+ Char = 1
27
+ Short = 2
28
+ Int = 3
29
+ Long = 4
30
+ Float = 6
31
+ Double = 7
32
+ Bool = 11
33
+
34
+
35
+ ETParsedYaml = namedtuple("ETParsedYaml", ["native_functions", "kernel_index"])
36
+
37
+
38
+ @dataclass(frozen=True)
39
+ class ETKernelKeyOpArgMeta:
40
+ arg_name: str
41
+ dtype: str
42
+ # The order of the dimensions if entry is a Tensor
43
+ dim_order: Tuple[int, ...]
44
+
45
+ def to_native_string(self) -> str:
46
+ dtype_str = ScalarType[self.dtype].value
47
+ dim_str = str(self.dim_order)[1:-1].replace(" ", "")
48
+ return f"{dtype_str};{dim_str}"
49
+
50
+
51
+ @dataclass(frozen=True)
52
+ class ETKernelKey:
53
+ # Field undefined is default = True
54
+ arg_meta: Tuple[ETKernelKeyOpArgMeta, ...] = ()
55
+
56
+ # Indicator for this kernel being used as a catch all
57
+ default: bool = False
58
+
59
+ version: int = KERNEL_KEY_VERSION
60
+
61
+ @staticmethod
62
+ def gen_from_yaml(
63
+ args: Dict[str, Tuple[str, str]],
64
+ type_alias_map: Dict[str, List[str]], # TODO: Support unwrapped str val
65
+ dim_order_alias_map: Dict[str, List[int]],
66
+ ) -> List["ETKernelKey"]:
67
+ """Generate ETKernelKeys from arg kernel specs
68
+ Multiple ETKernelKeys are returned due to dtype permutations from utilizing
69
+ type_alias_map (actualizing each potential type permutation as a KernelKey)
70
+
71
+ Args:
72
+ args: Mapping from argument name to kernel specs
73
+ Kernel specs are a tuple of (dtype, dim_order).
74
+ Currently tuple entries must be aliased via the alias map arguments
75
+ type_alias_map: Mapping from type alias to potential type enums
76
+ i.e { T0 : [Double, Int] } means T0 can be either Double or Int
77
+ Used for lookup by args
78
+ dim_order_alias_map: Mapping from alias to a list of dimension orders
79
+ Used for lookup by args
80
+ """
81
+ # Cast to dim order to int
82
+ dim_order_alias_map = {
83
+ k: [int(alias) for alias in v] for k, v in dim_order_alias_map.items()
84
+ }
85
+ kernel_keys = []
86
+
87
+ # Get all used Dtype Alias
88
+ dtype_alias_used = set()
89
+ for type_alias, dim_order in args.values():
90
+ # Enforce usage of alias initially
91
+ # TODO: Support inlined arguments
92
+ assert type_alias in type_alias_map, "Undefined type alias: " + str(
93
+ type_alias
94
+ )
95
+ assert (
96
+ dim_order in dim_order_alias_map
97
+ ), "Undefined dim_order alias: " + str(dim_order)
98
+ dtype_alias_used.add(type_alias)
99
+
100
+ # Generate all permutations of dtype alias values
101
+ alias_dtypes = [
102
+ [(alias, dtype) for dtype in type_alias_map[alias]]
103
+ for alias in dtype_alias_used
104
+ ]
105
+ alias_permutations = [
106
+ dict(permutation) for permutation in list(itertools.product(*alias_dtypes))
107
+ ]
108
+
109
+ # Using each alias value permutation, generate kernel keys
110
+ op_arg_cache = {}
111
+ for permutation in alias_permutations:
112
+ arg_list = []
113
+ for arg_name, arg_spec in args.items():
114
+ dtype = permutation[arg_spec[0]]
115
+ dim_order = dim_order_alias_map[arg_spec[1]] # type: ignore[assignment]
116
+ if (
117
+ cache_key := (arg_name, dtype, tuple(dim_order))
118
+ ) not in op_arg_cache:
119
+ op_arg_cache[cache_key] = ETKernelKeyOpArgMeta(*cache_key) # type: ignore[arg-type]
120
+
121
+ arg_list.append(op_arg_cache[cache_key])
122
+ kernel_keys.append(ETKernelKey(tuple(arg_list)))
123
+
124
+ return kernel_keys
125
+
126
+ def to_native_string(self) -> str:
127
+ if self.default:
128
+ return "default"
129
+ return (
130
+ "v"
131
+ + str(KERNEL_KEY_VERSION)
132
+ + "/"
133
+ + "|".join([arg.to_native_string() for arg in self.arg_meta])
134
+ )
135
+
136
+
137
+ @dataclass(frozen=True)
138
+ class ETKernelIndex:
139
+ index: Dict[OperatorName, Dict[ETKernelKey, BackendMetadata]]
140
+
141
+ def has_kernels(self, g: Union[NativeFunction, NativeFunctionsGroup]) -> bool:
142
+ m = self.get_kernels(g)
143
+ return m is not None
144
+
145
+ def get_kernels(
146
+ self, g: Union[NativeFunction, NativeFunctionsGroup]
147
+ ) -> Dict[ETKernelKey, BackendMetadata]:
148
+ if isinstance(g, NativeFunction):
149
+ f = g
150
+ elif isinstance(g, NativeFunctionsGroup):
151
+ f = g.functional
152
+ else:
153
+ assert_never(g)
154
+ if f.func.name not in self.index:
155
+ return {}
156
+ return self.index[f.func.name]
157
+
158
+ @staticmethod
159
+ def grow_from_backend_indices(
160
+ kernel_index: Dict[OperatorName, Dict[ETKernelKey, BackendMetadata]],
161
+ backend_indices: Dict[DispatchKey, Dict[OperatorName, BackendMetadata]],
162
+ ) -> None:
163
+ for dk in backend_indices:
164
+ index = backend_indices[dk]
165
+ for op, backend_metadata in index.items():
166
+ if op in kernel_index:
167
+ kernel_index[op][ETKernelKey(default=True)] = backend_metadata
168
+ else:
169
+ kernel_index[op] = {ETKernelKey(default=True): backend_metadata}
170
+
171
+ @staticmethod
172
+ def from_backend_indices(
173
+ backend_indices: Dict[DispatchKey, Dict[OperatorName, BackendMetadata]]
174
+ ) -> "ETKernelIndex":
175
+ kernel_index: Dict[
176
+ OperatorName, Dict[ETKernelKey, BackendMetadata]
177
+ ] = defaultdict(dict)
178
+ ETKernelIndex.grow_from_backend_indices(kernel_index, backend_indices)
179
+ return ETKernelIndex(kernel_index)
180
+
181
+ def grow(
182
+ self, backend_indices: Dict[DispatchKey, Dict[OperatorName, BackendMetadata]]
183
+ ) -> "ETKernelIndex":
184
+ ETKernelIndex.grow_from_backend_indices(self.index, backend_indices)
185
+ return self
186
+
187
+ def _to_backend_index(self) -> BackendIndex:
188
+ """
189
+ WARNING: this will be deprecated once all the codegen places know how to handle ETKernelIndex.
190
+ """
191
+ index: Dict[OperatorName, BackendMetadata] = {}
192
+ for op in self.index:
193
+ kernel_dict = self.index[op]
194
+             assert (
+                 len(kernel_dict.values()) == 1
+             ), f"Can't convert ETKernelIndex to BackendIndex because {op} has more than one kernel. Got {kernel_dict}"
+             index[op] = kernel_dict.get(
+                 ETKernelKey(default=True),
+                 BackendMetadata(kernel="", structured=False, cpp_namespace=""),
+             )
+         return BackendIndex(
+             dispatch_key=DispatchKey.CPU,
+             use_out_as_primary=False,
+             device_guard=False,
+             external=False,
+             index=index,
+         )
+ 
+     # Note: a duplicate ETKernelKey from index_b will clobber the metadata from index_a
+     @staticmethod
+     def merge_indices(
+         index_a: "ETKernelIndex", index_b: "ETKernelIndex"
+     ) -> "ETKernelIndex":
+         combined = defaultdict(dict, index_a.index.copy())
+ 
+         for op, entry in index_b.index.items():
+             for key, metadata in entry.items():
+                 combined[op][key] = metadata
+ 
+         return ETKernelIndex(combined)
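A minimal usage sketch of the index type above, assuming an invented operator and kernel name (only `ETKernelKey`, `ETKernelIndex`, `BackendMetadata`, and `OperatorName.parse` are the real torchgen types used in this file):

    from torchgen.executorch.model import ETKernelIndex, ETKernelKey
    from torchgen.model import BackendMetadata, OperatorName

    # Placeholder operator and kernel metadata
    op = OperatorName.parse("add.out")
    meta = BackendMetadata(kernel="add_out_impl", structured=False, cpp_namespace="custom::native")
    index_a = ETKernelIndex({op: {ETKernelKey(default=True): meta}})
    index_b = ETKernelIndex({op: {ETKernelKey(default=True): meta}})
    # On duplicate kernel keys, entries from index_b clobber index_a (see the note above)
    merged = ETKernelIndex.merge_indices(index_a, index_b)
    assert merged.index[op][ETKernelKey(default=True)] is meta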
venv/lib/python3.10/site-packages/torchgen/executorch/parse.py ADDED
@@ -0,0 +1,151 @@
+ from collections import defaultdict, namedtuple
+ from typing import Any, Dict, List, Optional, Set, Tuple
+ 
+ import yaml
+ 
+ from torchgen.executorch.model import ETKernelIndex, ETKernelKey
+ 
+ from torchgen.gen import LineLoader, parse_native_yaml
+ from torchgen.model import (
+     BackendMetadata,
+     DispatchKey,
+     FunctionSchema,
+     NativeFunction,
+     OperatorName,
+ )
+ from torchgen.utils import NamespaceHelper
+ 
+ # Parse native_functions.yaml into a sequence of NativeFunctions and ET Backend Indices.
+ ETParsedYaml = namedtuple("ETParsedYaml", ["native_functions", "et_kernel_indices"])
+ 
+ # Fields in native_functions.yaml used to determine which kernels should be used
+ ET_FIELDS = ["kernels", "type_alias", "dim_order_alias"]
+ 
+ 
+ def parse_from_yaml(ei: Dict[str, object]) -> Dict[ETKernelKey, BackendMetadata]:
+     """Given a loaded yaml representing kernel assignment information, extract the
+     mapping from `kernel keys` to `BackendMetadata` (the latter representing the
+     kernel instance).
+ 
+     Args:
+         ei: Dict keys {kernels, type_alias, dim_order_alias}
+             See ETKernelKey for description of arguments
+     """
+     e = ei.copy()
+     if (kernels := e.pop("kernels", None)) is None:
+         return {}
+ 
+     type_alias: Dict[str, List[str]] = e.pop("type_alias", {})  # type: ignore[assignment]
+     dim_order_alias: Dict[str, List[str]] = e.pop("dim_order_alias", {})  # type: ignore[assignment]
+     dim_order_alias.pop("__line__", None)
+ 
+     kernel_mapping: Dict[ETKernelKey, BackendMetadata] = {}
+ 
+     for entry in kernels:  # type: ignore[attr-defined]
+         arg_meta = entry.get("arg_meta")
+         if arg_meta is not None:
+             arg_meta.pop("__line__")
+ 
+         kernel_name = entry.get("kernel_name")
+         namespace_helper = NamespaceHelper.from_namespaced_entity(
+             kernel_name, max_level=3
+         )
+         kernel_namespace = namespace_helper.get_cpp_namespace(default="at")
+         backend_metadata = BackendMetadata(
+             kernel=namespace_helper.entity_name,
+             structured=False,
+             cpp_namespace=(kernel_namespace + "::native"),
+         )
+ 
+         kernel_keys = (
+             [ETKernelKey((), default=True)]
+             if arg_meta is None
+             else ETKernelKey.gen_from_yaml(arg_meta, type_alias, dim_order_alias)  # type: ignore[arg-type]
+         )
+ 
+         for kernel_key in kernel_keys:
+             assert kernel_key not in kernel_mapping, (
+                 "Duplicate kernel key: " + str(kernel_key) + " " + str(e)
+             )
+             kernel_mapping[kernel_key] = backend_metadata
+ 
+     return kernel_mapping
+ 
+ 
+ def parse_et_yaml_struct(es: object) -> ETKernelIndex:
+     """Given a loaded yaml representing a list of operators, for each op extract
+     the mapping of `kernel keys` to `BackendMetadata` (the latter representing
+     the kernel instance that should be used by the kernel key).
+     """
+     indices: Dict[OperatorName, Dict[ETKernelKey, BackendMetadata]] = {}
+     for ei in es:  # type: ignore[attr-defined]
+         e = ei.copy()
+ 
+         funcs = e.pop("func")
+         assert isinstance(funcs, str), f"not a str: {funcs}"
+         namespace_helper = NamespaceHelper.from_namespaced_entity(
+             namespaced_entity=funcs, max_level=1
+         )
+         opname = FunctionSchema.parse(namespace_helper.entity_name).name
+ 
+         assert opname not in indices, f"Duplicate func found in yaml: {opname}"
+ 
+         if len(index := parse_from_yaml(e)) != 0:
+             indices[opname] = index
+ 
+     return ETKernelIndex(indices)
+ 
+ 
+ def extract_kernel_fields(es: object) -> Dict[OperatorName, Dict[str, Any]]:
+     """Given a loaded yaml representing a list of operators, extract the
+     kernel key related fields indexed by the operator name.
+     """
+     fields: Dict[OperatorName, Dict[str, Any]] = defaultdict(dict)
+     for ei in es:  # type: ignore[attr-defined]
+         funcs = ei.get("func")
+         assert isinstance(funcs, str), f"not a str: {funcs}"
+         namespace_helper = NamespaceHelper.from_namespaced_entity(
+             namespaced_entity=funcs, max_level=1
+         )
+         opname = FunctionSchema.parse(namespace_helper.entity_name).name
+ 
+         for field in ET_FIELDS:
+             if (value := ei.get(field)) is not None:
+                 fields[opname][field] = value
+ 
+     return fields
+ 
+ 
+ def parse_et_yaml(
+     path: str,
+     tags_yaml_path: str,
+     ignore_keys: Optional[Set[DispatchKey]] = None,
+     skip_native_fns_gen: bool = False,
+ ) -> Tuple[List[NativeFunction], Dict[OperatorName, Dict[str, Any]]]:
+     """Parse native_functions.yaml into NativeFunctions and an operator-indexed
+     dict of fields to persist from native_functions.yaml to functions.yaml.
+     """
+     with open(path) as f:
+         es = yaml.load(f, Loader=LineLoader)
+ 
+     et_kernel = extract_kernel_fields(es)
+ 
+     # Remove ET-specific fields from entries for backward compatibility
+     strip_et_fields(es)
+ 
+     native_yaml = parse_native_yaml(
+         path,
+         tags_yaml_path,
+         ignore_keys,
+         skip_native_fns_gen=skip_native_fns_gen,
+         loaded_yaml=es,
+     )
+     return native_yaml.native_functions, et_kernel
+ 
+ 
+ def strip_et_fields(es: object) -> None:
+     """Given a loaded yaml representing a list of operators,
+     remove ET-specific fields from every entry for backward compatibility.
+     """
+     for entry in es:  # type: ignore[attr-defined]
+         for field in ET_FIELDS:
+             entry.pop(field, None)
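A minimal usage sketch of the entry points above; the yaml paths are placeholders for a real native_functions.yaml / tags.yaml pair:

    import yaml
    from torchgen.executorch.parse import parse_et_yaml, parse_et_yaml_struct
    from torchgen.gen import LineLoader

    # High-level: NativeFunctions plus the ET kernel fields to persist
    native_functions, kernel_fields = parse_et_yaml("native_functions.yaml", "tags.yaml")

    # Lower-level: build an ETKernelIndex straight from the loaded entries
    with open("native_functions.yaml") as f:
        es = yaml.load(f, Loader=LineLoader)
    kernel_index = parse_et_yaml_struct(es)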
venv/lib/python3.10/site-packages/torchgen/operator_versions/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/torchgen/operator_versions/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (194 Bytes).
 
venv/lib/python3.10/site-packages/torchgen/operator_versions/__pycache__/gen_mobile_upgraders.cpython-310.pyc ADDED
Binary file (9.84 kB).
 
venv/lib/python3.10/site-packages/torchgen/operator_versions/__pycache__/gen_mobile_upgraders_constant.cpython-310.pyc ADDED
Binary file (453 Bytes).
 
venv/lib/python3.10/site-packages/torchgen/operator_versions/gen_mobile_upgraders.py ADDED
@@ -0,0 +1,392 @@
+ #!/usr/bin/env python3
+ import os
+ from enum import Enum
+ from pathlib import Path
+ from typing import Any, Dict, List
+ 
+ import torch
+ from torch.jit.generate_bytecode import generate_upgraders_bytecode
+ 
+ from torchgen.code_template import CodeTemplate
+ from torchgen.operator_versions.gen_mobile_upgraders_constant import (
+     MOBILE_UPGRADERS_HEADER_DESCRIPTION,
+ )
+ 
+ 
+ class ByteCode(Enum):
+     instructions = 1
+     constants = 2
+     types = 3
+     operators = 4
+     register_size = 5
+ 
+ 
+ EXCLUDED_OP_SET = [
+     "aten::full.names",
+     "aten::full.out",
+     "aten::full",
+ ]
+ 
+ EXCLUDE_UPGRADER_SET = ["full_0_4", "full_out_0_4"]
+ 
+ ONE_INSTRUCTION = CodeTemplate(
+     """
+     Instruction{OpCode::${operator_name}, ${X}, ${N}},"""
+ )
+ 
+ INSTRUCTION_LIST = CodeTemplate(
+     """std::vector<Instruction>({
+         ${instruction_list}
+     }), // instructions list"""
+ )
+ 
+ ONE_CONSTANT = CodeTemplate(
+     """
+     c10::IValue(${constant}),"""
+ )
+ 
+ CONSTANT_LIST = CodeTemplate(
+     """std::vector<c10::IValue>({
+         ${constant_list}
+     }), // constants list"""
+ )
+ 
+ CONSTANTS_LIST_EMPTY = """std::vector<c10::IValue>(), // constants list"""
+ 
+ ONE_TYPE = CodeTemplate("""c10::parseType("${type_str}"),""")
+ 
+ TYPE_LIST = CodeTemplate(
+     """std::vector<c10::TypePtr>({
+         ${type_list}
+     }), // types list"""
+ )
+ 
+ TYPE_LIST_EMPTY = """std::vector<c10::TypePtr>(), // types list"""
+ 
+ ONE_OPERATOR_STRING = CodeTemplate(
+     """
+     OperatorString({"${operator_name}", "${overload_name}", ${num_of_args}}),"""
+ )
+ 
+ OPERATOR_STRING_LIST = CodeTemplate(
+     """
+     std::vector<OperatorString>({
+         ${operator_string_list}
+     }), // operators list"""
+ )
+ 
+ ONE_UPGRADER_FUNCTION = CodeTemplate(
+     """
+     mobile::Function::registerFunc(
+         "${upgrader_name}",
+         ${instruction_list},
+         ${constant_list},
+         ${type_list},
+         ${register_size}
+     )"""
+ )
+ 
+ ONE_UPGRADER_SRC = CodeTemplate(
+     """
+     ByteCodeFunctionWithOperator({
+         ${bytecode_function},
+         ${operator_string_list}
+     }),"""
+ )
+ 
+ 
+ ONE_UPGRADER_IN_VERSION_MAP = CodeTemplate(
+     """Upgrader({${upgrader_min_version}, ${upgrader_max_version}, "${upgrader_name}", ${bytecode_func_index}})"""
+ )  # noqa: E501
+ 
+ ONE_OPERATOR_IN_VERSION_MAP = CodeTemplate(
+     """
+     {std::string("${operator_name}"),
+         std::vector<Upgrader>({
+             ${upgrader_list_in_version_map}
+         })},"""
+ )
+ 
+ 
+ OPERATOR_VERSION_MAP = CodeTemplate(
+     """
+ const std::unordered_map<std::string, std::vector<Upgrader>>
+ getOperatorVersionMapForMobile() {
+   static std::unordered_map<std::string, std::vector<Upgrader>>
+       operatorVersionMapForMobile({
+           ${operator_list_in_version_map}
+       });
+   return operatorVersionMapForMobile;
+ }
+ """
+ )
+ 
+ 
+ UPGRADER_CPP_SRC = CodeTemplate(
+     MOBILE_UPGRADERS_HEADER_DESCRIPTION
+     + """
+ #include <caffe2/serialize/versions.h>
+ #include <torch/csrc/jit/mobile/upgrader_mobile.h>
+ 
+ namespace c10 {
+ TypePtr parseType(const std::string& pythonStr);
+ } // namespace c10
+ 
+ namespace torch {
+ namespace jit {
+ 
+ // clang-format off
+ 
+ // From operator_versions_map
+ ${operator_version_map}
+ 
+ const std::vector<ByteCodeFunctionWithOperator>& getUpgraderBytecodeList() {
+   auto generate_upgrader_bytecode_list = []() {
+     std::vector<ByteCodeFunctionWithOperator> upgrader_function_list({
+         ${upgrader_bytecode}
+     });
+     for (const auto& upgrader_function : upgrader_function_list) {
+       for (const auto& op : upgrader_function.operators) {
+         upgrader_function.function.append_operator(
+             op.name,
+             op.overload_name,
+             op.num_specified_args);
+       }
+     }
+     return upgrader_function_list;
+   };
+   static std::vector<ByteCodeFunctionWithOperator> upgraderBytecodeList =
+       generate_upgrader_bytecode_list();
+   return upgraderBytecodeList;
+ }
+ 
+ // clang-format on
+ 
+ } // namespace jit
+ } // namespace torch
+ """
+ )
+ 
+ UPGRADER_MOBILE_FILE_NAME = "upgrader_mobile.cpp"
+ 
+ UPGRADER_ELEMENT = CodeTemplate(
+     """\
+ Upgrader({${min_version}, ${max_version}, ${operator_name}, ${index}}),
+ """
+ )
+ 
+ PER_OPERATOR_UPGRADER_LIST = CodeTemplate(
+     """\
+ {
+     std::string(${operator_name}),
+     std::vector<Upgrader>({${upgrader_list}});
+ }
+ """
+ )
+ 
+ 
+ def construct_instruction(instruction_list_from_yaml: List[Any]) -> str:
+     instruction_list_part = []
+     for instruction in instruction_list_from_yaml:
+         instruction_list_part.append(
+             ONE_INSTRUCTION.substitute(
+                 operator_name=instruction[0],
+                 X=instruction[1],
+                 N=instruction[2],
+             )
+         )
+     return INSTRUCTION_LIST.substitute(
+         instruction_list="".join(instruction_list_part).lstrip("\n")
+     )
+ 
+ 
+ def construct_constants(constants_list_from_yaml: List[Any]) -> str:
+     constants_list_part = []
+     for constant_from_yaml in constants_list_from_yaml:
+         convert_constant = None
+         if isinstance(constant_from_yaml, str):
+             # Add quotes if it's a string
+             convert_constant = f'"{constant_from_yaml}"'
+         elif isinstance(constant_from_yaml, bool):
+             convert_constant = "true" if constant_from_yaml else "false"
+         elif constant_from_yaml is None:
+             convert_constant = ""
+         elif isinstance(constant_from_yaml, int):
+             convert_constant = str(constant_from_yaml)
+         else:
+             raise ValueError(
+                 f"The type of {constant_from_yaml} is {type(constant_from_yaml)}. "
+                 "Please add a handler for it in construct_constants in gen_mobile_upgraders.py."
+             )
+         constants_list_part.append(ONE_CONSTANT.substitute(constant=convert_constant))
+     if len(constants_list_part) == 0:
+         return CONSTANTS_LIST_EMPTY
+     return CONSTANT_LIST.substitute(
+         constant_list="".join(constants_list_part).lstrip("\n")
+     )
+ 
+ 
+ def construct_operators(operator_list_from_yaml: List[Any]) -> str:
+     operator_list_part = []
+     for operator in operator_list_from_yaml:
+         operator_list_part.append(
+             ONE_OPERATOR_STRING.substitute(
+                 operator_name=operator[0],
+                 overload_name=operator[1],
+                 num_of_args=operator[2],
+             )
+         )
+     return OPERATOR_STRING_LIST.substitute(
+         operator_string_list="".join(operator_list_part).lstrip("\n")
+     )
+ 
+ 
+ def construct_types(types_tr_list_from_yaml: List[Any]) -> str:
+     types_tr_list_part = []
+     for types_tr in types_tr_list_from_yaml:
+         types_tr_list_part.append(ONE_TYPE.substitute(type_str=types_tr))
+     if len(types_tr_list_part) == 0:
+         return TYPE_LIST_EMPTY
+     return TYPE_LIST.substitute(type_list="".join(types_tr_list_part).lstrip("\n"))
+ 
+ 
+ def construct_register_size(register_size_from_yaml: int) -> str:
+     if not isinstance(register_size_from_yaml, int):
+         raise ValueError(
+             f"Input register size is {register_size_from_yaml} and "
+             f"its type is {type(register_size_from_yaml)}. An int type is expected."
+         )
+     return str(register_size_from_yaml)
+ 
+ 
+ def construct_version_maps(
+     upgrader_bytecode_function_to_index_map: Dict[str, Any]
+ ) -> str:
+     version_map = torch._C._get_operator_version_map()
+     sorted_version_map_ = sorted(version_map.items(), key=lambda item: item[0])  # type: ignore[no-any-return]
+     sorted_version_map = dict(sorted_version_map_)
+ 
+     operator_list_in_version_map_part = []
+     for op_name in sorted_version_map:
+         upgraders_in_version_map_part = []
+         # TODO: remove the skip once these two operators' schemas are fixed
+         if op_name in EXCLUDED_OP_SET:
+             continue
+         upgrader_ranges = torch._C._get_upgrader_ranges(op_name)
+         upgrader_entries = sorted_version_map[op_name]
+         assert len(upgrader_ranges) == len(upgrader_entries)
+         for idx, upgrader_entry in enumerate(upgrader_entries):
+             upgrader_name = upgrader_entry.upgrader_name
+             bytecode_function_index = upgrader_bytecode_function_to_index_map[
+                 upgrader_name
+             ]
+             upgraders_in_version_map_part.append(
+                 ONE_UPGRADER_IN_VERSION_MAP.substitute(
+                     upgrader_min_version=upgrader_ranges[idx].min_version,
+                     upgrader_max_version=upgrader_ranges[idx].max_version,
+                     upgrader_name=upgrader_name,
+                     bytecode_func_index=bytecode_function_index,
+                 )
+             )
+         operator_list_in_version_map_part.append(
+             ONE_OPERATOR_IN_VERSION_MAP.substitute(
+                 operator_name=op_name,
+                 upgrader_list_in_version_map="".join(upgraders_in_version_map_part),
+             )
+         )
+     return OPERATOR_VERSION_MAP.substitute(
+         operator_list_in_version_map="".join(operator_list_in_version_map_part).lstrip(
+             "\n"
+         )
+     )
+ 
+ 
+ def get_upgrader_bytecode_function_to_index_map(
+     upgrader_dict: List[Dict[str, Any]]
+ ) -> Dict[str, Any]:
+     upgrader_bytecode_function_to_index_map = {}
+     index = 0
+     for upgrader_bytecode in upgrader_dict:
+         for upgrader_name in upgrader_bytecode.keys():
+             if upgrader_name in EXCLUDE_UPGRADER_SET:
+                 continue
+             upgrader_bytecode_function_to_index_map[upgrader_name] = index
+             index += 1
+     return upgrader_bytecode_function_to_index_map
+ 
+ 
+ def write_cpp(cpp_path: str, upgrader_dict: List[Dict[str, Any]]) -> None:
+     body_parts = []
+     upgrader_bytecode_function_to_index_map = (
+         get_upgrader_bytecode_function_to_index_map(upgrader_dict)
+     )
+     version_map_src = construct_version_maps(upgrader_bytecode_function_to_index_map)
+     all_upgrader_src_string = []
+     for upgrader_bytecode in upgrader_dict:
+         for upgrader_name, bytecode in upgrader_bytecode.items():
+             # TODO: remove the skip once these two operators' schemas are fixed
+             if upgrader_name in EXCLUDE_UPGRADER_SET:
+                 continue
+             instruction_list_str = ""
+             constant_list_str = ""
+             type_list_str = ""
+             register_size_str = ""
+             operator_list_str = ""
+             for table_name, contents in bytecode.items():
+                 element = ByteCode[table_name]
+                 if element is ByteCode.instructions:
+                     instruction_list_str = construct_instruction(contents)
+                 elif element is ByteCode.constants:
+                     constant_list_str = construct_constants(contents)
+                 elif element is ByteCode.operators:
+                     operator_list_str = construct_operators(contents)
+                 elif element is ByteCode.types:
+                     type_list_str = construct_types(contents)
+                 elif element is ByteCode.register_size:
+                     register_size_str = construct_register_size(contents)
+ 
+             one_upgrader_function_string = ONE_UPGRADER_FUNCTION.substitute(
+                 upgrader_name=upgrader_name,
+                 instruction_list=instruction_list_str,
+                 constant_list=constant_list_str,
+                 type_list=type_list_str,
+                 register_size=register_size_str,
+             )
+             one_upgrader_src_string = ONE_UPGRADER_SRC.substitute(
+                 bytecode_function=one_upgrader_function_string.lstrip("\n"),
+                 operator_string_list=operator_list_str.lstrip("\n"),
+             )
+             all_upgrader_src_string.append(one_upgrader_src_string)
+ 
+     upgrader_file_content = UPGRADER_CPP_SRC.substitute(
+         operator_version_map=version_map_src,
+         upgrader_bytecode="".join(all_upgrader_src_string).lstrip("\n"),
+     )
+     body_parts.append(upgrader_file_content)
+     print("writing file to:", os.path.join(cpp_path, UPGRADER_MOBILE_FILE_NAME))
+     with open(os.path.join(cpp_path, UPGRADER_MOBILE_FILE_NAME), "wb") as out_file:
+         final_output = "".join(body_parts)
+         out_file.write(final_output.encode("utf-8"))
+ 
+ 
+ def sort_upgrader(upgrader_list: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
+     sorted_upgrader_list = sorted(
+         upgrader_list, key=lambda one_upgrader: next(iter(one_upgrader))
+     )
+     return sorted_upgrader_list
+ 
+ 
+ def main() -> None:
+     upgrader_list = generate_upgraders_bytecode()
+     sorted_upgrader_list = sort_upgrader(upgrader_list)
+     for up in sorted_upgrader_list:
+         print("after sort upgrader:", next(iter(up)))
+ 
+     pytorch_dir = Path(__file__).resolve().parents[2]
+     upgrader_path = pytorch_dir / "torch" / "csrc" / "jit" / "mobile"
+     write_cpp(str(upgrader_path), sorted_upgrader_list)
+ 
+ 
+ if __name__ == "__main__":
+     main()
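To make the emitted C++ concrete, a small sketch of construct_constants from the script above (input values invented; exact whitespace depends on the templates):

    from torchgen.operator_versions.gen_mobile_upgraders import construct_constants

    # Strings are quoted, bools lowered to true/false, ints stringified
    print(construct_constants([4, False, "none"]))
    # Prints, approximately:
    #   std::vector<c10::IValue>({
    #       c10::IValue(4),
    #       c10::IValue(false),
    #       c10::IValue("none"),
    #   }), // constants list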
venv/lib/python3.10/site-packages/torchgen/operator_versions/gen_mobile_upgraders_constant.py ADDED
@@ -0,0 +1,7 @@
+ MOBILE_UPGRADERS_HEADER_DESCRIPTION = """/**
+  * @generated
+  * This is an auto-generated file. Please do not modify it by hand.
+  * To re-generate, please run:
+  * cd ~/pytorch && python torchgen/operator_versions/gen_mobile_upgraders.py
+  */
+ """