diff --git a/ckpts/universal/global_step120/zero/15.input_layernorm.weight/fp32.pt b/ckpts/universal/global_step120/zero/15.input_layernorm.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..a88475a348b03b5d440c82a62acbf21a2d3367e5 --- /dev/null +++ b/ckpts/universal/global_step120/zero/15.input_layernorm.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4b11171abcbb32326f56c2eb0db8091bd8a2367cfd1c556f7349591e73dd84e4 +size 9293 diff --git a/ckpts/universal/global_step120/zero/21.mlp.dense_h_to_4h_swiglu.weight/fp32.pt b/ckpts/universal/global_step120/zero/21.mlp.dense_h_to_4h_swiglu.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..276ac3cd1ee95577d86d746e42403879ef589600 --- /dev/null +++ b/ckpts/universal/global_step120/zero/21.mlp.dense_h_to_4h_swiglu.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eef756aaf0216b5a75ad77c8a12da70b98e5d9850cec810be54f27a0b7527fed +size 33555533 diff --git a/ckpts/universal/global_step120/zero/4.mlp.dense_h_to_4h.weight/fp32.pt b/ckpts/universal/global_step120/zero/4.mlp.dense_h_to_4h.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..0ff4e263d78c61fc5743c8af5b39988509401501 --- /dev/null +++ b/ckpts/universal/global_step120/zero/4.mlp.dense_h_to_4h.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e2e19a993f23ee14741479fea4b62fd90d8220342f7f5da3acc8ceafc0942c98 +size 33555533 diff --git a/venv/lib/python3.10/site-packages/tabledata-1.3.3.dist-info/INSTALLER b/venv/lib/python3.10/site-packages/tabledata-1.3.3.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/venv/lib/python3.10/site-packages/tabledata-1.3.3.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/lib/python3.10/site-packages/tabledata-1.3.3.dist-info/LICENSE b/venv/lib/python3.10/site-packages/tabledata-1.3.3.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..c467ac089a1caa6610d1379fe6a48132c15dcd5a --- /dev/null +++ b/venv/lib/python3.10/site-packages/tabledata-1.3.3.dist-info/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2017 Tsuyoshi Hombashi + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
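The checkpoint files added above (for example ckpts/universal/global_step120/zero/15.input_layernorm.weight/fp32.pt) are committed as Git LFS pointer files: three space-separated key/value lines (version, oid, size) that stand in for the actual fp32 tensor blobs, which are fetched separately with `git lfs pull`. Below is a minimal sketch of reading such a pointer, assuming only the layout shown in the hunks above; the parse_lfs_pointer helper name is illustrative and not part of this change.

from pathlib import Path
from typing import Dict

def parse_lfs_pointer(path: str) -> Dict[str, str]:
    """Parse a Git LFS pointer file into a {key: value} dict."""
    fields: Dict[str, str] = {}
    for line in Path(path).read_text().splitlines():
        line = line.strip()
        if not line:
            continue  # skip blank lines
        key, _, value = line.partition(" ")  # e.g. "size 9293" -> ("size", "9293")
        fields[key] = value
    return fields

# Usage (hypothetical): parse_lfs_pointer("ckpts/.../fp32.pt") returns something like
# {"version": "https://git-lfs.github.com/spec/v1", "oid": "sha256:4b1117...", "size": "9293"}.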
diff --git a/venv/lib/python3.10/site-packages/tabledata-1.3.3.dist-info/METADATA b/venv/lib/python3.10/site-packages/tabledata-1.3.3.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..00c2945f053be786fb7e11a525ee5641ae3b3d94 --- /dev/null +++ b/venv/lib/python3.10/site-packages/tabledata-1.3.3.dist-info/METADATA @@ -0,0 +1,102 @@ +Metadata-Version: 2.1 +Name: tabledata +Version: 1.3.3 +Summary: tabledata is a Python library to represent tabular data. Used for pytablewriter/pytablereader/SimpleSQLite/etc. +Home-page: https://github.com/thombashi/tabledata +Author: Tsuyoshi Hombashi +Author-email: tsuyoshi.hombashi@gmail.com +License: MIT License +Project-URL: Changlog, https://github.com/thombashi/tabledata/releases +Project-URL: Documentation, https://tabledata.rtfd.io/ +Project-URL: Source, https://github.com/thombashi/tabledata +Project-URL: Tracker, https://github.com/thombashi/tabledata/issues +Keywords: table +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: Information Technology +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Software Development :: Libraries +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Requires-Python: >=3.7 +Description-Content-Type: text/x-rst +License-File: LICENSE +Requires-Dist: DataProperty <2,>=1.0.1 +Requires-Dist: typepy <2,>=1.2.0 +Provides-Extra: logging +Requires-Dist: loguru <1,>=0.4.1 ; extra == 'logging' +Provides-Extra: test +Requires-Dist: pytablewriter >=0.46 ; extra == 'test' +Requires-Dist: pytest ; extra == 'test' + +.. contents:: **tabledata** + :backlinks: top + :depth: 2 + +Summary +--------- +`tabledata `__ is a Python library to represent tabular data. Used for pytablewriter/pytablereader/SimpleSQLite/etc. + +.. image:: https://badge.fury.io/py/tabledata.svg + :target: https://badge.fury.io/py/tabledata + :alt: PyPI package version + +.. image:: https://img.shields.io/pypi/pyversions/tabledata.svg + :target: https://pypi.org/project/tabledata + :alt: Supported Python versions + +.. image:: https://img.shields.io/pypi/implementation/tabledata.svg + :target: https://pypi.org/project/tabledata + :alt: Supported Python implementations + +.. image:: https://github.com/thombashi/tabledata/actions/workflows/ci.yml/badge.svg + :target: https://github.com/thombashi/tabledata/actions/workflows/ci.yml + :alt: Linux/macOS/Windows CI status + +.. 
image:: https://coveralls.io/repos/github/thombashi/tabledata/badge.svg?branch=master + :target: https://coveralls.io/github/thombashi/tabledata?branch=master + :alt: Test coverage + +Installation +============ + +Install from PyPI +------------------------------ +:: + + pip install tabledata + +Install from PPA (for Ubuntu) +------------------------------ +:: + + sudo add-apt-repository ppa:thombashi/ppa + sudo apt update + sudo apt install python3-tabledata + + +Dependencies +============ +- Python 3.7+ +- `Mandatory Python package dependencies (automatically installed) `__ + +Optional Python packages +------------------------------------------------ +- `loguru `__ + - Used for logging if the package installed +- `pandas `__ + - required to get table data as a pandas data frame + +Documentation +=============== +https://tabledata.rtfd.io/ + diff --git a/venv/lib/python3.10/site-packages/tabledata-1.3.3.dist-info/RECORD b/venv/lib/python3.10/site-packages/tabledata-1.3.3.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..ef0f46d59e3fc059c82a7c256f4fc6d56d1c7378 --- /dev/null +++ b/venv/lib/python3.10/site-packages/tabledata-1.3.3.dist-info/RECORD @@ -0,0 +1,29 @@ +tabledata-1.3.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +tabledata-1.3.3.dist-info/LICENSE,sha256=vrvfBSShR_iaYV__U9eb3JDLx2MVUPtLclzT873NJPY,1074 +tabledata-1.3.3.dist-info/METADATA,sha256=IKxSJeg1Qrr6dSTCJdvnBIiKl6IKCa4aAIC_B4Ngwfg,3657 +tabledata-1.3.3.dist-info/RECORD,, +tabledata-1.3.3.dist-info/WHEEL,sha256=yQN5g4mg4AybRjkgi-9yy4iQEFibGQmlz78Pik5Or-A,92 +tabledata-1.3.3.dist-info/top_level.txt,sha256=wPYCjph2PxB5odPJWPADX_65iL1gAIjMQFlAyZi80iI,10 +tabledata/__init__.py,sha256=OkkMA83NWJOKsmUru4qWiUXrwTxF5jDhHXl_dR2zQBQ,683 +tabledata/__pycache__/__init__.cpython-310.pyc,, +tabledata/__pycache__/__version__.cpython-310.pyc,, +tabledata/__pycache__/_common.cpython-310.pyc,, +tabledata/__pycache__/_constant.cpython-310.pyc,, +tabledata/__pycache__/_converter.cpython-310.pyc,, +tabledata/__pycache__/_core.cpython-310.pyc,, +tabledata/__pycache__/error.cpython-310.pyc,, +tabledata/__pycache__/normalizer.cpython-310.pyc,, +tabledata/__version__.py,sha256=JC4TkyHfH-eP9nAvfI04H3gEbgfItYa1jLE09ARSNSc,201 +tabledata/_common.py,sha256=eB3xHflvbF5p5hz1f5D9xNHQCujy6Uk91NLPTy5fFHY,274 +tabledata/_constant.py,sha256=I763_Fx-9IT_ZQTTncxi04WsXd6tK78z2VBYZ3up5Aw,154 +tabledata/_converter.py,sha256=0H61eirjQw_rs0h1N_APtCthRRFbYkKZVUHK-5-0GAE,895 +tabledata/_core.py,sha256=4y0sLRCEcvjJvqi_pUlhz5qjIass_pZu5FcnK_kpr7U,14530 +tabledata/_logger/__init__.py,sha256=7rkhAj6PGbUI3fouTa7GEzjRelUFj0_UPfzkZ_Yk71g,55 +tabledata/_logger/__pycache__/__init__.cpython-310.pyc,, +tabledata/_logger/__pycache__/_logger.cpython-310.pyc,, +tabledata/_logger/__pycache__/_null_logger.cpython-310.pyc,, +tabledata/_logger/_logger.py,sha256=3HreG22mzHGZvexAGZpjkU4A995ZZmGJmiIkPcrkA4o,783 +tabledata/_logger/_null_logger.py,sha256=QJuaErUIV_x6NjQ9qNX9eNSi_GB_9CrO7lKeXYZnuaw,1088 +tabledata/error.py,sha256=UGGJm3_9oLQi9GBWZz4cqp1dnzc5Kbu37c6CsiWozME,526 +tabledata/normalizer.py,sha256=lVz4agT8Bm97rvKUUUhP3OT1pGDsMczB5rAlx316XoY,6465 +tabledata/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 diff --git a/venv/lib/python3.10/site-packages/tabledata-1.3.3.dist-info/WHEEL b/venv/lib/python3.10/site-packages/tabledata-1.3.3.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..7e688737d490be3643d705bc16b5a77f7bd567b7 --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/tabledata-1.3.3.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.41.2) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/venv/lib/python3.10/site-packages/tabledata-1.3.3.dist-info/top_level.txt b/venv/lib/python3.10/site-packages/tabledata-1.3.3.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..43aaf2f47a3d66da5f27d0be87b84007bd70669a --- /dev/null +++ b/venv/lib/python3.10/site-packages/tabledata-1.3.3.dist-info/top_level.txt @@ -0,0 +1 @@ +tabledata diff --git a/venv/lib/python3.10/site-packages/torchgen/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchgen/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..73781b9324c64666eb0ff218660654cf045469be Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchgen/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchgen/__pycache__/code_template.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchgen/__pycache__/code_template.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e40d96d48d5df400d1455d63ed5d554b590fb17a Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchgen/__pycache__/code_template.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchgen/__pycache__/context.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchgen/__pycache__/context.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a58d5e5ae4f8b8cd446415e70624d0467f5c0fa6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchgen/__pycache__/context.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchgen/__pycache__/gen.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchgen/__pycache__/gen.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6a196c1d6874aa5311265e660f909438ec5ff4fb Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchgen/__pycache__/gen.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchgen/__pycache__/gen_aoti_c_shim.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchgen/__pycache__/gen_aoti_c_shim.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6b274e3b4f2d05296b9553fda7490e0d27d638b5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchgen/__pycache__/gen_aoti_c_shim.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchgen/__pycache__/gen_backend_stubs.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchgen/__pycache__/gen_backend_stubs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4ff752e395c5543354e6662c94fa0f5f7bc69645 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchgen/__pycache__/gen_backend_stubs.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchgen/__pycache__/gen_executorch.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchgen/__pycache__/gen_executorch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0ba8ac09c560de2763227441fafe3c509b897794 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchgen/__pycache__/gen_executorch.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchgen/__pycache__/gen_functionalization_type.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/torchgen/__pycache__/gen_functionalization_type.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e069e6791b56dec3a2e0b67cae51252a5a72b486 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchgen/__pycache__/gen_functionalization_type.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchgen/__pycache__/gen_lazy_tensor.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchgen/__pycache__/gen_lazy_tensor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..36aab316d38976f6eeda9fbfeed4d05499d301a0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchgen/__pycache__/gen_lazy_tensor.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchgen/__pycache__/gen_vmap_plumbing.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchgen/__pycache__/gen_vmap_plumbing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ada54f6adb5000a6d91fb3e832156e52506b8ec0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchgen/__pycache__/gen_vmap_plumbing.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchgen/__pycache__/local.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchgen/__pycache__/local.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..42449d009b63445f06086174d14f001bd7d7155b Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchgen/__pycache__/local.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchgen/__pycache__/model.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchgen/__pycache__/model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7f5e64798ef7029566b2cd74fc202d7c0ba5d759 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchgen/__pycache__/model.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchgen/__pycache__/native_function_generation.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchgen/__pycache__/native_function_generation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..34fc9a71099550db0a6ff53d94de572ebb1986fc Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchgen/__pycache__/native_function_generation.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchgen/__pycache__/utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchgen/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4769a8ecea51e8b656ef76629cffd8086ee9d4d3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchgen/__pycache__/utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchgen/__pycache__/yaml_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchgen/__pycache__/yaml_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0935bc48b2d434be38273966dfe14186380868a9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchgen/__pycache__/yaml_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchgen/executorch/__init__.py b/venv/lib/python3.10/site-packages/torchgen/executorch/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git 
a/venv/lib/python3.10/site-packages/torchgen/executorch/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchgen/executorch/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0d59d2ce6d95000726cf4aa937aff3d872242ca0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchgen/executorch/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchgen/executorch/__pycache__/model.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchgen/executorch/__pycache__/model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..02ae501fda1aa93da96172a1110739bd044e656b Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchgen/executorch/__pycache__/model.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchgen/executorch/__pycache__/parse.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchgen/executorch/__pycache__/parse.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9ab1ce932e5da8ffd02be44207e638a6ab4ce1ba Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchgen/executorch/__pycache__/parse.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchgen/executorch/api/__init__.py b/venv/lib/python3.10/site-packages/torchgen/executorch/api/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/torchgen/executorch/api/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchgen/executorch/api/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1e70d88db5e8b58640f998da4454cee22c7b10e8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchgen/executorch/api/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchgen/executorch/api/__pycache__/custom_ops.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchgen/executorch/api/__pycache__/custom_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e288f74c63de615401e16ec57ebe5904941478b9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchgen/executorch/api/__pycache__/custom_ops.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchgen/executorch/api/__pycache__/et_cpp.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchgen/executorch/api/__pycache__/et_cpp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fe404a408c3729ef4f6ef4d78bfb6c251bbe6bf7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchgen/executorch/api/__pycache__/et_cpp.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchgen/executorch/api/__pycache__/unboxing.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchgen/executorch/api/__pycache__/unboxing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..74c1aebf677ce18a3e0890f027714fbeebfec0a4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchgen/executorch/api/__pycache__/unboxing.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchgen/executorch/api/custom_ops.py b/venv/lib/python3.10/site-packages/torchgen/executorch/api/custom_ops.py new file mode 100644 index 
0000000000000000000000000000000000000000..5d11f1300bb8b7ccb7d6b4bbd372a70f2e6fb219 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchgen/executorch/api/custom_ops.py @@ -0,0 +1,142 @@ +from collections import defaultdict + +from dataclasses import dataclass +from typing import Dict, List, Optional, Sequence, Tuple + +from torchgen import dest + +# disable import sorting to avoid circular dependency. +from torchgen.api.types import DispatcherSignature # isort:skip +from torchgen.context import method_with_native_function +from torchgen.executorch.model import ETKernelIndex +from torchgen.model import BaseTy, BaseType, DispatchKey, NativeFunction, Variant +from torchgen.selective_build.selector import SelectiveBuilder +from torchgen.utils import concatMap, Target + + +# Generates RegisterKernelStub.cpp, which provides placeholder kernels for custom operators. This will be used at +# model authoring side. +@dataclass(frozen=True) +class ComputeNativeFunctionStub: + @method_with_native_function + def __call__(self, f: NativeFunction) -> Optional[str]: + if Variant.function not in f.variants: + return None + + sig = DispatcherSignature.from_schema( + f.func, prefix=f"wrapper_CPU_{f.func.name.overload_name}_", symint=False + ) + assert sig is not None + if len(f.func.returns) == 0: + ret_name = "" + elif len(f.func.returns) == 1: + if f.func.arguments.out: + ret_name = f.func.arguments.out[0].name + else: + ret_name = next( + ( + a.name + for a in f.func.arguments.flat_non_out + if a.type == f.func.returns[0].type + ), + "", + ) + if not ret_name: + # if return type is tensor + if f.func.returns[0].type == BaseType(BaseTy.Tensor): + # Returns an empty tensor + ret_name = "at::Tensor()" + else: + raise Exception(f"Can't handle this return type {f.func}") + elif len(f.func.arguments.out) == len(f.func.returns): + # Returns a tuple of out arguments + tensor_type = "at::Tensor &" + comma = ", " + ret_name = f"""::std::tuple<{comma.join([tensor_type] * len(f.func.returns))}>( + {comma.join([r.name for r in f.func.arguments.out])} + )""" + else: + assert all( + a.type == BaseType(BaseTy.Tensor) for a in f.func.returns + ), f"Only support tensor returns but got {f.func.returns}" + # Returns a tuple of empty tensors + tensor_type = "at::Tensor" + comma = ", " + ret_name = f"""::std::tuple<{comma.join([tensor_type] * len(f.func.returns))}>( + {comma.join(["at::Tensor()" for _ in f.func.returns])} + )""" + ret_str = f"return {ret_name};" if len(f.func.returns) > 0 else "" + return f""" +{sig.defn()} {{ + {ret_str} +}} + """ + + +def gen_custom_ops_registration( + *, + native_functions: Sequence[NativeFunction], + selector: SelectiveBuilder, + kernel_index: ETKernelIndex, + rocm: bool, +) -> Tuple[str, str]: + """ + Generate custom ops registration code for dest.RegisterDispatchKey. + + :param native_functions: a sequence of `NativeFunction` + :param selector: for selective build. + :param kernel_index: kernels for all the ops. + :param rocm: bool for dest.RegisterDispatchKey. + :return: generated C++ code to register custom operators into PyTorch + """ + + # convert kernel index to BackendIndex. This is because we can't handle ETKernelIndex yet. + # TODO larryliu: evaluate if this code is still needed. If yes let it handle ETKernelIndex. 
+ + dispatch_key = DispatchKey.CPU + backend_index = kernel_index._to_backend_index() + static_init_dispatch_registrations = "" + ns_grouped_native_functions: Dict[str, List[NativeFunction]] = defaultdict(list) + for native_function in native_functions: + ns_grouped_native_functions[native_function.namespace].append(native_function) + + for namespace, functions in ns_grouped_native_functions.items(): + if len(functions) == 0: + continue + dispatch_registrations_body = "\n".join( + list( + concatMap( + dest.RegisterDispatchKey( + backend_index, + Target.REGISTRATION, + selector, + rocm=rocm, + symint=False, + class_method_name=None, + skip_dispatcher_op_registration=False, + ), + functions, + ) + ) + ) + static_init_dispatch_registrations += f""" +TORCH_LIBRARY_IMPL({namespace}, {dispatch_key}, m) {{ +{dispatch_registrations_body} +}};""" + anonymous_definition = "\n".join( + list( + concatMap( + dest.RegisterDispatchKey( + backend_index, + Target.ANONYMOUS_DEFINITION, + selector, + rocm=rocm, + symint=False, + class_method_name=None, + skip_dispatcher_op_registration=False, + ), + native_functions, + ) + ) + ) + return anonymous_definition, static_init_dispatch_registrations diff --git a/venv/lib/python3.10/site-packages/torchgen/executorch/api/et_cpp.py b/venv/lib/python3.10/site-packages/torchgen/executorch/api/et_cpp.py new file mode 100644 index 0000000000000000000000000000000000000000..24dda58ecdbc4884b8502d0d44dba29098e080af --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchgen/executorch/api/et_cpp.py @@ -0,0 +1,368 @@ +from typing import List, Optional, Sequence, Set, Union + +from torchgen import local +from torchgen.api.types import ( + ArgName, + ArrayCType, + BaseCType, + Binding, + ConstRefCType, + CType, + MutRefCType, + NamedCType, + SpecialArgName, + TupleCType, + VectorCType, + voidT, +) +from torchgen.model import ( + Argument, + Arguments, + BaseTy, + BaseType, + ListType, + NativeFunction, + OptionalType, + Return, + SelfArgument, + TensorOptionsArguments, + Type, +) +from torchgen.utils import assert_never +from .types import ( + ArrayRefCType, + BaseTypeToCppMapping, + OptionalCType, + scalarT, + tensorListT, + tensorT, +) + +""" +This file describes the translation of JIT schema to the public C++ API, which is what people use when they call +functions like at::add. It also serves as a native function API, which is the signature of kernels, +since in Executorch CppSignature is the same as NativeSignature. + +Difference between this file and torchgen.api.cpp.py: + + - Executorch doesn't support TensorOptions, however in this file we still keep the logic here to be compatible with + torchgen.api.cpp, so that we can do stuff like ATen mode (running ATen kernels in Executorch). + + - Executorch doesn't support Dimname. + + - Executorch runtime doesn't support SymInt, will treat it as int. +""" + + +# Translation of "value types" in JIT schema to C++ API type. Value +# types look the same no matter if they are argument types or return +# types. Returns None if the type in question is not a value type. +def valuetype_type( + t: Type, + *, + binds: ArgName, + remove_non_owning_ref_types: bool = False, +) -> Optional[NamedCType]: + if isinstance(t, BaseType): + if t.name == BaseTy.Tensor or t.name == BaseTy.Scalar: + return None + # For SymInt we simply treat it as int. 
+ elif str(t) == "SymInt": + return NamedCType(binds, BaseCType(BaseTypeToCppMapping[BaseTy.int])) + if remove_non_owning_ref_types: + if t.name == BaseTy.str: + raise AssertionError( + "string ref->value conversion: not implemented yet" + ) + # All other BaseType currently map directly to BaseCppTypes. + return NamedCType(binds, BaseCType(BaseTypeToCppMapping[t.name])) + elif isinstance(t, OptionalType): + elem = valuetype_type(t.elem, binds=binds) + if elem is None: + return None + return NamedCType(binds, OptionalCType(elem.type)) + elif isinstance(t, ListType): + if str(t.elem) == "bool": + assert t.size is not None + return NamedCType( + binds, ArrayCType(BaseCType(BaseTypeToCppMapping[BaseTy.bool]), t.size) + ) + else: + return None + else: + raise AssertionError(f"unrecognized type {repr(t)}") + + +# Translation of types occurring in JIT arguments to a C++ argument type. +# If remove_non_owning_ref_types is set, we'll guarantee that the outputed CType is not a non-owning reference type. +# For example, we'll return std::vector instead of IntArrayRef. +# See Note [translation from C++ reference to value types] +def argumenttype_type( + t: Type, + *, + mutable: bool, + binds: ArgName, + remove_non_owning_ref_types: bool = False, +) -> NamedCType: + # If it's a value type, do the value type translation + r = valuetype_type( + t, + binds=binds, + remove_non_owning_ref_types=remove_non_owning_ref_types, + ) + if r is not None: + return r + if isinstance(t, BaseType): + if t.name == BaseTy.Tensor: + if mutable and not local.use_const_ref_for_mutable_tensors(): + return NamedCType(binds, MutRefCType(BaseCType(tensorT))) + else: + return NamedCType(binds, ConstRefCType(BaseCType(tensorT))) + elif t.name == BaseTy.Scalar: + return NamedCType(binds, ConstRefCType(BaseCType(scalarT))) + else: + raise AssertionError(f"base type should have been value type {t}") + elif isinstance(t, OptionalType): + if str(t.elem) == "Tensor": + if mutable and not local.use_const_ref_for_mutable_tensors(): + return NamedCType( + binds, MutRefCType(BaseCType(tensorT)) + ) # TODO: fix this discrepancy + else: + return NamedCType( + binds, ConstRefCType(OptionalCType(BaseCType(tensorT))) + ) + elif str(t.elem) == "Scalar": + return NamedCType(binds, ConstRefCType(OptionalCType(BaseCType(scalarT)))) + elem = argumenttype_type(t.elem, mutable=mutable, binds=binds) + return NamedCType(binds, OptionalCType(elem.type)) + elif isinstance(t, ListType): + # TODO: keeping these special cases for Tensor[] and Tensor?[] so that we can hookup with ATen kernels. + if str(t.elem) == "Tensor": + return NamedCType(binds, BaseCType(tensorListT)) + elif str(t.elem) == "Dimname": + raise NotImplementedError("Executorch doesn't support Dimname") + elif str(t.elem) == "Tensor?": + return NamedCType(binds, ArrayRefCType(OptionalCType(BaseCType(tensorT)))) + elem = argumenttype_type(t.elem, mutable=mutable, binds=binds) + return NamedCType(binds, ArrayRefCType(elem.type)) + else: + raise AssertionError(f"unrecognized type {repr(t)}") + + +# Translate a JIT argument into its C++ type +def argument_type(a: Argument, *, binds: ArgName) -> NamedCType: + return argumenttype_type(a.type, mutable=a.is_write, binds=binds) + + +# Translation of a (non-multi) return type from JIT to C++ +# N.B: returntype_type returns a CType, not a NamedCType. +# This is mostly because of the mismatch between return types and return names. +# e.g. 
a function with a return type of 'void' has 0 return names, +# and a function with a return type of 'std::tuple' has >1 return name. +def returntype_type(t: Type, *, mutable: bool) -> CType: + # placeholder is ignored + r = valuetype_type(t, binds="__placeholder__") + if r is not None: + return r.type + + if isinstance(t, BaseType): + if t.name == BaseTy.Tensor: + if mutable: + if local.use_const_ref_for_mutable_tensors(): + return ConstRefCType(BaseCType(tensorT)) + else: + return MutRefCType(BaseCType(tensorT)) + else: + # Note [Tensor Copy Returns] + # Currently, we use "Argument.is_write" to determine + # whether or not Tensor return types should be copies or references. + # If that ever changes, take a look at other locations of this note! + return BaseCType(tensorT) + elif t.name == BaseTy.Scalar: + return BaseCType(scalarT) + elif isinstance(t, ListType): + assert ( + not mutable + ), "Native functions should never return a mutable tensor list. They should return void." + elem = returntype_type(t.elem, mutable=False) + assert t.size is None, f"fixed size list returns not supported: {t}" + return VectorCType(elem) + + raise AssertionError(f"unrecognized return type {t}") + + +# Translation of a single return to its C++ type +def return_type(r: Return) -> CType: + return returntype_type(r.type, mutable=r.is_write) + + +# Translation of a full (possibly multi) return from JIT to its C++ type +def returns_type(rs: Sequence[Return]) -> CType: + if len(rs) == 0: + return BaseCType(voidT) + elif len(rs) == 1: + return return_type(rs[0]) + else: + return TupleCType([return_type(r) for r in rs]) + + +def return_names(f: NativeFunction, *, fallback_name: str = "result") -> Sequence[str]: + returns: List[str] = [] + for i, r in enumerate(f.func.returns): + # If we have an inplace function, the return argument is + # implicitly named self. + # TODO: Consider incorporating this into the data model + if f.func.name.name.inplace: + assert i == 0, "illegal inplace function with multiple returns" + name = "self" + # If we are out function, the name is the name of the + # corresponding output function (r.name will get recorded + # in field_name later.) + elif f.func.is_out_fn(): + name = f.func.arguments.out[i].name + # If the return argument is explicitly named... 
+ elif r.name: + name_conflict = any( + r.name == a.name for a in f.func.schema_order_arguments() + ) + if name_conflict and not f.func.is_out_fn(): + name = f"{r.name}_return" + else: + name = r.name + # If there is no explicit name and no fallback name was passed in, we just name the output result, + # unless it's a multi-return, in which case it's result0, + # result1, etc (zero-indexed) + else: + name = fallback_name if len(f.func.returns) == 1 else f"{fallback_name}{i}" + returns.append(name) + return returns + + +JIT_TO_CPP_DEFAULT = { + "False": "false", + "True": "true", + "None": "torch::executorch::nullopt", # UGH this one is type directed + "[]": "{}", + "contiguous_format": "torch::executorch::MemoryFormat::Contiguous", + "long": "torch::executorch::kLong", +} + + +# Convert a JIT default into C++ expression representing the default +def default_expr(d: str, t: Type) -> str: + if d == "None" and str(t) == "Tensor?": + return "{}" + if isinstance(t, BaseType) and t.name is BaseTy.str: + # Schema allows single quotes but C++ needs double + if len(d) >= 2 and d[0] == "'" and d[-1] == "'": + s = "" + i = 1 + while i + 1 < len(d): + if d[i] != "\\": + if d[i] == '"': + s += '\\"' + else: + s += d[i] + i += 1 + else: + if d[i + 1] == "'": + s += "'" + else: + s += d[i : i + 2] + i += 2 + + return f'"{s}"' + + if isinstance(t, OptionalType): + if d == "None": + return "torch::executor::nullopt" + + return default_expr(d, t.elem) + + if isinstance(t, ListType): + if d.startswith("[") and d.endswith("]"): + return "{" + d[1:-1] + "}" + elif t.size is None: + # NOTE: Sized lists can have scalar defaults + raise ValueError(f"Expected a list default '[...]' but found: '{d}'") + + return JIT_TO_CPP_DEFAULT.get(d, d) + + +# Convert an argument into its C++ API form + + +def argument( + a: Union[Argument, TensorOptionsArguments, SelfArgument], + *, + cpp_no_default_args: Set[str], + method: bool, + faithful: bool, + has_tensor_options: bool, +) -> List[Binding]: + def sub_argument( + a: Union[Argument, TensorOptionsArguments, SelfArgument] + ) -> List[Binding]: + return argument( + a, + cpp_no_default_args=cpp_no_default_args, + method=method, + faithful=faithful, + has_tensor_options=has_tensor_options, + ) + + if isinstance(a, Argument): + binds: ArgName + if a.name == "memory_format" and has_tensor_options: + binds = SpecialArgName.possibly_redundant_memory_format + else: + binds = a.name + default: Optional[str] = None + if a.name not in cpp_no_default_args and a.default is not None: + default = default_expr(a.default, a.type) + return [ + Binding( + nctype=argument_type(a, binds=binds), + name=a.name, + default=default, + argument=a, + ) + ] + elif isinstance(a, TensorOptionsArguments): + raise NotImplementedError("Need to implement type resolution for TensorOptions") + elif isinstance(a, SelfArgument): + if method: + # Caller is responsible for installing implicit this in context! 
+ return [] + else: + return sub_argument(a.argument) + else: + assert_never(a) + + +def arguments( + arguments: Arguments, + *, + faithful: bool, + method: bool, + cpp_no_default_args: Set[str], +) -> List[Binding]: + args: List[Union[Argument, TensorOptionsArguments, SelfArgument]] = [] + if faithful: + args.extend(arguments.non_out) + args.extend(arguments.out) + else: + args.extend(arguments.out) + args.extend(arguments.non_out) + return [ + r.no_default() if faithful else r + for a in args + for r in argument( + a, + faithful=faithful, + method=method, + has_tensor_options=arguments.tensor_options is not None, + cpp_no_default_args=cpp_no_default_args, + ) + ] diff --git a/venv/lib/python3.10/site-packages/torchgen/executorch/api/types/__init__.py b/venv/lib/python3.10/site-packages/torchgen/executorch/api/types/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..eb5e802634f82e1557f9245bf857d9e54b748d31 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchgen/executorch/api/types/__init__.py @@ -0,0 +1,2 @@ +from .types import * +from .signatures import * # isort:skip diff --git a/venv/lib/python3.10/site-packages/torchgen/executorch/api/types/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchgen/executorch/api/types/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2b54aa1a52eeb2ddb41ed36aed3f32a4ee478224 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchgen/executorch/api/types/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchgen/executorch/api/types/__pycache__/signatures.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchgen/executorch/api/types/__pycache__/signatures.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eff55e4ac9b7e6440e29c2838642a09a5267db08 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchgen/executorch/api/types/__pycache__/signatures.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchgen/executorch/api/types/__pycache__/types.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchgen/executorch/api/types/__pycache__/types.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..236487cff9a732c68c9736ae1c7f040bee725d21 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchgen/executorch/api/types/__pycache__/types.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchgen/executorch/api/types/signatures.py b/venv/lib/python3.10/site-packages/torchgen/executorch/api/types/signatures.py new file mode 100644 index 0000000000000000000000000000000000000000..a53d15c036a9106e865f4665945ab3b9cf0de6e6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchgen/executorch/api/types/signatures.py @@ -0,0 +1,73 @@ +from dataclasses import dataclass +from typing import List, Optional, Set + +import torchgen.api.cpp as aten_cpp + +from torchgen.api.types import Binding, CType +from torchgen.model import FunctionSchema, NativeFunction + +from .types import contextArg + + +@dataclass(frozen=True) +class ExecutorchCppSignature: + """ + This signature is merely a CppSignature with Executorch types (optionally + contains KernelRuntimeContext as well). The inline definition of + CppSignature is generated in Functions.h and it's used by unboxing + functions. 
+ """ + + # The schema this signature is derived from + func: FunctionSchema + + # The set of C++ arguments which should not have defaults applied to them + cpp_no_default_args: Set[str] + + # Allows you to prepend an arbitrary prefix to the signature name. + # This is useful for parts of the codegen that generate wrappers around kernels, + # and need to avoid naming collisions. + prefix: str = "" + + def arguments(self, *, include_context: bool = True) -> List[Binding]: + return ([contextArg] if include_context else []) + et_cpp.arguments( + self.func.arguments, + faithful=True, # always faithful, out argument at the end + method=False, # method not supported + cpp_no_default_args=self.cpp_no_default_args, + ) + + def name(self) -> str: + return self.prefix + aten_cpp.name( + self.func, + faithful_name_for_out_overloads=True, + ) + + def decl(self, name: Optional[str] = None, *, include_context: bool = True) -> str: + args_str = ", ".join( + a.decl() for a in self.arguments(include_context=include_context) + ) + if name is None: + name = self.name() + return f"{self.returns_type().cpp_type()} {name}({args_str})" + + def defn(self, name: Optional[str] = None) -> str: + args = [a.defn() for a in self.arguments()] + args_str = ", ".join(args) + if name is None: + name = self.name() + return f"{self.returns_type().cpp_type()} {name}({args_str})" + + def returns_type(self) -> CType: + return et_cpp.returns_type(self.func.returns) + + @staticmethod + def from_native_function( + f: NativeFunction, *, prefix: str = "" + ) -> "ExecutorchCppSignature": + return ExecutorchCppSignature( + func=f.func, prefix=prefix, cpp_no_default_args=f.cpp_no_default_args + ) + + +from torchgen.executorch.api import et_cpp diff --git a/venv/lib/python3.10/site-packages/torchgen/executorch/api/types/types.py b/venv/lib/python3.10/site-packages/torchgen/executorch/api/types/types.py new file mode 100644 index 0000000000000000000000000000000000000000..c9db1baa245fa2375896930febeddcd98ae2d4e7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchgen/executorch/api/types/types.py @@ -0,0 +1,81 @@ +from dataclasses import dataclass +from typing import Dict + +from torchgen.api.types import ( + BaseCppType, + BaseCType, + Binding, + boolT, + CType, + doubleT, + Expr, + longT, + MutRefCType, + NamedCType, +) +from torchgen.model import BaseTy + +halfT = BaseCppType("torch::executor", "Half") +bfloat16T = BaseCppType("torch::executor", "BFloat16") +stringT = BaseCppType("torch::executor", "string_view") +scalarTypeT = BaseCppType("torch::executor", "ScalarType") +tensorT = BaseCppType("torch::executor", "Tensor") +tensorListT = BaseCppType("torch::executor", "TensorList") +scalarT = BaseCppType("torch::executor", "Scalar") +memoryFormatT = BaseCppType("torch::executor", "MemoryFormat") +intArrayRefT = BaseCppType("torch::executor", "IntArrayRef") +optionalT = BaseCppType("torch::executor", "optional") +contextT = BaseCppType("torch::executor", "KernelRuntimeContext") + +contextExpr = Expr( + expr="context", + type=NamedCType(name="context", type=MutRefCType(BaseCType(contextT))), +) + +contextArg = Binding( + name="context", + nctype=contextExpr.type, + argument=None, # type: ignore[arg-type] + default=None, +) + +BaseTypeToCppMapping: Dict[BaseTy, BaseCppType] = { + BaseTy.int: longT, + BaseTy.float: doubleT, + BaseTy.bool: boolT, + BaseTy.str: stringT, + BaseTy.ScalarType: scalarTypeT, + BaseTy.Tensor: tensorT, + BaseTy.Scalar: scalarT, + BaseTy.MemoryFormat: memoryFormatT, +} + + +@dataclass(frozen=True) +class 
OptionalCType(CType): + elem: "CType" + + def cpp_type(self, *, strip_ref: bool = False) -> str: + # Do not pass `strip_ref` recursively. + return f"torch::executor::optional<{self.elem.cpp_type()}>" + + def cpp_type_registration_declarations(self) -> str: + return f"torch::executor::optional<{self.elem.cpp_type_registration_declarations()}>" + + def remove_const_ref(self) -> "CType": + return OptionalCType(self.elem.remove_const_ref()) + + +@dataclass(frozen=True) +class ArrayRefCType(CType): + elem: "CType" + + def cpp_type(self, *, strip_ref: bool = False) -> str: + # Do not pass `strip_ref` recursively. + return f"torch::executor::ArrayRef<{self.elem.cpp_type()}>" + + def cpp_type_registration_declarations(self) -> str: + return f"torch::executor::ArrayRef<{self.elem.cpp_type_registration_declarations()}>" + + def remove_const_ref(self) -> "CType": + return ArrayRefCType(self.elem.remove_const_ref()) diff --git a/venv/lib/python3.10/site-packages/torchgen/executorch/api/unboxing.py b/venv/lib/python3.10/site-packages/torchgen/executorch/api/unboxing.py new file mode 100644 index 0000000000000000000000000000000000000000..9a8f717ddbb28d970779d2247d84c58450c5de45 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchgen/executorch/api/unboxing.py @@ -0,0 +1,213 @@ +from dataclasses import dataclass +from typing import Callable, List, Sequence, Tuple + +from torchgen.api.types import Binding, CType, NamedCType +from torchgen.model import ( + Argument, + BaseTy, + BaseType, + ListType, + NativeFunction, + OptionalType, + Type, +) + +connector = "\n\t" + + +# Return unboxing function name for a NativeFunction +def name(f: NativeFunction) -> str: + return f.func.name.unambiguous_name() + + +@dataclass(frozen=True) +class Unboxing: + """ + Takes a sequence of Bindings and unbox EValues to these Bindings. Return generated code that performs correct unboxing. + A sample generated code: + // aten::mul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + void mul_out(EValue** stack) { + EValue& self = *stack[0]; + EValue& other = *stack[1]; + EValue& out = *stack[2]; + const torch::executor::Tensor & self_base = self.to<torch::executor::Tensor>(); + const torch::executor::Tensor & other_base = other.to<torch::executor::Tensor>(); + torch::executor::Tensor & out_base = out.to<torch::executor::Tensor>(); + + EXECUTORCH_SCOPE_PROF("native_call_mul.out"); + torch::executor::mul_outf(self_base, other_base, out_base); + + + } + """ + + # this is a callable that converts a JIT argument, into its C++ type. + # Translates (type, mutability, binds) to NamedCType. E.g., torchgen.api.cpp.argumenttype_type.
+ argument_type_gen: Callable[ + ..., + NamedCType, + ] + + # Convert all the arguments in a NativeFunction to C++ code + def convert_arguments( + self, args: Sequence[Binding] + ) -> Tuple[List[Binding], List[str]]: + code_list = [f"EValue& {args[i].name} = *stack[{i}];" for i in range(len(args))] + binding_list = [] + for arg in args: + # expecting only Argument + if not isinstance(arg.argument, Argument): + raise Exception( + f"Unexpected argument type, expecting `Argument` but got {arg}" + ) + argument: Argument = arg.argument + unboxed_name, _, code, decl = self.argumenttype_evalue_convert( + argument.type, argument.name, mutable=argument.is_write + ) + code_list.extend(decl) + code_list.extend(code) + binding_list.append(arg.with_name(unboxed_name)) + return binding_list, code_list + + def argumenttype_evalue_convert( + self, t: Type, arg_name: str, *, mutable: bool = False + ) -> Tuple[str, CType, List[str], List[str]]: + """ + Takes in the type, name and mutability corresponding to an argument, and generates a tuple of: + (1) the C++ code necessary to unbox the argument + (2) A Binding corresponding to the newly created unboxed variable, including variable name and its CType + :param t: a `Type` of an argument + :param arg_name: argument name + :param mutable: boolean for whether this argument type is mutable + :return: unboxed result + """ + ctype = self.argument_type_gen(t, mutable=mutable, binds=arg_name).type + + if isinstance(t, BaseType): + out_name = f"{arg_name}_base" + code, decl = self._gen_code_base_type( + arg_name=arg_name, out_name=out_name, ctype=ctype + ) + elif isinstance(t, OptionalType): + out_name = f"{arg_name}_opt_out" + code, decl = self._gen_code_optional_type( + arg_name=arg_name, out_name=out_name, t=t, ctype=ctype + ) + elif isinstance(t, ListType): + out_name = f"{arg_name}_list_out" + code, decl = self._gen_code_list_type( + arg_name=arg_name, out_name=out_name, t=t, ctype=ctype + ) + else: + raise Exception(f"Cannot handle type {t}. 
arg_name: {arg_name}") + return out_name, ctype, code, decl + + def _gen_code_base_type( + self, arg_name: str, out_name: str, ctype: CType + ) -> Tuple[List[str], List[str]]: + return [ + f"{ctype.cpp_type()} {out_name} = {arg_name}.to<{ctype.cpp_type(strip_ref=True)}>();" + ], [] + + def _gen_code_optional_type( + self, arg_name: str, out_name: str, t: OptionalType, ctype: CType + ) -> Tuple[List[str], List[str]]: + in_name = f"{arg_name}_opt_in" + res_name, base_type, res_code, decl = self.argumenttype_evalue_convert( + t.elem, in_name + ) + return ( + f""" + {ctype.cpp_type(strip_ref=True)} {out_name} = {arg_name}.toOptional<{base_type.cpp_type(strip_ref=True)}>(); + """.split( + "\n" + ), + decl, + ) + + def _gen_code_list_type( + self, arg_name: str, out_name: str, t: ListType, ctype: CType + ) -> Tuple[List[str], List[str]]: + in_name = f"{arg_name}_list_in" + elem_name = f"{arg_name}_elem" + code = [] + res_name, res_ctype, res_code, decl = self.argumenttype_evalue_convert( + t.elem, elem_name + ) + + if isinstance(t.elem, BaseType) and t.elem.name == BaseTy.Tensor: + code.extend( + f""" + {ctype.cpp_type(strip_ref=True)} {out_name} = {arg_name}.toTensorList(); + """.split( + "\n" + ) + ) + elif isinstance(t.elem, BaseType) and ( + t.elem.name == BaseTy.int or t.elem.name == BaseTy.SymInt + ): + code.extend( + f""" + {ctype.cpp_type(strip_ref=True)} {out_name} = {arg_name}.toIntList(); + """.split( + "\n" + ) + ) + elif isinstance(t.elem, BaseType) and t.elem.name == BaseTy.float: + code.extend( + f""" + {ctype.cpp_type(strip_ref=True)} {out_name} = {arg_name}.toDoubleList(); + """.split( + "\n" + ) + ) + elif isinstance(t.elem, BaseType) and t.elem.name == BaseTy.bool: + # handle list type with size, e.g., bool[4] + code.extend( + f""" + {ctype.cpp_type(strip_ref=True)} {out_name} = {arg_name}.toBoolList(); + """.split( + "\n" + ) + ) + # pytorch codegen: + # we have to use c10::List for optional element. e.g., Tensor?[] -> c10::List> + elif ( + isinstance(t.elem, OptionalType) + and isinstance(t.elem.elem, BaseType) + and t.elem.elem.name == BaseTy.Tensor + ): + code.extend( + f""" +#ifdef USE_ATEN_LIB +at::ArrayRef> {in_name} = {arg_name}.toListOptionalTensor(); +c10::List> {out_name}; +for (auto {elem_name}: {in_name}) {{ + {out_name}.push_back({elem_name}); +}} +#else +torch::executor::ArrayRef> {out_name} = {arg_name}.toListOptionalTensor(); +#endif + """.split( + "\n" + ) + ) + else: + # use ArrayRef as default. + vec_name = arg_name + "_vec" + # need to bring vector instantiation out of scope so that ArrayRef has valid data + decl.append( + f"std::vector<{res_ctype.cpp_type(strip_ref=True)}> {vec_name};" + ) + code.extend( + f""" + for (EValue {elem_name}: {in_name}) {{ + {connector.join(res_code)} + {vec_name}.push_back({res_name}); + }} + {ctype.cpp_type(strip_ref=True)} {out_name}({vec_name}); + """.split( + "\n" + ) + ) + return code, decl diff --git a/venv/lib/python3.10/site-packages/torchgen/executorch/model.py b/venv/lib/python3.10/site-packages/torchgen/executorch/model.py new file mode 100644 index 0000000000000000000000000000000000000000..cec9251a3187cfe0a1a3e84744f49760331761f2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchgen/executorch/model.py @@ -0,0 +1,220 @@ +# Represents all kernels used by an Executorch model. +# It maintains a Dict[OperatorName, Dict[ETKernelKey, BackendMetadata]] structure. 
+ +import itertools +from collections import defaultdict, namedtuple +from dataclasses import dataclass +from enum import IntEnum +from typing import Dict, List, Tuple, Union + +from torchgen.model import ( + BackendIndex, + BackendMetadata, + DispatchKey, + NativeFunction, + NativeFunctionsGroup, + OperatorName, +) +from torchgen.utils import assert_never + +KERNEL_KEY_VERSION = 1 + + +# TODO: Duplicated Subset from codegen.tool.gen_oplist, remove declaration in codegen +class ScalarType(IntEnum): + Byte = 0 + Char = 1 + Short = 2 + Int = 3 + Long = 4 + Float = 6 + Double = 7 + Bool = 11 + + +ETParsedYaml = namedtuple("ETParsedYaml", ["native_functions", "kernel_index"]) + + +@dataclass(frozen=True) +class ETKernelKeyOpArgMeta: + arg_name: str + dtype: str + # The order of the dimensions if entry is a Tensor + dim_order: Tuple[int, ...] + + def to_native_string(self) -> str: + dtype_str = ScalarType[self.dtype].value + dim_str = str(self.dim_order)[1:-1].replace(" ", "") + return f"{dtype_str};{dim_str}" + + +@dataclass(frozen=True) +class ETKernelKey: + # Field undefined is default = True + arg_meta: Tuple[ETKernelKeyOpArgMeta, ...] = () + + # Indicator for this kernel being used as a catch all + default: bool = False + + version: int = KERNEL_KEY_VERSION + + @staticmethod + def gen_from_yaml( + args: Dict[str, Tuple[str, str]], + type_alias_map: Dict[str, List[str]], # TODO: Support unwrapped str val + dim_order_alias_map: Dict[str, List[int]], + ) -> List["ETKernelKey"]: + """Generate ETKernelKeys from arg kernel specs + Multiple ETKernelKeys are returned due to dtype permutations from utilizing + type_alias_map (actualizing each potential type permutation as a KernelKey) + + Args: + args: Mapping from argument name to kernel specs + Kernel specs are a tuple of (dtype, dim_order). 
+ Currently tuple entries must be aliased via the alias map arguments + type_alias_map: Mapping from type alias to potential type enums + i.e { T0 : [Double, Int] } means T0 can be either Double or Int + Used for lookup by args + dim_order_alias_map: Mapping from alias to a list of dimension orders + Used for lookup by args + """ + # Cast to dim order to int + dim_order_alias_map = { + k: [int(alias) for alias in v] for k, v in dim_order_alias_map.items() + } + kernel_keys = [] + + # Get all used Dtype Alias + dtype_alias_used = set() + for type_alias, dim_order in args.values(): + # Enforce usage of alias initially + # TODO: Support inlined arguments + assert type_alias in type_alias_map, "Undefined type alias: " + str( + type_alias + ) + assert ( + dim_order in dim_order_alias_map + ), "Undefined dim_order alias: " + str(dim_order) + dtype_alias_used.add(type_alias) + + # Generate all permutations of dtype alias values + alias_dtypes = [ + [(alias, dtype) for dtype in type_alias_map[alias]] + for alias in dtype_alias_used + ] + alias_permutations = [ + dict(permutation) for permutation in list(itertools.product(*alias_dtypes)) + ] + + # Using each alias value permutation, generate kernel keys + op_arg_cache = {} + for permutation in alias_permutations: + arg_list = [] + for arg_name, arg_spec in args.items(): + dtype = permutation[arg_spec[0]] + dim_order = dim_order_alias_map[arg_spec[1]] # type: ignore[assignment] + if ( + cache_key := (arg_name, dtype, tuple(dim_order)) + ) not in op_arg_cache: + op_arg_cache[cache_key] = ETKernelKeyOpArgMeta(*cache_key) # type: ignore[arg-type] + + arg_list.append(op_arg_cache[cache_key]) + kernel_keys.append(ETKernelKey(tuple(arg_list))) + + return kernel_keys + + def to_native_string(self) -> str: + if self.default: + return "default" + return ( + "v" + + str(KERNEL_KEY_VERSION) + + "/" + + "|".join([arg.to_native_string() for arg in self.arg_meta]) + ) + + +@dataclass(frozen=True) +class ETKernelIndex: + index: Dict[OperatorName, Dict[ETKernelKey, BackendMetadata]] + + def has_kernels(self, g: Union[NativeFunction, NativeFunctionsGroup]) -> bool: + m = self.get_kernels(g) + return m is not None + + def get_kernels( + self, g: Union[NativeFunction, NativeFunctionsGroup] + ) -> Dict[ETKernelKey, BackendMetadata]: + if isinstance(g, NativeFunction): + f = g + elif isinstance(g, NativeFunctionsGroup): + f = g.functional + else: + assert_never(g) + if f.func.name not in self.index: + return {} + return self.index[f.func.name] + + @staticmethod + def grow_from_backend_indices( + kernel_index: Dict[OperatorName, Dict[ETKernelKey, BackendMetadata]], + backend_indices: Dict[DispatchKey, Dict[OperatorName, BackendMetadata]], + ) -> None: + for dk in backend_indices: + index = backend_indices[dk] + for op, backend_metadata in index.items(): + if op in kernel_index: + kernel_index[op][ETKernelKey(default=True)] = backend_metadata + else: + kernel_index[op] = {ETKernelKey(default=True): backend_metadata} + + @staticmethod + def from_backend_indices( + backend_indices: Dict[DispatchKey, Dict[OperatorName, BackendMetadata]] + ) -> "ETKernelIndex": + kernel_index: Dict[ + OperatorName, Dict[ETKernelKey, BackendMetadata] + ] = defaultdict(dict) + ETKernelIndex.grow_from_backend_indices(kernel_index, backend_indices) + return ETKernelIndex(kernel_index) + + def grow( + self, backend_indices: Dict[DispatchKey, Dict[OperatorName, BackendMetadata]] + ) -> "ETKernelIndex": + ETKernelIndex.grow_from_backend_indices(self.index, backend_indices) + return self + + def 
_to_backend_index(self) -> BackendIndex: + """ + WARNING: this will be deprecated once all the codegen places know how to handle ETKernelIndex. + """ + index: Dict[OperatorName, BackendMetadata] = {} + for op in self.index: + kernel_dict = self.index[op] + assert ( + len(kernel_dict.values()) == 1 + ), f"Can't convert ETKernelIndex to BackendIndex because {op} has more than one kernels. Got {kernel_dict}" + index[op] = kernel_dict.get( + ETKernelKey(default=True), + BackendMetadata(kernel="", structured=False, cpp_namespace=""), + ) + return BackendIndex( + dispatch_key=DispatchKey.CPU, + use_out_as_primary=False, + device_guard=False, + external=False, + index=index, + ) + + # Note duplicate ETKernelKey from index_b will clobber the metadata from index_a + @staticmethod + def merge_indices( + index_a: "ETKernelIndex", index_b: "ETKernelIndex" + ) -> "ETKernelIndex": + combined = defaultdict(dict, index_a.index.copy()) + + for op, entry in index_b.index.items(): + for key, metadata in entry.items(): + combined[op][key] = metadata + + return ETKernelIndex(combined) diff --git a/venv/lib/python3.10/site-packages/torchgen/executorch/parse.py b/venv/lib/python3.10/site-packages/torchgen/executorch/parse.py new file mode 100644 index 0000000000000000000000000000000000000000..89b4b93558a6a22b21beafba722bff76372be9c0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchgen/executorch/parse.py @@ -0,0 +1,151 @@ +from collections import defaultdict, namedtuple +from typing import Any, Dict, List, Optional, Set, Tuple + +import yaml + +from torchgen.executorch.model import ETKernelIndex, ETKernelKey + +from torchgen.gen import LineLoader, parse_native_yaml +from torchgen.model import ( + BackendMetadata, + DispatchKey, + FunctionSchema, + NativeFunction, + OperatorName, +) +from torchgen.utils import NamespaceHelper + +# Parse native_functions.yaml into a sequence of NativeFunctions and ET Backend Indices. 
+ETParsedYaml = namedtuple("ETParsedYaml", ["native_functions", "et_kernel_indices"]) + +# Fields in native_functions.yaml used to determine which kernels should be used +ET_FIELDS = ["kernels", "type_alias", "dim_order_alias"] + + +def parse_from_yaml(ei: Dict[str, object]) -> Dict[ETKernelKey, BackendMetadata]: + """Given a loaded yaml representing kernel assignment information, extract the + mapping from `kernel keys` to `BackendMetadata` (the latter representing the kernel instance) + + Args: + ei: Dict keys {kernels, type_alias, dim_order_alias} + See ETKernelKey for description of arguments + """ + e = ei.copy() + if (kernels := e.pop("kernels", None)) is None: + return {} + + type_alias: Dict[str, List[str]] = e.pop("type_alias", {}) # type: ignore[assignment] + dim_order_alias: Dict[str, List[str]] = e.pop("dim_order_alias", {}) # type: ignore[assignment] + dim_order_alias.pop("__line__", None) + + kernel_mapping: Dict[ETKernelKey, BackendMetadata] = {} + + for entry in kernels: # type: ignore[attr-defined] + arg_meta = entry.get("arg_meta") + if arg_meta is not None: + arg_meta.pop("__line__") + + kernel_name = entry.get("kernel_name") + namespace_helper = NamespaceHelper.from_namespaced_entity( + kernel_name, max_level=3 + ) + kernel_namespace = namespace_helper.get_cpp_namespace(default="at") + backend_metadata = BackendMetadata( + kernel=namespace_helper.entity_name, + structured=False, + cpp_namespace=(kernel_namespace + "::native"), + ) + + kernel_keys = ( + [ETKernelKey((), default=True)] + if arg_meta is None + else ETKernelKey.gen_from_yaml(arg_meta, type_alias, dim_order_alias) # type: ignore[arg-type] + ) + + for kernel_key in kernel_keys: + assert kernel_key not in kernel_mapping, ( + "Duplicate kernel key: " + str(kernel_key) + " " + str(e) + ) + kernel_mapping[kernel_key] = backend_metadata + + return kernel_mapping + + +def parse_et_yaml_struct(es: object) -> ETKernelIndex: + """Given a loaded yaml representing a list of operators, for each op extract the mapping + of `kernel keys` to `BackendMetadata` (the latter representing the kernel instance + that should be used by the kernel key). + """ + indices: Dict[OperatorName, Dict[ETKernelKey, BackendMetadata]] = {} + for ei in es: # type: ignore[attr-defined] + e = ei.copy() + + funcs = e.pop("func") + assert isinstance(funcs, str), f"not a str: {funcs}" + namespace_helper = NamespaceHelper.from_namespaced_entity( + namespaced_entity=funcs, max_level=1 + ) + opname = FunctionSchema.parse(namespace_helper.entity_name).name + + assert opname not in indices, f"Duplicate func found in yaml: {opname} already" + + if len(index := parse_from_yaml(e)) != 0: + indices[opname] = index + + return ETKernelIndex(indices) + + +def extract_kernel_fields(es: object) -> Dict[OperatorName, Dict[str, Any]]: + """Given a loaded yaml representing a list of operators, extract the + kernel key related fields indexed by the operator name. 
+ """ + fields: Dict[OperatorName, Dict[str, Any]] = defaultdict(dict) + for ei in es: # type: ignore[attr-defined] + funcs = ei.get("func") + assert isinstance(funcs, str), f"not a str: {funcs}" + namespace_helper = NamespaceHelper.from_namespaced_entity( + namespaced_entity=funcs, max_level=1 + ) + opname = FunctionSchema.parse(namespace_helper.entity_name).name + + for field in ET_FIELDS: + if (value := ei.get(field)) is not None: + fields[opname][field] = value + + return fields + + +def parse_et_yaml( + path: str, + tags_yaml_path: str, + ignore_keys: Optional[Set[DispatchKey]] = None, + skip_native_fns_gen: bool = False, +) -> Tuple[List[NativeFunction], Dict[OperatorName, Dict[str, Any]]]: + """Parse native_functions.yaml into NativeFunctions and an Operator Indexed Dict + of fields to persist from native_functions.yaml to functions.yaml + """ + with open(path) as f: + es = yaml.load(f, Loader=LineLoader) + + et_kernel = extract_kernel_fields(es) + + # Remove ET specific fields from entries for BC compatibility + strip_et_fields(es) + + native_yaml = parse_native_yaml( + path, + tags_yaml_path, + ignore_keys, + skip_native_fns_gen=skip_native_fns_gen, + loaded_yaml=es, + ) + return native_yaml.native_functions, et_kernel + + +def strip_et_fields(es: object) -> None: + """Given a loaded yaml representing a list of operators, + remove ET specific fields from every entries for BC compatibility + """ + for entry in es: # type: ignore[attr-defined] + for field in ET_FIELDS: + entry.pop(field, None) diff --git a/venv/lib/python3.10/site-packages/torchgen/operator_versions/__init__.py b/venv/lib/python3.10/site-packages/torchgen/operator_versions/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/torchgen/operator_versions/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchgen/operator_versions/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7547bf9ef6333768c646958c24f7448cee322290 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchgen/operator_versions/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchgen/operator_versions/__pycache__/gen_mobile_upgraders.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchgen/operator_versions/__pycache__/gen_mobile_upgraders.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b762c89ab4019356c2dfbf1337678950776d1b55 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchgen/operator_versions/__pycache__/gen_mobile_upgraders.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchgen/operator_versions/__pycache__/gen_mobile_upgraders_constant.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchgen/operator_versions/__pycache__/gen_mobile_upgraders_constant.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..01dcab3eb009fd589a3d9e74b0dedc430aa3808d Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchgen/operator_versions/__pycache__/gen_mobile_upgraders_constant.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchgen/operator_versions/gen_mobile_upgraders.py b/venv/lib/python3.10/site-packages/torchgen/operator_versions/gen_mobile_upgraders.py new file mode 100644 index 
0000000000000000000000000000000000000000..dab15685804ea25edd15d59f427b6b57c27227d3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchgen/operator_versions/gen_mobile_upgraders.py @@ -0,0 +1,392 @@ +#!/usr/bin/env python3 +import os +from enum import Enum +from pathlib import Path +from typing import Any, Dict, List + +import torch +from torch.jit.generate_bytecode import generate_upgraders_bytecode + +from torchgen.code_template import CodeTemplate +from torchgen.operator_versions.gen_mobile_upgraders_constant import ( + MOBILE_UPGRADERS_HEADER_DESCRIPTION, +) + + +class ByteCode(Enum): + instructions = 1 + constants = 2 + types = 3 + operators = 4 + register_size = 5 + + +EXCLUDED_OP_SET = [ + "aten::full.names", + "aten::full.out", + "aten::full", +] + +EXCLUE_UPGRADER_SET = ["full_0_4", "full_out_0_4"] + +ONE_INSTRUCTION = CodeTemplate( + """ + Instruction{OpCode::${operator_name}, ${X}, ${N}},""" +) + +INSTRUCTION_LIST = CodeTemplate( + """std::vector({ + ${instruction_list} + }), // instructions list""" +) + +ONE_CONSTANT = CodeTemplate( + """ + c10::IValue(${constant}),""" +) + +CONSTANT_LIST = CodeTemplate( + """std::vector({ + ${constant_list} + }), // constants list""" +) + +CONSTANTS_LIST_EMPTY = """std::vector(), // constants list""" + +ONE_TYPE = CodeTemplate("""c10::parseType("${type_str}"),""") + +TYPE_LIST = CodeTemplate( + """std::vector({ + ${type_list} + }), // types list""" +) + +TYPE_LIST_EMPTY = """std::vector(), // types list""" + +ONE_OPERATOTR_STRING = CodeTemplate( + """ + OperatorString({"${operator_name}", "${overload_name}", ${num_of_args}}),""" +) + +OPERATOR_STRING_LIST = CodeTemplate( + """ + std::vector({ + ${operator_string_list} + }), // operators list""" +) + +ONE_UPGRADER_FUNCTION = CodeTemplate( + """ + mobile::Function::registerFunc( + "${upgrader_name}", + ${instruction_list}, + ${constant_list}, + ${type_list}, + ${register_size} + )""" +) + +ONE_UPGRADER_SRC = CodeTemplate( + """ + ByteCodeFunctionWithOperator({ + ${bytecode_function}, + ${operator_string_list} + }),""" +) + + +ONE_UPGRADER_IN_VERSION_MAP = CodeTemplate( + """Upgrader({${upgrader_min_version}, ${upgrader_max_version}, "${upgrader_name}", ${bytecode_func_index}})""" +) # noqa: E501 + +ONE_OPERATOR_IN_VERSION_MAP = CodeTemplate( + """ + {std::string("${operator_name}"), + std::vector({ + ${upgrader_list_in_version_map} + })},""" +) + + +OPERATOR_VERSION_MAP = CodeTemplate( + """ +const std::unordered_map> +getOperatorVersionMapForMobile() { + static std::unordered_map> + operatorVersionMapForMobile({ + ${operator_list_in_version_map} + }); + return operatorVersionMapForMobile; +} +""" +) + + +UPGRADER_CPP_SRC = CodeTemplate( + MOBILE_UPGRADERS_HEADER_DESCRIPTION + + """ +#include +#include + +namespace c10 { +TypePtr parseType(const std::string& pythonStr); +} // namespace c10 + +namespace torch { +namespace jit { + +// clang-format off + +// From operator_versions_map +${operator_version_map} + +const std::vector& getUpgraderBytecodeList() { + auto generate_upgrader_bytecode_list = []() { + std::vector upgrader_function_list({ + ${upgrader_bytecode} + }); + for (const auto& upgrader_function : upgrader_function_list) { + for (const auto& op : upgrader_function.operators) { + upgrader_function.function.append_operator( + op.name, + op.overload_name, + op.num_specified_args); + } + } + return upgrader_function_list; + }; + static std::vector upgraderBytecodeList = + generate_upgrader_bytecode_list(); + return upgraderBytecodeList; +} + +// clang-format on + +} // namespace jit 
+} // namespace torch
+"""
+)
+
+UPGRADER_MOBILE_FILE_NAME = "upgrader_mobile.cpp"
+
+UPGRADER_ELEMENT = CodeTemplate(
+    """\
+Upgrader({${min_version}, ${max_version}, ${operator_name}, ${index}}),
+"""
+)
+
+PER_OPERATOR_UPGRADER_LIST = CodeTemplate(
+    """\
+{
+    std::string(${operator_name}),
+    std::vector({${upgrader_list}});
+}
+"""
+)
+
+
+def construct_instruction(instruction_list_from_yaml: List[Any]) -> str:
+    instruction_list_part = []
+    for instruction in instruction_list_from_yaml:
+        instruction_list_part.append(
+            ONE_INSTRUCTION.substitute(
+                operator_name=instruction[0],
+                X=instruction[1],
+                N=instruction[2],
+            )
+        )
+    return INSTRUCTION_LIST.substitute(
+        instruction_list="".join(instruction_list_part).lstrip("\n")
+    )
+
+
+def construct_constants(constants_list_from_yaml: List[Any]) -> str:
+    constants_list_part = []
+    for constant_from_yaml in constants_list_from_yaml:
+        convert_constant = None
+        if isinstance(constant_from_yaml, str):
+            # Add quotes if it's a string
+            convert_constant = f'"{constant_from_yaml}"'
+        elif isinstance(constant_from_yaml, bool):
+            convert_constant = "true" if constant_from_yaml else "false"
+        elif constant_from_yaml is None:
+            convert_constant = ""
+        elif isinstance(constant_from_yaml, int):
+            convert_constant = str(constant_from_yaml)
+        else:
+            raise ValueError(
+                f"The type of {constant_from_yaml} is {type(constant_from_yaml)}. "
+                "Please add support for it in construct_constants in gen_mobile_upgraders.py."
+            )
+        constants_list_part.append(ONE_CONSTANT.substitute(constant=convert_constant))
+    if len(constants_list_part) == 0:
+        return CONSTANTS_LIST_EMPTY
+    return CONSTANT_LIST.substitute(
+        constant_list="".join(constants_list_part).lstrip("\n")
+    )
+
+
+def construct_operators(operator_list_from_yaml: List[Any]) -> str:
+    operator_list_part = []
+    for operator in operator_list_from_yaml:
+        operator_list_part.append(
+            ONE_OPERATOTR_STRING.substitute(
+                operator_name=operator[0],
+                overload_name=operator[1],
+                num_of_args=operator[2],
+            )
+        )
+    return OPERATOR_STRING_LIST.substitute(
+        operator_string_list="".join(operator_list_part).lstrip("\n")
+    )
+
+
+def construct_types(types_tr_list_from_yaml: List[Any]) -> str:
+    types_tr_list_part = []
+    for types_tr in types_tr_list_from_yaml:
+        types_tr_list_part.append(ONE_TYPE.substitute(type_str=types_tr))
+    if len(types_tr_list_part) == 0:
+        return TYPE_LIST_EMPTY
+    return TYPE_LIST.substitute(type_list="".join(types_tr_list_part).lstrip("\n"))
+
+
+def construct_register_size(register_size_from_yaml: int) -> str:
+    if not isinstance(register_size_from_yaml, int):
+        raise ValueError(
+            f"Input register size is {register_size_from_yaml} and "
+            f"its type is {type(register_size_from_yaml)}. An int type is expected."
+ ) + return str(register_size_from_yaml) + + +def construct_version_maps( + upgrader_bytecode_function_to_index_map: Dict[str, Any] +) -> str: + version_map = torch._C._get_operator_version_map() + sorted_version_map_ = sorted(version_map.items(), key=lambda item: item[0]) # type: ignore[no-any-return] + sorted_version_map = dict(sorted_version_map_) + + operator_list_in_version_map_part = [] + for op_name in sorted_version_map: + upgraders_in_version_map_part = [] + # TODO: remove the skip after these two operators schemas are fixed + if op_name in EXCLUDED_OP_SET: + continue + upgrader_ranges = torch._C._get_upgrader_ranges(op_name) + upgrader_entries = sorted_version_map[op_name] + assert len(upgrader_ranges) == len(upgrader_entries) + for idx, upgrader_entry in enumerate(upgrader_entries): + upgrader_name = upgrader_entry.upgrader_name + bytecode_function_index = upgrader_bytecode_function_to_index_map[ + upgrader_name + ] + upgraders_in_version_map_part.append( + ONE_UPGRADER_IN_VERSION_MAP.substitute( + upgrader_min_version=upgrader_ranges[idx].min_version, + upgrader_max_version=upgrader_ranges[idx].max_version, + upgrader_name=upgrader_name, + bytecode_func_index=bytecode_function_index, + ) + ) + operator_list_in_version_map_part.append( + ONE_OPERATOR_IN_VERSION_MAP.substitute( + operator_name=op_name, + upgrader_list_in_version_map="".join(upgraders_in_version_map_part), + ) + ) + return OPERATOR_VERSION_MAP.substitute( + operator_list_in_version_map="".join(operator_list_in_version_map_part).lstrip( + "\n" + ) + ) + + +def get_upgrader_bytecode_function_to_index_map( + upgrader_dict: List[Dict[str, Any]] +) -> Dict[str, Any]: + upgrader_bytecode_function_to_index_map = {} + index = 0 + for upgrader_bytecode in upgrader_dict: + for upgrader_name in upgrader_bytecode.keys(): + if upgrader_name in EXCLUE_UPGRADER_SET: + continue + upgrader_bytecode_function_to_index_map[upgrader_name] = index + index += 1 + return upgrader_bytecode_function_to_index_map + + +def write_cpp(cpp_path: str, upgrader_dict: List[Dict[str, Any]]) -> None: + body_parts = [] + upgrader_bytecode_function_to_index_map = ( + get_upgrader_bytecode_function_to_index_map(upgrader_dict) + ) + version_map_src = construct_version_maps(upgrader_bytecode_function_to_index_map) + all_upgrader_src_string = [] + for upgrader_bytecode in upgrader_dict: + for upgrader_name, bytecode in upgrader_bytecode.items(): + # TODO: remove the skip after these two operators schemas are fixed + if upgrader_name in EXCLUE_UPGRADER_SET: + continue + instruction_list_str = "" + constant_list_str = "" + type_list_str = "" + register_size_str = "" + operator_list_str = "" + for table_name, contents in bytecode.items(): + element = ByteCode[table_name] + body_string = "" + if element is ByteCode.instructions: + instruction_list_str = construct_instruction(contents) + elif element is ByteCode.constants: + constant_list_str = construct_constants(contents) + elif element is ByteCode.operators: + operator_list_str = construct_operators(contents) + elif element is ByteCode.types: + type_list_str = construct_types(contents) + elif element is ByteCode.register_size: + register_size_str = construct_register_size(contents) + + one_upgrader_function_string = ONE_UPGRADER_FUNCTION.substitute( + upgrader_name=upgrader_name, + instruction_list=instruction_list_str, + constant_list=constant_list_str, + type_list=type_list_str, + register_size=register_size_str, + ) + one_upgrader_src_string = ONE_UPGRADER_SRC.substitute( + 
bytecode_function=one_upgrader_function_string.lstrip("\n"), + operator_string_list=operator_list_str.lstrip("\n"), + ) + all_upgrader_src_string.append(one_upgrader_src_string) + + upgrader_file_content = UPGRADER_CPP_SRC.substitute( + operator_version_map=version_map_src, + upgrader_bytecode="".join(all_upgrader_src_string).lstrip("\n"), + ) + body_parts.append(upgrader_file_content) + print("writing file to : ", cpp_path + "/" + UPGRADER_MOBILE_FILE_NAME) + with open(os.path.join(cpp_path, UPGRADER_MOBILE_FILE_NAME), "wb") as out_file: + final_output = "".join(body_parts) + out_file.write(upgrader_file_content.encode("utf-8")) + + +def sort_upgrader(upgrader_list: List[Dict[str, Any]]) -> List[Dict[str, Any]]: + sorted_upgrader_list = sorted( + upgrader_list, key=lambda one_upgrader: next(iter(one_upgrader)) + ) + return sorted_upgrader_list + + +def main() -> None: + upgrader_list = generate_upgraders_bytecode() + sorted_upgrader_list = sort_upgrader(upgrader_list) + for up in sorted_upgrader_list: + print("after sort upgrader : ", next(iter(up))) + + pytorch_dir = Path(__file__).resolve().parents[2] + upgrader_path = pytorch_dir / "torch" / "csrc" / "jit" / "mobile" + write_cpp(str(upgrader_path), sorted_upgrader_list) + + +if __name__ == "__main__": + main() diff --git a/venv/lib/python3.10/site-packages/torchgen/operator_versions/gen_mobile_upgraders_constant.py b/venv/lib/python3.10/site-packages/torchgen/operator_versions/gen_mobile_upgraders_constant.py new file mode 100644 index 0000000000000000000000000000000000000000..04b5ad887e54153115eeca7b6686d7c2de8dfc06 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchgen/operator_versions/gen_mobile_upgraders_constant.py @@ -0,0 +1,7 @@ +MOBILE_UPGRADERS_HEADER_DESCRIPTION = """/** + * @generated + * This is an auto-generated file. Please do not modify it by hand. + * To re-generate, please run: + * cd ~/pytorch && python torchgen/operator_versions/gen_mobile_upgraders.py + */ +""" diff --git a/venv/lib/python3.10/site-packages/torchgen/packaged/ATen/native/native_functions.yaml b/venv/lib/python3.10/site-packages/torchgen/packaged/ATen/native/native_functions.yaml new file mode 100644 index 0000000000000000000000000000000000000000..dae8d951268975af65479c7479cca47f9e022e89 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchgen/packaged/ATen/native/native_functions.yaml @@ -0,0 +1,15514 @@ +# See README.md in this directory for more guidance + +# *********NB: _cast_* operators are DEPRECATED and will be removed +# eventually. These were previously used before TorchScript IR supported +# representing ScalarType's. They are now superseded by usage of +# `aten::to()`. The ops remain here for backward compatibility purposes. + +# DEPRECATED. DO NOT USE +- func: _cast_Byte(Tensor self, bool non_blocking=False) -> Tensor + variants: function + +# DEPRECATED. DO NOT USE +- func: _cast_Char(Tensor self, bool non_blocking=False) -> Tensor + variants: function + +# DEPRECATED. DO NOT USE +- func: _cast_Double(Tensor self, bool non_blocking=False) -> Tensor + variants: function + +# DEPRECATED. DO NOT USE +- func: _cast_Float(Tensor self, bool non_blocking=False) -> Tensor + variants: function + +# DEPRECATED. DO NOT USE +- func: _cast_Int(Tensor self, bool non_blocking=False) -> Tensor + variants: function + +# DEPRECATED. DO NOT USE +- func: _cast_Long(Tensor self, bool non_blocking=False) -> Tensor + variants: function + +# DEPRECATED. 
DO NOT USE +- func: _cast_Short(Tensor self, bool non_blocking=False) -> Tensor + variants: function + +# DEPRECATED. DO NOT USE +- func: _cast_Half(Tensor self, bool non_blocking=False) -> Tensor + variants: function + +# Computes the gradient of current tensor w.r.t. graph leaves. +- func: _backward(Tensor self, Tensor[] inputs, Tensor? gradient=None, bool? retain_graph=None, bool create_graph=False) -> () + manual_cpp_binding: True + variants: method + +# DEPRECATED. Sets the tensor data held by this `Variable` to be the same as +# `new_data`. It requires that `new_data` and `Variable` have compatible tensor +# type, by checking `_has_compatible_shallow_copy_type(this, new_data)`. +# +# This function is deprecated because it doesn't really make sense in a world +# where Variables *are* Tensors (as opposed to them containing tensors, which +# is what the previous interpretation was.) +- func: set_data(Tensor(a!) self, Tensor new_data) -> () + manual_cpp_binding: True + variants: method + +- func: data(Tensor self) -> Tensor + manual_cpp_binding: True + variants: method + +# True if this `Variable` is a leaf and thus does not have a `grad_fn`. +- func: is_leaf(Tensor self) -> bool + manual_cpp_binding: True + variants: method + +# Returns the output index of this variable from the forward operation that +# produced it. Conversely, it returns the input index of the gradient `Node` to +# which this `Variable` is connected (because in the gradient computation, +# inputs and outputs switch meaning). For example: +# +# y0, y1, y2 = f(x) +# assert y0.output_nr == 0 +# assert y1.output_nr == 1 +# assert y2.output_nr == 2 +# +- func: output_nr(Tensor self) -> int + manual_cpp_binding: True + variants: method + +- func: _version(Tensor self) -> int + manual_cpp_binding: True + variants: method + +- func: requires_grad_(Tensor(a!) self, bool requires_grad=True) -> Tensor(a!) + manual_cpp_binding: True + variants: method + +# Enables .grad attribute for non-leaf Tensors. +- func: retain_grad(Tensor(a!) self) -> () + manual_cpp_binding: True + variants: method + +- func: retains_grad(Tensor self) -> bool + manual_cpp_binding: True + variants: method + +- func: _fw_primal(Tensor(a) self, int level) -> Tensor(a) + variants: method + dispatch: + CompositeExplicitAutograd: _fw_primal + +- func: _make_dual(Tensor(a) primal, Tensor tangent, int level) -> Tensor(a) + variants: function + dispatch: + CompositeExplicitAutograd: _make_dual + +- func: _unpack_dual(Tensor(a) dual, int level) -> (Tensor(a) primal, Tensor tangent) + variants: function + +# NOTE: [_new_zeros_with_same_feature_meta] +# This function creates a new tensor with the layout and TensorOptions +# of `other` but also takes into account the batch dimensions of `self` +# +# This function has a couple extra constraints because it is also used for `jvp` +# in functorch. +# - is used for forward AD because there is the restriction +# that the primal and tangent must have the same layout +# - We cannot assume that `self` and `other` have the same sizes or even dim +# because in the inplace over view case, `other` is the base tensor, and +# `self` is the forward grad with respect to the view, which can have an +# entirely different shape +# - takes the number of batch dims for `self` because we also handle +# some batching logic. We handle that here instead of a batching rule because +# we'd like to avoid calling as_strided in the batching rule (as to enable +# nested vmap in functorch). 
+# - needs to be CompositeExplicitAutograd for jvp support in functorch. +# functorch currently relies on TensorWrapper which does not have storage +# CompositeExplicitAutograd makes sure the TensorWrapper is unwrapped. +# - this function may eventually take on another int argument to store the +# the number of batch dims for other once we support that use case +- func: _new_zeros_with_same_feature_meta(Tensor self, Tensor other, *, int self_num_batch_dims=0) -> Tensor + variants: function + dispatch: + CompositeExplicitAutograd: _new_zeros_with_same_feature_meta + autogen: _new_zeros_with_same_feature_meta.out + +# This function compares the storage numel of self with that of other, where +# storage numel is computed as: `other.storage().nbytes() / other.itemsize()`. +# We create this function for composite compliance purposes. The batching rule +# always returns true because vmapped as_strided does not support accessing +# storage locations not indexable by the input tensor. +# See the note above for more information. +- func: _has_same_storage_numel(Tensor self, Tensor other) -> bool + variants: function + dispatch: + CompositeExplicitAutograd: _has_same_storage_numel + +- func: rename_(Tensor(a!) self, Dimname[]? names) -> Tensor(a!) + variants: method + tags: inplace_view + +- func: rename(Tensor(a) self, Dimname[]? names) -> Tensor(a) + variants: method + +- func: align_to(Tensor(a) self, Dimname[] names) -> Tensor(a) + variants: method + +- func: align_to.ellipsis_idx(Tensor(a) self, Dimname[] order, int ellipsis_idx) -> Tensor(a) + variants: method + +- func: align_as(Tensor self, Tensor other) -> Tensor + variants: method + +- func: align_tensors(Tensor[] tensors) -> Tensor[] + +# Not assert because it's a keyword; not Assert because FX already +# took that syntax +# TODO: need to specify this is side-effectful somehow +- func: _assert_async(Tensor self) -> () + dispatch: + CPU: _assert_async_cpu + CUDA: _assert_async_cuda + +- func: _assert_async.msg(Tensor self, str assert_msg) -> () + dispatch: + CPU: _assert_async_msg_cpu + CUDA: _assert_async_msg_cuda + +- func: _assert_scalar(Scalar self, str assert_msg) -> () + dispatch: + CompositeExplicitAutograd: _assert_scalar + +- func: _functional_assert_scalar(Scalar self, str assert_msg, Tensor dep_token) -> Tensor + dispatch: + CompositeExplicitAutograd: _functional_assert_scalar + +- func: _functional_assert_async.msg(Tensor self, str assert_msg, Tensor dep_token) -> Tensor + dispatch: + CPU: _functional_assert_async_msg_cpu + +- func: _assert_tensor_metadata(Tensor a, SymInt[]? size=None, SymInt[]? stride=None, ScalarType? dtype=None) -> () + +- func: _print(str s) -> () + dispatch: + CompositeExplicitAutograd: _print + +- func: sym_constrain_range(Scalar size, *, int? min=None, int? max=None) -> () + dispatch: + CompositeExplicitAutograd: sym_constrain_range + +- func: sym_constrain_range_for_size(Scalar size, *, int? min=None, int? max=None) -> () + dispatch: + CompositeExplicitAutograd: sym_constrain_range_for_size + +- func: _functional_sym_constrain_range(Scalar size, int? min, int? max, Tensor dep_token) -> Tensor + dispatch: + CompositeExplicitAutograd: _functional_sym_constrain_range + +- func: _functional_sym_constrain_range_for_size(Scalar size, int? min, int? max, Tensor dep_token) -> Tensor + dispatch: + CompositeExplicitAutograd: _functional_sym_constrain_range_for_size + +- func: _make_dep_token(*, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? 
memory_format=None) -> Tensor + dispatch: + CPU: _make_dep_token_cpu + +- func: refine_names(Tensor(a) self, Dimname[] names) -> Tensor(a) + variants: method + +- func: _use_cudnn_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank) -> bool + device_check: NoCheck # Tensor arguments allowed to be on different devices, see also _cudnn_ctc_loss + dispatch: + CUDA: _use_cudnn_ctc_loss + +- func: _use_cudnn_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank) -> bool + device_check: NoCheck # Tensor arguments allowed to be on different devices, see also _cudnn_ctc_loss + dispatch: + CUDA: _use_cudnn_ctc_loss_tensor + +- func: _cudnn_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank, bool deterministic, bool zero_infinity) -> (Tensor, Tensor) + device_check: NoCheck # log_probs is expected to be on CUDA while targets is expected to be on CPU + dispatch: + CUDA: _cudnn_ctc_loss + autogen: _cudnn_ctc_loss.out + +- func: _cudnn_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank, bool deterministic, bool zero_infinity) -> (Tensor, Tensor) + device_check: NoCheck # log_probs is expected to be on CUDA while targets is expected to be on CPU + dispatch: + CUDA: _cudnn_ctc_loss_tensor + +- func: _use_cudnn_rnn_flatten_weight() -> bool + +- func: _cudnn_rnn_flatten_weight(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional) -> Tensor + dispatch: + CUDA: _cudnn_rnn_flatten_weight + autogen: _cudnn_rnn_flatten_weight.out + +- func: _cudnn_rnn(Tensor input, Tensor[] weight, int weight_stride0, Tensor? weight_buf, Tensor hx, Tensor? cx, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state) -> (Tensor, Tensor, Tensor, Tensor, Tensor) + # rnn_tanh may or may not redispatch to _cudnn_rnn based on algorithm and build. Thus it might hit dispatch or kernel device check. + # Disable dispatch time device check for consistent behavior. + device_check: NoCheck + dispatch: + CUDA: _cudnn_rnn + autogen: _cudnn_rnn.out + tags: nondeterministic_seeded + +- func: _cudnn_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[]) + dispatch: + CUDA: _cudnn_rnn_backward + autogen: _cudnn_rnn_backward.out + +- func: _cudnn_init_dropout_state(float dropout, bool train, int dropout_seed, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + dispatch: + CUDA: _cudnn_init_dropout_state + autogen: _cudnn_init_dropout_state.out + tags: nondeterministic_seeded + +- func: _debug_has_internal_overlap(Tensor self) -> int + variants: function + +- func: _fused_dropout(Tensor self, float p, Generator? 
generator=None) -> (Tensor, Tensor) + variants: function + dispatch: + CUDA: fused_dropout_cuda + tags: nondeterministic_seeded + autogen: _fused_dropout.out + +- func: _masked_scale(Tensor self, Tensor mask, float scale) -> Tensor + variants: function + dispatch: + CUDA: masked_scale_cuda + autogen: _masked_scale.out + +- func: native_dropout(Tensor input, float p, bool? train) -> (Tensor, Tensor) + variants: function + dispatch: + CPU: native_dropout_cpu + CUDA: native_dropout_cuda + NestedTensorCPU, NestedTensorCUDA: native_dropout_nested + tags: [nondeterministic_seeded, core] + autogen: native_dropout.out + +- func: native_dropout_backward(Tensor grad_output, Tensor mask, float scale) -> Tensor + dispatch: + CPU, NestedTensorCPU, NestedTensorCUDA: native_dropout_backward + CUDA: native_dropout_backward_cuda + autogen: native_dropout_backward.out + tags: pointwise + +- func: _sobol_engine_draw(Tensor quasi, int n, Tensor sobolstate, int dimension, int num_generated, ScalarType? dtype) -> (Tensor, Tensor) + +- func: _sobol_engine_ff_(Tensor(a!) self, int n, Tensor sobolstate, int dimension, int num_generated) -> Tensor(a!) + +- func: _sobol_engine_scramble_(Tensor(a!) self, Tensor ltm, int dimension) -> Tensor(a!) + +- func: _sobol_engine_initialize_state_(Tensor(a!) self, int dimension) -> Tensor(a!) + +- func: _reshape_from_tensor(Tensor self, Tensor shape) -> Tensor + +- func: _shape_as_tensor(Tensor self) -> Tensor + +- func: dropout(Tensor input, float p, bool train) -> Tensor + tags: nondeterministic_seeded + +- func: dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!) + tags: nondeterministic_seeded + +- func: feature_dropout(Tensor input, float p, bool train) -> Tensor + tags: nondeterministic_seeded + +- func: feature_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!) + tags: nondeterministic_seeded + +- func: alpha_dropout(Tensor input, float p, bool train) -> Tensor + tags: nondeterministic_seeded + +- func: alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!) + tags: nondeterministic_seeded + +- func: feature_alpha_dropout(Tensor input, float p, bool train) -> Tensor + tags: nondeterministic_seeded + +- func: feature_alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!) + tags: nondeterministic_seeded + +- func: abs(Tensor self) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + dispatch: + CompositeExplicitAutograd: abs + SparseCPU, SparseCUDA: abs_sparse + SparseCsrCPU, SparseCsrCUDA: abs_sparse_csr + NestedTensorCPU, NestedTensorCUDA: NestedTensor_abs + tags: [core, pointwise] + +- func: abs_(Tensor(a!) self) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: function, method + dispatch: + CompositeExplicitAutograd: abs_ + SparseCPU, SparseCUDA: abs_sparse_ + SparseCsrCPU, SparseCsrCUDA: abs_sparse_csr_ + NestedTensorCPU, NestedTensorCUDA: NestedTensor_abs_ + +- func: abs.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + dispatch: + CPU, CUDA: abs_out + MPS: abs_out_mps + SparseCPU, SparseCUDA: abs_sparse_out + SparseCsrCPU, SparseCsrCUDA: abs_sparse_csr_out + tags: pointwise + +# Note [Adding an alias] +# To add an alias do the following: +# +# 1) Copy the original functions native_functions.yaml entry, but replace the +# original function's name with their own and delete any dispatch +# keys for the aliases. 
Specifying a dispatch key will prevent +# autograd from recording the operations the alias performs, which +# will stop it from "inheriting" the original operation's autograd behavior. +# 2) Implement the corresponding functions and have them redispatch to the +# original function. +# 3) Add docstrings to the new function that reference the original function, +# and document the method as usual (if it exists.) +# (See torch/_torch_docs.py and docs/source/torch.rst if adding a function, +# torch/_tensor_docs.py and docs/source/tensors.rst if adding a method, +# or module-specific doc bindings (like torch/linalg/__init__.py) if +# adding an alias in a namespace.) +# 4) Update torch/overrides.py consistent with the original function. +# 5) Update the alias_map in torch/csrc/jit/passes/normalize_ops.cpp. +# 6) Add aliases argument to existing OpInfo/UnaryUfuncInfo or create new OpInfo/UnaryUfuncInfo entry +# in op_db list in torch/testing/_internal/common_methods_invocations.py +# +# See torch.absolute, an alias for torch.abs, as an example. +# Absolute, alias for abs + +- func: absolute(Tensor self) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + +- func: absolute_(Tensor(a!) self) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: method + +- func: absolute.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + +- func: angle(Tensor self) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + dispatch: + CPU, CUDA: angle + SparseCsrCPU, SparseCsrCUDA: angle_sparse_csr + tags: pointwise + +- func: angle.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + dispatch: + CPU, CUDA: angle_out + SparseCsrCPU, SparseCsrCUDA: angle_sparse_csr_out + tags: pointwise + +- func: view_as_real(Tensor(a) self) -> Tensor(a) + variants: function + dispatch: + CPU, CUDA, MPS, Meta: view_as_real + +- func: view_as_complex(Tensor(a) self) -> Tensor(a) + variants: function + dispatch: + CPU, CUDA, MPS, Meta: view_as_complex + +- func: sgn(Tensor self) -> Tensor + variants: function, method + structured_delegate: sgn.out + dispatch: + SparseCPU, SparseCUDA: sgn_sparse + SparseCsrCPU, SparseCsrCUDA: sgn_sparse_csr + NestedTensorCPU, NestedTensorCUDA: NestedTensor_sgn + tags: pointwise + +- func: sgn_(Tensor(a!) self) -> Tensor(a!) + variants: method + structured_delegate: sgn.out + dispatch: + SparseCPU, SparseCUDA: sgn_sparse_ + SparseCsrCPU, SparseCsrCUDA: sgn_sparse_csr_ + NestedTensorCPU, NestedTensorCUDA: NestedTensor_sgn_ + tags: pointwise + +- func: sgn.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: sgn_out + MPS: sgn_out_mps + SparseCPU, SparseCUDA: sgn_sparse_out + SparseCsrCPU, SparseCsrCUDA: sgn_sparse_csr_out + tags: pointwise + +- func: chalf(Tensor self, *, MemoryFormat? 
memory_format=None) -> Tensor + variants: method + +- func: real(Tensor(a) self) -> Tensor(a) + device_check: NoCheck # TensorIterator + variants: function + +- func: imag(Tensor(a) self) -> Tensor(a) + device_check: NoCheck # TensorIterator + variants: function + +- func: _conj(Tensor(a) self) -> Tensor(a) + variants: function, method + dispatch: + CompositeExplicitAutograd: _conj + +- func: conj(Tensor(a) self) -> Tensor(a) + variants: function, method + manual_cpp_binding: True + +- func: _conj_physical(Tensor self) -> Tensor + variants: function, method + dispatch: + CompositeExplicitAutograd: _conj_physical + SparseCsrCPU, SparseCsrCUDA: conj_physical_sparse_csr + autogen: _conj_physical.out + +- func: conj_physical(Tensor self) -> Tensor + variants: function, method + tags: pointwise + +- func: conj_physical.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CPU, CUDA: conj_physical_out + MPS: conj_physical_out_mps + SparseCPU, SparseCUDA: conj_physical_out_sparse + SparseCsrCPU, SparseCsrCUDA: conj_physical_sparse_csr_out + tags: pointwise + +- func: conj_physical_(Tensor(a!) self) -> Tensor(a!) + variants: function, method + dispatch: + CompositeExplicitAutograd: conj_physical_ + SparseCsrCPU, SparseCsrCUDA: conj_physical_sparse_csr_ + tags: pointwise + +- func: resolve_conj(Tensor(a) self) -> Tensor(a) + variants: function, method + +- func: resolve_neg(Tensor(a) self) -> Tensor(a) + variants: function, method + +- func: _neg_view(Tensor(a) self) -> Tensor(a) + variants: function, method + dispatch: + CompositeExplicitAutograd: _neg_view + +- func: acos(Tensor self) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + structured_delegate: acos.out + tags: [core, pointwise] + +- func: acos_(Tensor(a!) self) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: function, method + structured_delegate: acos.out + tags: pointwise + +- func: acos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: acos_out + MPS: acos_out_mps + tags: pointwise + +# arccos, alias of acos +- func: arccos(Tensor self) -> Tensor + variants: function, method + +- func: arccos_(Tensor(a!) self) -> Tensor(a!) + variants: function, method + +- func: arccos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + +- func: avg_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, bool ceil_mode=False, bool count_include_pad=True) -> Tensor + tags: core + +- func: adaptive_avg_pool1d(Tensor self, int[1] output_size) -> Tensor + tags: core + +# Return: (Tensor output, Tensor indices) +- func: adaptive_max_pool1d(Tensor self, int[1] output_size) -> (Tensor, Tensor) + +- func: add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor + device_check: NoCheck # TensorIterator + structured_delegate: add.out + variants: function, method + dispatch: + SparseCPU, SparseCUDA: add_sparse + SparseCsrCPU, SparseCsrCUDA: add_sparse_csr + MkldnnCPU: mkldnn_add + ZeroTensor: add_zerotensor + NestedTensorCPU, NestedTensorCUDA: NestedTensor_add_Tensor + tags: [core, pointwise] + +- func: add_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!) 
+ device_check: NoCheck # TensorIterator + variants: method + structured_delegate: add.out + dispatch: + SparseCPU, SparseCUDA: add_sparse_ + SparseCsrCPU, SparseCsrCUDA: add_sparse_csr_ + MkldnnCPU: mkldnn_add_ + NestedTensorCPU, NestedTensorCUDA: NestedTensor_add__Tensor + tags: pointwise + +- func: add.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured: True + structured_inherits: TensorIteratorBase + ufunc_inner_loop: + Generic: add (AllAndComplex, BFloat16, Half, ComplexHalf) + ScalarOnly: add (Bool) + dispatch: + SparseCPU: add_out_sparse_cpu + SparseCUDA: add_out_sparse_cuda + SparseCsrCPU: add_out_sparse_compressed_cpu + SparseCsrCUDA: add_out_sparse_compressed_cuda + MkldnnCPU: mkldnn_add_out + MPS: add_out_mps + tags: pointwise + +- func: _add_relu.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor + variants: function + dispatch: + CPU: add_relu + +- func: _add_relu_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!) + variants: function + dispatch: + CPU: add_relu_ + +- func: _add_relu.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) + variants: function + dispatch: + CPU: add_relu_out + +- func: _add_relu.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor + variants: function + dispatch: + CPU: add_relu + +- func: _add_relu_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!) + variants: function + dispatch: + CPU: add_relu_ + autogen: _add_relu.Scalar_out + +# For C++ only, until we have conversion from C++ numbers to Tensor +- func: add.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + dispatch: + CompositeExplicitAutograd: add + tags: [core, pointwise] + +- func: add_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: method + dispatch: + CompositeExplicitAutograd: add_ + autogen: add.Scalar_out + tags: pointwise + +- func: addmv(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor + structured_delegate: addmv.out + variants: function, method + +- func: addmv_(Tensor(a!) self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!) + structured_delegate: addmv.out + variants: function, method + +- func: addmv.out(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) + structured: True + dispatch: + CPU: addmv_out_cpu + CUDA: addmv_out_cuda + MPS: addmv_out_mps + SparseCsrCPU: addmv_out_sparse_compressed + SparseCsrCUDA: addmv_out_sparse_compressed_cuda + +- func: addr(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor + variants: function, method + dispatch: + CPU, CUDA: addr + MPS: addr_mps + CompositeExplicitAutograd: math_addr + +- func: addr_(Tensor(a!) self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!) + variants: method + dispatch: + CompositeExplicitAutograd: addr_ + +- func: addr.out(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) 
+ dispatch: + CPU, CUDA: addr_out + MPS: addr_out_mps + CompositeExplicitAutograd: math_addr_out + +- func: affine_grid_generator(Tensor theta, SymInt[] size, bool align_corners) -> Tensor + variants: function + dispatch: + CompositeExplicitAutograd: affine_grid_generator + autogen: affine_grid_generator.out + +- func: affine_grid_generator_backward(Tensor grad, SymInt[] size, bool align_corners) -> Tensor + variants: function + +- func: _is_all_true(Tensor self) -> Tensor + variants: function, method + dispatch: + CompositeExplicitAutograd: _is_all_true + +- func: _is_any_true(Tensor self) -> Tensor + variants: function, method + dispatch: + CompositeExplicitAutograd: _is_any_true + +# Note: this function is only for testing. +- func: _test_check_tensor(Tensor self) -> Tensor + variants: function + +# Note; this function is only for testing +- func: _test_functorch_fallback(Tensor self, Tensor other) -> Tensor + variants: function + dispatch: + CPU: _test_functorch_fallback + autogen: _test_functorch_fallback.out + +- func: all.dim(Tensor self, int dim, bool keepdim=False) -> Tensor + device_check: NoCheck # TensorIterator + structured_delegate: all.out + variants: function, method + +- func: all.dims(Tensor self, int[]? dim=None, bool keepdim=False) -> Tensor + device_check: NoCheck # TensorIterator + structured_delegate: all.dims_out + variants: function, method + cpp_no_default_args: ['dim'] + dispatch: + CompositeExplicitAutograd: all_dims_default + +- func: all.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured: True + dispatch: + CPU, CUDA: all_out + MPS: all_out_mps + +- func: all.dims_out(Tensor self, int[]? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured: True + dispatch: + CPU, CUDA: all_dims_out + CompositeExplicitAutograd: all_dims_out_default + cpp_no_default_args: ['dim'] + +- func: all.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + +- func: all.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + +- func: allclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> bool + variants: function, method + tags: data_dependent_output + dispatch: + CompositeExplicitAutograd: allclose + +- func: any.dim(Tensor self, int dim, bool keepdim=False) -> Tensor + device_check: NoCheck # TensorIterator + structured_delegate: any.out + variants: function, method + tags: core + +- func: any.dims(Tensor self, int[]? dim=None, bool keepdim=False) -> Tensor + device_check: NoCheck # TensorIterator + structured_delegate: any.dims_out + variants: function, method + cpp_no_default_args: ['dim'] + tags: core + dispatch: + CompositeExplicitAutograd: any_dims_default + +- func: any.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured: True + dispatch: + CPU, CUDA: any_out + MPS: any_out_mps + +- func: any.dims_out(Tensor self, int[]? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) 
+ device_check: NoCheck # TensorIterator + structured: True + dispatch: + CPU, CUDA: any_dims_out + CompositeExplicitAutograd: any_dims_out_default + cpp_no_default_args: ['dim'] + +- func: any.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + +- func: any.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + +- func: arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + dispatch: + CompositeExplicitAutograd: arange + +- func: arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + dispatch: + CompositeExplicitAutograd: arange + +# This operator should be named `arange.start_out` if following the naming convention. However that +# name is already taken. Disabled because of CI job failures. +# FIXME: enable this +#- func: arange.start_out_(Scalar start, Scalar end, *, Tensor(a!) out) -> Tensor(a!) +# dispatch: +# CompositeExplicitAutograd: arange_start_out + +- func: arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + dispatch: + CompositeExplicitAutograd: arange + cpp_no_default_args: ['step'] + tags: core + +- func: arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CompositeExplicitAutograd: arange_out + +- func: arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CPU, Meta: arange_out + CUDA: arange_cuda_out + MPS: arange_mps_out + cpp_no_default_args: ['step'] + +# This function is a temporary hack to allow tracing of arange like constructs with dynamic +# bounds on arange. Normal arange is not traceable because it does not take any tensor inputs; +# if the range you need is based on another tensor, calling this function directly will +# preserve tracing. Get rid of this when arange can directly take tensors for bounds +# (so that it can be traced directly). +- func: _dim_arange(Tensor like, int dim) -> Tensor + +- func: argmax(Tensor self, int? dim=None, bool keepdim=False) -> Tensor + structured_delegate: argmax.out + device_check: NoCheck # TensorIterator + variants: function, method + tags: core + +- func: argmax.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + structured: True + dispatch: + CPU, CUDA: argmax_out + MPS: argmax_out_mps + +- func: argmin(Tensor self, int? dim=None, bool keepdim=False) -> Tensor + structured_delegate: argmin.out + device_check: NoCheck # TensorIterator + variants: function, method + tags: core + +- func: argmin.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + structured: True + dispatch: + CPU, CUDA: argmin_out + MPS: argmin_out_mps + +- func: acosh(Tensor self) -> Tensor + variants: function, method + structured_delegate: acosh.out + tags: [core, pointwise] + +- func: acosh_(Tensor(a!) self) -> Tensor(a!) + variants: function, method + structured_delegate: acosh.out + tags: pointwise + +- func: acosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+ structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: acosh_out + MPS: acosh_out_mps + tags: pointwise +# arccosh, alias for acosh + +- func: arccosh(Tensor self) -> Tensor + variants: function, method + +- func: arccosh_(Tensor(a!) self) -> Tensor(a!) + variants: function, method + +- func: arccosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + +- func: asinh(Tensor self) -> Tensor + variants: function, method + structured_delegate: asinh.out + dispatch: + SparseCPU, SparseCUDA: asinh_sparse + SparseCsrCPU, SparseCsrCUDA: asinh_sparse_csr + tags: [core, pointwise] + +- func: asinh_(Tensor(a!) self) -> Tensor(a!) + variants: function, method + structured_delegate: asinh.out + dispatch: + SparseCPU, SparseCUDA: asinh_sparse_ + SparseCsrCPU, SparseCsrCUDA: asinh_sparse_csr_ + tags: pointwise + +- func: asinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: asinh_out + MPS: asinh_out_mps + SparseCPU, SparseCUDA: asinh_sparse_out + SparseCsrCPU, SparseCsrCUDA: asinh_sparse_csr_out + tags: pointwise + +# arcsinh, alias for asinh +- func: arcsinh(Tensor self) -> Tensor + variants: function, method + +- func: arcsinh_(Tensor(a!) self) -> Tensor(a!) + variants: function, method + +- func: arcsinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + +- func: atanh(Tensor self) -> Tensor + structured_delegate: atanh.out + variants: function, method + dispatch: + SparseCPU, SparseCUDA: atanh_sparse + SparseCsrCPU, SparseCsrCUDA: atanh_sparse_csr + tags: [core, pointwise] + +- func: atanh_(Tensor(a!) self) -> Tensor(a!) + structured_delegate: atanh.out + variants: function, method + dispatch: + SparseCPU, SparseCUDA: atanh_sparse_ + SparseCsrCPU, SparseCsrCUDA: atanh_sparse_csr_ + tags: pointwise + +- func: atanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: atanh_out + MPS: atanh_out_mps + SparseCPU, SparseCUDA: atanh_sparse_out + SparseCsrCPU, SparseCsrCUDA: atanh_sparse_csr_out + tags: pointwise +# arctanh, alias for atanh + +- func: arctanh(Tensor self) -> Tensor + variants: function, method + +- func: arctanh_(Tensor(a!) self) -> Tensor(a!) + variants: function, method + +- func: arctanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + +- func: as_strided(Tensor(a) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a) + variants: function, method + dispatch: + ZeroTensor, CPU, CUDA: as_strided_tensorimpl + Meta: as_strided_tensorimpl_meta_symint + MPS: as_strided_tensorimpl_mps + QuantizedCPU, QuantizedCUDA: as_strided_qtensorimpl + device_check: NoCheck + device_guard: False + tags: core + +- func: as_strided_(Tensor(a!) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a!) + use_const_ref_for_mutable_tensors: True + variants: function, method + device_check: NoCheck + device_guard: False + tags: inplace_view + dispatch: + CompositeExplicitAutogradNonFunctional: as_strided__symint + +- func: asin(Tensor self) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + structured_delegate: asin.out + dispatch: + SparseCPU, SparseCUDA: asin_sparse + SparseCsrCPU, SparseCsrCUDA: asin_sparse_csr + tags: [core, pointwise] + +- func: asin_(Tensor(a!) self) -> Tensor(a!) 
+ device_check: NoCheck # TensorIterator + variants: function, method + structured_delegate: asin.out + dispatch: + SparseCPU, SparseCUDA: asin_sparse_ + SparseCsrCPU, SparseCsrCUDA: asin_sparse_csr_ + tags: pointwise + +- func: asin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: asin_out + MPS: asin_out_mps + SparseCPU, SparseCUDA: asin_sparse_out + SparseCsrCPU, SparseCsrCUDA: asin_sparse_csr_out + tags: pointwise + +# arcsin, alias of asin +- func: arcsin(Tensor self) -> Tensor + variants: function, method + +- func: arcsin_(Tensor(a!) self) -> Tensor(a!) + variants: function, method + +- func: arcsin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + +- func: atan(Tensor self) -> Tensor + device_check: NoCheck # TensorIterator + structured_delegate: atan.out + variants: function, method + dispatch: + SparseCPU, SparseCUDA: atan_sparse + SparseCsrCPU, SparseCsrCUDA: atan_sparse_csr + tags: [core, pointwise] + +- func: atan_(Tensor(a!) self) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured_delegate: atan.out + variants: function, method + dispatch: + SparseCPU, SparseCUDA: atan_sparse_ + SparseCsrCPU, SparseCsrCUDA: atan_sparse_csr_ + tags: pointwise + +- func: atan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: atan_out + MPS: atan_out_mps + SparseCPU, SparseCUDA: atan_sparse_out + SparseCsrCPU, SparseCsrCUDA: atan_sparse_csr_out + tags: pointwise + +# arctan, alias of atan +- func: arctan(Tensor self) -> Tensor + variants: function, method + +- func: arctan_(Tensor(a!) self) -> Tensor(a!) + variants: function, method + +- func: arctan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + +- func: atleast_1d(Tensor self) -> Tensor + variants: function + +- func: atleast_1d.Sequence(Tensor[] tensors) -> Tensor[] + +- func: atleast_2d(Tensor self) -> Tensor + variants: function + +- func: atleast_2d.Sequence(Tensor[] tensors) -> Tensor[] + variants: function + +- func: atleast_3d(Tensor self) -> Tensor + variants: function + +- func: atleast_3d.Sequence(Tensor[] tensors) -> Tensor[] + variants: function + +- func: baddbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor + variants: function, method + structured_delegate: baddbmm.out + +- func: baddbmm_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!) + variants: method + structured_delegate: baddbmm.out + +- func: baddbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) + structured: True + variants: function + dispatch: + CPU: baddbmm_out_cpu + CUDA: baddbmm_out_cuda + MPS: baddbmm_out_mps + SparseCsrCUDA: baddbmm_out_sparse_csr_cuda + +- func: bartlett_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + dispatch: + CompositeExplicitAutograd: bartlett_window + autogen: bartlett_window.out + +- func: bartlett_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + dispatch: + CompositeExplicitAutograd: bartlett_window + autogen: bartlett_window.periodic_out + +- func: batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? 
running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> Tensor + +- func: quantized_batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point) -> Tensor + dispatch: + QuantizedCPU: quantized_batch_norm + autogen: quantized_batch_norm.out + +- func: _batch_norm_impl_index(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> (Tensor, Tensor, Tensor, Tensor, int) + +- func: _batch_norm_impl_index_backward(int impl_index, Tensor input, Tensor grad_output, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var_transform, bool train, float eps, bool[3] output_mask, Tensor reservedSpace) -> (Tensor, Tensor, Tensor) + +# Sample bernoulli with values in `self` as probability. +- func: bernoulli(Tensor self, *, Generator? generator=None) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + dispatch: + CompositeExplicitAutograd: bernoulli + tags: nondeterministic_seeded + +- func: bernoulli.out(Tensor self, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: function + tags: nondeterministic_seeded + dispatch: + CPU, CUDA: bernoulli_out + MPS: bernoulli_out_mps + +- func: bernoulli_.Tensor(Tensor(a!) self, Tensor p, *, Generator? generator=None) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: method + tags: nondeterministic_seeded + dispatch: + CPU, CUDA: bernoulli_ + MPS: bernoulli_mps_ + autogen: bernoulli.Tensor, bernoulli.Tensor_out + +- func: bernoulli_.float(Tensor(a!) self, float p=0.5, *, Generator? generator=None) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: method + tags: nondeterministic_seeded + dispatch: + CPU, CUDA: bernoulli_ + MPS: bernoulli_mps_ + autogen: bernoulli.float_out + +# Note [bernoulli.p schema] +# We should probably just fix the overload ambiguity by appending a _functional to the C++ API name (BC breaking) +# This out-of-place version isn't used explicitly, but needed by jit. +# There is no default valid on `p` here because it would introduce ambiguity +# with `bernoulli(Tensor self, *, Generator? generator=None)` declaration. +- func: bernoulli.p(Tensor self, float p, *, Generator? generator=None) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + tags: nondeterministic_seeded + dispatch: + CompositeExplicitAutogradNonFunctional: bernoulli + +- func: bilinear(Tensor input1, Tensor input2, Tensor weight, Tensor? bias=None) -> Tensor + +- func: binary_cross_entropy(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean) -> Tensor + device_check: NoCheck # TensorIterator + python_module: nn + variants: function + dispatch: + CPU: binary_cross_entropy_cpu + CUDA: binary_cross_entropy_cuda + MPS: binary_cross_entropy_mps + +- func: binary_cross_entropy.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + python_module: nn + variants: function + dispatch: + CPU: binary_cross_entropy_out_cpu + CUDA: binary_cross_entropy_out_cuda + MPS: binary_cross_entropy_out_mps + +- func: binary_cross_entropy_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? 
weight=None, int reduction=Mean) -> Tensor + python_module: nn + variants: function + dispatch: + CPU: binary_cross_entropy_backward_cpu + CUDA: binary_cross_entropy_backward_cuda + MPS: binary_cross_entropy_backward_mps + +- func: binary_cross_entropy_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!) + python_module: nn + variants: function + dispatch: + CPU: binary_cross_entropy_backward_out_cpu + CUDA: binary_cross_entropy_backward_out_cuda + MPS: binary_cross_entropy_backward_out_mps + +- func: binary_cross_entropy_with_logits(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=Mean) -> Tensor + device_check: NoCheck # TensorIterator + variants: function + dispatch: + CompositeExplicitAutograd: binary_cross_entropy_with_logits + autogen: binary_cross_entropy_with_logits.out + +- func: bincount(Tensor self, Tensor? weights=None, int minlength=0) -> Tensor + variants: function, method + dispatch: + CPU: _bincount_cpu + CUDA: _bincount_cuda + MPS: _bincount_mps + tags: dynamic_output_shape + autogen: bincount.out + +- func: bitwise_not(Tensor self) -> Tensor + device_check: NoCheck # TensorIterator + structured_delegate: bitwise_not.out + variants: function, method + tags: [core, pointwise] + +- func: bitwise_not_(Tensor(a!) self) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured_delegate: bitwise_not.out + variants: method + tags: pointwise + +- func: bitwise_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: bitwise_not_out + MPS: bitwise_not_out_mps + tags: pointwise + +- func: copysign.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA, MPS: copysign_out + tags: pointwise + +- func: copysign.Tensor(Tensor self, Tensor other) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + structured_delegate: copysign.out + tags: pointwise + +- func: copysign_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: method + structured_delegate: copysign.out + +- func: copysign.Scalar(Tensor self, Scalar other) -> Tensor + variants: function, method + dispatch: + CompositeExplicitAutograd: copysign + tags: pointwise + +- func: copysign_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + variants: method + dispatch: + CompositeExplicitAutograd: copysign_ + +- func: copysign.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CompositeExplicitAutograd: copysign_out + tags: pointwise + +- func: _lazy_clone(Tensor self) -> Tensor + # Like clone, but the copy takes place lazily, only if either the + # input or the output are written. + variants: function, method + dispatch: + CompositeExplicitAutograd: _lazy_clone + +- func: logical_not(Tensor self) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + dispatch: + CompositeExplicitAutograd: logical_not + NestedTensorCPU, NestedTensorCUDA: NestedTensor_logical_not + tags: [core, pointwise] + +- func: logical_not_(Tensor(a!) self) -> Tensor(a!) 
+ device_check: NoCheck # TensorIterator + variants: method + dispatch: + CompositeExplicitAutograd: logical_not_ + NestedTensorCPU, NestedTensorCUDA: NestedTensor_logical_not_ + tags: pointwise + +- func: logical_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + dispatch: + CPU, CUDA: logical_not_out + MPS: logical_not_out_mps + tags: pointwise + +- func: logical_xor(Tensor self, Tensor other) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + dispatch: + CompositeExplicitAutograd: logical_xor + tags: [core, pointwise] + +- func: logical_xor_(Tensor(a!) self, Tensor other) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: method + dispatch: + CompositeExplicitAutograd: logical_xor_ + tags: pointwise + +- func: logical_xor.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + dispatch: + CPU, CUDA: logical_xor_out + MPS: logical_xor_out_mps + tags: pointwise + +- func: logical_and(Tensor self, Tensor other) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + dispatch: + CompositeExplicitAutograd: logical_and + tags: [core, pointwise] + +- func: logical_and_(Tensor(a!) self, Tensor other) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: method + dispatch: + CompositeExplicitAutograd: logical_and_ + tags: pointwise + +- func: logical_and.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + dispatch: + CPU, CUDA: logical_and_out + MPS: logical_and_out_mps + tags: pointwise + +- func: logical_or(Tensor self, Tensor other) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + dispatch: + CompositeExplicitAutograd: logical_or + tags: [core, pointwise] + +- func: logical_or_(Tensor(a!) self, Tensor other) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: method + dispatch: + CompositeExplicitAutograd: logical_or_ + tags: pointwise + +- func: logical_or.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + dispatch: + CPU, CUDA: logical_or_out + MPS: logical_or_out_mps + tags: pointwise + +- func: blackman_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + dispatch: + CompositeExplicitAutograd: blackman_window + autogen: blackman_window.out + +- func: blackman_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + dispatch: + CompositeExplicitAutograd: blackman_window + autogen: blackman_window.periodic_out + +- func: bmm(Tensor self, Tensor mat2) -> Tensor + structured_delegate: bmm.out + variants: function, method + dispatch: + SparseCPU: bmm_sparse_cpu + SparseCUDA: bmm_sparse_cuda + NestedTensorCPU: bmm_nested + NestedTensorCUDA: bmm_nested_cuda + tags: core + +- func: bmm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!) 
+ structured: True + variants: function + dispatch: + CPU: bmm_out_cpu + CUDA: bmm_out_cuda + MPS: bmm_out_mps + SparseCPU: bmm_out_sparse_cpu + SparseCUDA: bmm_out_sparse_cuda + SparseCsrCUDA: bmm_out_sparse_csr_cuda + +- func: broadcast_tensors(Tensor[] tensors) -> Tensor[] + device_check: NoCheck + device_guard: False + +- func: broadcast_to(Tensor(a) self, SymInt[] size) -> Tensor(a) + variants: function, method + dispatch: + CompositeImplicitAutograd: broadcast_to_symint + +- func: _sparse_broadcast_to(Tensor(a) self, int[] size) -> Tensor(a) + variants: function + dispatch: + SparseCPU, SparseCUDA: sparse_broadcast_to + +- func: cat(Tensor[] tensors, int dim=0) -> Tensor + structured_delegate: cat.out + dispatch: + SparseCPU, SparseCUDA: cat_sparse + QuantizedCPU: cat_quantized_cpu + NestedTensorCPU, NestedTensorCUDA: cat_nested + tags: core + +- func: cat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) + structured: True + precomputed: + - dim -> int dim, int valid, bool all_contiguous, bool all_same_dtype, bool all_same_sizes_and_stride, MemoryFormat memory_format + dispatch: + CPU: cat_out_cpu + CUDA: cat_out_cuda + MPS: cat_out_mps + QuantizedCPU: cat_out_quantized_cpu + +- func: cat.names(Tensor[] tensors, Dimname dim) -> Tensor + +- func: cat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!) + +# alias for torch.cat +- func: concat(Tensor[] tensors, int dim=0) -> Tensor + +- func: concat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) + +- func: concat.names(Tensor[] tensors, Dimname dim) -> Tensor + +- func: concat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!) + +# alias for torch.cat +- func: concatenate(Tensor[] tensors, int dim=0) -> Tensor + +- func: concatenate.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) + +- func: concatenate.names(Tensor[] tensors, Dimname dim) -> Tensor + +- func: concatenate.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!) + +- func: block_diag(Tensor[] tensors) -> Tensor + variants: function + dispatch: + CompositeExplicitAutograd: block_diag + autogen: block_diag.out + +- func: ceil(Tensor self) -> Tensor + device_check: NoCheck # TensorIterator + structured_delegate: ceil.out + variants: function, method + dispatch: + SparseCPU, SparseCUDA: ceil_sparse + SparseCsrCPU, SparseCsrCUDA: ceil_sparse_csr + tags: [core, pointwise] + +- func: ceil_(Tensor(a!) self) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured_delegate: ceil.out + variants: function, method + dispatch: + SparseCPU, SparseCUDA: ceil_sparse_ + SparseCsrCPU, SparseCsrCUDA: ceil_sparse_csr_ + tags: pointwise + +- func: ceil.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: ceil_out + MPS: ceil_out_mps + SparseCPU, SparseCUDA: ceil_sparse_out + SparseCsrCPU, SparseCsrCUDA: ceil_sparse_csr_out + tags: pointwise + +# alias for torch.linalg.multi_dot +- func: chain_matmul(Tensor[] matrices) -> Tensor + variants: function + +# alias for torch.linalg.multi_dot +- func: chain_matmul.out(Tensor[] matrices, *, Tensor(a!) out) -> Tensor(a!) 
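+
+# Illustrative usage sketch (not part of the generated schema; a hedged example
+# assuming a standard `torch` build): the alias entries above mean `torch.concat`
+# and `torch.concatenate` behave like `torch.cat`, and `torch.chain_matmul`
+# matches `torch.linalg.multi_dot`, e.g.:
+#
+#   import torch
+#   xs = [torch.randn(2, 3), torch.randn(2, 3)]
+#   assert torch.equal(torch.cat(xs, dim=0), torch.concat(xs, dim=0))
+#   assert torch.equal(torch.cat(xs, dim=0), torch.concatenate(xs, dim=0))
+#   ms = [torch.randn(2, 4), torch.randn(4, 5), torch.randn(5, 3)]
+#   assert torch.allclose(torch.chain_matmul(*ms), torch.linalg.multi_dot(ms))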
+ +- func: unsafe_chunk(Tensor self, int chunks, int dim=0) -> Tensor[] + variants: function, method + device_check: NoCheck + device_guard: False + +- func: chunk(Tensor(a -> *) self, int chunks, int dim=0) -> Tensor(a)[] + variants: function, method + device_check: NoCheck + device_guard: False + dispatch: + CompositeImplicitAutograd: chunk + NestedTensorCPU, NestedTensorCUDA: chunk_nested_tensor + +- func: tensor_split.sections(Tensor(a -> *) self, SymInt sections, int dim=0) -> Tensor(a)[] + variants: function, method + dispatch: + CompositeImplicitAutograd: tensor_split_sections_symint + +- func: tensor_split.indices(Tensor(a -> *) self, SymInt[] indices, int dim=0) -> Tensor(a)[] + variants: function, method + dispatch: + CompositeImplicitAutograd: tensor_split_indices_symint + +- func: tensor_split.tensor_indices_or_sections(Tensor(a -> *) self, Tensor tensor_indices_or_sections, int dim=0) -> Tensor(a)[] + variants: function, method + +- func: clamp(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + cpp_no_default_args: ['min'] + structured_delegate: clamp.out + dispatch: + QuantizedCPU: clamp_quantized_cpu + tags: [core, pointwise] + +- func: clamp.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor + variants: function, method + structured_delegate: clamp.Tensor_out + tags: [core, pointwise] + +- func: clamp_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: function, method + cpp_no_default_args: ['min'] + structured_delegate: clamp.out + tags: pointwise + +- func: clamp_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!) + variants: function, method + structured_delegate: clamp.Tensor_out + tags: pointwise + +- func: clamp.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + cpp_no_default_args: ['min'] + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: clamp_out + MPS: clamp_out_mps + tags: pointwise + +- func: clamp.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: clamp_Tensor_out + MPS: clamp_Tensor_out_mps + tags: pointwise + +- func: clamp_max(Tensor self, Scalar max) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + structured_delegate: clamp_max.out + tags: pointwise + +- func: clamp_max.Tensor(Tensor self, Tensor max) -> Tensor + variants: function, method + structured_delegate: clamp_max.Tensor_out + tags: pointwise + +- func: clamp_max_(Tensor(a!) self, Scalar max) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: function, method + structured_delegate: clamp_max.out + tags: pointwise + +- func: clamp_max_.Tensor(Tensor(a!) self, Tensor max) -> Tensor(a!) + variants: function, method + structured_delegate: clamp_max.Tensor_out + tags: pointwise + +- func: clamp_max.out(Tensor self, Scalar max, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: clamp_max_out + MPS: clamp_max_out_mps + tags: pointwise + +- func: clamp_max.Tensor_out(Tensor self, Tensor max, *, Tensor(a!) out) -> Tensor(a!) 
+ device_check: NoCheck # TensorIterator + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: clamp_max_Tensor_out + MPS: clamp_max_Tensor_out_mps + tags: pointwise + +- func: clamp_min(Tensor self, Scalar min) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + structured_delegate: clamp_min.out + tags: pointwise + +- func: clamp_min.Tensor(Tensor self, Tensor min) -> Tensor + variants: function, method + structured_delegate: clamp_min.Tensor_out + tags: pointwise + +- func: clamp_min_(Tensor(a!) self, Scalar min) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: function, method + structured_delegate: clamp_min.out + tags: pointwise + +- func: clamp_min_.Tensor(Tensor(a!) self, Tensor min) -> Tensor(a!) + variants: function, method + structured_delegate: clamp_min.Tensor_out + tags: pointwise + +- func: clamp_min.out(Tensor self, Scalar min, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: clamp_min_out + MPS: clamp_min_out_mps + tags: pointwise + +- func: clamp_min.Tensor_out(Tensor self, Tensor min, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: clamp_min_Tensor_out + MPS: clamp_min_Tensor_out_mps + tags: pointwise + +# clip is an alias for clamp +- func: clip(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor + cpp_no_default_args: ['min'] + variants: function, method + tags: pointwise + +- func: clip.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor + variants: function, method + tags: pointwise + +- func: clip_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!) + cpp_no_default_args: ['min'] + variants: function, method + tags: pointwise + +- func: clip_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!) + variants: function, method + tags: pointwise + +- func: clip.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!) + cpp_no_default_args: ['min'] + tags: pointwise + +- func: clip.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) out) -> Tensor(a!) + +- func: cudnn_is_acceptable(Tensor self) -> bool + device_check: NoCheck + device_guard: False + +- func: complex(Tensor real, Tensor imag) -> Tensor + variants: function + dispatch: + CompositeExplicitAutograd: complex + +- func: complex.out(Tensor real, Tensor imag, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CPU, CUDA: complex_out + MPS: complex_out_mps + +- func: polar(Tensor abs, Tensor angle) -> Tensor + variants: function + dispatch: + CompositeExplicitAutograd: polar + +- func: polar.out(Tensor abs, Tensor angle, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CPU, CUDA: polar_out + MPS: polar_out_mps + +- func: constant_pad_nd(Tensor self, SymInt[] pad, Scalar value=0) -> Tensor + variants: function + dispatch: + CompositeExplicitAutograd: constant_pad_nd + MPS: constant_pad_nd_mps + autogen: constant_pad_nd.out + tags: core + +- func: contiguous(Tensor(a) self, *, MemoryFormat memory_format=contiguous_format) -> Tensor(a) + variants: method + manual_cpp_binding: True + +- func: convolution(Tensor input, Tensor weight, Tensor? 
bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups) -> Tensor + dispatch: + CompositeExplicitAutograd: convolution + autogen: convolution.out + tags: core + +- func: convolution_backward(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor) + dispatch: + CompositeExplicitAutograd, CUDA: convolution_backward + autogen: convolution_backward.out + tags: core + +- func: convolution_overrideable(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups) -> Tensor + dispatch: + CompositeExplicitAutograd: convolution_overrideable + autogen: convolution_overrideable.out + +- func: convolution_backward_overrideable(Tensor grad_output, Tensor input, Tensor weight, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias) + dispatch: + CompositeExplicitAutograd: convolution_backward_overrideable + autogen: convolution_backward_overrideable.out + +- func: _convolution(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) -> Tensor + dispatch: + CompositeExplicitAutograd: _convolution + autogen: _convolution.out + +- func: _convolution.deprecated(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, int[] output_padding, SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled) -> Tensor + +- func: _convolution_mode(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, str padding, SymInt[] dilation, SymInt groups) -> Tensor + dispatch: + CompositeImplicitAutograd: _convolution_mode_symint + +- func: _convolution_double_backward(Tensor? ggI, Tensor? ggW, Tensor? ggb, Tensor gO, Tensor weight, Tensor self, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor) + +- func: conv1d(Tensor input, Tensor weight, Tensor? bias=None, SymInt[1] stride=1, SymInt[1] padding=0, SymInt[1] dilation=1, SymInt groups=1) -> Tensor + dispatch: + CompositeImplicitAutograd: conv1d_symint + +- func: conv2d(Tensor input, Tensor weight, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] dilation=1, SymInt groups=1) -> Tensor + dispatch: + CompositeImplicitAutograd: conv2d_symint + +- func: conv3d(Tensor input, Tensor weight, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] dilation=1, SymInt groups=1) -> Tensor + dispatch: + CompositeImplicitAutograd: conv3d_symint + +- func: conv1d.padding(Tensor input, Tensor weight, Tensor? bias=None, SymInt[1] stride=1, str padding="valid", SymInt[1] dilation=1, SymInt groups=1) -> Tensor + cpp_no_default_args: ['bias', 'stride', 'padding'] + dispatch: + CompositeImplicitAutograd: conv1d_padding_symint + +- func: conv2d.padding(Tensor input, Tensor weight, Tensor? 
bias=None, SymInt[2] stride=1, str padding="valid", SymInt[2] dilation=1, SymInt groups=1) -> Tensor + cpp_no_default_args: ['bias', 'stride', 'padding'] + dispatch: + CompositeImplicitAutograd: conv2d_padding_symint + +- func: conv3d.padding(Tensor input, Tensor weight, Tensor? bias=None, SymInt[3] stride=1, str padding="valid", SymInt[3] dilation=1, SymInt groups=1) -> Tensor + cpp_no_default_args: ['bias', 'stride', 'padding'] + dispatch: + CompositeImplicitAutograd: conv3d_padding_symint + +- func: conv_tbc(Tensor self, Tensor weight, Tensor bias, int pad=0) -> Tensor + dispatch: + CompositeExplicitAutograd: conv_tbc + autogen: conv_tbc.out + +- func: conv_tbc_backward(Tensor self, Tensor input, Tensor weight, Tensor bias, int pad) -> (Tensor, Tensor, Tensor) + +# NB: we inherit the goofy argument order from PyTorch torch.nn.functional +- func: conv_transpose1d(Tensor input, Tensor weight, Tensor? bias=None, SymInt[1] stride=1, SymInt[1] padding=0, SymInt[1] output_padding=0, SymInt groups=1, SymInt[1] dilation=1) -> Tensor + dispatch: + CompositeImplicitAutograd: conv_transpose1d_symint + +- func: conv_transpose2d.input(Tensor input, Tensor weight, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, SymInt groups=1, SymInt[2] dilation=1) -> Tensor + dispatch: + CompositeImplicitAutograd: conv_transpose2d_symint + +- func: conv_transpose3d.input(Tensor input, Tensor weight, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, SymInt groups=1, SymInt[3] dilation=1) -> Tensor + dispatch: + CompositeImplicitAutograd: conv_transpose3d_symint + +- func: copy(Tensor self, Tensor src, bool non_blocking=False) -> Tensor + variants: function + dispatch: + CompositeExplicitAutogradNonFunctional: copy + tags: core + +- func: copy_(Tensor(a!) self, Tensor src, bool non_blocking=False) -> Tensor(a!) + variants: method + device_check: NoCheck + device_guard: False + dispatch: + MkldnnCPU: copy_mkldnn_ + SparseCPU, SparseCUDA: copy_sparse_wrapper_ + CompositeExplicitAutograd: copy_ + SparseCsrCPU, SparseCsrCUDA: copy_sparse_compressed_ + NestedTensorCPU, NestedTensorCUDA: copy_nested_ + autogen: copy.out + +- func: _copy_from(Tensor self, Tensor dst, bool non_blocking=False) -> Tensor + dispatch: + MPS: _copy_from_mps + autogen: _copy_from.out + +# We need this to be able to properly copy from a CPU to an XLA tensor with different sizes. +# See https://github.com/pytorch/xla/issues/2881 +- func: _copy_from_and_resize(Tensor self, Tensor dst) -> Tensor + dispatch: + MPS: _copy_from_and_resize_mps + autogen: _copy_from_and_resize.out + +- func: cos(Tensor self) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + structured_delegate: cos.out + dispatch: + NestedTensorCPU, NestedTensorCUDA: cos_nested + tags: [core, pointwise] + +- func: cos_(Tensor(a!) self) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: function, method + structured_delegate: cos.out + tags: pointwise + +- func: cos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: cos_out + MPS: cos_out_mps + tags: pointwise + +- func: cosh(Tensor self) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + structured_delegate: cosh.out + tags: [core, pointwise] + +- func: cosh_(Tensor(a!) self) -> Tensor(a!) 
+ device_check: NoCheck # TensorIterator + variants: function, method + structured_delegate: cosh.out + tags: pointwise + +- func: cosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: cosh_out + MPS: cosh_out_mps + tags: pointwise + +- func: cosine_embedding_loss(Tensor input1, Tensor input2, Tensor target, float margin=0.0, int reduction=Mean) -> Tensor + +- func: count_nonzero.dim_IntList(Tensor self, int[] dim) -> Tensor + variants: function, method + dispatch: + CPU: count_nonzero_cpu + CUDA: count_nonzero_cuda + MPS: count_nonzero_mps + autogen: count_nonzero.dim_IntList_out + +- func: count_nonzero(Tensor self, int? dim=None) -> Tensor + variants: function, method + dispatch: + CompositeExplicitAutograd: count_nonzero + autogen: count_nonzero.out + +- func: cov(Tensor self, *, int correction=1, Tensor? fweights=None, Tensor? aweights=None) -> Tensor + variants: function, method + +- func: corrcoef(Tensor self) -> Tensor + variants: function, method + +- func: cudnn_affine_grid_generator(Tensor theta, int N, int C, int H, int W) -> Tensor grid + dispatch: + CUDA: cudnn_affine_grid_generator_forward + autogen: cudnn_affine_grid_generator.out + +# TODO: Why do I have to call this grad?! +- func: cudnn_affine_grid_generator_backward(Tensor grad, int N, int C, int H, int W) -> Tensor grad_theta + dispatch: + CUDA: cudnn_affine_grid_generator_backward + autogen: cudnn_affine_grid_generator_backward.out + +- func: cudnn_batch_norm(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon) -> (Tensor, Tensor, Tensor, Tensor) + dispatch: + CUDA: cudnn_batch_norm + autogen: cudnn_batch_norm.out + +# NB: You can only use this if you used cudnn_batch_norm training=True +- func: cudnn_batch_norm_backward(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon, Tensor reserveSpace) -> (Tensor, Tensor, Tensor) + dispatch: + CUDA: cudnn_batch_norm_backward + autogen: cudnn_batch_norm_backward.out + +- func: cudnn_convolution(Tensor self, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor + dispatch: + CUDA: cudnn_convolution + +- func: cudnn_convolution.out(Tensor self, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!) 
+ dispatch: + CUDA: cudnn_convolution_out + +- func: cudnn_convolution_transpose(Tensor self, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor + dispatch: + CUDA: cudnn_convolution_transpose + autogen: cudnn_convolution_transpose.out + +- func: _mps_convolution_transpose(Tensor self, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups) -> Tensor + dispatch: + MPS: _mps_convolution_transpose + autogen: _mps_convolution_transpose.out + +- func: mps_convolution_transpose_backward(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[2] output_mask) -> (Tensor, Tensor) + dispatch: + MPS: mps_convolution_transpose_backward + autogen: mps_convolution_transpose_backward.out + +- func: cudnn_convolution_relu(Tensor self, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor + dispatch: + CUDA: cudnn_convolution_relu + autogen: cudnn_convolution_relu.out + +- func: cudnn_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor + dispatch: + CUDA: cudnn_convolution_add_relu + autogen: cudnn_convolution_add_relu.out + +# NB: input is special cased in a way I don't quite understand +- func: cudnn_grid_sampler(Tensor self, Tensor grid) -> Tensor output + dispatch: + CUDA: cudnn_grid_sampler_forward + autogen: cudnn_grid_sampler.out + +- func: cudnn_grid_sampler_backward(Tensor self, Tensor grid, Tensor grad_output) -> (Tensor grad_self, Tensor grad_grid) + dispatch: + CUDA: cudnn_grid_sampler_backward + autogen: cudnn_grid_sampler_backward.out + +- func: cummax(Tensor self, int dim) -> (Tensor values, Tensor indices) + device_check: NoCheck # TensorIterator + variants: function, method + dispatch: + CompositeExplicitAutograd: cummax + +- func: cummax.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + device_check: NoCheck # TensorIterator + dispatch: + CompositeExplicitAutograd: cummax_out + +- func: cummax.dimname(Tensor self, Dimname dim) -> (Tensor values, Tensor indices) + device_check: NoCheck # TensorIterator + variants: function, method + +- func: cummax.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + device_check: NoCheck # TensorIterator + +- func: _cummax_helper(Tensor self, Tensor(a!) values, Tensor(b!) indices, int dim) -> () + variants: function + dispatch: + CPU: cummax_helper_cpu + CUDA: cummax_helper_cuda + +- func: cummin(Tensor self, int dim) -> (Tensor values, Tensor indices) + device_check: NoCheck # TensorIterator + variants: function, method + dispatch: + CompositeExplicitAutograd: cummin + +- func: cummin.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + device_check: NoCheck # TensorIterator + dispatch: + CompositeExplicitAutograd: cummin_out + +- func: cummin.dimname(Tensor self, Dimname dim) -> (Tensor values, Tensor indices) + device_check: NoCheck # TensorIterator + variants: function, method + +- func: cummin.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) 
indices) + device_check: NoCheck # TensorIterator + +- func: _cummin_helper(Tensor self, Tensor(a!) values, Tensor(b!) indices, int dim) -> () + variants: function + dispatch: + CPU: cummin_helper_cpu + CUDA: cummin_helper_cuda + +- func: cummaxmin_backward(Tensor grad, Tensor input, Tensor indices, int dim) -> Tensor + variants: function + device_check: NoCheck + device_guard: False + +- func: cumprod(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor + structured_delegate: cumprod.out + device_check: NoCheck # TensorIterator + variants: function, method + +- func: cumprod_(Tensor(a!) self, int dim, *, ScalarType? dtype=None) -> Tensor(a!) + structured_delegate: cumprod.out + variants: method + +- func: cumprod.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + structured: True + device_check: NoCheck # TensorIterator + dispatch: + CPU, CUDA: cumprod_out + MPS: cumprod_out_mps + +- func: cumprod.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + +- func: cumprod_.dimname(Tensor(a!) self, Dimname dim, *, ScalarType? dtype=None) -> Tensor(a!) + variants: method + +- func: cumprod.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + +- func: cumprod_backward(Tensor grad, Tensor input, int dim, Tensor output) -> Tensor + variants: function + device_check: NoCheck + device_guard: False + +- func: cumsum(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor + structured_delegate: cumsum.out + device_check: NoCheck # TensorIterator + variants: function, method + tags: core + +- func: cumsum_(Tensor(a!) self, int dim, *, ScalarType? dtype=None) -> Tensor(a!) + structured_delegate: cumsum.out + variants: method + +- func: cumsum.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + structured: True + device_check: NoCheck # TensorIterator + dispatch: + CPU, CUDA: cumsum_out + MPS: cumsum_out_mps + +- func: cumsum.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + +- func: cumsum_.dimname(Tensor(a!) self, Dimname dim, *, ScalarType? dtype=None) -> Tensor(a!) + variants: method + +- func: cumsum.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) 
+ device_check: NoCheck # TensorIterator + +- func: cumulative_trapezoid.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor + +- func: cumulative_trapezoid.dx(Tensor y, *, Scalar dx=1, int dim=-1) -> Tensor + +- func: ctc_loss.IntList(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, int reduction=Mean, bool zero_infinity=False) -> Tensor + +# convenience function that converts to intlists for you +- func: ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, int reduction=Mean, bool zero_infinity=False) -> Tensor + +- func: _ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, bool zero_infinity=False) -> (Tensor, Tensor) + dispatch: + CPU: ctc_loss_cpu + CUDA: ctc_loss_gpu + Meta: ctc_loss_meta + autogen: _ctc_loss.out + tags: dynamic_output_shape # the shape of second output is data dependent + +- func: _ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, bool zero_infinity=False) -> (Tensor, Tensor) + dispatch: + CPU, CUDA: ctc_loss_tensor + autogen: _ctc_loss.Tensor_out + tags: dynamic_output_shape # the shape of second output is data dependent + +- func: _ctc_loss_backward(Tensor grad, Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False) -> Tensor + dispatch: + CPU: ctc_loss_backward_cpu + CUDA: ctc_loss_backward_gpu + autogen: _ctc_loss_backward.out + +- func: _ctc_loss_backward.Tensor(Tensor grad, Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False) -> Tensor + dispatch: + CPU, CUDA: ctc_loss_backward_tensor + +- func: diag_embed(Tensor self, int offset=0, int dim1=-2, int dim2=-1) -> Tensor + variants: function, method + dispatch: + CompositeExplicitAutogradNonFunctional: diag_embed + autogen: diag_embed.out + +- func: diagflat(Tensor self, int offset=0) -> Tensor + variants: function, method + +- func: diagonal(Tensor(a) self, int offset=0, int dim1=0, int dim2=1) -> Tensor(a) + variants: function, method + dispatch: + CompositeExplicitAutograd: diagonal + tags: core + +- func: linalg_diagonal(Tensor(a) A, *, int offset=0, int dim1=-2, int dim2=-1) -> Tensor(a) + python_module: linalg + variants: function + +- func: diagonal.Dimname(Tensor(a) self, *, Dimname outdim, Dimname dim1, Dimname dim2, int offset=0) -> Tensor(a) + variants: function, method + +- func: diagonal_backward(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2) -> Tensor + variants: function + device_check: NoCheck + device_guard: False + dispatch: + CompositeExplicitAutograd: diagonal_backward_symint + autogen: diagonal_backward.out + +- func: fill_diagonal_(Tensor(a!) self, Scalar fill_value, bool wrap=False) -> Tensor(a!) + variants: method + +- func: diff(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None) -> Tensor + variants: function, method + +- func: diff.out(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None, *, Tensor(a!) out) -> Tensor(a!) + variants: function + +- func: gradient.scalarint(Tensor self, *, Scalar? spacing=None, int? 
dim=None, int edge_order=1) -> Tensor[] + variants: function + +- func: gradient.scalararray(Tensor self, *, Scalar spacing, int[] dim, int edge_order=1) -> Tensor[] + variants: function + +- func: gradient.array(Tensor self, *, int[] dim, int edge_order=1) -> Tensor[] + variants: function + +- func: gradient.scalarrayint(Tensor self, *, Scalar[] spacing, int? dim=None, int edge_order=1) -> Tensor[] + variants: function + +- func: gradient.scalarrayarray(Tensor self, *, Scalar[] spacing, int[] dim, int edge_order=1) -> Tensor[] + variants: function + +- func: gradient.tensorarrayint(Tensor self, *, Tensor[] spacing, int? dim=None, int edge_order=1) -> Tensor[] + variants: function + +- func: gradient.tensorarray(Tensor self, *, Tensor[] spacing, int[] dim, int edge_order=1) -> Tensor[] + variants: function + +- func: div.Tensor(Tensor self, Tensor other) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + structured_delegate: div.out + dispatch: + SparseCPU, SparseCUDA: div_sparse + ZeroTensor: div_zerotensor + NestedTensorCPU, NestedTensorCUDA: NestedTensor_div_Tensor + tags: [core, pointwise] + +- func: div_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: method + structured_delegate: div.out + dispatch: + SparseCPU, SparseCUDA: div_sparse_ + tags: pointwise + +- func: div.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: div_out + MPS: div_out_mps + SparseCPU, SparseCUDA: div_out_sparse_zerodim + tags: pointwise + +- func: div.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + structured_delegate: div.out_mode + dispatch: + SparseCPU, SparseCUDA: div_sparse + tags: [core, pointwise] + +- func: div_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? rounding_mode) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: method + structured_delegate: div.out_mode + dispatch: + SparseCPU, SparseCUDA: div_sparse_ + tags: pointwise + +- func: div.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: div_out_mode + MPS: div_out_mode_mps + SparseCPU, SparseCUDA: div_out_sparse_zerodim + tags: pointwise + +# For C++ only, until we have conversion from C++ numbers to Tensor +- func: div.Scalar(Tensor self, Scalar other) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + dispatch: + CompositeExplicitAutograd: div + NestedTensorCPU, NestedTensorCUDA: NestedTensor_div_Scalar + tags: [core, pointwise] + +- func: div_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: method + dispatch: + CompositeExplicitAutograd: div_ + autogen: div.Scalar_out + tags: pointwise + +- func: div.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor + variants: function, method + dispatch: + CompositeExplicitAutograd: div + tags: [core, pointwise] + +- func: div_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? rounding_mode) -> Tensor(a!) 
+ variants: method + dispatch: + CompositeExplicitAutograd: div_ + autogen: div.Scalar_mode_out + tags: pointwise + +# divide, alias for div +- func: divide.Tensor(Tensor self, Tensor other) -> Tensor + variants: function, method + +- func: divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + variants: method + +- func: divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + +- func: divide.Scalar(Tensor self, Scalar other) -> Tensor + variants: function, method + +- func: divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + variants: method + +- func: divide.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor + variants: function, method + +- func: divide_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? rounding_mode) -> Tensor(a!) + variants: method + +- func: divide.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!) + +- func: divide.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor + variants: function, method + +- func: divide_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? rounding_mode) -> Tensor(a!) + variants: method + + # true_divide, an alias for div +- func: true_divide.Tensor(Tensor self, Tensor other) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + tags: pointwise + +- func: true_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: method + +- func: true_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + +- func: true_divide.Scalar(Tensor self, Scalar other) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + +- func: true_divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: method + +- func: dot(Tensor self, Tensor tensor) -> Tensor + variants: function, method + dispatch: + CPU: dot + CUDA: dot_cuda + MPS: dot_mps + +- func: dot.out(Tensor self, Tensor tensor, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CompositeExplicitAutograd: dot_out + +- func: vdot(Tensor self, Tensor other) -> Tensor + variants: function, method + dispatch: + CPU: vdot + CUDA: vdot_cuda + +- func: vdot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CompositeExplicitAutograd: vdot_out + +- func: einsum(str equation, Tensor[] tensors, *, int[]? path=None) -> Tensor + +- func: embedding(Tensor weight, Tensor indices, SymInt padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False) -> Tensor + dispatch: + CompositeExplicitAutograd: embedding_symint + NestedTensorCPU, NestedTensorCUDA: NestedTensor_embedding + autogen: embedding.out + tags: core + +- func: embedding_backward(Tensor grad, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq, bool sparse) -> Tensor + dispatch: + CompositeImplicitAutograd: embedding_backward_symint + +- func: embedding_dense_backward(Tensor grad_output, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq) -> Tensor + dispatch: + CPU: embedding_dense_backward_cpu + CUDA: embedding_dense_backward_cuda + MPS: embedding_dense_backward_mps + autogen: embedding_dense_backward.out + tags: core + +- func: embedding_renorm_(Tensor(a!) self, Tensor indices, float max_norm, float norm_type) -> Tensor(a!) 
+ dispatch: + CPU: embedding_renorm_cpu_ + CUDA: embedding_renorm_cuda_ + autogen: embedding_renorm, embedding_renorm.out + +- func: embedding_sparse_backward(Tensor grad, Tensor indices, int num_weights, int padding_idx, bool scale_grad_by_freq) -> Tensor + +# NOTE [ embedding_bag Native Functions ] +# The `_embedding_bag.*` variants assume that input tensors except for `weight`, +# e.g. `indices` and `offsets` (and `offset2bag`), are contiguous. +# We really only need to enforce this for `_embedding_bag` (the forward) because +# the backward inputs are the same as forward ones. +# The above `embedding_bag` wrapper is created to achieve this, e.g., +# applying indices = indices.contiguous(). +# The backward functions apply a check that these input tensors are contiguous. + + +- func: _embedding_bag_forward_only(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1) -> (Tensor, Tensor, Tensor, Tensor) + dispatch: + CPU: _embedding_bag_forward_only_cpu + CUDA: _embedding_bag_forward_only_cuda + autogen: _embedding_bag_forward_only.out + +- func: _rowwise_prune(Tensor weight, Tensor mask, ScalarType compressed_indices_dtype) -> (Tensor, Tensor) + +# row_stack is the alias of vstack +- func: row_stack(Tensor[] tensors) -> Tensor + +- func: row_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) + +- func: embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False) -> (Tensor, Tensor, Tensor, Tensor) + +# To keep backward and forward compatibility, and to avoid ambiguity with the +# original signature above, scale_grad_by_freq, mode, sparse, +# per_sample_weights, and include_last_offset parameters do not have default +# values. Once the original signature is removed, default values can be added. +- func: embedding_bag.padding_idx(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq, int mode, bool sparse, Tensor? per_sample_weights, bool include_last_offset, int? padding_idx) -> (Tensor, Tensor, Tensor, Tensor) + +- func: _embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1) -> (Tensor, Tensor, Tensor, Tensor) + dispatch: + CPU: _embedding_bag_cpu + CUDA: _embedding_bag_cuda + autogen: _embedding_bag.out + tags: core + +- func: _embedding_bag_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, bool sparse, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor + dispatch: + CompositeImplicitAutograd: _embedding_bag_backward_symint + +- func: _embedding_bag_sparse_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor + dispatch: + CompositeImplicitAutograd: _embedding_bag_sparse_backward_symint + +- func: _embedding_bag_dense_backward(Tensor grad, Tensor indices, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? 
per_sample_weights, int padding_idx=-1) -> Tensor + dispatch: + CPU: _embedding_bag_dense_backward_cpu + CUDA: _embedding_bag_dense_backward_cuda + autogen: _embedding_bag_dense_backward.out + +- func: _embedding_bag_per_sample_weights_backward(Tensor grad, Tensor weight, Tensor indices, Tensor offsets, Tensor offset2bag, int mode, int padding_idx=-1) -> Tensor + dispatch: + CPU: _embedding_bag_per_sample_weights_backward_cpu + CUDA: _embedding_bag_per_sample_weights_backward_cuda + autogen: _embedding_bag_per_sample_weights_backward.out + +- func: empty.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor + device_check: NoCheck + device_guard: False + dispatch: + CompositeExplicitAutograd: empty_names + autogen: empty.names_out + +- func: empty.memory_format(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor + dispatch: + CPU: empty_cpu + CUDA: empty_cuda + MPS: empty_mps + Meta: empty_meta_symint + MkldnnCPU: empty_mkldnn + SparseCPU, SparseCUDA, SparseMeta: empty_sparse + SparseCsrCPU, SparseCsrCUDA, SparseCsrMeta: empty_sparse_compressed + QuantizedCPU, QuantizedCUDA, QuantizedMeta: empty_unknown_quantized + tags: core + +- func: empty_permuted(SymInt[] size, int[] physical_layout, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + dispatch: + CompositeExplicitAutograd: empty_permuted_symint + autogen: empty_permuted.out + +# We do not make new_empty a composite that calls into new_empty_strided, as the strided version +# is significantly more difficult to implement by different backends +- func: new_empty(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + variants: method + dispatch: + CompositeExplicitAutograd: new_empty_symint + autogen: new_empty.out + +- func: new_empty_strided(Tensor self, SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + variants: method + dispatch: + CompositeExplicitAutogradNonFunctional: new_empty_strided_symint + autogen: new_empty_strided.out + +- func: new_full(Tensor self, SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + variants: method + dispatch: + # NB: Although this composite mutates on the inside, it is + # non-differentiable so NonFunctional doesn't apply + CompositeExplicitAutograd: new_full + autogen: new_full.out + +- func: new_zeros(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + variants: method + dispatch: + # NB: Although this composite mutates on the inside, it is + # non-differentiable so NonFunctional doesn't apply + CompositeExplicitAutograd: new_zeros + autogen: new_zeros.out + +- func: new_ones(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + variants: method + dispatch: + # NB: Although this composite mutates on the inside, it is + # non-differentiable so NonFunctional doesn't apply + CompositeExplicitAutograd: new_ones + autogen: new_ones.out + +# other overrides are to provide a more helpful error message that dtype is required +- func: _empty_affine_quantized(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format) -> Tensor + dispatch: + CPU: empty_affine_quantized_other_backends_stub + QuantizedCPU, QuantizedCUDA: empty_affine_quantized + autogen: _empty_affine_quantized.out + +# it's a factory function receiving a tensor argument, thus overriding explicitly +# other overrides are to provide a more helpful error message that dtype is required +- func: _empty_per_channel_affine_quantized(SymInt[] size, *, Tensor scales, Tensor zero_points, int axis, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=contiguous_format) -> Tensor + category_override: factory + dispatch: + CPU: empty_per_channel_affine_quantized_other_backends_stub + QuantizedCPU, QuantizedCUDA: empty_per_channel_affine_quantized + autogen: _empty_per_channel_affine_quantized.out + +- func: resize_(Tensor(a!) self, SymInt[] size, *, MemoryFormat? memory_format=None) -> Tensor(a!) + use_const_ref_for_mutable_tensors: True + variants: method + device_check: NoCheck + device_guard: False + tags: [core, inplace_view] + dispatch: + Meta: resize__symint + CPU: resize_ + CUDA: resize_cuda_ + MPS: resize_mps_ + QuantizedCPU: quantized_resize_cpu_ + SparseCsrCPU, SparseCsrCUDA: resize_sparse_csr_ + autogen: resize, resize.out + +# This is a utility function to enable users to resize out tensor while registering kernels for out variants. +# Eventually, we can consider exposing `resize_output` as a public API to ship it with python op registration +# to make it easy to register out variants for ops. +- func: _resize_output_(Tensor(a!) self, SymInt[] size, Device device) -> Tensor(a!) + use_const_ref_for_mutable_tensors: True + variants: function + dispatch: + Meta: _resize_output_ + autogen: _resize_output, _resize_output.out + +- func: empty_quantized(int[] size, Tensor qtensor, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor + category_override: factory + variants: function + dispatch: + QuantizedCPU, QuantizedCUDA: empty_quantized + autogen: empty_quantized.out + +- func: empty.out(SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck + device_guard: False + +- func: empty_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor + device_check: NoCheck + device_guard: False + dispatch: + CompositeExplicitAutograd: empty_like + QuantizedCPU, QuantizedCUDA: empty_like_quantized + SparseCPU, SparseCUDA, SparseMeta: empty_like_sparse_coo + SparseCsrCPU, SparseCsrCUDA, SparseCsrMeta: empty_like_sparse_csr + NestedTensorCPU, NestedTensorCUDA: empty_like_nested + autogen: empty_like.out + +- func: empty_strided(SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + dispatch: + CPU: empty_strided_cpu + CUDA: empty_strided_cuda + MPS: empty_strided_mps + Meta: empty_strided_meta_symint + QuantizedCPU, QuantizedCUDA: empty_strided_unknown_quantized + autogen: empty_strided.out + tags: core + +- func: erf(Tensor self) -> Tensor + device_check: NoCheck # TensorIterator + structured_delegate: erf.out + variants: function, method + dispatch: + SparseCPU, SparseCUDA: erf_sparse + SparseCsrCPU, SparseCsrCUDA: erf_sparse_csr + tags: [core, pointwise] + +- func: erf_(Tensor(a!) self) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured_delegate: erf.out + variants: function, method + dispatch: + SparseCPU, SparseCUDA: erf_sparse_ + SparseCsrCPU, SparseCsrCUDA: erf_sparse_csr_ + tags: pointwise + +- func: erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: erf_out + MPS: erf_out_mps + SparseCPU, SparseCUDA: erf_sparse_out + SparseCsrCPU, SparseCsrCUDA: erf_sparse_csr_out + tags: pointwise + +- func: erfc(Tensor self) -> Tensor + device_check: NoCheck # TensorIterator + structured_delegate: erfc.out + variants: function, method + tags: pointwise + +- func: erfc_(Tensor(a!) self) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured_delegate: erfc.out + variants: function, method + tags: pointwise + +- func: erfc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: erfc_out + tags: pointwise + +- func: exp(Tensor self) -> Tensor + device_check: NoCheck # TensorIterator + structured_delegate: exp.out + variants: function, method + tags: [core, pointwise] + +- func: exp_(Tensor(a!) self) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured_delegate: exp.out + variants: function, method + tags: pointwise + +- func: exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: exp_out + MPS: exp_out_mps + tags: pointwise + +- func: exp2(Tensor self) -> Tensor + structured_delegate: exp2.out + variants: function, method + tags: pointwise + +- func: exp2_(Tensor(a!) self) -> Tensor(a!) + structured_delegate: exp2.out + variants: function, method + tags: pointwise + +- func: exp2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: exp2_out + MPS: exp2_out_mps + tags: pointwise + +- func: expm1(Tensor self) -> Tensor + device_check: NoCheck # TensorIterator + structured_delegate: expm1.out + variants: function, method + dispatch: + SparseCPU, SparseCUDA: expm1_sparse + SparseCsrCPU, SparseCsrCUDA: expm1_sparse_csr + tags: [core, pointwise] + +- func: expm1_(Tensor(a!) self) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured_delegate: expm1.out + variants: function, method + dispatch: + SparseCPU, SparseCUDA: expm1_sparse_ + SparseCsrCPU, SparseCsrCUDA: expm1_sparse_csr_ + tags: pointwise + +- func: expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+ device_check: NoCheck # TensorIterator + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: expm1_out + MPS: expm1_out_mps + SparseCPU, SparseCUDA: expm1_sparse_out + SparseCsrCPU, SparseCsrCUDA: expm1_sparse_csr_out + tags: pointwise + +- func: expand(Tensor(a) self, SymInt[] size, *, bool implicit=False) -> Tensor(a) + variants: method # This is method-only to match the previous tensor API. In the future we could make this a function too. + device_check: NoCheck + device_guard: False + dispatch: + CompositeExplicitAutograd: expand + tags: core + +- func: expand_as(Tensor(a) self, Tensor other) -> Tensor(a) + variants: method # This is method-only to match the previous tensor API. In the future we could make this a function too. + device_check: NoCheck + device_guard: False + +# decomposes to eye.m +- func: eye(SymInt n, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + dispatch: + CompositeExplicitAutograd: eye + +- func: eye.m(SymInt n, SymInt m, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + dispatch: + CompositeExplicitAutograd: eye + +- func: eye.out(SymInt n, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CPU, Meta: eye_out_cpu + CUDA: eye_out_cuda + MPS: eye_out_mps + +- func: eye.m_out(SymInt n, SymInt m, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CPU, Meta: eye_out_cpu + CUDA: eye_out_cuda + MPS: eye_out_mps + +- func: flatten.using_ints(Tensor(a) self, int start_dim=0, int end_dim=-1) -> Tensor(a) + variants: function, method + +- func: flatten.named_out_dim(Tensor(a) self, int start_dim, int end_dim, Dimname out_dim) -> Tensor(a) + variants: function, method + +- func: flatten.using_names(Tensor(a) self, Dimname start_dim, Dimname end_dim, Dimname out_dim) -> Tensor(a) + variants: function, method + +- func: flatten.DimnameList(Tensor(a) self, Dimname[] dims, Dimname out_dim) -> Tensor(a) + variants: function, method + +- func: unflatten.int(Tensor(a) self, int dim, SymInt[] sizes) -> Tensor(a) + variants: function, method + dispatch: + CompositeImplicitAutograd: unflatten_symint + +- func: unflatten.Dimname(Tensor(a) self, Dimname dim, SymInt[] sizes, Dimname[] names) -> Tensor(a) + variants: function, method + dispatch: + CompositeImplicitAutograd: unflatten_dimname_symint + +- func: fill.Scalar(Tensor self, Scalar value) -> Tensor + variants: function + dispatch: + CompositeExplicitAutograd: fill + tags: core + +- func: fill.Tensor(Tensor self, Tensor value) -> Tensor + variants: function + dispatch: + CompositeExplicitAutograd: fill + +- func: fill_.Scalar(Tensor(a!) self, Scalar value) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: function, method + dispatch: + CPU, CUDA: fill_ + MPS: fill_scalar_mps + QuantizedCPU, QuantizedCUDA: fill_quantized_ + Meta: fill_meta_ + SparseCsrCPU, SparseCsrCUDA: fill_sparse_csr_ + NestedTensorCPU, NestedTensorCUDA: fill_nested_ + autogen: fill.Scalar_out + +- func: fill_.Tensor(Tensor(a!) self, Tensor value) -> Tensor(a!) 
+ device_check: NoCheck # TensorIterator + variants: function, method + dispatch: + CPU, CUDA: fill_ + MPS: fill_tensor_mps_ + QuantizedCPU, QuantizedCUDA: fill_quantized_ + Meta: fill_meta_ + NestedTensorCPU, NestedTensorCUDA: fill_nested_ + autogen: fill.Tensor_out + +- func: floor(Tensor self) -> Tensor + device_check: NoCheck # TensorIterator + structured_delegate: floor.out + variants: function, method + dispatch: + SparseCPU, SparseCUDA: floor_sparse + SparseCsrCPU, SparseCsrCUDA: floor_sparse_csr + tags: [core, pointwise] + +- func: floor_(Tensor(a!) self) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured_delegate: floor.out + variants: function, method + dispatch: + SparseCPU, SparseCUDA: floor_sparse_ + SparseCsrCPU, SparseCsrCUDA: floor_sparse_csr_ + tags: pointwise + +- func: floor.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: floor_out + MPS: floor_out_mps + SparseCPU, SparseCUDA: floor_sparse_out + SparseCsrCPU, SparseCsrCUDA: floor_sparse_csr_out + tags: pointwise + +- func: floor_divide(Tensor self, Tensor other) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + dispatch: + CPU, CUDA: floor_divide + MPS: floor_divide_mps + SparseCPU, SparseCUDA: floor_divide_sparse + +- func: floor_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: method + dispatch: + CPU, CUDA: floor_divide_ + MPS: floor_divide_mps_ + SparseCPU, SparseCUDA: floor_divide_sparse_ + +- func: floor_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + dispatch: + CPU, CUDA: floor_divide_out + MPS: floor_divide_out_mps + SparseCPU, SparseCUDA: floor_divide_out_sparse_zerodim + +- func: floor_divide.Scalar(Tensor self, Scalar other) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + dispatch: + CompositeExplicitAutograd: floor_divide + +- func: floor_divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: method + dispatch: + CompositeExplicitAutograd: floor_divide_ + autogen: floor_divide.Scalar_out + +- func: frac(Tensor self) -> Tensor + device_check: NoCheck # TensorIterator + structured_delegate: frac.out + variants: function, method + dispatch: + SparseCPU, SparseCUDA: frac_sparse + SparseCsrCPU, SparseCsrCUDA: frac_sparse_csr + tags: pointwise + +- func: frac_(Tensor(a!) self) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured_delegate: frac.out + variants: function, method + dispatch: + SparseCPU, SparseCUDA: frac_sparse_ + SparseCsrCPU, SparseCsrCUDA: frac_sparse_csr_ + tags: pointwise + +- func: frac.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: frac_out + MPS: frac_out_mps + SparseCPU, SparseCUDA: frac_sparse_out + SparseCsrCPU, SparseCsrCUDA: frac_sparse_csr_out + tags: pointwise + +- func: full.names(int[] size, Scalar fill_value, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + device_check: NoCheck + device_guard: False + dispatch: + CompositeExplicitAutograd: full + autogen: full.names_out + +- func: full(SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? 
device=None, bool? pin_memory=None) -> Tensor + dispatch: + CompositeExplicitAutograd: full + tags: core + +- func: full.out(SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CompositeExplicitAutograd: full_out + +- func: full_like(Tensor self, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor + dispatch: + # NB: Although this composite mutates on the inside, it is + # non-differentiable so NonFunctional doesn't apply + CompositeExplicitAutograd: full_like + autogen: full_like.out + +- func: from_file(str filename, bool? shared=None, int? size=0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + dispatch: + CPU: from_file + autogen: from_file.out + +- func: gcd.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: gcd_out + tags: pointwise + +- func: gcd(Tensor self, Tensor other) -> Tensor + structured_delegate: gcd.out + variants: function, method + tags: pointwise + +- func: gcd_(Tensor(a!) self, Tensor other) -> Tensor(a!) + structured_delegate: gcd.out + variants: function, method + +- func: lcm.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: lcm_out + tags: pointwise + +- func: lcm(Tensor self, Tensor other) -> Tensor + structured_delegate: lcm.out + variants: function, method + tags: pointwise + +- func: lcm_(Tensor(a!) self, Tensor other) -> Tensor(a!) + structured_delegate: lcm.out + variants: function, method + +# NOTE [ grid_sampler Native Functions ] +# `grid_sampler` is _supposed to_ do all the shape checking and then dispatch to +# one of `cudnn_grid_sampler`, `grid_sampler_2d`, or `grid_sampler_3d`, each of +# which has the corresponding backward defined as native functions as well. +# However, we do shape checking everywhere for now since each of the mentioned +# functions can be called directly, which will lead to crashes otherwise. +# See https://github.com/pytorch/pytorch/issues/73187 for more information. +# +# There is also _grid_sampler_2d_backward_cpu_fallback which is an +# implementation detail of grid_sampler_2d and is only exposed here for testing +# purposes. +# +# Additionally, arguments `padding_mode` and `interpolation_mode` are cast to +# enums defined in `native/GridSampler.h`. `cudnn_grid_sampler` doesn't take in +# `interpolation_mode` because it only supports Bilinear interpolation mode. +# Nor does it take in `align_corners` because it only supports the mode +# `align_corners = True`. +- func: grid_sampler(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor + +- func: grid_sampler_2d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor + dispatch: + CPU, QuantizedCPU: grid_sampler_2d_cpu + CUDA: grid_sampler_2d_cuda + MPS: grid_sampler_2d_mps + autogen: grid_sampler_2d.out + tags: core + +# `grid_sampler_2d_backward` takes in `output_mask` to optimize performance for +# the case where `input` doesn't require gradient. Gradient for `grid` is always +# computed (only `output_mask[0]` is checked by the implementations). 
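The grid_sampler note above explains that the string `mode` / `padding_mode` arguments of the public API are converted to integer enums before dispatching to `grid_sampler_2d` / `grid_sampler_3d`. A short usage sketch of the corresponding Python entry point, `torch.nn.functional.grid_sample` (shapes chosen arbitrarily for illustration):

```python
import torch
import torch.nn.functional as F

inp = torch.randn(1, 3, 8, 8)               # (N, C, H_in, W_in)
grid = torch.rand(1, 4, 4, 2) * 2 - 1        # (N, H_out, W_out, 2), coords in [-1, 1]

# The string arguments below are what get cast to the interpolation_mode /
# padding_mode enums mentioned in the note above.
out = F.grid_sample(inp, grid, mode='bilinear',
                    padding_mode='zeros', align_corners=True)
print(out.shape)                             # torch.Size([1, 3, 4, 4])
```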
+- func: grid_sampler_2d_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask) -> (Tensor, Tensor) + dispatch: + CPU: grid_sampler_2d_backward_cpu + CUDA: grid_sampler_2d_backward_cuda + autogen: grid_sampler_2d_backward.out + +# See NOTE [ grid_sample CPU fallback ] +- func: _grid_sampler_2d_cpu_fallback(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor + dispatch: + CompositeExplicitAutograd: _grid_sampler_2d_cpu_fallback + autogen: _grid_sampler_2d_cpu_fallback.out + +- func: _grid_sampler_2d_cpu_fallback_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> (Tensor, Tensor) + +- func: grid_sampler_3d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor + dispatch: + CPU: grid_sampler_3d_cpu + CUDA: grid_sampler_3d_cuda + autogen: grid_sampler_3d.out + +# `grid_sampler_3d_backward` takes in `output_mask` to optimize performance for +# the case where `input` doesn't require gradient. Gradient for `grid` is always +# computed (only `output_mask[0]` is checked by the implementations). +- func: grid_sampler_3d_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask) -> (Tensor, Tensor) + dispatch: + CPU: grid_sampler_3d_backward_cpu + CUDA: grid_sampler_3d_backward_cuda + autogen: grid_sampler_3d_backward.out + +- func: hann_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + dispatch: + CompositeExplicitAutograd: hann_window + autogen: hann_window.out + +- func: hann_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + dispatch: + CompositeExplicitAutograd: hann_window + autogen: hann_window.periodic_out + +- func: hamming_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + dispatch: + CompositeExplicitAutograd: hamming_window + autogen: hamming_window.out + +- func: hamming_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + dispatch: + CompositeExplicitAutograd: hamming_window + autogen: hamming_window.periodic_out + +- func: hamming_window.periodic_alpha(int window_length, bool periodic, float alpha, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + dispatch: + CompositeExplicitAutograd: hamming_window + autogen: hamming_window.periodic_alpha_out + +- func: hamming_window.periodic_alpha_beta(int window_length, bool periodic, float alpha, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + dispatch: + CompositeExplicitAutograd: hamming_window + autogen: hamming_window.periodic_alpha_beta_out + +- func: kaiser_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + dispatch: + CompositeExplicitAutograd: kaiser_window + autogen: kaiser_window.out + +- func: kaiser_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + dispatch: + CompositeExplicitAutograd: kaiser_window + autogen: kaiser_window.periodic_out + +- func: kaiser_window.beta(int window_length, bool periodic, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + dispatch: + CompositeExplicitAutograd: kaiser_window + autogen: kaiser_window.beta_out + +- func: hinge_embedding_loss(Tensor self, Tensor target, float margin=1.0, int reduction=Mean) -> Tensor + +- func: group_norm(Tensor input, int num_groups, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enabled=True) -> Tensor + +- func: native_group_norm(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps) -> (Tensor, Tensor, Tensor) + dispatch: + CPU, CUDA: native_group_norm + CompositeExplicitAutograd: math_group_norm + autogen: native_group_norm.out + tags: core + +- func: native_group_norm_backward(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, SymInt N, SymInt C, SymInt HxW, int group, bool[3] output_mask) -> (Tensor, Tensor, Tensor) + dispatch: + CPU, CUDA: native_group_norm_backward + autogen: native_group_norm_backward.out + tags: core + +# Real to complex forward FFT +- func: _fft_r2c(Tensor self, int[] dim, int normalization, bool onesided) -> Tensor + variants: function + dispatch: + CPU: _fft_r2c_mkl + CUDA: _fft_r2c_cufft + MPS: _fft_r2c_mps + +- func: _fft_r2c.out(Tensor self, int[] dim, int normalization, bool onesided, *, Tensor(a!) out) -> Tensor(a!) + variants: function + dispatch: + CPU: _fft_r2c_mkl_out + CUDA: _fft_r2c_cufft_out + MPS: _fft_r2c_mps_out + +# Complex to real inverse FFT +- func: _fft_c2r(Tensor self, int[] dim, int normalization, SymInt last_dim_size) -> Tensor + variants: function + dispatch: + CPU: _fft_c2r_mkl + CUDA: _fft_c2r_cufft + MPS: _fft_c2r_mps + +- func: _fft_c2r.out(Tensor self, int[] dim, int normalization, SymInt last_dim_size, *, Tensor(a!) out) -> Tensor(a!) + variants: function + dispatch: + CPU: _fft_c2r_mkl_out + CUDA: _fft_c2r_cufft_out + MPS: _fft_c2r_mps_out + +# Standard complex to complex FFT (forward or backward) +- func: _fft_c2c(Tensor self, SymInt[] dim, int normalization, bool forward) -> Tensor + variants: function + dispatch: + CPU: _fft_c2c_mkl + CUDA: _fft_c2c_cufft + MPS: _fft_c2c_mps + +- func: _fft_c2c.out(Tensor self, SymInt[] dim, int normalization, bool forward, *, Tensor(a!) out) -> Tensor(a!) 
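The `_fft_r2c` / `_fft_c2r` / `_fft_c2c` primitives above are the internal kernels behind the public `torch.fft` module. A brief sketch of the user-facing calls that, as these schemas indicate, lower to them (signal length 8 is just an example):

```python
import torch

x = torch.randn(8)

# Real-to-complex forward FFT (backed by _fft_r2c with onesided=True).
X = torch.fft.rfft(x)

# Complex-to-real inverse FFT (backed by _fft_c2r); n restores last_dim_size=8.
x_back = torch.fft.irfft(X, n=8)

# Complex-to-complex FFT in either direction (backed by _fft_c2c).
Y = torch.fft.fft(x.to(torch.complex64))

assert torch.allclose(x, x_back, atol=1e-6)
```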
+ variants: function + dispatch: + CPU: _fft_c2c_mkl_out + CUDA: _fft_c2c_cufft_out + MPS: _fft_c2c_mps_out + +- func: _validate_compressed_sparse_indices(bool is_crow, Tensor compressed_idx, Tensor plain_idx, int cdim, int dim, int nnz) -> () + device_check: NoCheck + variants: function + dispatch: + CPU: _validate_compressed_sparse_indices_cpu + CUDA: _validate_compressed_sparse_indices_cuda + +- func: _cufft_get_plan_cache_size(DeviceIndex device_index) -> int + +- func: _cufft_get_plan_cache_max_size(DeviceIndex device_index) -> int + +- func: _cufft_set_plan_cache_max_size(DeviceIndex device_index, int max_size) -> () + +- func: _cufft_clear_plan_cache(DeviceIndex device_index) -> () + +- func: index.Tensor(Tensor self, Tensor?[] indices) -> Tensor + device_check: NoCheck # TensorIterator + structured_delegate: index.Tensor_out + variants: function, method + dispatch: + QuantizedCPU: quantized_index + tags: [core, dynamic_output_shape] + # NB: This function is special-cased in tools/autograd/gen_variable_type.py + # NB: The following functions are declared in aten/src/ATen/templates/TensorBody.h and defined in aten/src/ATen/TensorIndexing.cpp: + # - Tensor Tensor::index(ArrayRef indices) + # - Tensor Tensor::index(std::initializer_list indices) + +- func: index.Tensor_out(Tensor self, Tensor?[] indices, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck + structured: True + structured_inherits: TensorIteratorBase + precomputed: + - indices -> DimVector sizes, DimVector strides + dispatch: + CPU, CUDA, MPS: index_out + +# Used by inductor to signal indexing without bounds checks +# Note that we don't support boolean indexing, to avoid dynamic output shapes +- func: _unsafe_index.Tensor(Tensor self, Tensor?[] indices) -> Tensor + variants: function + dispatch: + CompositeExplicitAutograd: _unsafe_index + +- func: index_copy.out(Tensor self, int dim, Tensor index, Tensor source, *, Tensor(a!) out) -> Tensor(a!) + structured: True + variants: function + precomputed: + - dim -> int dim + dispatch: + CPU, CUDA: index_copy_out + +- func: index_copy_(Tensor(a!) self, int dim, Tensor index, Tensor source) -> Tensor(a!) + variants: method + structured_delegate: index_copy.out + +- func: index_copy(Tensor self, int dim, Tensor index, Tensor source) -> Tensor + variants: function, method + structured_delegate: index_copy.out + +- func: index_copy_.dimname(Tensor(a!) self, Dimname dim, Tensor index, Tensor source) -> Tensor(a!) + variants: method + +- func: index_copy.dimname(Tensor self, Dimname dim, Tensor index, Tensor source) -> Tensor + variants: function, method + +- func: index_put_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor(a!) 
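The `index_copy` entries above copy slices of a source tensor into `self` at the given indices along one dimension, with the functional form delegating to `index_copy.out`. A minimal sketch of the functional and in-place variants declared here:

```python
import torch

t = torch.zeros(3, 4)
src = torch.arange(8, dtype=torch.float32).reshape(2, 4)
idx = torch.tensor([0, 2])

# Copy the rows of `src` into rows 0 and 2 of `t` along dim 0.
out = t.index_copy(0, idx, src)    # functional variant
t.index_copy_(0, idx, src)         # in-place variant

assert torch.equal(out, t)
```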
+ device_check: NoCheck # delegate to _index_put_impl_, which leverages TensorIterator + variants: function, method + dispatch: + CompositeExplicitAutograd: index_put_ + autogen: index_put.out + # NB: The following functions are declared in aten/src/ATen/templates/TensorBody.h and defined in aten/src/ATen/TensorIndexing.cpp: + # - Tensor & Tensor::index_put_(ArrayRef indices, Tensor const & rhs) + # - Tensor & Tensor::index_put_(ArrayRef indices, Scalar v) + # - Tensor & Tensor::index_put_(std::initializer_list indices, Tensor const & rhs) + # - Tensor & Tensor::index_put_(std::initializer_list indices, Scalar v) + +- func: index_put(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor + device_check: NoCheck # delegate to _index_put_impl_ after clone, which leverages TensorIterator + variants: function, method + dispatch: + CompositeExplicitAutograd: index_put + tags: core + +- func: _unsafe_index_put(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor + device_check: NoCheck # delegate to _index_put_impl_ after clone, which leverages TensorIterator + variants: function + dispatch: + CompositeExplicitAutograd: _unsafe_index_put + +- func: _index_put_impl_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: function + dispatch: + CPU, CUDA, MPS: _index_put_impl_ + QuantizedCPU: _index_put_impl_quantized_cpu_ + QuantizedCUDA: _index_put_impl_quantized_cuda_ + autogen: _index_put_impl, _index_put_impl.out + +- func: instance_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool use_input_stats, float momentum, float eps, bool cudnn_enabled) -> Tensor + variants: function + +- func: isclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> Tensor + variants: function, method + +- func: isin.Tensor_Tensor_out(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!) + variants: function + structured: True + dispatch: + CPU, CUDA: isin_Tensor_Tensor_out + +- func: isin.Tensor_Tensor(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False) -> Tensor + variants: function + structured_delegate: isin.Tensor_Tensor_out + +- func: isin.Tensor_Scalar_out(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!) + variants: function + structured: True + dispatch: + CPU, CUDA: isin_Tensor_Scalar_out + +- func: isin.Tensor_Scalar(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False) -> Tensor + variants: function + structured_delegate: isin.Tensor_Scalar_out + +- func: isin.Scalar_Tensor_out(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!) 
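The `index_put` family above is what advanced-indexing assignment lowers to, with `accumulate` selecting between overwriting and adding, and `index.Tensor` being the read-side counterpart. A small sketch using only the public tensor API:

```python
import torch

t = torch.zeros(3, 4)
rows = torch.tensor([0, 2])
cols = torch.tensor([1, 3])

# Advanced-indexing assignment; lowers to index_put_ with accumulate=False.
t[rows, cols] = 1.0

# Equivalent explicit call; accumulate=True adds instead of overwriting.
t.index_put_((rows, cols), torch.tensor(5.0), accumulate=True)

# index.Tensor is the read path: gathers the selected elements.
print(t[rows, cols])   # tensor([6., 6.])
```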
+ variants: function + structured: True + dispatch: + CPU, CUDA: isin_Scalar_Tensor_out + +- func: isin.Scalar_Tensor(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False) -> Tensor + variants: function + structured_delegate: isin.Scalar_Tensor_out + +- func: isnan(Tensor self) -> Tensor + variants: function, method + device_check: NoCheck + device_guard: False + dispatch: + CPU, CUDA, MPS: isnan + SparseCPU, SparseCUDA: isnan_sparse + SparseCsrCPU, SparseCsrCUDA: isnan_sparse_csr + autogen: isnan.out + tags: [core, pointwise] + +- func: is_distributed(Tensor self) -> bool + variants: function, method + device_check: NoCheck + device_guard: False + +- func: is_floating_point(Tensor self) -> bool + variants: function, method + device_check: NoCheck + device_guard: False + manual_cpp_binding: True + +- func: is_complex(Tensor self) -> bool + variants: function, method + device_check: NoCheck + device_guard: False + manual_cpp_binding: True + +- func: is_conj(Tensor self) -> bool + variants: function, method + device_guard: False + manual_cpp_binding: True + +- func: _is_zerotensor(Tensor self) -> bool + variants: function, method + device_guard: False + manual_cpp_binding: True + +- func: is_neg(Tensor self) -> bool + variants: function, method + device_guard: False + manual_cpp_binding: True + +- func: isreal(Tensor self) -> Tensor + variants: function, method + +- func: is_nonzero(Tensor self) -> bool + variants: function, method + device_check: NoCheck + device_guard: False + +- func: is_same_size(Tensor self, Tensor other) -> bool + variants: function, method + device_check: NoCheck + device_guard: False + dispatch: + NestedTensorCPU, NestedTensorCUDA: nested_is_same_size + CompositeExplicitAutograd: is_same_size + +- func: is_signed(Tensor self) -> bool + variants: function, method + device_check: NoCheck + device_guard: False + manual_cpp_binding: True + +- func: is_inference(Tensor self) -> bool + variants: function, method + device_check: NoCheck + device_guard: False + manual_cpp_binding: True + +- func: kl_div(Tensor self, Tensor target, int reduction=Mean, *, bool log_target=False) -> Tensor + +- func: kron(Tensor self, Tensor other) -> Tensor + variants: function, method + +- func: kron.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + +- func: kthvalue(Tensor self, int k, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices) + variants: function, method + dispatch: + CompositeExplicitAutograd: kthvalue + +- func: kthvalue.values(Tensor self, int k, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + dispatch: + CPU: kthvalue_out_cpu + CUDA: kthvalue_out_cuda + +- func: kthvalue.dimname(Tensor self, int k, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices) + variants: function, method + +- func: kthvalue.dimname_out(Tensor self, int k, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + +- func: layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enable=True) -> Tensor + dispatch: + CompositeImplicitAutograd: layer_norm_symint + +- func: native_layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? 
bias, float eps) -> (Tensor, Tensor, Tensor) + dispatch: + CPU: layer_norm_cpu + CUDA: layer_norm_cuda + MPS: layer_norm_mps + CompositeExplicitAutograd: math_native_layer_norm + NestedTensorCPU, NestedTensorCUDA: nested_layer_norm + autogen: native_layer_norm.out + tags: core + +- func: native_layer_norm_backward(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask) -> (Tensor, Tensor, Tensor) + dispatch: + CPU: layer_norm_backward_cpu + CUDA: layer_norm_backward_cuda + MPS: layer_norm_backward_mps + NestedTensorCPU, NestedTensorCUDA: layer_norm_backward_nested + autogen: native_layer_norm_backward.out + tags: core + +- func: nan_to_num(Tensor self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor + variants: function, method + dispatch: + CompositeExplicitAutograd: nan_to_num + SparseCPU, SparseCUDA: nan_to_num_sparse + tags: pointwise + +- func: nan_to_num_(Tensor(a!) self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor(a!) + variants: function, method + dispatch: + CompositeExplicitAutograd: nan_to_num_ + SparseCPU, SparseCUDA: nan_to_num_sparse_ + tags: pointwise + +- func: nan_to_num.out(Tensor self, float? nan=None, float? posinf=None, float? neginf=None, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CPU, CUDA: nan_to_num_out + MPS: nan_to_num_out_mps + SparseCPU, SparseCUDA: nan_to_num_sparse_out + tags: pointwise + +- func: linear(Tensor input, Tensor weight, Tensor? bias=None) -> Tensor + python_module: nn + dispatch: + CompositeImplicitAutograd: linear + NestedTensorCPU, NestedTensorCUDA: nested_linear + MPS: _mps_linear + +- func: linear_backward(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask) -> (Tensor, Tensor, Tensor) + dispatch: + NestedTensorCPU, NestedTensorCUDA: nested_linear_backward + MPS: mps_linear_backward + autogen: linear_backward.out + +- func: linear.out(Tensor input, Tensor weight, Tensor? bias=None, *, Tensor(a!) out) -> Tensor(a!) + python_module: nn + dispatch: + CompositeExplicitAutograd: linear_out + +- func: mkldnn_linear(Tensor self, Tensor weight, Tensor? bias=None) -> Tensor + python_module: nn + dispatch: + MkldnnCPU: mkldnn_linear + autogen: mkldnn_linear.out + +- func: mkldnn_linear_backward_input(int[] input_size, Tensor grad_output, Tensor weight) -> Tensor + dispatch: + MkldnnCPU: mkldnn_linear_backward_input + autogen: mkldnn_linear_backward_input.out + +- func: mkldnn_linear_backward_weights(Tensor grad_output, Tensor input, Tensor weight, bool bias_defined) -> (Tensor, Tensor) + dispatch: + MkldnnCPU: mkldnn_linear_backward_weights + autogen: mkldnn_linear_backward_weights.out + +- func: mkldnn_linear_backward(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask) -> (Tensor, Tensor, Tensor) + dispatch: + MkldnnCPU: mkldnn_linear_backward + autogen: mkldnn_linear_backward.out + +- func: _cslt_compress(Tensor input) -> Tensor + dispatch: + CUDA: _cslt_compress + +- func: _cslt_sparse_mm(Tensor compressed_A, Tensor dense_B, Tensor? bias=None, Tensor? alpha=None, ScalarType? out_dtype=None, bool transpose_result=False, int alg_id=0) -> Tensor + dispatch: + CUDA: _cslt_sparse_mm + +- func: _cslt_sparse_mm_search(Tensor compressed_A, Tensor dense_B, Tensor? bias=None, Tensor? alpha=None, ScalarType? out_dtype=None, bool transpose_result=False) -> int + dispatch: + CUDA: _cslt_sparse_mm_search + +- func: _sparse_semi_structured_linear(Tensor input, Tensor weight, Tensor meta, *, Tensor? 
bias=None, str? activation=None, ScalarType? out_dtype=None) -> Tensor + dispatch: + CUDA: _sparse_semi_structured_linear + +- func: _mixed_dtypes_linear(Tensor input, Tensor weight, Tensor scale, *, Tensor? bias=None, str? activation=None) -> Tensor + dispatch: + CUDA: _mixed_dtypes_linear + +- func: fbgemm_linear_int8_weight_fp32_activation(Tensor input, Tensor weight, Tensor packed, Tensor col_offsets, Scalar weight_scale, Scalar weight_zero_point, Tensor bias) -> Tensor + +- func: fbgemm_linear_int8_weight(Tensor input, Tensor weight, Tensor packed, Tensor col_offsets, Scalar weight_scale, Scalar weight_zero_point, Tensor bias) -> Tensor + +- func: fbgemm_linear_quantize_weight(Tensor input) -> (Tensor, Tensor, float, int) + +- func: fbgemm_pack_gemm_matrix_fp16(Tensor input) -> Tensor + +- func: fbgemm_linear_fp16_weight_fp32_activation(Tensor input, Tensor packed_weight, Tensor bias) -> Tensor + +- func: fbgemm_linear_fp16_weight(Tensor input, Tensor packed_weight, Tensor bias) -> Tensor + +- func: fbgemm_pack_quantized_matrix(Tensor input) -> Tensor + +- func: fbgemm_pack_quantized_matrix.KN(Tensor input, int K, int N) -> Tensor + +- func: ldexp.Tensor(Tensor self, Tensor other) -> Tensor + variants: function, method + +- func: ldexp_(Tensor(a!) self, Tensor other) -> Tensor(a!) + variants: function, method + tags: pointwise + +- func: ldexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + tags: pointwise + +- func: linspace(Scalar start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + dispatch: + CompositeExplicitAutograd: linspace + +- func: linspace.Tensor_Tensor(Tensor start, Tensor end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + category_override: factory + dispatch: + CompositeExplicitAutograd: linspace + +- func: linspace.Tensor_Scalar(Tensor start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + category_override: factory + dispatch: + CompositeExplicitAutograd: linspace + +- func: linspace.Scalar_Tensor(Scalar start, Tensor end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + category_override: factory + dispatch: + CompositeExplicitAutograd: linspace + +- func: linspace.out(Scalar start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CPU, Meta: linspace_out + CUDA: linspace_cuda_out + MPS: linspace_out_mps + +- func: linspace.Tensor_Tensor_out(Tensor start, Tensor end, int steps, *, Tensor(a!) out) -> Tensor(a!) + category_override: factory + dispatch: + CompositeExplicitAutograd: linspace_out + +- func: linspace.Tensor_Scalar_out(Tensor start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!) + category_override: factory + dispatch: + CompositeExplicitAutograd: linspace_out + +- func: linspace.Scalar_Tensor_out(Scalar start, Tensor end, int steps, *, Tensor(a!) out) -> Tensor(a!) + category_override: factory + dispatch: + CompositeExplicitAutograd: linspace_out + +- func: log(Tensor self) -> Tensor + device_check: NoCheck # TensorIterator + structured_delegate: log.out + variants: function, method + tags: [core, pointwise] + +- func: log_(Tensor(a!) self) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured_delegate: log.out + variants: function, method + tags: pointwise + +- func: log.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: log_out + MPS: log_out_mps + tags: pointwise + +- func: log10(Tensor self) -> Tensor + device_check: NoCheck # TensorIterator + structured_delegate: log10.out + variants: function, method + tags: [core, pointwise] + +- func: log10_(Tensor(a!) self) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured_delegate: log10.out + variants: function, method + tags: pointwise + +- func: log10.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: log10_out + MPS: log10_out_mps + tags: pointwise + +- func: log1p(Tensor self) -> Tensor + device_check: NoCheck # TensorIterator + structured_delegate: log1p.out + variants: function, method + dispatch: + SparseCPU, SparseCUDA: log1p_sparse + SparseCsrCPU, SparseCsrCUDA: log1p_sparse_csr + tags: [core, pointwise] + +- func: log1p_(Tensor(a!) self) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured_delegate: log1p.out + variants: function, method + dispatch: + SparseCPU, SparseCUDA: log1p_sparse_ + SparseCsrCPU, SparseCsrCUDA: log1p_sparse_csr_ + tags: pointwise + +- func: log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: log1p_out + MPS: log1p_out_mps + SparseCPU, SparseCUDA: log1p_sparse_out + SparseCsrCPU, SparseCsrCUDA: log1p_sparse_csr_out + tags: pointwise + +- func: log2(Tensor self) -> Tensor + device_check: NoCheck # TensorIterator + structured_delegate: log2.out + variants: function, method + tags: [core, pointwise] + +- func: log2_(Tensor(a!) self) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured_delegate: log2.out + variants: function, method + tags: pointwise + +- func: log2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: log2_out + MPS: log2_out_mps + tags: pointwise + +- func: logaddexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: logaddexp_out + MPS: logaddexp_out_mps + tags: pointwise + +- func: logaddexp(Tensor self, Tensor other) -> Tensor + variants: method, function + structured_delegate: logaddexp.out + tags: pointwise + +- func: logaddexp2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
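`logaddexp` above computes log(exp(a) + exp(b)) in a numerically stable way, which is why it exists as a fused pointwise op rather than a composition of `exp` and `log`. A quick sketch contrasting it with the naive formulation (values chosen to force underflow):

```python
import torch

a = torch.tensor([-1000.0, 10.0])
b = torch.tensor([-1000.0, 12.0])

# Stable log(exp(a) + exp(b)); the naive form underflows exp(-1000) to 0.
stable = torch.logaddexp(a, b)
naive = torch.log(torch.exp(a) + torch.exp(b))

print(stable)   # tensor([-999.3069,   12.1269])
print(naive)    # tensor([      -inf,   12.1269])
```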
+ structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: logaddexp2_out + MPS: logaddexp2_out_mps + tags: pointwise + +- func: logaddexp2(Tensor self, Tensor other) -> Tensor + variants: method, function + structured_delegate: logaddexp2.out + tags: pointwise + +- func: xlogy.Tensor(Tensor self, Tensor other) -> Tensor + device_check: NoCheck # TensorIterator + structured_delegate: xlogy.OutTensor + variants: function, method + tags: pointwise + +- func: xlogy.Scalar_Self(Scalar self, Tensor other) -> Tensor + device_check: NoCheck # TensorIterator + variants: function + dispatch: + CompositeExplicitAutograd: xlogy + tags: pointwise + +- func: xlogy.Scalar_Other(Tensor self, Scalar other) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + dispatch: + CompositeExplicitAutograd: xlogy + tags: pointwise + +# xlogy: inplace variant +- func: xlogy_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: function, method + structured_delegate: xlogy.OutTensor + tags: pointwise + +- func: xlogy_.Scalar_Other(Tensor(a!) self, Scalar other) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: function, method + dispatch: + CompositeExplicitAutograd: xlogy_ + +# xlogy: out variant +- func: xlogy.OutTensor(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured: True + structured_inherits: TensorIteratorBase + variants: function + dispatch: + CPU, CUDA: xlogy_out + MPS: xlogy_out_mps + tags: pointwise + +- func: xlogy.OutScalar_Self(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: function + dispatch: + CompositeExplicitAutograd: xlogy_out + tags: pointwise + +- func: xlogy.OutScalar_Other(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: function + dispatch: + CompositeExplicitAutograd: xlogy_out + tags: pointwise + +- func: logspace(Scalar start, Scalar end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + dispatch: + CompositeExplicitAutograd: logspace + +- func: logspace.Tensor_Tensor(Tensor start, Tensor end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + category_override: factory + dispatch: + CompositeExplicitAutograd: logspace + +- func: logspace.Tensor_Scalar(Tensor start, Scalar end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + category_override: factory + dispatch: + CompositeExplicitAutograd: logspace + +- func: logspace.Scalar_Tensor(Scalar start, Tensor end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + category_override: factory + dispatch: + CompositeExplicitAutograd: logspace + +- func: logspace.out(Scalar start, Scalar end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CPU, Meta: logspace_out + CUDA: logspace_cuda_out + +- func: logspace.Tensor_Tensor_out(Tensor start, Tensor end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!) 
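The `xlogy` entries above implement x * log(y) with the documented convention that the result is 0 wherever x is 0, avoiding the 0 * (-inf) = NaN of the naive expression. A short sketch of that convention as I read it:

```python
import torch

x = torch.tensor([0.0, 2.0])
y = torch.tensor([0.0, 3.0])

# xlogy(0, y) == 0 by convention, even when log(y) is -inf.
print(torch.xlogy(x, y))    # tensor([0.0000, 2.1972])
print(x * torch.log(y))     # tensor([   nan, 2.1972])
```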
+ category_override: factory + dispatch: + CompositeExplicitAutograd: logspace_out + +- func: logspace.Tensor_Scalar_out(Tensor start, Scalar end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!) + category_override: factory + dispatch: + CompositeExplicitAutograd: logspace_out + +- func: logspace.Scalar_Tensor_out(Scalar start, Tensor end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!) + category_override: factory + dispatch: + CompositeExplicitAutograd: logspace_out + +# log_softmax allows positional dtype, unlike most operators, because kwonly is BC-breaking when loading jit models. +- func: log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor + variants: function, method + +- func: log_softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!) + variants: function + dispatch: + CompositeExplicitAutograd: log_softmax_out + +- func: log_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor + variants: function, method + +- func: _log_softmax(Tensor self, int dim, bool half_to_float) -> Tensor + structured_delegate: _log_softmax.out + tags: core + +- func: _log_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!) + structured: True + dispatch: + CPU: log_softmax_cpu_out + CUDA: log_softmax_cuda_out + MPS: log_softmax_mps_out + +- func: _log_softmax_backward_data(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype) -> Tensor + structured_delegate: _log_softmax_backward_data.out + +- func: _log_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype, *, Tensor(a!) out) -> Tensor(a!) + structured: True + dispatch: + CPU: log_softmax_backward_cpu_out + CUDA: log_softmax_backward_cuda_out + MPS: log_softmax_backward_mps_out + +- func: _logcumsumexp(Tensor self, int dim) -> Tensor + dispatch: + CPU: _logcumsumexp_cpu + CUDA: _logcumsumexp_cuda + +- func: _logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CPU: _logcumsumexp_out_cpu + CUDA: _logcumsumexp_out_cuda + +- func: logcumsumexp(Tensor self, int dim) -> Tensor + variants: function, method + dispatch: + CompositeExplicitAutograd: logcumsumexp + +- func: logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CompositeExplicitAutograd: logcumsumexp_out + +- func: logcumsumexp.dimname(Tensor self, Dimname dim) -> Tensor + variants: function, method + +- func: logcumsumexp.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) out) -> Tensor(a!) + +- func: logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + dispatch: + CompositeExplicitAutograd: logsumexp + +- func: logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + dispatch: + # calls squeeze + CompositeExplicitAutogradNonFunctional: logsumexp_out + +- func: logsumexp.names(Tensor self, Dimname[1] dim, bool keepdim=False) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + +- func: logsumexp.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) 
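The comment above notes that `log_softmax` accepts `dtype` positionally, unlike most operators, to avoid breaking serialized JIT models. A sketch of both call styles; that the positional form is accepted follows from the schema above (no `*` before `dtype`) rather than from anything beyond it:

```python
import torch

x = torch.randn(2, 3)

# dtype passed positionally (unusual for PyTorch operators)...
a = torch.log_softmax(x, 1, torch.float64)
# ...or as a keyword, the style most other reductions require.
b = torch.log_softmax(x, dim=1, dtype=torch.float64)

assert torch.equal(a, b) and a.dtype == torch.float64
```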
+ device_check: NoCheck # TensorIterator + +- func: margin_ranking_loss(Tensor input1, Tensor input2, Tensor target, float margin=0.0, int reduction=Mean) -> Tensor + +- func: matmul(Tensor self, Tensor other) -> Tensor + variants: function, method + dispatch: + CompositeImplicitAutograd: matmul + NestedTensorCPU, NestedTensorCUDA: matmul_nested + +- func: matmul_backward(Tensor grad, Tensor self, Tensor other, bool[2] mask) -> (Tensor, Tensor) + dispatch: + NestedTensorCPU, NestedTensorCUDA: matmul_backward_nested + autogen: matmul_backward.out + +- func: matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CompositeImplicitAutograd: matmul_out + NestedTensorCPU, NestedTensorCUDA: matmul_out_nested + +# Alias to linalg.matrix_power +- func: matrix_power(Tensor self, int n) -> Tensor + variants: function, method + +# Alias to linalg.matrix_power +- func: matrix_power.out(Tensor self, int n, *, Tensor(a!) out) -> Tensor(a!) + +# Alias to linalg.matrix_exp +- func: matrix_exp(Tensor self) -> Tensor + variants: function, method + +# This function should be deprecated in favor of differential_analytic_matrix_function in FunctionsManual.cpp +- func: matrix_exp_backward(Tensor self, Tensor grad) -> Tensor + +# DEPRECATED: Use torch.aminmax instead +- func: _aminmax(Tensor self) -> (Tensor, Tensor) + dispatch: + CPU, CUDA: _aminmax_all + autogen: _aminmax.out + +# DEPRECATED: Use torch.aminmax instead +- func: _aminmax.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor, Tensor) + dispatch: + CPU, CUDA: _aminmax + autogen: _aminmax.dim_out + +- func: aminmax(Tensor self, *, int? dim=None, bool keepdim=False) -> (Tensor min, Tensor max) + device_check: NoCheck # TensorIterator + structured_delegate: aminmax.out + variants: function, method + +- func: aminmax.out(Tensor self, *, int? dim=None, bool keepdim=False, Tensor(a!) min, Tensor(b!) max) -> (Tensor(a!) min, Tensor(b!) max) + device_check: NoCheck # TensorIterator + structured: True + dispatch: + CPU, CUDA: aminmax_out + MPS: aminmax_out_mps + +- func: _compute_linear_combination(Tensor input, Tensor coefficients) -> Tensor + dispatch: + CPU, CUDA: _compute_linear_combination + +- func: _compute_linear_combination.out(Tensor input, Tensor coefficients, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CPU, CUDA: _compute_linear_combination_out + +- func: max.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices) + device_check: NoCheck # TensorIterator + structured_delegate: max.dim_max + variants: function, method + dispatch: + QuantizedCPU, QuantizedCUDA: qmax + tags: core + +- func: max.dim_max(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices) + device_check: NoCheck # TensorIterator + structured: True + precomputed: + - dim -> int dim + dispatch: + CPU, CUDA: max_out + MPS: max_out_mps + +- func: max.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices) + device_check: NoCheck # TensorIterator + variants: function, method + +- func: max.names_dim_max(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) 
indices) + device_check: NoCheck # TensorIterator + +- func: value_selecting_reduction_backward(Tensor grad, int dim, Tensor indices, SymInt[] sizes, bool keepdim) -> Tensor + variants: function + device_check: NoCheck + device_guard: False + dispatch: + CompositeImplicitAutograd: value_selecting_reduction_backward_symint + +- func: amax(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor + variants: function, method + structured_delegate: amax.out + tags: core + +- func: amax.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + structured: True + dispatch: + CPU, CUDA: amax_out + MPS: amax_out_mps + +# Return: (Tensor output, Tensor indices) +- func: max_pool1d_with_indices(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor) + +- func: max_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> Tensor + +- func: max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor + dispatch: + CompositeImplicitAutograd: max_pool2d + MPS: mps_max_pool2d + +- func: max_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor + dispatch: + MPS: mps_max_pool2d_backward + autogen: max_pool2d_backward.out + +- func: mkldnn_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor + dispatch: + MkldnnCPU: mkldnn_max_pool2d + autogen: mkldnn_max_pool2d.out + +- func: mkldnn_max_pool2d_backward(Tensor grad_output, Tensor output, Tensor input, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor + dispatch: + MkldnnCPU: mkldnn_max_pool2d_backward + autogen: mkldnn_max_pool2d_backward.out + +- func: mkldnn_max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor + dispatch: + MkldnnCPU: mkldnn_max_pool3d + autogen: mkldnn_max_pool3d.out + +- func: mkldnn_max_pool3d_backward(Tensor grad_output, Tensor output, Tensor input, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor + dispatch: + MkldnnCPU: mkldnn_max_pool3d_backward + autogen: mkldnn_max_pool3d_backward.out + +- func: quantized_max_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> Tensor + dispatch: + QuantizedCPU: quantized_max_pool1d + autogen: quantized_max_pool1d.out + +- func: quantized_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor + dispatch: + QuantizedCPU: quantized_max_pool2d + QuantizedCUDA: quantized_max_pool2d_cudnn + autogen: quantized_max_pool2d.out + +- func: quantized_max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor + dispatch: + QuantizedCPU: quantized_max_pool3d + autogen: quantized_max_pool3d.out + +- func: max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor + +# The CPU and GPU dispatch variants are named weirdly here because otherwise there +# are namespacing issues in C++ +- func: mean(Tensor self, *, ScalarType? 
dtype=None) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + dispatch: + CompositeExplicitAutograd: mean + tags: core + +# For normal naming convention this should be `mean.out`. However since we already have `mean.out` we have to rename this. +# FIXME: fix CI jobs and re-enable this +#- func: mean.dtype_out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) +# device_check: NoCheck # TensorIterator +# dispatch: +# CompositeExplicitAutograd: mean_dtype_out + +- func: mean.dim(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor + structured_delegate: mean.out + device_check: NoCheck # TensorIterator + variants: function, method + dispatch: + QuantizedCPU: mean_quantized_cpu + tags: core + +- func: mean.out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + structured: True + device_check: NoCheck # TensorIterator + dispatch: + CPU, CUDA: mean_out + MPS: mean_out_mps + QuantizedCPU: mean_out_quantized_cpu + +- func: mean.names_dim(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + +- func: mean.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + +- func: nanmean(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor + device_check: NoCheck # Composite + variants: function, method + +- func: nanmean.out(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # Composite + +- func: median(Tensor self) -> Tensor + variants: function, method + dispatch: + CPU: median_cpu + CUDA: median_cuda + MPS: median_mps + autogen: median.out + +- func: median.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices) + variants: function, method + dispatch: + CompositeExplicitAutograd: median + +- func: median.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + dispatch: + CPU: median_out_cpu + CUDA: median_out_cuda + MPS: median_out_mps + +- func: median.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices) + variants: function, method + +- func: median.names_dim_values(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + +- func: nanmedian(Tensor self) -> Tensor + variants: function, method + dispatch: + CPU: nanmedian_cpu + CUDA: nanmedian_cuda + autogen: nanmedian.out + +- func: nanmedian.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices) + variants: function, method + dispatch: + CompositeExplicitAutograd: nanmedian + +- func: nanmedian.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + dispatch: + CPU: nanmedian_out_cpu + CUDA: nanmedian_out_cuda + +- func: nanmedian.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices) + variants: function, method + +- func: nanmedian.names_dim_values(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) 
indices) + +- func: min.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices) + device_check: NoCheck # TensorIterator + structured_delegate: min.dim_min + variants: function, method + dispatch: + QuantizedCPU, QuantizedCUDA: qmin + tags: core + +- func: min.dim_min(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices) + device_check: NoCheck # TensorIterator + structured: True + precomputed: + - dim -> int dim + dispatch: + CPU, CUDA: min_out + MPS: min_out_mps + +- func: min.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices) + device_check: NoCheck # TensorIterator + variants: function, method + +- func: min.names_dim_min(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices) + device_check: NoCheck # TensorIterator + +- func: amin(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor + variants: function, method + structured_delegate: amin.out + tags: core + +- func: amin.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + structured: True + dispatch: + CPU, CUDA: amin_out + MPS: amin_out_mps + +# TODO: Add this function to MPS dispatch key so that we avoid declaring it in +# native_functions.yaml +# https://github.com/pytorch/pytorch/issues/77394 +- func: _mps_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups) -> Tensor + dispatch: + MPS: _mps_convolution + autogen: _mps_convolution.out + +- func: mps_convolution_backward(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor) + dispatch: + MPS: mps_convolution_backward + autogen: mps_convolution_backward.out + +- func: mkldnn_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups) -> Tensor + dispatch: + CompositeExplicitAutograd: mkldnn_convolution + autogen: mkldnn_convolution.out + +- func: mkldnn_rnn_layer(Tensor input, Tensor weight0, Tensor weight1, Tensor weight2, Tensor weight3, Tensor hx_, Tensor cx_, bool reverse, int[] batch_sizes, int mode, int hidden_size, int num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train) -> (Tensor, Tensor, Tensor, Tensor) + dispatch: + CPU: mkldnn_rnn_layer + MkldnnCPU: mkldnn_rnn_layer + autogen: mkldnn_rnn_layer.out + +- func: mkldnn_rnn_layer_backward(Tensor input, Tensor weight1, Tensor weight2, Tensor weight3, Tensor weight4, Tensor hx_, Tensor cx_tmp, Tensor output, Tensor hy_, Tensor cy_, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, bool reverse, int mode, int hidden_size, int num_layers, bool has_biases, bool train, bool bidirectional, int[] batch_sizes, bool batch_first, Tensor workspace) -> (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor) + dispatch: + CPU: mkldnn_rnn_layer_backward + autogen: mkldnn_rnn_layer_backward.out + +- func: miopen_batch_norm(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon) -> (Tensor, Tensor, Tensor) + dispatch: + CUDA: miopen_batch_norm + autogen: miopen_batch_norm.out + +- func: miopen_batch_norm_backward(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? 
save_mean, Tensor? save_var, float epsilon) -> (Tensor, Tensor, Tensor) + dispatch: + CUDA: miopen_batch_norm_backward + autogen: miopen_batch_norm_backward.out + +- func: miopen_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic) -> Tensor + dispatch: + CUDA: miopen_convolution + autogen: miopen_convolution.out + +- func: miopen_convolution_transpose(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic) -> Tensor + dispatch: + CUDA: miopen_convolution_transpose + autogen: miopen_convolution_transpose.out + +- func: miopen_depthwise_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic) -> Tensor + dispatch: + CUDA: miopen_depthwise_convolution + autogen: miopen_depthwise_convolution.out + +- func: miopen_convolution_relu(Tensor self, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor + dispatch: + CUDA: miopen_convolution_relu + +- func: miopen_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor + dispatch: + CUDA: miopen_convolution_add_relu + +- func: miopen_rnn(Tensor input, Tensor[] weight, int weight_stride0, Tensor hx, Tensor? cx, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state) -> (Tensor, Tensor, Tensor, Tensor, Tensor) + dispatch: + CUDA: miopen_rnn + autogen: miopen_rnn.out + tags: nondeterministic_seeded + + +- func: miopen_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[]) + dispatch: + CUDA: miopen_rnn_backward + autogen: miopen_rnn_backward.out + +- func: mm(Tensor self, Tensor mat2) -> Tensor + structured_delegate: mm.out + variants: function, method + dispatch: + SparseCPU, SparseCUDA: _sparse_mm + SparseCsrCPU, SparseCsrCUDA: _sparse_csr_mm + tags: core + +- func: mm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!) + structured: True + dispatch: + CPU: mm_out_cpu + CUDA: mm_out_cuda + MPS: mm_out_mps + SparseCPU, SparseCUDA: _sparse_mm_out + SparseCsrCPU, SparseCsrCUDA: _sparse_csr_mm_out + +- func: _int_mm(Tensor self, Tensor mat2) -> Tensor + dispatch: + CUDA: _int_mm_cuda + +- func: _int_mm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!) 
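The `mm` entry below this batch-norm block is a structured op whose schema also carries sparse dispatch entries, so the same Python call covers sparse-times-dense products. A small sketch (COO sparsity chosen only as an example):

```python
import torch

a = torch.randn(2, 3)
b = torch.randn(3, 4)

# Dense matmul; the structured mm.out kernel underneath.
c = torch.mm(a, b)

# Same schema, sparse dispatch (_sparse_mm kernels) for a sparse first operand.
a_sp = a.to_sparse()
c_sp = torch.mm(a_sp, b)

assert torch.allclose(c, c_sp, atol=1e-6)
```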
+ dispatch: + CUDA: _int_mm_out_cuda + +- func: _convert_weight_to_int4pack(Tensor self, int innerKTiles) -> Tensor + dispatch: + CPU: _convert_weight_to_int4pack_cpu + CUDA: _convert_weight_to_int4pack_cuda + +- func: _weight_int4pack_mm(Tensor self, Tensor mat2, int qGroupSize, Tensor qScaleAndZeros) -> Tensor + dispatch: + CPU: _weight_int4pack_mm_cpu + CUDA: _weight_int4pack_mm_cuda + +- func: _weight_int8pack_mm(Tensor self, Tensor mat2, Tensor scales) -> Tensor + dispatch: + CPU: _weight_int8pack_mm_cpu + +- func: _sparse_mm(Tensor sparse, Tensor dense) -> Tensor + python_module: sparse + +- func: _sparse_mm.reduce(Tensor sparse, Tensor dense, str reduce) -> Tensor + python_module: sparse + +- func: _sparse_sparse_matmul(Tensor self, Tensor other) -> Tensor + dispatch: + SparseCPU: sparse_sparse_matmul_cpu + SparseCUDA: sparse_sparse_matmul_cuda + autogen: _sparse_sparse_matmul.out + +- func: mode(Tensor self, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices) + variants: function, method + dispatch: + CPU, CUDA: mode + +- func: mode.values(Tensor self, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + dispatch: + CompositeExplicitAutograd: mode_out + +- func: mode.dimname(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices) + variants: function, method + +- func: mode.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + +- func: mul.Tensor(Tensor self, Tensor other) -> Tensor + device_check: NoCheck # TensorIterator + structured_delegate: mul.out + variants: function, method + dispatch: + SparseCPU, SparseCUDA: mul_sparse + SparseCsrCPU, SparseCsrCUDA: mul_sparse_csr + MkldnnCPU: mkldnn_mul + ZeroTensor: mul_zerotensor + NestedTensorCPU, NestedTensorCUDA: NestedTensor_mul_Tensor + tags: [core, pointwise] + +- func: mul_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured_delegate: mul.out + variants: method + dispatch: + SparseCPU, SparseCUDA: mul_sparse_ + SparseCsrCPU, SparseCsrCUDA: mul_sparse_csr_ + MkldnnCPU: mkldnn_mul_ + NestedTensorCPU, NestedTensorCUDA: NestedTensor_mul__Tensor + tags: pointwise + +- func: mul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: mul_out + MPS: mul_out_mps + SparseCPU: mul_out_sparse_cpu + SparseCUDA: mul_out_sparse_cuda + SparseCsrCPU, SparseCsrCUDA: mul_out_sparse_csr + MkldnnCPU: mkldnn_mul_out + tags: pointwise + # For C++ only, until we have conversion from C++ numbers to Tensor + +- func: mul.Scalar(Tensor self, Scalar other) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + dispatch: + CompositeExplicitAutograd: mul + SparseCsrCPU, SparseCsrCUDA: mul_scalar_sparse_csr + NestedTensorCPU, NestedTensorCUDA: NestedTensor_mul_Scalar + tags: [core, pointwise] + +- func: mul_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) 
+ device_check: NoCheck # TensorIterator + variants: method + dispatch: + CompositeExplicitAutograd: mul_ + SparseCsrCPU, SparseCsrCUDA: mul__scalar_sparse_csr + NestedTensorCPU, NestedTensorCUDA: NestedTensor_mul__Scalar + autogen: mul.Scalar_out + tags: pointwise +# multiply, alias for mul + +- func: multiply.Tensor(Tensor self, Tensor other) -> Tensor + variants: function, method + +- func: multiply_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + variants: method + +- func: multiply.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + +- func: multiply.Scalar(Tensor self, Scalar other) -> Tensor + variants: function, method + +- func: multiply_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + variants: method + +- func: mv(Tensor self, Tensor vec) -> Tensor + variants: function, method + dispatch: + CompositeExplicitAutograd: mv + SparseCPU, SparseCUDA: mv_sparse + +- func: mv.out(Tensor self, Tensor vec, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CompositeExplicitAutograd: mv_out + +- func: mvlgamma.out(Tensor self, int p, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CPU, CUDA: mvlgamma_out + tags: pointwise + +- func: mvlgamma(Tensor self, int p) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + dispatch: + CompositeExplicitAutograd: mvlgamma + tags: pointwise + +- func: mvlgamma_(Tensor(a!) self, int p) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: method + dispatch: + CompositeExplicitAutograd: mvlgamma_ + tags: pointwise + +- func: narrow_copy(Tensor self, int dim, SymInt start, SymInt length) -> Tensor + variants: function, method + dispatch: + CPU: narrow_copy_dense_cpu + SparseCPU, SparseCUDA: narrow_copy_sparse + CompositeExplicitAutogradNonFunctional: narrow_copy_dense_symint + tags: view_copy + +- func: narrow_copy.out(Tensor self, int dim, SymInt start, SymInt length, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CPU: narrow_copy_dense_cpu_out + +- func: narrow(Tensor(a) self, int dim, SymInt start, SymInt length) -> Tensor(a) + variants: function, method + device_check: NoCheck + device_guard: False + dispatch: + CompositeImplicitAutograd: narrow_symint + NestedTensorCPU, NestedTensorCUDA: narrow_nested_symint + +- func: narrow.Tensor(Tensor(a) self, int dim, Tensor start, SymInt length) -> Tensor(a) + variants: function, method + device_check: NoCheck + device_guard: False + dispatch: + CompositeImplicitAutograd: narrow_tensor_symint + +- func: native_batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor) + dispatch: + CPU: batch_norm_cpu + CUDA: batch_norm_cuda + MPS: batch_norm_mps + MkldnnCPU: mkldnn_batch_norm + +- func: native_batch_norm.out(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, *, Tensor(a!) out, Tensor(b!) save_mean, Tensor(c!) save_invstd) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + dispatch: + CUDA: batch_norm_cuda_out + MPS: batch_norm_mps_out + CPU: batch_norm_cpu_out + +# TODO: In 2 weeks, we should make native_batch_norm composite implicit so that this correct schema percolates correctly through our dispatching +- func: _native_batch_norm_legit(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) 
running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor) + dispatch: + CPU: _batch_norm_legit_cpu + CUDA: _batch_norm_legit_cuda + MPS: _batch_norm_legit_mps + MkldnnCPU: _mkldnn_batch_norm_legit + autogen: _native_batch_norm_legit_functional + tags: core + +# HACK: identical to _native_batch_norm_legit, but training is known to be False, +# so we know that running stats will not be mutated. +# The real fix here is batch norm consolidation. +- func: _native_batch_norm_legit_no_training(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, float momentum, float eps) -> (Tensor, Tensor, Tensor) + dispatch: + CompositeExplicitAutograd: _batch_norm_legit_no_training + autogen: _native_batch_norm_legit_no_training.out + tags: core + +- func: _native_batch_norm_legit.out(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) running_var, bool training, float momentum, float eps, *, Tensor(d!) out, Tensor(e!) save_mean, Tensor(f!) save_invstd) -> (Tensor(d!), Tensor(e!), Tensor(f!)) + dispatch: + CPU: _batch_norm_legit_cpu_out + CUDA: _batch_norm_legit_cuda_out + MPS: _batch_norm_legit_mps_out + +- func: _native_batch_norm_legit.no_stats(Tensor input, Tensor? weight, Tensor? bias, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor) + dispatch: + CPU: _batch_norm_legit_no_stats_cpu + CUDA: _batch_norm_legit_no_stats_cuda + MPS: _batch_norm_legit_no_stats_mps + MkldnnCPU: _mkldnn_batch_norm_legit_no_stats + tags: core + +- func: _native_batch_norm_legit.no_stats_out(Tensor input, Tensor? weight, Tensor? bias, bool training, float momentum, float eps, *, Tensor(a!) out, Tensor(b!) save_mean, Tensor(c!) save_invstd) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + dispatch: + CPU: _batch_norm_legit_no_stats_cpu_out + CUDA: _batch_norm_legit_no_stats_cuda_out + MPS: _batch_norm_legit_no_stats_mps_out + +- func: batch_norm_stats(Tensor input, float eps) -> (Tensor, Tensor) + dispatch: + CUDA: batch_norm_stats_cuda + autogen: batch_norm_stats.out + +- func: batch_norm_elemt(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor invstd, float eps) -> Tensor + dispatch: + CUDA: batch_norm_elemt_cuda + +- func: batch_norm_elemt.out(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor invstd, float eps, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CUDA: batch_norm_elemt_cuda_out + +# for backward compatibility +- func: batch_norm_gather_stats(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, int count) -> (Tensor, Tensor) + dispatch: + CUDA: batch_norm_gather_stats_cuda + autogen: batch_norm_gather_stats.out + +- func: batch_norm_gather_stats_with_counts(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, Tensor counts) -> (Tensor, Tensor) + dispatch: + CUDA: batch_norm_gather_stats_with_counts_cuda + autogen: batch_norm_gather_stats_with_counts.out + +- func: native_batch_norm_backward(Tensor grad_out, Tensor input, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_invstd, bool train, float eps, bool[3] output_mask) -> (Tensor, Tensor, Tensor) + dispatch: + CPU: batch_norm_backward_cpu + CUDA: batch_norm_backward_cuda + MPS: batch_norm_backward_mps + MkldnnCPU: mkldnn_batch_norm_backward + autogen: native_batch_norm_backward.out + +- func: batch_norm_backward_reduce(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? 
weight, bool input_g, bool weight_g, bool bias_g) -> (Tensor, Tensor, Tensor, Tensor) + dispatch: + CUDA: batch_norm_backward_reduce_cuda + autogen: batch_norm_backward_reduce.out + +- func: batch_norm_backward_elemt(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, Tensor sum_dy, Tensor sum_dy_xmu, Tensor count) -> Tensor + dispatch: + CUDA: batch_norm_backward_elemt_cuda + autogen: batch_norm_backward_elemt.out + +- func: batch_norm_update_stats(Tensor input, Tensor? running_mean, Tensor? running_var, float momentum) -> (Tensor, Tensor) + dispatch: + CPU: batch_norm_update_stats_cpu + CUDA: batch_norm_update_stats_cuda + autogen: batch_norm_update_stats.out + +- func: is_vulkan_available() -> bool + +- func: _nnpack_available() -> bool + +- func: _nnpack_spatial_convolution(Tensor input, Tensor weight, Tensor? bias, SymInt[2] padding, SymInt[2] stride=1) -> Tensor + variants: function + dispatch: + CompositeExplicitAutograd: _nnpack_spatial_convolution + autogen: _nnpack_spatial_convolution.out + +- func: ones.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + device_check: NoCheck + device_guard: False + dispatch: + CompositeExplicitAutograd: ones + autogen: ones.names_out + +- func: ones(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + dispatch: + CompositeExplicitAutograd: ones + +- func: ones.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CompositeExplicitAutograd: ones_out + +- func: ones_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor + dispatch: + # NB: Although this composite mutates on the inside, it is + # non-differentiable so NonFunctional doesn't apply + CompositeExplicitAutograd: ones_like + NestedTensorCPU, NestedTensorCUDA: ones_like + autogen: ones_like.out + +- func: pairwise_distance(Tensor x1, Tensor x2, float p=2, float eps=1e-06, bool keepdim=False) -> Tensor + +- func: cdist(Tensor x1, Tensor x2, float p=2, int? compute_mode=None) -> Tensor + +- func: _euclidean_dist(Tensor x1, Tensor x2) -> Tensor + dispatch: + CompositeExplicitAutograd: _euclidean_dist + autogen: _euclidean_dist.out + +- func: _cdist_forward(Tensor x1, Tensor x2, float p, int? 
compute_mode) -> Tensor + dispatch: + CPU, CUDA: _cdist_forward + MPS: _cdist_forward_mps + autogen: _cdist_forward.out + tags: core + +- func: _cdist_backward(Tensor grad, Tensor x1, Tensor x2, float p, Tensor cdist) -> Tensor + dispatch: + CPU, CUDA: _cdist_backward + autogen: _cdist_backward.out + +- func: pdist(Tensor self, float p=2) -> Tensor + +- func: _pdist_forward(Tensor self, float p=2) -> Tensor + dispatch: + CPU, CUDA: _pdist_forward + autogen: _pdist_forward.out + tags: core + +- func: _pdist_backward(Tensor grad, Tensor self, float p, Tensor pdist) -> Tensor + dispatch: + CPU, CUDA: _pdist_backward + autogen: _pdist_backward.out + +- func: cosine_similarity(Tensor x1, Tensor x2, int dim=1, float eps=1e-08) -> Tensor + variants: function + +- func: permute(Tensor(a) self, int[] dims) -> Tensor(a) + variants: function, method + dispatch: + CompositeExplicitAutograd: permute + MPS: permute_mps + SparseCPU, SparseCUDA: permute_sparse_coo + tags: core + +- func: movedim.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a) + variants: function, method + +- func: movedim.int(Tensor(a) self, int source, int destination) -> Tensor(a) + variants: function, method + +# moveaxis, alias for movedim +- func: moveaxis.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a) + variants: function, method + +- func: moveaxis.int(Tensor(a) self, int source, int destination) -> Tensor(a) + variants: function, method + +# Only exposed from C++ -- in Python, +# we expose it as an attribute `T`, not a function. +# +# I'd like to name this "T" in C++ too, but +# calling a native function "T" causes undefined +# behavior on Windows, for reasons I don't understand +# (maybe related to capital letter collation somehow...) +- func: numpy_T(Tensor(a) self) -> Tensor(a) + variants: method + +# Exposed on Python as an attribute 'H' +- func: matrix_H(Tensor(a) self) -> Tensor(a) + variants: method + +# Exposed on Python as an attribute 'mT' +- func: mT(Tensor(a) self) -> Tensor(a) + variants: method + +# Exposed on Python as an attribute 'mH' +- func: mH(Tensor(a) self) -> Tensor(a) + variants: method + +- func: adjoint(Tensor(a) self) -> Tensor(a) + variants: function, method + +- func: pixel_shuffle(Tensor self, int upscale_factor) -> Tensor + dispatch: + CPU: pixel_shuffle_cpu + MPS: pixel_shuffle_mps + CompositeExplicitAutogradNonFunctional: math_pixel_shuffle + autogen: pixel_shuffle.out + +- func: pixel_unshuffle(Tensor self, int downscale_factor) -> Tensor + dispatch: + CPU: pixel_unshuffle_cpu + MPS: pixel_unshuffle_mps + CompositeExplicitAutogradNonFunctional: math_pixel_unshuffle + autogen: pixel_unshuffle.out + +- func: channel_shuffle(Tensor self, SymInt groups) -> Tensor + dispatch: + CPU, CUDA: channel_shuffle + QuantizedCPU: channel_shuffle_quantized_cpu + autogen: channel_shuffle.out + +- func: native_channel_shuffle(Tensor self, SymInt groups) -> Tensor + dispatch: + CPU: channel_shuffle_cpu + CompositeImplicitAutograd: math_channel_shuffle + +- func: is_pinned(Tensor self, Device? device=None) -> bool + variants: method + dispatch: + NestedTensorCUDA, CUDA: is_pinned_cuda + MPS: is_pinned_mps + CompositeExplicitAutograd: is_pinned_default + +# TODO: add a copy kwarg that guarantees that the tensor is put into fresh +# pinned memory +- func: pin_memory(Tensor(a) self, Device? device=None) -> Tensor(a) + variants: method + +# Unlike pin_memory, this is guaranteed to give a new non-aliasing tensor +- func: _pin_memory(Tensor self, Device? 
device=None) -> Tensor + dispatch: + CUDA: _pin_memory_cuda + MPS: _pin_memory_mps + NestedTensorCUDA, NestedTensorCPU: _pin_memory_nested + autogen: _pin_memory.out + +- func: pinverse(Tensor self, float rcond=1e-15) -> Tensor + variants: function, method + +- func: poisson_nll_loss(Tensor input, Tensor target, bool log_input, bool full, float eps, int reduction) -> Tensor + variants: function + +- func: rad2deg(Tensor self) -> Tensor + variants: function, method + dispatch: + CompositeExplicitAutograd: rad2deg + SparseCPU, SparseCUDA: rad2deg_sparse + SparseCsrCPU, SparseCsrCUDA: rad2deg_sparse_csr + +- func: rad2deg_(Tensor(a!) self) -> Tensor(a!) + variants: function, method + dispatch: + CompositeExplicitAutograd: rad2deg_ + SparseCPU, SparseCUDA: rad2deg_sparse_ + SparseCsrCPU, SparseCsrCUDA: rad2deg_sparse_csr_ + +- func: rad2deg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CompositeExplicitAutograd: rad2deg_out + SparseCPU, SparseCUDA: rad2deg_sparse_out + SparseCsrCPU, SparseCsrCUDA: rad2deg_sparse_csr_out + +- func: deg2rad(Tensor self) -> Tensor + variants: function, method + dispatch: + CompositeExplicitAutograd: deg2rad + SparseCPU, SparseCUDA: deg2rad_sparse + SparseCsrCPU, SparseCsrCUDA: deg2rad_sparse_csr + tags: pointwise + +- func: deg2rad_(Tensor(a!) self) -> Tensor(a!) + variants: function, method + dispatch: + CompositeExplicitAutograd: deg2rad_ + SparseCPU, SparseCUDA: deg2rad_sparse_ + SparseCsrCPU, SparseCsrCUDA: deg2rad_sparse_csr_ + tags: pointwise + +- func: deg2rad.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CompositeExplicitAutograd: deg2rad_out + SparseCPU, SparseCUDA: deg2rad_sparse_out + SparseCsrCPU, SparseCsrCUDA: deg2rad_sparse_csr_out + tags: pointwise + +- func: scalar_tensor(Scalar s, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + dispatch: + CompositeExplicitAutograd: scalar_tensor + autogen: scalar_tensor.out + tags: core + +- func: rand.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + device_check: NoCheck + device_guard: False + dispatch: + CompositeExplicitAutograd: rand + autogen: rand.names_out + tags: nondeterministic_seeded + +- func: rand.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + device_check: NoCheck + device_guard: False + tags: nondeterministic_seeded + dispatch: + CompositeExplicitAutograd: rand + autogen: rand.generator_with_names_out + +- func: rand(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + tags: [core, nondeterministic_seeded] + dispatch: + CompositeExplicitAutograd: rand + +- func: rand.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + tags: nondeterministic_seeded + dispatch: + CompositeExplicitAutograd: rand + +- func: rand.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) + tags: nondeterministic_seeded + dispatch: + CompositeExplicitAutograd: rand_out + +- func: rand.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) + tags: nondeterministic_seeded + +- func: rand_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? 
memory_format=None) -> Tensor + tags: nondeterministic_seeded + dispatch: + # NB: Although this composite mutates on the inside, it is + # non-differentiable so NonFunctional doesn't apply + CompositeExplicitAutograd: rand_like + autogen: rand_like.out + +- func: randint(SymInt high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + tags: nondeterministic_seeded + dispatch: + CompositeExplicitAutograd: randint + +- func: randint.generator(SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + tags: nondeterministic_seeded + dispatch: + CompositeExplicitAutograd: randint + +- func: randint.low(SymInt low, SymInt high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + tags: nondeterministic_seeded + dispatch: + CompositeExplicitAutograd: randint + +- func: randint.low_generator(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + tags: nondeterministic_seeded + dispatch: + CompositeExplicitAutograd: randint + +- func: randint.out(SymInt high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) + tags: nondeterministic_seeded + dispatch: + CompositeExplicitAutograd: randint_out + +- func: randint.generator_out(SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) + tags: nondeterministic_seeded + dispatch: + CompositeExplicitAutograd: randint_out + +- func: randint.low_out(SymInt low, SymInt high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) + tags: nondeterministic_seeded + dispatch: + CompositeExplicitAutograd: randint_out + +- func: randint.low_generator_out(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) + tags: nondeterministic_seeded + dispatch: + CompositeExplicitAutograd: randint_out + +- func: randint_like(Tensor self, SymInt high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor + tags: nondeterministic_seeded + dispatch: + # NB: Although this composite mutates on the inside, it is + # non-differentiable so NonFunctional doesn't apply + CompositeExplicitAutograd: randint_like + autogen: randint_like.out + +- func: randint_like.low_dtype(Tensor self, SymInt low, SymInt high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor + tags: nondeterministic_seeded + dispatch: + # NB: Although this composite mutates on the inside, it is + # non-differentiable so NonFunctional doesn't apply + CompositeExplicitAutograd: randint_like + autogen: randint_like.low_dtype_out + +- func: randn(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + tags: [core, nondeterministic_seeded] + dispatch: + CompositeExplicitAutograd: randn + +- func: randn.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + tags: nondeterministic_seeded + dispatch: + CompositeExplicitAutograd: randn + +- func: randn.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + tags: nondeterministic_seeded + device_check: NoCheck + device_guard: False + dispatch: + CompositeExplicitAutograd: randn + autogen: randn.names_out + +- func: randn.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + tags: nondeterministic_seeded + device_check: NoCheck + device_guard: False + dispatch: + CompositeExplicitAutograd: randn + autogen: randn.generator_with_names_out + +- func: randn.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) + tags: nondeterministic_seeded + +- func: randn.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) + tags: nondeterministic_seeded + +- func: randn_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor + tags: nondeterministic_seeded + dispatch: + # NB: Although this composite mutates on the inside, it is + # non-differentiable so NonFunctional doesn't apply + CompositeExplicitAutograd, CompositeImplicitAutogradNestedTensor: randn_like + autogen: randn_like.out + +- func: randperm(SymInt n, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + tags: [core, nondeterministic_seeded] + dispatch: + CompositeExplicitAutograd: randperm + +- func: randperm.generator(SymInt n, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + tags: nondeterministic_seeded + dispatch: + CompositeExplicitAutograd: randperm + +- func: randperm.out(SymInt n, *, Tensor(a!) out) -> Tensor(a!) + tags: nondeterministic_seeded + dispatch: + CompositeExplicitAutograd: randperm_out + +- func: randperm.generator_out(SymInt n, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) + tags: nondeterministic_seeded + dispatch: + CPU: randperm_out_cpu + CUDA: randperm_out_cuda + MPS: randperm_out_mps + +- func: range.step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + dispatch: + CompositeExplicitAutograd: range + +- func: range(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + dispatch: + CompositeExplicitAutograd: range + +- func: range.out_(Scalar start, Scalar end, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CompositeExplicitAutograd: range_out_no_step + +- func: range.out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CPU, Meta: range_out + CUDA: range_cuda_out + MPS: range_mps_out + cpp_no_default_args: ['step'] + +- func: ravel(Tensor(a) self) -> Tensor(a) + variants: function, method + +- func: reciprocal(Tensor self) -> Tensor + device_check: NoCheck # TensorIterator + structured_delegate: reciprocal.out + variants: function, method + tags: [core, pointwise] + +- func: reciprocal_(Tensor(a!) self) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured_delegate: reciprocal.out + variants: function, method + tags: pointwise + +- func: reciprocal.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+ device_check: NoCheck # TensorIterator + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: reciprocal_out + MPS: reciprocal_out_mps + tags: pointwise + +- func: neg(Tensor self) -> Tensor + device_check: NoCheck # TensorIterator + structured_delegate: neg.out + variants: function, method + dispatch: + SparseCPU, SparseCUDA: neg_sparse + SparseCsrCPU, SparseCsrCUDA: neg_sparse_csr + NestedTensorCPU, NestedTensorCUDA: NestedTensor_neg + tags: [core, pointwise] + +- func: neg_(Tensor(a!) self) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured_delegate: neg.out + variants: function, method + dispatch: + SparseCPU, SparseCUDA: neg_sparse_ + SparseCsrCPU, SparseCsrCUDA: neg_sparse_csr_ + NestedTensorCPU, NestedTensorCUDA: NestedTensor_neg_ + tags: pointwise + +- func: neg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: neg_out + MPS: neg_out_mps + SparseCPU, SparseCUDA: neg_out_sparse + SparseCsrCPU, SparseCsrCUDA: neg_sparse_csr_out + tags: pointwise +# Alias for neg + +- func: negative(Tensor self) -> Tensor + variants: function, method + +- func: negative_(Tensor(a!) self) -> Tensor(a!) + variants: function, method + +- func: negative.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + +- func: repeat(Tensor self, SymInt[] repeats) -> Tensor + variants: method # This is method-only to match the previous tensor API. In the future we could make this a function too. + dispatch: + CompositeExplicitAutograd: repeat + MPS: repeat_mps + autogen: repeat.out + tags: core + +- func: repeat_interleave.Tensor(Tensor repeats, *, SymInt? output_size=None) -> Tensor + variants: function + dispatch: + CPU: repeat_interleave_cpu + CUDA: repeat_interleave_cuda + MPS: repeat_interleave_mps + tags: dynamic_output_shape + autogen: repeat_interleave.Tensor_out + +- func: repeat_interleave.self_Tensor(Tensor self, Tensor repeats, int? dim=None, *, SymInt? output_size=None) -> Tensor + variants: function, method + dispatch: + CompositeImplicitAutograd: repeat_interleave_symint + +- func: repeat_interleave.self_int(Tensor self, SymInt repeats, int? dim=None, *, SymInt? output_size=None) -> Tensor + variants: function, method + dispatch: + CompositeImplicitAutograd: repeat_interleave_symint + +- func: reshape(Tensor(a) self, SymInt[] shape) -> Tensor(a) + variants: function, method + device_check: NoCheck + device_guard: False + dispatch: + CompositeImplicitAutograd: reshape_symint + CompositeImplicitAutogradNestedTensor: reshape_nested_symint + +- func: _reshape_copy(Tensor self, SymInt[] size) -> Tensor + variants: function + dispatch: + CompositeExplicitAutograd: _reshape_copy_symint + +# NOTE [ _reshape_alias ] is meant to be used in the implementation of reshape. +# They are not user-facing, hence the leading underscore. Please don't use it +# anywhere else. +- func: _reshape_alias(Tensor(a) self, SymInt[] size, SymInt[] stride) -> Tensor(a) + variants: function, method + device_check: NoCheck + device_guard: False + dispatch: + CPU, CUDA, Meta, QuantizedCPU, QuantizedCUDA, ZeroTensor, MPS: _reshape_alias + # We don't need to support mkldnn since this is handled explicitly by the reshape operator. 
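+
+# Illustrative sketch (comment only, not part of the generated schema): the reshape /
+# _reshape_alias entries above register the public op and the internal view helper the
+# NOTE refers to. In practice, Tensor.reshape returns a view when the requested shape is
+# compatible with the input's existing strides and falls back to a copy otherwise. A
+# minimal Python check, assuming a recent PyTorch build:
+#
+#   import torch
+#
+#   x = torch.arange(6)
+#   y = x.reshape(2, 3)                    # contiguous input: reshape returns a view
+#   assert y.data_ptr() == x.data_ptr()    # same storage, no copy made
+#
+#   t = torch.arange(6).reshape(2, 3).t()  # transpose makes the tensor non-contiguous
+#   z = t.reshape(6)                       # strides can't express this shape: reshape copies
+#   assert z.data_ptr() != t.data_ptr()    # new storage
+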
+ +- func: _mkldnn_reshape(Tensor self, int[] shape) -> Tensor + device_check: NoCheck + device_guard: False + dispatch: + MkldnnCPU: mkldnn_reshape + autogen: _mkldnn_reshape.out + +- func: reshape_as(Tensor(a) self, Tensor other) -> Tensor(a) + variants: method + device_check: NoCheck + device_guard: False + dispatch: + CompositeImplicitAutograd: reshape_as + CompositeImplicitAutogradNestedTensor: reshape_as_nested + +- func: round(Tensor self) -> Tensor + device_check: NoCheck # TensorIterator + structured_delegate: round.out + variants: function, method + dispatch: + SparseCPU, SparseCUDA: round_sparse + SparseCsrCPU, SparseCsrCUDA: round_sparse_csr + tags: [core, pointwise] + +- func: round_(Tensor(a!) self) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured_delegate: round.out + variants: function, method + dispatch: + SparseCPU, SparseCUDA: round_sparse_ + SparseCsrCPU, SparseCsrCUDA: round_sparse_csr_ + tags: pointwise + +- func: round.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU: round_out + CUDA: round_out + MPS: round_out_mps + SparseCPU, SparseCUDA: round_sparse_out + SparseCsrCPU, SparseCsrCUDA: round_sparse_csr_out + tags: pointwise + +- func: round.decimals(Tensor self, *, int decimals) -> Tensor + device_check: NoCheck # TensorIterator + structured_delegate: round.decimals_out + variants: function, method + tags: pointwise + +- func: round_.decimals(Tensor(a!) self, *, int decimals) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured_delegate: round.decimals_out + variants: function, method + tags: pointwise + +- func: round.decimals_out(Tensor self, *, int decimals, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU: round_decimals_out + CUDA: round_decimals_out + tags: pointwise + +- func: rrelu(Tensor self, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor + device_check: NoCheck # TensorIterator + tags: nondeterministic_seeded + +- func: rrelu_(Tensor(a!) self, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor(a!) + tags: nondeterministic_seeded + device_check: NoCheck # TensorIterator + +- func: relu(Tensor self) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + dispatch: + CPU, CUDA: relu + MPS: relu_mps + MkldnnCPU: mkldnn_relu + QuantizedCPU: relu_quantized_cpu + QuantizedCUDA: relu_quantized_cuda + NestedTensorCPU, NestedTensorCUDA: NestedTensor_relu + SparseCPU, SparseCUDA: relu_sparse + SparseCsrCPU, SparseCsrCUDA: relu_sparse_csr + tags: [core, pointwise] + +- func: relu_(Tensor(a!) self) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: function, method + dispatch: + CPU, CUDA: relu_ + MPS: relu_mps_ + MkldnnCPU: mkldnn_relu_ + QuantizedCPU: relu_quantized_cpu_ + QuantizedCUDA: relu_quantized_cuda_ + NestedTensorCPU, NestedTensorCUDA: NestedTensor_relu_ + SparseCPU, SparseCUDA: relu_sparse_ + SparseCsrCPU, SparseCsrCUDA: relu_sparse_csr_ + autogen: relu.out + tags: pointwise + +- func: relu6(Tensor self) -> Tensor + python_module: nn + +- func: relu6_(Tensor(a!) self) -> Tensor(a!) 
+ python_module: nn + +- func: prelu(Tensor self, Tensor weight) -> Tensor + variants: function, method + autogen: prelu.out + +- func: _prelu_kernel(Tensor self, Tensor weight) -> Tensor + dispatch: + CPU, CUDA: _prelu_kernel + QuantizedCPU: _prelu_kernel_quantized_cpu + MkldnnCPU: mkldnn_prelu + MPS: prelu_mps + +- func: _prelu_kernel_backward(Tensor grad_output, Tensor self, Tensor weight) -> (Tensor, Tensor) + dispatch: + CPU, CUDA: _prelu_kernel_backward + MkldnnCPU: mkldnn_prelu_backward + MPS: prelu_backward_mps + +- func: gelu.out(Tensor self, *, str approximate='none', Tensor(a!) out) -> Tensor(a!) + structured: True + structured_inherits: TensorIteratorBase + device_check: NoCheck # TensorIterator + python_module: nn + dispatch: + CPU: gelu_out_cpu + CUDA: gelu_out_cuda + MPS: gelu_out_mps + +- func: gelu_(Tensor(a!) self, *, str approximate='none') -> Tensor(a!) + structured_delegate: gelu.out + device_check: NoCheck # TensorIterator + python_module: nn + dispatch: + QuantizedCPU: gelu_quantized_cpu_ + NestedTensorCPU, NestedTensorCUDA: NestedTensor_gelu_ + +- func: gelu(Tensor self, *, str approximate='none') -> Tensor + structured_delegate: gelu.out + device_check: NoCheck # TensorIterator + python_module: nn + dispatch: + MkldnnCPU: mkldnn_gelu + QuantizedCPU: gelu_quantized_cpu + QuantizedCUDA: gelu_quantized_cuda + NestedTensorCPU, NestedTensorCUDA: NestedTensor_gelu + tags: [core, pointwise] + +- func: gelu_backward.grad_input(Tensor grad_output, Tensor self, *, str approximate='none', Tensor(a!) grad_input) -> Tensor(a!) + structured: True + structured_inherits: TensorIteratorBase + python_module: nn + dispatch: + CPU: gelu_backward_out_cpu + CUDA: gelu_backward_out_cuda + MPS: gelu_backward_out_mps + +- func: gelu_backward(Tensor grad_output, Tensor self, *, str approximate='none') -> Tensor + structured_delegate: gelu_backward.grad_input + python_module: nn + dispatch: + MkldnnCPU: mkldnn_gelu_backward + NestedTensorCPU, NestedTensorCUDA: gelu_backwards_nested + tags: pointwise + +- func: infinitely_differentiable_gelu_backward(Tensor grad, Tensor self) -> Tensor + variants: function + python_module: nn + device_check: NoCheck + device_guard: False + +- func: hardshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!) + structured: True + structured_inherits: TensorIteratorBase + device_check: NoCheck # TensorIterator + dispatch: + CPU, CUDA: hardshrink_out + +- func: hardshrink(Tensor self, Scalar lambd=0.5) -> Tensor + structured_delegate: hardshrink.out + device_check: NoCheck # TensorIterator + variants: function, method + +- func: hardshrink_backward.grad_input(Tensor grad_out, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!) + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: hardshrink_backward_out + +- func: hardshrink_backward(Tensor grad_out, Tensor self, Scalar lambd) -> Tensor + structured_delegate: hardshrink_backward.grad_input + variants: function, method + +- func: rsqrt(Tensor self) -> Tensor + device_check: NoCheck # TensorIterator + structured_delegate: rsqrt.out + variants: function, method + tags: [core, pointwise] + +- func: rsqrt_(Tensor(a!) self) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured_delegate: rsqrt.out + variants: function, method + tags: pointwise + +- func: rsqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+ device_check: NoCheck # TensorIterator + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: rsqrt_out + MPS: rsqrt_out_mps + tags: pointwise + +- func: select.Dimname(Tensor(a) self, Dimname dim, int index) -> Tensor(a) + variants: function, method + device_check: NoCheck + device_guard: False + +- func: select.int(Tensor(a) self, int dim, SymInt index) -> Tensor(a) + variants: function, method + device_check: NoCheck + device_guard: False + dispatch: + CompositeExplicitAutograd: select_symint + SparseCsrCPU, SparseCsrCUDA: select_sparse_csr + NestedTensorCPU, NestedTensorCUDA: select_nested + tags: core + +- func: select_backward(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index) -> Tensor + variants: function + device_check: NoCheck + device_guard: False + dispatch: + CompositeExplicitAutogradNonFunctional: select_backward_symint + autogen: select_backward.out + +- func: _nested_select_backward(Tensor grad_output, Tensor self, int dim, SymInt index) -> Tensor + variants: function + device_check: NoCheck + device_guard: False + dispatch: + NestedTensorCPU, NestedTensorCUDA: _nested_select_backward_symint + +- func: selu(Tensor self) -> Tensor + device_check: NoCheck # TensorIterator + +- func: selu_(Tensor(a!) self) -> Tensor(a!) + device_check: NoCheck # TensorIterator + +- func: celu(Tensor self, Scalar alpha=1.0) -> Tensor + device_check: NoCheck # TensorIterator + dispatch: + CompositeExplicitAutograd: celu + +- func: celu_(Tensor(a!) self, Scalar alpha=1.0) -> Tensor(a!) + device_check: NoCheck # TensorIterator + dispatch: + CompositeExplicitAutograd: celu_ + autogen: celu.out + +- func: silu(Tensor self) -> Tensor + structured_delegate: silu.out + python_module: nn + dispatch: + NestedTensorCPU, NestedTensorCUDA: NestedTensor_silu + tags: pointwise + +- func: silu_(Tensor(a!) self) -> Tensor(a!) + structured_delegate: silu.out + python_module: nn + dispatch: + NestedTensorCPU, NestedTensorCUDA: NestedTensor_silu_ + tags: pointwise + +- func: silu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + structured: True + structured_inherits: TensorIteratorBase + python_module: nn + dispatch: + CPU, CUDA: silu_out + MPS: silu_out_mps + tags: pointwise + +- func: silu_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!) + structured: True + structured_inherits: TensorIteratorBase + python_module: nn + dispatch: + CPU, CUDA: silu_backward_out + MPS: silu_backward_out_mps + tags: pointwise + +- func: silu_backward(Tensor grad_output, Tensor self) -> Tensor + structured_delegate: silu_backward.grad_input + python_module: nn + dispatch: + CompositeImplicitAutograd: math_silu_backward + NestedTensorCPU, NestedTensorCUDA: silu_backward_nested + tags: pointwise + +- func: mish(Tensor self) -> Tensor + structured_delegate: mish.out + python_module: nn + +- func: mish_(Tensor(a!) self) -> Tensor(a!) + structured_delegate: mish.out + python_module: nn + +- func: mish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+ structured: True + structured_inherits: TensorIteratorBase + python_module: nn + dispatch: + CPU, CUDA: mish_out + MPS: mish_out_mps + +- func: mish_backward(Tensor grad_output, Tensor self) -> Tensor + python_module: nn + dispatch: + CPU, CUDA: mish_backward + MPS: mish_backward_mps + CompositeImplicitAutograd: math_mish_backward + +- func: sigmoid(Tensor self) -> Tensor + device_check: NoCheck # TensorIterator + structured_delegate: sigmoid.out + variants: function, method + dispatch: + QuantizedCPU: sigmoid_quantized_cpu + MkldnnCPU: mkldnn_sigmoid + tags: [core, pointwise] + +- func: sigmoid_(Tensor(a!) self) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured_delegate: sigmoid.out + variants: function, method + dispatch: + MkldnnCPU: mkldnn_sigmoid_ + tags: pointwise + +- func: sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: sigmoid_out + MPS: sigmoid_out_mps + tags: pointwise + +- func: logit(Tensor self, float? eps=None) -> Tensor + variants: function, method + dispatch: + CPU, CUDA: logit + MPS: logit_mps + tags: pointwise + +- func: logit_(Tensor(a!) self, float? eps=None) -> Tensor(a!) + variants: function, method + dispatch: + CPU, CUDA: logit_ + tags: pointwise + +- func: logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CPU, CUDA: logit_out + MPS: logit_out_mps + tags: pointwise + +- func: sin(Tensor self) -> Tensor + device_check: NoCheck # TensorIterator + structured_delegate: sin.out + variants: function, method + dispatch: + SparseCsrCPU, SparseCsrCUDA: sin_sparse_csr + SparseCPU, SparseCUDA: sin_sparse + NestedTensorCPU, NestedTensorCUDA: sin_nested + tags: [core, pointwise] + +- func: sin_(Tensor(a!) self) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured_delegate: sin.out + variants: function, method + dispatch: + SparseCsrCPU, SparseCsrCUDA: sin_sparse_csr_ + SparseCPU, SparseCUDA: sin_sparse_ + tags: pointwise + +- func: sin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: sin_out + MPS: sin_out_mps + SparseCsrCPU, SparseCsrCUDA: sin_sparse_csr_out + SparseCPU, SparseCUDA: sin_sparse_out + tags: pointwise + +- func: sinc(Tensor self) -> Tensor + structured_delegate: sinc.out + variants: function, method + tags: pointwise + +- func: sinc_(Tensor(a!) self) -> Tensor(a!) + structured_delegate: sinc.out + variants: function, method + tags: pointwise + +- func: sinc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: sinc_out + tags: pointwise + +- func: sinh(Tensor self) -> Tensor + device_check: NoCheck # TensorIterator + structured_delegate: sinh.out + variants: function, method + dispatch: + SparseCPU, SparseCUDA: sinh_sparse + SparseCsrCPU, SparseCsrCUDA: sinh_sparse_csr + tags: [core, pointwise] + +- func: sinh_(Tensor(a!) self) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured_delegate: sinh.out + variants: function, method + dispatch: + SparseCPU, SparseCUDA: sinh_sparse_ + SparseCsrCPU, SparseCsrCUDA: sinh_sparse_csr_ + tags: pointwise + +- func: sinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+ device_check: NoCheck # TensorIterator + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: sinh_out + MPS: sinh_out_mps + SparseCPU, SparseCUDA: sinh_sparse_out + SparseCsrCPU, SparseCsrCUDA: sinh_sparse_csr_out + +# Returns a copy of this `Variable` that is detached from its autograd graph. +# This method is OK to call if the `Variable` is a view. +# +# NOTE: Previously, if we change the tensor metadata (e.g. sizes / strides / +# storage / storage_offset) of a tensor created from `detach()`, those metadata +# in the original tensor will also be updated. However, the new behavior is that +# those metadata changes to the detached tensor will not update the original tensor +# anymore, and in the `detach()` function we need to set `allow_tensor_metadata_change_` +# to false to make such changes explicitly illegal, in order to prevent users from +# changing metadata of the detached tensor and expecting the original tensor to also +# be updated. + tags: pointwise +- func: detach(Tensor(a) self) -> Tensor(a) + variants: function, method + dispatch: + CompositeExplicitAutograd: detach + NestedTensorCPU, NestedTensorCUDA: detach + +# Like `detach()`, but modifies this `Variable` in-place. This method may +# only be called on non-view `Variable`s. You can use `is_view()` to check +# this. If this `Variable` is a view, throws an `std::runtime_error()`. +- func: detach_(Tensor(a!) self) -> Tensor(a!) + variants: function, method + tags: inplace_view + dispatch: + CompositeExplicitAutograd: detach_ + +- func: size.int(Tensor self, int dim) -> int + variants: function + device_check: NoCheck + device_guard: False + manual_cpp_binding: True + +- func: size.Dimname(Tensor self, Dimname dim) -> int + variants: function, method + device_check: NoCheck + device_guard: False + +- func: sym_size.int(Tensor self, int dim) -> SymInt + variants: function + device_check: NoCheck + device_guard: False + tags: core + manual_cpp_binding: True + +- func: sym_numel(Tensor self) -> SymInt + variants: function + device_check: NoCheck + device_guard: False + tags: core + manual_cpp_binding: True + +- func: sym_storage_offset(Tensor self) -> SymInt + variants: function + device_check: NoCheck + device_guard: False + tags: core + manual_cpp_binding: True + +- func: slice.Tensor(Tensor(a) self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor(a) + variants: function, method + device_check: NoCheck + device_guard: False + dispatch: + CompositeExplicitAutograd: slice + tags: core + +# NOTE: The implementation of split_with_sizes bypasses the dispatcher to call this; undo +# that if adding specific implementations here! + +- func: slice_backward(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt start, SymInt end, SymInt step) -> Tensor + variants: function + device_check: NoCheck + device_guard: False + dispatch: + CompositeExplicitAutograd: slice_backward + autogen: slice_backward.out + +# NB: This op exists to back the implementation of reverse view_funcs for various views (chunk, +# slice.Tensor, split_with_sizes, et. al.). Currently, these are only used during fake-ification +# of PT2 graph input subclass instances that are views. This means: +# * This op shouldn't really show up in eager mode (so e.g. 
XLA shouldn't have to implement it) +# * This op shouldn't show up in a PT2 graph (so a PT2 backend shouldn't have to implement it) +# * A subclass will have to implement this to work in PT2 if a subclass view is used as a graph +# input AND the view utilizes this op in its inverse. The idea is that slice_inverse() is +# easier to implement for a subclass than as_strided() +- func: slice_inverse(Tensor(a) self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor(a) + variants: function, method + device_check: NoCheck + device_guard: False + dispatch: + CompositeExplicitAutograd: slice_inverse_symint + +- func: slice_scatter(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor + variants: function, method + device_check: NoCheck + device_guard: False + dispatch: + CompositeExplicitAutogradNonFunctional: slice_scatter + autogen: slice_scatter.out + tags: [core, view_copy] + +- func: select_scatter(Tensor self, Tensor src, int dim, SymInt index) -> Tensor + variants: function, method + device_check: NoCheck + device_guard: False + dispatch: + CompositeExplicitAutogradNonFunctional: select_scatter_symint + autogen: select_scatter.out + tags: core + +- func: diagonal_scatter(Tensor self, Tensor src, int offset=0, int dim1=0, int dim2=1) -> Tensor + variants: function, method + device_check: NoCheck + device_guard: False + dispatch: + CompositeExplicitAutogradNonFunctional: diagonal_scatter + autogen: diagonal_scatter.out + +- func: as_strided_scatter(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor + variants: function, method + device_check: NoCheck + device_guard: False + dispatch: + CompositeExplicitAutogradNonFunctional: as_strided_scatter_symint + autogen: as_strided_scatter.out + +- func: smm(Tensor self, Tensor mat2) -> Tensor + variants: function, method + +# softmax allows positional dtype, unlike most operators, because kwonly is BC-breaking when loading jit models. +- func: softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor + variants: function, method + +- func: softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!) + variants: function + dispatch: + CompositeExplicitAutograd: softmax_out + +- func: softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor + variants: function, method + +- func: _softmax(Tensor self, int dim, bool half_to_float) -> Tensor + structured_delegate: _softmax.out + dispatch: + MkldnnCPU: mkldnn_softmax + NestedTensorCPU, NestedTensorCUDA: softmax_nested + tags: core + +- func: _softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!) + structured: True + dispatch: + CPU: softmax_cpu_out + CUDA: softmax_cuda_out + MPS: softmax_mps_out + +- func: _softmax_backward_data(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype) -> Tensor + structured_delegate: _softmax_backward_data.out + dispatch: + NestedTensorCPU, NestedTensorCUDA: nested_softmax_backward + +- func: _softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype, *, Tensor(a!) grad_input) -> Tensor(a!) 
+ structured: True + dispatch: + CPU: softmax_backward_cpu_out + CUDA: softmax_backward_cuda_out + MPS: softmax_backward_mps_out + +- func: unsafe_split.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[] + variants: function, method + device_check: NoCheck + device_guard: False + dispatch: + CompositeExplicitAutograd: unsafe_split + autogen: unsafe_split.Tensor_out + +- func: split.Tensor(Tensor(a -> *) self, SymInt split_size, int dim=0) -> Tensor(a)[] + variants: function, method + device_check: NoCheck + device_guard: False + dispatch: + CompositeExplicitAutograd: split + +- func: split.sizes(Tensor(a -> *) self, SymInt[] split_size, int dim=0) -> Tensor(a)[] + variants: function, method + device_guard: False + dispatch: + CompositeImplicitAutograd: split_symint + +- func: unsafe_split_with_sizes(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[] + variants: function, method + device_check: NoCheck + device_guard: False + dispatch: + CompositeExplicitAutograd: unsafe_split_with_sizes + autogen: unsafe_split_with_sizes.out + +- func: split_with_sizes(Tensor(a -> *) self, SymInt[] split_sizes, int dim=0) -> Tensor(a)[] + variants: function, method + device_check: NoCheck + device_guard: False + dispatch: + CompositeExplicitAutograd: split_with_sizes + NestedTensorCPU, NestedTensorCUDA: split_with_sizes_nested + tags: core + +- func: hsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[] + variants: function, method + +- func: hsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[] + variants: function, method + +- func: vsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[] + variants: function, method + +- func: vsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[] + variants: function, method + +- func: dsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[] + variants: function, method + +- func: dsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[] + variants: function, method + +- func: squeeze(Tensor(a) self) -> Tensor(a) + variants: function, method + device_check: NoCheck + device_guard: False + dispatch: + CompositeExplicitAutograd: squeeze + QuantizedCPU, QuantizedCUDA: squeeze_quantized + NestedTensorCPU, NestedTensorCUDA: squeeze_nested + +- func: squeeze.dim(Tensor(a) self, int dim) -> Tensor(a) + variants: function, method + device_check: NoCheck + device_guard: False + dispatch: + CompositeExplicitAutograd: squeeze + QuantizedCPU, QuantizedCUDA: squeeze_quantized + NestedTensorCPU, NestedTensorCUDA: squeeze_dim_nested + tags: core + +- func: squeeze.dimname(Tensor(a) self, Dimname dim) -> Tensor(a) + variants: function, method + device_check: NoCheck + device_guard: False + + +- func: squeeze.dims(Tensor(a) self, int[] dim) -> Tensor(a) + variants: function, method + device_check: NoCheck + device_guard: False + dispatch: + CompositeExplicitAutograd: squeeze + QuantizedCPU, QuantizedCUDA: squeeze_quantized + NestedTensorCPU, NestedTensorCUDA: squeeze_dim_nested + tags: core + +- func: squeeze_(Tensor(a!) self) -> Tensor(a!) + variants: method + device_check: NoCheck + device_guard: False + tags: inplace_view + dispatch: + CompositeExplicitAutograd: squeeze_ + +- func: squeeze_.dim(Tensor(a!) self, int dim) -> Tensor(a!) + variants: method + device_check: NoCheck + device_guard: False + tags: inplace_view + dispatch: + CompositeExplicitAutograd: squeeze_ + +- func: squeeze_.dims(Tensor(a!) self, int[] dim) -> Tensor(a!) 
+ variants: method + device_check: NoCheck + device_guard: False + tags: inplace_view + dispatch: + CompositeExplicitAutograd: squeeze_ + +- func: squeeze_.dimname(Tensor(a!) self, Dimname dim) -> Tensor(a!) + variants: method + device_check: NoCheck + device_guard: False + tags: inplace_view + +- func: sspaddmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor + variants: function, method + +- func: sspaddmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) + dispatch: + CPU: _sspaddmm_out_only_sparse + CUDA: _sspaddmm_out_only_sparse_cuda + SparseCPU: _sspaddmm_out_cpu + SparseCUDA: _sspaddmm_out_cuda + +- func: _chunk_cat(Tensor[] tensors, int dim, int num_chunks) -> Tensor + dispatch: + CompositeExplicitAutograd: _chunk_cat + +- func: _chunk_cat.out(Tensor[] tensors, int dim, int num_chunks, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CompositeExplicitAutograd: _chunk_cat_out + +- func: stack(Tensor[] tensors, int dim=0) -> Tensor + dispatch: + CompositeExplicitAutograd: stack + +- func: stack.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CompositeExplicitAutograd: stack_out + +- func: _stack(Tensor[] tensors, int dim=0) -> Tensor + dispatch: # match the backends supported by _cat + CPU: _stack_cpu + CompositeExplicitAutograd: _stack + +- func: _stack.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) + dispatch: # match the backends supported by _cat_out + CPU: _stack_out_cpu + CompositeExplicitAutograd: _stack_out + +- func: hstack(Tensor[] tensors) -> Tensor + +- func: hstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) + +- func: vstack(Tensor[] tensors) -> Tensor + +- func: vstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) + +- func: dstack(Tensor[] tensors) -> Tensor + +- func: dstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) + +# Overload without center & pad mode, needed for forward-compatibility +- func: stft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool normalized=False, bool? onesided=None, bool? return_complex=None) -> Tensor + variants: function, method + cpp_no_default_args: ['hop_length', 'win_length', 'window', 'normalized'] + +- func: stft.center(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, str pad_mode="reflect", bool normalized=False, bool? onesided=None, bool? return_complex=None) -> Tensor + variants: function, method + +- func: istft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, bool normalized=False, bool? onesided=None, int? length=None, bool return_complex=False) -> Tensor + variants: function, method + +- func: stride.int(Tensor self, int dim) -> int + variants: function + device_check: NoCheck + device_guard: False + manual_cpp_binding: True + +- func: stride.Dimname(Tensor self, Dimname dim) -> int + variants: function, method + device_check: NoCheck + device_guard: False + +- func: sym_stride.int(Tensor self, int dim) -> SymInt + variants: function + device_check: NoCheck + device_guard: False + tags: core + manual_cpp_binding: True + +- func: sum(Tensor self, *, ScalarType? 
dtype=None) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + dispatch: + CompositeExplicitAutograd: sum + SparseCPU, SparseCUDA: sum_coo + SparseCsrCPU, SparseCsrCUDA: sum_csr + autogen: sum.out + +- func: sum.dim_IntList(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor + # TODO: Align the signature of sum.dim_IntList and _sparse_csr_sum.dim_dtype + structured_delegate: sum.IntList_out + device_check: NoCheck # TensorIterator + variants: function, method + dispatch: + NestedTensorCPU: NestedTensor_sum_dim_CPU + SparseCPU, SparseCUDA: sum_sparse_coo + SparseCsrCPU, SparseCsrCUDA: sum_sparse_compressed + tags: core + +- func: sum.dim_DimnameList(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + +- func: sum.IntList_out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + structured: True + device_check: NoCheck # TensorIterator + dispatch: + CPU, CUDA: sum_out + MPS: sum_out_mps + +- func: sum.DimnameList_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + +# TODO: this function will be replaced once nested expand semantics have been settled on +- func: _nested_sum_backward(Tensor grad, Tensor self, int[1]? dim, bool keepdim=False) -> Tensor + dispatch: + NestedTensorCPU: _nested_sum_backward_cpu + +- func: nansum(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor + variants: function, method + dispatch: + CPU, CUDA: nansum + MPS: nansum_mps + +- func: nansum.out(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + dispatch: + CPU, CUDA: nansum_out + MPS: nansum_out_mps + +- func: sum_to_size(Tensor self, SymInt[] size) -> Tensor + variants: method + device_check: NoCheck + device_guard: False + dispatch: + CompositeImplicitAutograd: sum_to_size_symint + +- func: sqrt(Tensor self) -> Tensor + device_check: NoCheck # TensorIterator + structured_delegate: sqrt.out + variants: function, method + dispatch: + SparseCPU, SparseCUDA: sqrt_sparse + SparseCsrCPU, SparseCsrCUDA: sqrt_sparse_csr + tags: [core, pointwise] + +- func: sqrt_(Tensor(a!) self) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured_delegate: sqrt.out + variants: function, method + dispatch: + SparseCPU, SparseCUDA: sqrt_sparse_ + SparseCsrCPU, SparseCsrCUDA: sqrt_sparse_csr_ + tags: pointwise + +- func: sqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: sqrt_out + MPS: sqrt_out_mps + SparseCPU, SparseCUDA: sqrt_sparse_out + SparseCsrCPU, SparseCsrCUDA: sqrt_sparse_csr_out + tags: pointwise + +- func: square(Tensor self) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + tags: pointwise + +- func: square_(Tensor(a!) self) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: function, method + tags: pointwise + +- func: square.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + tags: pointwise + +- func: std(Tensor self, bool unbiased=True) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + cpp_no_default_args: ["unbiased"] + +- func: std.dim(Tensor self, int[1]? 
dim, bool unbiased=True, bool keepdim=False) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + cpp_no_default_args: ["unbiased"] + +- func: std.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + dispatch: + CPU, CUDA: std + MPS: std_mps + QuantizedCPU: std_quantized_cpu + +- func: std_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor) + device_check: NoCheck # TensorIterator + variants: function + cpp_no_default_args: ["unbiased"] + +- func: std_mean.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor) + device_check: NoCheck # TensorIterator + variants: function + cpp_no_default_args: ["unbiased"] + +- func: std_mean.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> (Tensor, Tensor) + device_check: NoCheck # TensorIterator + variants: function + dispatch: + CPU, CUDA: std_mean + MPS: std_mean_mps + autogen: std_mean.correction_out + +- func: std_mean.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor) + device_check: NoCheck # TensorIterator + variants: function + cpp_no_default_args: ["unbiased"] + +- func: std_mean.correction_names(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False) -> (Tensor, Tensor) + device_check: NoCheck # TensorIterator + variants: function + +- func: std.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + cpp_no_default_args: ["unbiased"] + +- func: std.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + dispatch: + CPU, CUDA: std_out + QuantizedCPU: std_out_quantized_cpu + +- func: std.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + cpp_no_default_args: ["unbiased"] + +- func: std.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + cpp_no_default_args: ["unbiased"] + +- func: std.correction_names(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + +- func: std.correction_names_out(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: function + +- func: prod(Tensor self, *, ScalarType? dtype=None) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + dispatch: + CPU, CUDA: prod + MPS: prod_mps + autogen: prod.out + tags: core + +- func: prod.dim_int(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor + structured_delegate: prod.int_out + device_check: NoCheck # TensorIterator + variants: function, method + tags: core + +- func: prod.int_out(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + structured: True + device_check: NoCheck # TensorIterator + dispatch: + CPU, CUDA: prod_out + MPS: prod_out_mps + +- func: prod.dim_Dimname(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? 
dtype=None) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + +- func: prod.Dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + +- func: t(Tensor(a) self) -> Tensor(a) + device_check: NoCheck + device_guard: False + variants: function, method + dispatch: + CompositeExplicitAutograd: t + +- func: t_(Tensor(a!) self) -> Tensor(a!) + device_check: NoCheck + device_guard: False + variants: method + tags: inplace_view + dispatch: + CompositeExplicitAutograd: t_ + +- func: tan(Tensor self) -> Tensor + device_check: NoCheck # TensorIterator + structured_delegate: tan.out + variants: function, method + dispatch: + SparseCPU, SparseCUDA: tan_sparse + SparseCsrCPU, SparseCsrCUDA: tan_sparse_csr + tags: [core, pointwise] + +- func: tan_(Tensor(a!) self) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured_delegate: tan.out + variants: function, method + dispatch: + SparseCPU, SparseCUDA: tan_sparse_ + SparseCsrCPU, SparseCsrCUDA: tan_sparse_csr_ + tags: pointwise + +- func: tan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: tan_out + MPS: tan_out_mps + SparseCPU, SparseCUDA: tan_sparse_out + SparseCsrCPU, SparseCsrCUDA: tan_sparse_csr_out + tags: pointwise + +- func: tanh(Tensor self) -> Tensor + device_check: NoCheck # TensorIterator + structured_delegate: tanh.out + variants: function, method + dispatch: + QuantizedCPU: tanh_quantized_cpu + MkldnnCPU: mkldnn_tanh + SparseCPU, SparseCUDA: tanh_sparse + SparseCsrCPU, SparseCsrCUDA: tanh_sparse_csr + NestedTensorCPU, NestedTensorCUDA: NestedTensor_tanh + tags: [core, pointwise] + +- func: tanh_(Tensor(a!) self) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured_delegate: tanh.out + variants: function, method + dispatch: + MkldnnCPU: mkldnn_tanh_ + SparseCPU, SparseCUDA: tanh_sparse_ + SparseCsrCPU, SparseCsrCUDA: tanh_sparse_csr_ + NestedTensorCPU, NestedTensorCUDA: NestedTensor_tanh_ + tags: pointwise + +- func: tanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: tanh_out + MPS: tanh_out_mps + SparseCPU, SparseCUDA: tanh_sparse_out + SparseCsrCPU, SparseCsrCUDA: tanh_sparse_csr_out + tags: pointwise + +- func: tensordot(Tensor self, Tensor other, int[] dims_self, int[] dims_other) -> Tensor + variants: function + +- func: tensordot.out(Tensor self, Tensor other, int[] dims_self, int[] dims_other, *, Tensor(a!) out) -> Tensor(a!) + variants: function + +# TODO: namespace threshold in 'nn' +- func: threshold(Tensor self, Scalar threshold, Scalar value) -> Tensor + device_check: NoCheck # TensorIterator + variants: function + structured_delegate: threshold.out + dispatch: + QuantizedCPU: threshold_quantized_cpu + +- func: threshold_(Tensor(a!) self, Scalar threshold, Scalar value) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: function + structured_delegate: threshold.out + +- func: threshold.out(Tensor self, Scalar threshold, Scalar value, *, Tensor(a!) out) -> Tensor(a!) 
+ device_check: NoCheck # TensorIterator + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: threshold_out + MPS: threshold_out_mps + +- func: threshold_backward.grad_input(Tensor grad_output, Tensor self, Scalar threshold, *, Tensor(a!) grad_input) -> Tensor(a!) + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: threshold_backward_out + MPS: threshold_backward_out_mps + SparseCPU, SparseCUDA: threshold_backward_sparse_out + SparseCsrCPU, SparseCsrCUDA: threshold_backward_sparse_compressed_out + +- func: threshold_backward(Tensor grad_output, Tensor self, Scalar threshold) -> Tensor + variants: function + structured_delegate: threshold_backward.grad_input + dispatch: + MkldnnCPU: mkldnn_relu_backward + SparseCPU, SparseCUDA: threshold_backward_sparse + SparseCsrCPU, SparseCsrCUDA: threshold_backward_sparse_compressed + NestedTensorCPU, NestedTensorCUDA: threshold_backwards_nested + tags: pointwise + +- func: tile(Tensor self, SymInt[] dims) -> Tensor + variants: function, method + dispatch: + CompositeImplicitAutograd: tile_symint + +- func: transpose.int(Tensor(a) self, int dim0, int dim1) -> Tensor(a) + variants: function, method + device_check: NoCheck + device_guard: False + dispatch: + CompositeExplicitAutograd: transpose + NestedTensorCPU, NestedTensorCUDA: transpose_nested + +- func: transpose.Dimname(Tensor(a) self, Dimname dim0, Dimname dim1) -> Tensor(a) + variants: function, method + device_check: NoCheck + device_guard: False + +- func: _mkldnn_transpose(Tensor self, int dim0, int dim1) -> Tensor + device_check: NoCheck + device_guard: False + dispatch: + MkldnnCPU: mkldnn_transpose + +- func: transpose_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!) + variants: method + device_check: NoCheck + device_guard: False + tags: inplace_view + dispatch: + CompositeExplicitAutograd: transpose_ + +- func: _mkldnn_transpose_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!) + device_check: NoCheck + device_guard: False + dispatch: + MkldnnCPU: mkldnn_transpose_ + autogen: _mkldnn_transpose.out + +- func: one_hot(Tensor self, int num_classes=-1) -> Tensor + python_module: nn + variants: function + tags: dynamic_output_shape + +- func: flip(Tensor self, int[] dims) -> Tensor + variants: function, method + dispatch: + CPU, QuantizedCPU, CUDA, QuantizedCUDA: flip + MPS: flip_mps + autogen: flip.out + tags: core + +- func: fliplr(Tensor self) -> Tensor + variants: function, method + +- func: flipud(Tensor self) -> Tensor + variants: function, method + +- func: roll(Tensor self, SymInt[1] shifts, int[1] dims=[]) -> Tensor + variants: function, method + dispatch: + CPU, MPS: roll + CUDA: roll_cuda + autogen: roll.out + +# default int[] value [0,1] should not add space after comma, since codegen parser uses ', ' to split args + +- func: rot90(Tensor self, int k=1, int[] dims=[0,1]) -> Tensor + variants: function, method + dispatch: + CompositeExplicitAutograd: rot90 + autogen: rot90.out + +- func: trapezoid.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor + +- func: trapezoid.dx(Tensor y, *, Scalar dx=1, int dim=-1) -> Tensor + +- func: trapz.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor + +- func: trapz.dx(Tensor y, *, float dx=1, int dim=-1) -> Tensor + +# Fused implementation detail for transformers. Adds in-projection bias to QKV and divides Q by sqrt(D/num_heads). 
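+# A rough pseudo-Python sketch of that contract (an illustrative assumption,
+# not the exact kernel):
+#   q, k, v = (qkv + qkv_bias).chunk(3, dim=-1)   # last dim is D
+#   q = q / math.sqrt(D // num_heads)             # k and v are left unscaled
+# and each output is additionally laid out per attention head.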
+- func: _transform_bias_rescale_qkv(Tensor qkv, Tensor qkv_bias, int num_heads) -> (Tensor, Tensor, Tensor) + dispatch: + CPU, NestedTensorCPU: transform_bias_rescale_qkv_cpu + CUDA, NestedTensorCUDA: transform_bias_rescale_qkv_cuda + autogen: _transform_bias_rescale_qkv.out + +- func: _nested_tensor_from_mask(Tensor t, Tensor mask, bool mask_check=True) -> Tensor + dispatch: + CPU, CUDA: NestedTensor_nested_tensor_from_mask + autogen: _nested_tensor_from_mask.out + +- func: _nested_tensor_from_mask_left_aligned(Tensor t, Tensor mask) -> bool + dispatch: + CPU, CUDA: NestedTensor_nested_tensor_from_mask_left_aligned + +- func: _nested_from_padded(Tensor padded, Tensor cpu_nested_shape_example, bool fuse_transform_0213=False) -> Tensor + device_check: NoCheck # cpu_nested_shape_example will always be on CPU + dispatch: + CPU: nested_from_padded_generic + CUDA: nested_from_padded_cuda + autogen: _nested_from_padded.out + +# These private functions are temporary. They will be updated/deleted when nested tensors switch to using SymInts for their metadata representation +- func: _nested_tensor_size(Tensor self) -> Tensor + variants: method + dispatch: + NestedTensorCPU, NestedTensorCUDA: _nested_tensor_size + autogen: _nested_tensor_size.out + +- func: _nested_tensor_strides(Tensor self) -> Tensor + variants: method + dispatch: + NestedTensorCPU, NestedTensorCUDA: _nested_tensor_strides + autogen: _nested_tensor_strides.out + +- func: _nested_tensor_storage_offsets(Tensor self) -> Tensor + variants: method + dispatch: + NestedTensorCPU, NestedTensorCUDA, NestedTensorMeta: _nested_tensor_storage_offsets + autogen: _nested_tensor_storage_offsets.out + +# _nested_from_padded is not usable from Python, so +# _nested_from_padded_and_nested_example is available for testing. +- func: _nested_from_padded_and_nested_example(Tensor padded, Tensor nt_example) -> Tensor + dispatch: + NestedTensorCPU, NestedTensorCUDA: NestedTensor_from_padded_and_nested_example + autogen: _nested_from_padded_and_nested_example.out + +# The input arguments' types to this functions are temporary. When nested tensors switch to using SymInts for their metadata representation +# this will need to be updated +- func: _nested_view_from_buffer(Tensor(a) self, Tensor nested_size, Tensor nested_strides, Tensor offsets) -> Tensor(a) + variants: function + device_check: NoCheck + dispatch: + CPU, CUDA: _nested_view_from_buffer + +- func: _nested_view_from_buffer_copy(Tensor self, Tensor nested_size, Tensor nested_strides, Tensor offsets) -> Tensor + variants: function + device_check: NoCheck + tags: view_copy + dispatch: + CompositeExplicitAutogradNonFunctional: _nested_view_from_buffer_copy + autogen: _nested_view_from_buffer_copy.out + +- func: _nested_view_from_jagged(Tensor(a) self, Tensor offsets, Tensor dummy, Tensor? lengths=None, int ragged_idx=1) -> Tensor(a) + variants: function + device_check: NoCheck + dispatch: {} + +- func: _nested_view_from_jagged_copy(Tensor self, Tensor offsets, Tensor dummy, Tensor? 
lengths=None, int ragged_idx=1) -> Tensor + variants: function + device_check: NoCheck + tags: view_copy + dispatch: + CompositeExplicitAutogradNonFunctional: _nested_view_from_jagged_copy + autogen: _nested_view_from_jagged_copy.out + +- func: _nested_get_values(Tensor(a) self) -> Tensor(a) + variants: function + device_check: NoCheck + dispatch: {} + +- func: _nested_get_values_copy(Tensor self) -> Tensor + variants: function + device_check: NoCheck + tags: view_copy + dispatch: + CompositeExplicitAutogradNonFunctional: _nested_get_values_copy + autogen: _nested_get_values_copy.out + +- func: _nested_get_offsets(Tensor self) -> Tensor + variants: function + device_check: NoCheck + dispatch: {} + +# returns undefined Tensor if no lengths present +- func: _nested_get_lengths(Tensor self) -> Tensor + variants: function + device_check: NoCheck + dispatch: {} + +- func: _nested_get_ragged_idx(Tensor self) -> int + variants: function + device_check: NoCheck + dispatch: {} + +- func: _nested_get_jagged_dummy(Tensor any) -> Tensor + category_override: dummy + dispatch: {} + +- func: _trilinear(Tensor i1, Tensor i2, Tensor i3, int[] expand1, int[] expand2, int[] expand3, int[] sumdim, int unroll_dim=1) -> Tensor + dispatch: + # calls unsqueeze + CompositeExplicitAutogradNonFunctional: _trilinear + autogen: _trilinear.out + +- func: triplet_margin_loss(Tensor anchor, Tensor positive, Tensor negative, float margin=1.0, float p=2, float eps=1e-06, bool swap=False, int reduction=Mean) -> Tensor + +- func: trunc(Tensor self) -> Tensor + structured_delegate: trunc.out + device_check: NoCheck # TensorIterator + variants: function, method + dispatch: + SparseCPU, SparseCUDA: trunc_sparse + SparseCsrCPU, SparseCsrCUDA: trunc_sparse_csr + tags: [core, pointwise] + +- func: trunc_(Tensor(a!) self) -> Tensor(a!) + structured_delegate: trunc.out + device_check: NoCheck # TensorIterator + variants: function, method + dispatch: + SparseCPU, SparseCUDA: trunc_sparse_ + SparseCsrCPU, SparseCsrCUDA: trunc_sparse_csr_ + tags: pointwise + +- func: trunc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + structured: True + structured_inherits: TensorIteratorBase + device_check: NoCheck # TensorIterator + dispatch: + CPU, CUDA: trunc_out + MPS: trunc_out_mps + SparseCPU, SparseCUDA: trunc_sparse_out + SparseCsrCPU, SparseCsrCUDA: trunc_sparse_csr_out + tags: pointwise +# Alias for trunc + +- func: fix(Tensor self) -> Tensor + variants: function, method + +- func: fix_(Tensor(a!) self) -> Tensor(a!) + variants: function, method + +- func: fix.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + +- func: type_as(Tensor self, Tensor other) -> Tensor + variants: method + +- func: _has_compatible_shallow_copy_type(Tensor self, Tensor from) -> bool + variants: function + +- func: _unique(Tensor self, bool sorted=True, bool return_inverse=False) -> (Tensor, Tensor) + variants: function + dispatch: + CPU: _unique_cpu + CUDA: _unique_cuda + autogen: _unique.out + +- func: unique_dim(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor) + variants: function + dispatch: + CPU: unique_dim_cpu + CUDA: unique_dim_cuda + tags: dynamic_output_shape + autogen: unique_dim.out + +- func: unique_consecutive(Tensor self, bool return_inverse=False, bool return_counts=False, int? 
dim=None) -> (Tensor, Tensor, Tensor) + variants: function + dispatch: + CPU: unique_consecutive_cpu + CUDA: unique_consecutive_cuda + MPS: unique_consecutive_mps + tags: dynamic_output_shape + autogen: unique_consecutive.out + +- func: unique_dim_consecutive(Tensor self, int dim, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor) + variants: function + dispatch: + CPU: unique_dim_consecutive_cpu + CUDA: unique_dim_consecutive_cuda + MPS: unique_dim_consecutive_mps + tags: dynamic_output_shape + autogen: unique_dim_consecutive.out + +# _unique and _unique_dim are fragile and modifying them easily cause internal break +# the below operator is a temporary hack for adding return_counts support +# Please don't rely on these two operators, they will be removed soon + +- func: _unique2(Tensor self, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor) + variants: function + dispatch: + CPU: _unique2_cpu + CUDA: _unique2_cuda + MPS: _unique2_mps + tags: dynamic_output_shape + autogen: _unique2.out + +- func: _unsafe_view(Tensor self, SymInt[] size) -> Tensor + dispatch: + CompositeExplicitAutograd: _unsafe_view + autogen: _unsafe_view.out + +- func: unsqueeze(Tensor(a) self, int dim) -> Tensor(a) + variants: function, method + device_check: NoCheck + device_guard: False + dispatch: + CompositeExplicitAutograd: unsqueeze + SparseCPU, SparseCUDA: unsqueeze_sparse + QuantizedCPU, QuantizedCUDA: unsqueeze_quantized + NestedTensorCPU, NestedTensorCUDA: unsqueeze_nested + tags: core + +- func: unsqueeze_(Tensor(a!) self, int dim) -> Tensor(a!) + variants: method + device_check: NoCheck + device_guard: False + tags: inplace_view + dispatch: + CompositeExplicitAutograd: unsqueeze_ + +- func: vander(Tensor x, int? N=None, bool increasing=False) -> Tensor + +- func: var(Tensor self, bool unbiased=True) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + cpp_no_default_args: ["unbiased"] + +- func: var.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + tags: core + cpp_no_default_args: ["unbiased"] + +- func: var.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + dispatch: + CPU, CUDA: var + MPS: var_mps + tags: core + +- func: var.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + cpp_no_default_args: ["unbiased"] + +- func: var.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + dispatch: + CPU, CUDA: var_out + +- func: var.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + cpp_no_default_args: ["unbiased"] + +- func: var.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + cpp_no_default_args: ["unbiased"] + +- func: var.correction_names(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + +- func: var.correction_names_out(Tensor self, Dimname[1] dim, *, Scalar? 
correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: function + +- func: var_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor) + device_check: NoCheck # TensorIterator + variants: function + cpp_no_default_args: ["unbiased"] + +- func: var_mean.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor) + device_check: NoCheck # TensorIterator + variants: function + cpp_no_default_args: ["unbiased"] + +- func: var_mean.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> (Tensor, Tensor) + device_check: NoCheck # TensorIterator + variants: function + dispatch: + CPU, CUDA: var_mean + MPS: var_mean_mps + autogen: var_mean.correction_out + +- func: var_mean.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor) + device_check: NoCheck # TensorIterator + variants: function + cpp_no_default_args: ["unbiased"] + +- func: var_mean.correction_names(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False) -> (Tensor, Tensor) + device_check: NoCheck # TensorIterator + variants: function + +- func: view_as(Tensor(a) self, Tensor other) -> Tensor(a) + variants: method + device_check: NoCheck + device_guard: False + +- func: where.self(Tensor condition, Tensor self, Tensor other) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + dispatch: + CPU, CUDA, MPS: where + tags: [core, pointwise] + +- func: where.self_out(Tensor condition, Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + dispatch: + CPU, CUDA, MPS: where_self_out + +- func: where.ScalarSelf(Tensor condition, Scalar self, Tensor other) -> Tensor + variants: function + +- func: where.ScalarOther(Tensor condition, Tensor self, Scalar other) -> Tensor + variants: function, method + +- func: where.Scalar(Tensor condition, Scalar self, Scalar other) -> Tensor + variants: function + +- func: where(Tensor condition) -> Tensor[] + device_check: NoCheck # TensorIterator + variants: function + +- func: norm_except_dim(Tensor v, int pow=2, int dim=0) -> Tensor + variants: function + +# VariableType::_weight_norm does not want to be given a gap in the autograd graph, +# so we don't define "dispatch" variants for it. +- func: _weight_norm(Tensor v, Tensor g, int dim=0) -> Tensor + variants: function + +- func: _weight_norm_interface(Tensor v, Tensor g, int dim=0) -> (Tensor, Tensor) + variants: function + dispatch: + CPU: weight_norm_cpu + CUDA: weight_norm_cuda + MPS: weight_norm_mps + autogen: _weight_norm_interface.out + +- func: _weight_norm_interface_backward(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim) -> (Tensor, Tensor) + variants: function + dispatch: + CPU: weight_norm_backward_cpu + CUDA: weight_norm_backward_cuda + MPS: weight_norm_backward_mps + autogen: _weight_norm_interface_backward.out + +- func: _weight_norm_differentiable_backward(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim) -> (Tensor, Tensor) + variants: function + +- func: zeros.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + device_check: NoCheck + device_guard: False + dispatch: + CompositeExplicitAutograd: zeros + autogen: zeros.names_out + +- func: _efficientzerotensor(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? 
device=None, bool? pin_memory=None) -> Tensor + dispatch: + CPU: _efficientzerotensor + CUDA: _efficientzerotensor_cuda + MPS: _efficientzerotensor_mps + Meta: _efficientzerotensor_meta + autogen: _efficientzerotensor.out + +- func: zeros(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + dispatch: + CompositeExplicitAutograd: zeros_symint + +- func: zeros.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CompositeExplicitAutograd: zeros_out + SparseCPU, SparseCUDA, SparseMeta: zeros_sparse_out + +- func: zeros_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor + dispatch: + # NB: Although this composite mutates on the inside, it is + # non-differentiable so NonFunctional doesn't apply + CompositeExplicitAutograd, CompositeImplicitAutogradNestedTensor: zeros_like + autogen: zeros_like.out + +- func: _standard_gamma_grad(Tensor self, Tensor output) -> Tensor + variants: function + dispatch: + CPU: _standard_gamma_grad_cpu + CUDA: _standard_gamma_grad_cuda + autogen: _standard_gamma_grad.out + +- func: _standard_gamma(Tensor self, Generator? generator=None) -> Tensor + variants: function + dispatch: + CPU: _s_gamma_cpu + CUDA: _s_gamma_cuda + tags: nondeterministic_seeded + autogen: _standard_gamma.out + +- func: _dirichlet_grad(Tensor x, Tensor alpha, Tensor total) -> Tensor + dispatch: + CPU: _dirichlet_grad_cpu + CUDA: _dirichlet_grad_cuda + autogen: _dirichlet_grad.out + +- func: _sample_dirichlet(Tensor self, Generator? generator=None) -> Tensor + tags: nondeterministic_seeded + variants: function + dispatch: + CPU: _s_dirichlet_cpu + CUDA: _s_dirichlet_cuda + autogen: _sample_dirichlet.out + +- func: poisson(Tensor self, Generator? generator=None) -> Tensor + device_check: NoCheck # TensorIterator + dispatch: + CPU: _s_poisson_cpu + CUDA: _s_poisson_cuda + tags: nondeterministic_seeded + autogen: poisson.out + +- func: binomial(Tensor count, Tensor prob, Generator? generator=None) -> Tensor + device_check: NoCheck # TensorIterator + dispatch: + CPU: _s_binomial_cpu + CUDA: _s_binomial_cuda + tags: nondeterministic_seeded + autogen: binomial.out + +# When more variants get ported to native, this dispatch will get more +# complicated + +- func: native_norm(Tensor self, Scalar p=2) -> Tensor + dispatch: + SparseCPU, SparseCUDA: norm_sparse + autogen: native_norm.out + +- func: native_norm.ScalarOpt_dim_dtype(Tensor self, Scalar? p, int[1] dim, bool keepdim, ScalarType? dtype) -> Tensor + dispatch: + SparseCPU, SparseCUDA: norm_sparse + autogen: native_norm.ScalarOpt_dim_dtype_out + +# TODO: reduce signatures down to one when optional args is available +- func: _sparse_sum(Tensor self) -> Tensor + +- func: _sparse_sum.dtype(Tensor self, *, ScalarType dtype) -> Tensor + +- func: _sparse_sum.dim(Tensor self, int[1] dim) -> Tensor + dispatch: + CompositeExplicitAutograd: _sparse_sum + autogen: _sparse_sum.dim_out + +- func: _sparse_sum.dim_dtype(Tensor self, int[1] dim, *, ScalarType dtype) -> Tensor + +- func: _sparse_sum_backward(Tensor grad, Tensor self, int[] dim) -> Tensor + dispatch: + SparseCPU: _sparse_sum_backward_cpu + SparseCUDA: _sparse_sum_backward_cuda + autogen: _sparse_sum_backward.out + +- func: _sparse_csr_sum.dim_dtype(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? 
dtype=None) -> Tensor + dispatch: + SparseCsrCPU: _sparse_csr_sum_cpu + SparseCsrCUDA: _sparse_csr_sum_cuda + autogen: _sparse_csr_sum.dim_dtype_out + +- func: _sparse_csr_prod.dim_dtype(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor + dispatch: + SparseCsrCPU: _sparse_csr_prod_cpu + SparseCsrCUDA: _sparse_csr_prod_cuda + autogen: _sparse_csr_prod.dim_dtype_out + +- func: _sparse_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor + python_module: sparse + variants: function + +- func: _sparse_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor + python_module: sparse + variants: function + +- func: _sparse_softmax(Tensor self, int dim, bool half_to_float) -> Tensor + python_module: sparse + dispatch: + SparseCPU: softmax_sparse_cpu + SparseCUDA: softmax_sparse_cuda + autogen: _sparse_softmax.out + +- func: _sparse_softmax_backward_data(Tensor grad_output, Tensor output, int dim, Tensor self) -> Tensor + dispatch: + SparseCPU: softmax_backward_sparse_cpu + SparseCUDA: softmax_backward_sparse_cuda + autogen: _sparse_softmax_backward_data.out + +- func: _sparse_log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor + python_module: sparse + variants: function + +- func: _sparse_log_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor + python_module: sparse + variants: function + +- func: _sparse_log_softmax(Tensor self, int dim, bool half_to_float) -> Tensor + python_module: sparse + dispatch: + SparseCPU: log_softmax_sparse_cpu + SparseCUDA: log_softmax_sparse_cuda + autogen: _sparse_log_softmax.out + +- func: _sparse_log_softmax_backward_data(Tensor grad_output, Tensor output, int dim, Tensor self) -> Tensor + dispatch: + SparseCPU: log_softmax_backward_sparse_cpu + SparseCUDA: log_softmax_backward_sparse_cuda + autogen: _sparse_log_softmax_backward_data.out + +- func: _spdiags(Tensor diagonals, Tensor offsets, int[] shape, Layout? layout=None) -> Tensor + python_module: sparse + dispatch: + CPU: spdiags + autogen: _spdiags.out + +- func: norm.ScalarOpt_dtype(Tensor self, Scalar? p, *, ScalarType dtype) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + dispatch: + CompositeExplicitAutograd: norm + autogen: norm.ScalarOpt_dtype_out + +- func: norm.Scalar(Tensor self, Scalar p=2) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + dispatch: + CompositeExplicitAutograd: norm + autogen: norm.Scalar_out + +- func: norm.ScalarOpt_dim_dtype(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor + structured_delegate: norm.dtype_out + device_check: NoCheck # TensorIterator + variants: function, method + dispatch: + SparseCPU, SparseCUDA: sparse_dtype_norm + +- func: norm.ScalarOpt_dim(Tensor self, Scalar? p, int[1] dim, bool keepdim=False) -> Tensor + structured_delegate: norm.out + device_check: NoCheck # TensorIterator + variants: function, method + dispatch: + SparseCPU, SparseCUDA: sparse_norm + +- func: norm.dtype_out(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!) + structured: True + device_check: NoCheck # TensorIterator + dispatch: + CPU, CUDA: norm_dtype_out + MPS: norm_dtype_out_mps + +- func: norm.out(Tensor self, Scalar? p, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) 
+ structured: True + device_check: NoCheck # TensorIterator + dispatch: + CPU, CUDA: norm_out + MPS: norm_out_mps + +# These four redispatch in their implementation, so OK to be CompositeImplicitAutograd +- func: norm.names_ScalarOpt_dim_dtype(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + +- func: norm.names_ScalarOpt_dim(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim=False) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + +- func: norm.names_dtype_out(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + +- func: norm.names_out(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + +- func: frexp.Tensor(Tensor self) -> (Tensor mantissa, Tensor exponent) + variants: method, function + dispatch: + CompositeExplicitAutograd: frexp + tags: pointwise + +- func: frexp.Tensor_out(Tensor self, *, Tensor(a!) mantissa, Tensor(b!) exponent) -> (Tensor(a!) mantissa, Tensor(b!) exponent) + dispatch: + CPU, CUDA: frexp_out + tags: pointwise + +# Deprecated (v.1.12) +- func: frobenius_norm.dim(Tensor self, int[1] dim, bool keepdim=False) -> Tensor + variants: function + +# Deprecated (v.1.12) +- func: frobenius_norm.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + variants: function + +# Deprecated (v.1.12) +- func: nuclear_norm(Tensor self, bool keepdim=False) -> Tensor + variants: function + +# Deprecated (v.1.12) +- func: nuclear_norm.out(Tensor self, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + variants: function + +# Deprecated (v.1.12) +- func: nuclear_norm.dim(Tensor self, int[2] dim, bool keepdim=False) -> Tensor + variants: function + +# Deprecated (v.1.12) +- func: nuclear_norm.dim_out(Tensor self, int[2] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + variants: function + +- func: clone(Tensor self, *, MemoryFormat? memory_format=None) -> Tensor + variants: function, method + dispatch: + CompositeExplicitAutograd: clone + SparseCPU, SparseCUDA: clone_sparse + SparseCsrCPU, SparseCsrCUDA: clone_sparse_compressed + MkldnnCPU: mkldnn_clone + QuantizedCPU, QuantizedCUDA: quantized_clone + NestedTensorCPU, NestedTensorCUDA: clone_nested + autogen: clone.out + tags: [core, pointwise] + +- func: positive(Tensor(a) self) -> Tensor(a) + variants: function, method + tags: pointwise + +- func: resize_as_(Tensor(a!) self, Tensor the_template, *, MemoryFormat? memory_format=None) -> Tensor(a!) + use_const_ref_for_mutable_tensors: True + variants: function, method + dispatch: + CompositeExplicitAutograd: resize_as_ + autogen: resize_as, resize_as.out + tags: inplace_view + +- func: resize_as_sparse_(Tensor(a!) self, Tensor the_template) -> Tensor(a!) + use_const_ref_for_mutable_tensors: True + variants: function, method + dispatch: + SparseCPU, SparseCUDA: resize_as_sparse_ + SparseCsrCPU, SparseCsrCUDA: resize_as_sparse_compressed_ + autogen: resize_as_sparse, resize_as_sparse.out + +- func: zero_(Tensor(a!) self) -> Tensor(a!) 
+ device_check: NoCheck # TensorIterator + variants: method, function + dispatch: + CPU, CUDA: zero_ + MPS: zero_mps_ + Meta: zero_meta_ + SparseCPU, SparseCUDA, SparseMeta: zero_sparse_ + SparseCsrCPU, SparseCsrCUDA, SparseCsrMeta: zero_sparse_csr_ + MkldnnCPU: mkldnn_zero_ + NestedTensorCPU, NestedTensorCUDA: zero_nested_ + autogen: zero, zero.out + +- func: sub.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: sub_out + MPS: sub_out_mps + SparseCPU, SparseCUDA: sub_out_sparse + tags: pointwise + +- func: sub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + structured_delegate: sub.out + dispatch: + SparseCPU, SparseCUDA: sub_sparse + ZeroTensor: sub_zerotensor + NestedTensorCPU, NestedTensorCUDA: NestedTensor_sub_Tensor + tags: [core, pointwise] + +- func: sub_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: method + structured_delegate: sub.out + dispatch: + SparseCPU, SparseCUDA: sub_sparse_ + tags: pointwise +# For C++ only, until we have conversion from C++ numbers to Tensor + +- func: sub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + dispatch: + CompositeExplicitAutograd: sub + tags: [core, pointwise] + +- func: sub_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: method + dispatch: + CompositeExplicitAutograd: sub_ + autogen: sub.Scalar_out + tags: pointwise +# subtract, alias for sub + +- func: subtract.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) + +- func: subtract.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor + variants: function, method + +- func: subtract_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!) + variants: method + +# For C++ only, until we have conversion from C++ numbers to Tensor +- func: subtract.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor + variants: function, method + +- func: subtract_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!) + variants: method + +- func: rsub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor + device_check: NoCheck # TensorIterator + variants: function + dispatch: + CPU, CUDA: rsub + autogen: rsub.Tensor_out + +- func: heaviside.out(Tensor self, Tensor values, *, Tensor(a!) out) -> Tensor(a!) + structured: True + structured_inherits: TensorIteratorBase + device_check: NoCheck # TensorIterator + dispatch: + CPU, CUDA: heaviside_out + tags: pointwise + +- func: heaviside(Tensor self, Tensor values) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + structured_delegate: heaviside.out + tags: pointwise + +- func: heaviside_(Tensor(a!) self, Tensor values) -> Tensor(a!) 
+  device_check: NoCheck   # TensorIterator
+  variants: method
+  structured_delegate: heaviside.out
+
+# For C++ only, until we have conversion from C++ numbers to Tensor
+- func: rsub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
+  device_check: NoCheck   # TensorIterator
+  variants: function
+  dispatch:
+    CompositeExplicitAutograd: rsub
+  autogen: rsub.Scalar_out
+  tags: pointwise
+
+# Functionally the same as addmm, but we give it a different derivative formula
+# that doesn't propagate gradients to non-present entries on sparse.
+- func: _sparse_addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
+  python_module: sparse
+  dispatch:
+    CompositeExplicitAutograd: _sparse_addmm
+  autogen: _sparse_addmm.out
+
+- func: sparse_sampled_addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
+  python_module: sparse
+  dispatch:
+    SparseCsrCUDA: sparse_sampled_addmm_out_sparse_csr_cuda
+    SparseCsrCPU: sparse_sampled_addmm_out_sparse_csr_cpu
+
+- func: sparse_sampled_addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
+  python_module: sparse
+  dispatch:
+    SparseCsrCUDA: sparse_sampled_addmm_sparse_csr_cuda
+    SparseCsrCPU: sparse_sampled_addmm_sparse_csr_cpu
+
+- func: _sparse_mm_reduce_impl(Tensor self, Tensor other, str reduce) -> (Tensor, Tensor)
+  python_module: sparse
+  dispatch:
+    SparseCsrCPU: _sparse_mm_reduce_impl_sparse_csr_cpu
+
+- func: _sparse_mm_reduce_impl_backward(Tensor self, Tensor grad_out, Tensor weight, str reduce, Tensor arg_out, bool[2] output_mask) -> (Tensor, Tensor)
+  python_module: sparse
+  dispatch:
+    SparseCsrCPU: _sparse_mm_reduce_impl_backward_sparse_csr_cpu
+
+- func: addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
+  structured: True
+  dispatch:
+    CPU: addmm_out_cpu
+    CUDA: addmm_out_cuda
+    MPS: addmm_out_mps
+    SparseCPU: addmm_out_sparse_dense_cpu
+    SparseCUDA: addmm_out_sparse_dense_cuda
+    SparseCsrCPU: addmm_out_sparse_compressed_cpu
+    SparseCsrCUDA: addmm_out_sparse_compressed_cuda
+
+- func: addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
+  structured_delegate: addmm.out
+  variants: function, method
+  dispatch:
+    SparseCPU: addmm_sparse_dense_cpu
+    SparseCUDA: addmm_sparse_dense_cuda
+    SparseCsrCPU, SparseCsrCUDA: addmm_sparse_compressed_dense
+  tags: core
+
+- func: addmm_(Tensor(a!) self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
+  structured_delegate: addmm.out
+  variants: method
+  dispatch:
+    # Warning! For whatever reason, the inplace sparse addmm is NON
+    # broadcasting
+    SparseCPU: s_addmm_sparse_dense_cpu_
+    SparseCUDA: s_addmm_sparse_dense_cuda_
+
+- func: _addmm_activation.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, bool use_gelu=False, Tensor(a!) out) -> Tensor(a!)
+  structured: True
+  dispatch:
+    CPU: addmm_activation_out_cpu
+    CUDA: addmm_activation_out_cuda
+
+- func: _addmm_activation(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, bool use_gelu=False) -> Tensor
+  structured_delegate: _addmm_activation.out
+  variants: function, method
+
+- func: _scaled_mm(Tensor self, Tensor mat2, *, Tensor? bias=None, ScalarType? out_dtype=None, Tensor? scale_a=None, Tensor? scale_b=None, Tensor? scale_result=None, bool use_fast_accum=False) -> (Tensor, Tensor)
+  variants: function
+  dispatch:
+    CUDA: _scaled_mm_cuda
+
+- func: _scaled_mm.out(Tensor self, Tensor mat2, *, Tensor? bias=None, ScalarType? out_dtype=None, Tensor? scale_a=None, Tensor? scale_b=None, Tensor? scale_result=None, bool use_fast_accum=False, Tensor(a!) out, Tensor(b!) out_amax) -> (Tensor(a!), Tensor(b!))
+  variants: function
+  dispatch:
+    CUDA: _scaled_mm_out_cuda
+
+# NOTE [ Sparse: autograd and API ]
+#
+# Sparse Tensor Constructors
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~
+#
+# The API entry points to sparse tensor construction should be
+# `sparse_coo_tensor` and `_sparse_coo_tensor_unsafe`. Depending on whether the
+# indices and values tensors are given, they eventually dispatch to either
+# `sparse_coo_tensor_with_dims` or `sparse_coo_tensor_with_dims_and_tensors`.
+#
+# The autograd support for the ctor is implemented on `sparse_coo_tensor_with_dims_and_tensors`.
+#
+# The API methods `sparse_coo_tensor` and `_sparse_coo_tensor_unsafe`
+# **must not** have specific type dispatches because otherwise codegen will
+# consider them as abstract methods (see Note [Abstract ATen methods]), dispatch
+# using **Tensor** type, and thus lose autograd tracking on the actual method
+# they dispatch to, e.g., `sparse_coo_tensor_with_dims_and_tensors`.
+#
+# Sparse Methods API Design
+# ~~~~~~~~~~~~~~~~~~~~~~~~~
+#
+# Goals: 1. Flexible API for users to write custom sparse ops
+#        2. ctor and member accessors with autograd support
+#
+# To achieve 1, we need to provide a set of *dangerous* APIs (dangerous in the
+# sense that misusing them will break sparse tensor invariants and may result in
+# unexpected behavior, e.g., a crash). These methods are all prefixed with an
+# underscore "_" to indicate that they should be used with care. We provide:
+#
+# + `_indices()`: returns the *raw* indices within the sparse tensor (not just
+#                 sharing storage). Any inplace operation will change the
+#                 actual indices, including t_, set_, as_strided_, resize_,
+#                 etc.
+# + `_values()`: returns the *raw* values within the sparse tensor. Similar
+#                semantics as `_indices()`
+# + `_nnz()`: returns the number of non-zero entries. This will always be
+#             determined by the shapes of indices and values.
+# + `_coalesced_(bool)`: inplace sets whether the tensor is coalesced, and
+#                        returns itself.
+#
+# These methods are very useful in writing new operations, e.g., a custom
+# autograd Function.
+#
+# We also provide other public *safe* APIs:
+# + `indices()`: returns a **view** of the indices tensor if the sparse tensor
+#                is **coalesced**.
+# + `values()`: returns a **view** of the values tensor if the containing
+#               sparse tensor is **coalesced**.
+# + `sparse_dim()`: number of sparse dimensions
+# + `dense_dim()`: number of dense dimensions
+# + `is_coalesced()`: whether the sparse tensor is coalesced
+#
+# `_indices()` and `_values()` should return the raw indices and values dense
+# tensors within a sparse tensor. They can be quite unsafe with inplace
+# operations like `t_()`, and expose uncoalesced indices and values. The public,
+# recommended API is `indices()` and `values()`, both of which first check that
+# the tensor is coalesced and return views on those tensors.
+#
+# Autograd Support
+# ~~~~~~~~~~~~~~~~
+#
+# Autograd is supported on `values()` and the sparse tensor ctor with indices
+# and values tensors. E.g., `torch.sparse_coo_tensor(i, v).values().sum()` is
+# differentiable w.r.t. `v`.
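+#
+# A minimal sketch of that behavior through the public Python API (shown here
+# only for illustration; it is not part of this schema):
+#
+#   i = torch.tensor([[0, 1], [1, 0]])
+#   v = torch.tensor([3.0, 4.0], requires_grad=True)
+#   s = torch.sparse_coo_tensor(i, v, (2, 2)).coalesce()
+#   s.values().sum().backward()   # gradients flow back into `v`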
+#
+# NB: The `values()` and `_values()` operators are special in that they are
+# layout-aware, i.e., the output depends not just on the data it represents, but
+# also on the input layout details (in this case, the `indices` tensor). See
+# NOTE [ as_strided Backward and layout-aware/agnostic autograd ] in Functions.cpp
+# for discussion on layout-aware vs layout-agnostic autograd. Since PyTorch ops
+# operate in the layout-agnostic mode, similar to `as_strided`, the backward of
+# these two operators needs to consider them in a layout-agnostic way:
+# + `values()`:
+#     Input is coalesced.
+#     We just pretend to have `input.indices()` as an additional argument
+#     `input_indices`, then forward is similar to
+#     `input.to(kStrided).index_select(input_indices)` regardless of the layout.
+#     Note that `values()` normally is layout-aware even if we constrain
+#     ourselves to sparse inputs, since it may include all-zero values entries
+#     as "present" entries.
+# + `_values()`:
+#     Input may be uncoalesced.
+#     It is not straightforward to construct a layout-agnostic version because
+#     duplicate indices entries may exist and additional parameterization is
+#     needed to distribute the value into different values entries. Furthermore,
+#     this op is intended to provide ways to write custom sparse ops, rather
+#     than being used in the autograd graph, so it is marked as *non-differentiable*
+#     in derivatives.yaml.
+#
+# Before reading the following, see NOTE [ Autograd Variable Views ] in
+# variable.h for details on views that are tracked by autograd, and views that
+# are not.
+#
+# Moreover, these methods return tensors that share storage with inputs, so we
+# mark these methods as view ops to support autograd history tracking.
+# The sparse tensor ctor output should technically be a view of both the input
+# indices and values tensors, but currently we only support setting it as a view
+# of a single Variable, so it is only a view of the values tensor.
+# TODO: clone indices in sparse tensor ctor.
+#
+# For the other methods that return outputs sharing storage with inputs, i.e.,
+# `indices()` and `_indices()`, we mark their outputs as non-differentiable, so
+# the view relation is not tracked by autograd, but the version counter is still
+# shared. In other words, their outputs are non-differentiable views of the
+# sparse tensor.
+# FIXME: it would be nicer if TensorOptions was optional-based; we are not adding
+# default arguments for the options since the defaults would never make sense.
+
+- func: sparse_compressed_tensor.comp_plain_value_size(Tensor compressed_indices, Tensor plain_indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
+  dispatch:
+    CompositeExplicitAutograd: sparse_compressed_tensor
+
+- func: sparse_csr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
+- func: sparse_csc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
+- func: sparse_bsr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool?
pin_memory=False) -> Tensor +- func: sparse_bsc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + +- func: sparse_compressed_tensor.comp_plain_value(Tensor compressed_indices, Tensor plain_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + dispatch: + CompositeExplicitAutograd: sparse_compressed_tensor +- func: sparse_csr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor +- func: sparse_csc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor +- func: sparse_bsr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor +- func: sparse_bsc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + +- func: _sparse_compressed_tensor_unsafe(Tensor compressed_indices, Tensor plain_indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + dispatch: + CompositeImplicitAutograd: _sparse_compressed_tensor_unsafe_symint + +- func: _sparse_csr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +- func: _sparse_csc_tensor_unsafe(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +- func: _sparse_bsr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +- func: _sparse_bsc_tensor_unsafe(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + +- func: sparse_coo_tensor.size(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + dispatch: + CompositeExplicitAutograd: sparse_coo_tensor + autogen: sparse_coo_tensor.size_out + +- func: sparse_coo_tensor.indices(Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool? is_coalesced=None) -> Tensor + +- func: sparse_coo_tensor.indices_size(Tensor indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool? is_coalesced=None) -> Tensor + +- func: _sparse_coo_tensor_unsafe(Tensor indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool? is_coalesced=None) -> Tensor + dispatch: + CompositeImplicitAutograd: _sparse_coo_tensor_unsafe_symint + +- func: _validate_sparse_coo_tensor_args(Tensor indices, Tensor values, int[] size, bool? 
is_coalesced=None) -> () + +- func: _validate_sparse_compressed_tensor_args(Tensor compressed_indices, Tensor plain_indices, Tensor values, int[] size, Layout layout) -> () +- func: _validate_sparse_csr_tensor_args(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size) -> () +- func: _validate_sparse_csc_tensor_args(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size) -> () +- func: _validate_sparse_bsr_tensor_args(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size) -> () +- func: _validate_sparse_bsc_tensor_args(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size) -> () + +- func: _sparse_coo_tensor_with_dims(int sparse_dim, int dense_dim, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + dispatch: + SparseCPU, SparseCUDA, SparseMeta, Meta: new_with_dims_sparse + autogen: _sparse_coo_tensor_with_dims.out + +- func: _sparse_coo_tensor_with_dims_and_tensors(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False, bool? is_coalesced=None) -> Tensor + dispatch: + SparseCPU, SparseCUDA, SparseMeta, Meta: new_with_dims_and_tensor_sparse_symint + autogen: _sparse_coo_tensor_with_dims_and_tensors.out + +- func: sparse_resize_(Tensor(a!) self, int[] size, int sparse_dim, int dense_dim) -> Tensor(a!) + use_const_ref_for_mutable_tensors: True + variants: method + dispatch: + SparseCPU, SparseCUDA, SparseMeta: sparse_resize_ + autogen: sparse_resize, sparse_resize.out + +- func: sparse_resize_and_clear_(Tensor(a!) self, int[] size, int sparse_dim, int dense_dim) -> Tensor(a!) + use_const_ref_for_mutable_tensors: True + variants: method + dispatch: + SparseCPU, SparseCUDA, SparseMeta: sparse_resize_and_clear_ + autogen: sparse_resize_and_clear, sparse_resize_and_clear.out + +- func: sparse_mask(Tensor self, Tensor mask) -> Tensor + variants: method + dispatch: + SparseCPU, SparseCUDA: sparse_mask + SparseCsrCPU, SparseCsrCUDA: sparse_mask_sparse_compressed + autogen: sparse_mask.out + +- func: _sparse_mask_projection(Tensor self, Tensor mask, bool accumulate_matches=False) -> Tensor + variants: method + dispatch: + SparseCPU, SparseCUDA: sparse_mask_projection + autogen: _sparse_mask_projection.out + +- func: _to_cpu(Tensor[] tensors) -> Tensor[] + variants: function + +- func: to_dense(Tensor self, ScalarType? dtype=None, *, bool? masked_grad=None) -> Tensor + variants: method + +# Special case of to_dense with custom derivative +- func: _to_dense(Tensor self, ScalarType? dtype=None, bool? masked_grad=None) -> Tensor + variants: method + dispatch: + SparseCPU, SparseCUDA: sparse_to_dense + SparseCsrCPU, SparseCsrCUDA: sparse_compressed_to_dense + MkldnnCPU: mkldnn_to_dense + autogen: _to_dense.out + +- func: to_dense_backward(Tensor grad, Tensor input, bool? 
masked_grad=None) -> Tensor + +- func: sparse_dim(Tensor self) -> int + variants: method + dispatch: + CPU, CUDA: sparse_dim_strided + SparseCPU, SparseCUDA, SparseMeta: sparse_dim_sparse + SparseCsrCPU, SparseCsrCUDA, SparseCsrMeta: sparse_dim_sparse_csr + device_check: NoCheck + device_guard: False + +# legacy method +- func: _dimI(Tensor self) -> int + variants: method + dispatch: + SparseCPU, SparseCUDA: sparse_dim_sparse + device_check: NoCheck + device_guard: False + +- func: dense_dim(Tensor self) -> int + variants: method + dispatch: + CPU, CUDA: dense_dim_strided + SparseCPU, SparseCUDA, SparseMeta: dense_dim_sparse + SparseCsrCPU, SparseCsrCUDA, SparseCsrMeta: dense_dim_sparse_csr + device_check: NoCheck + device_guard: False + +# legacy method +- func: _dimV(Tensor self) -> int + variants: method + dispatch: + SparseCPU, SparseCUDA, SparseMeta: dense_dim_sparse + device_check: NoCheck + device_guard: False + +- func: _nnz(Tensor self) -> int + variants: method + dispatch: + SparseCPU, SparseCUDA, SparseMeta: _nnz_sparse + SparseCsrCPU, SparseCsrCUDA, SparseCsrMeta: _nnz_sparse_csr + device_check: NoCheck + device_guard: False + +# NOTE: [ coalesce autograd ] +# coalesce returns self directly for already coalesced sparse tensors. +# This means coalesce cannot have a derivative registered, otherwise it creates +# circular references in the autograd graph (see gh-52874). +# Instead, the derivative is registered on the slow-path "_coalesce" +- func: coalesce(Tensor(a) self) -> Tensor(a) + variants: method + +- func: _coalesce(Tensor self) -> Tensor + dispatch: + SparseCPU: _coalesce_sparse_cpu + SparseCUDA: _coalesce_sparse_cuda + autogen: _coalesce.out + +- func: is_coalesced(Tensor self) -> bool + variants: method + dispatch: + SparseCPU, SparseCUDA, SparseMeta: is_coalesced_sparse + CompositeExplicitAutograd: is_coalesced_default + device_check: NoCheck + device_guard: False + +- func: _indices(Tensor(a) self) -> Tensor(a) + variants: method + dispatch: + SparseCPU, SparseCUDA, SparseMeta: _indices_sparse + device_check: NoCheck + device_guard: False + +- func: _values(Tensor(a) self) -> Tensor(a) + variants: method + dispatch: + SparseCPU, SparseCUDA, SparseMeta: _values_sparse + device_check: NoCheck + device_guard: False + +# This method doesn't do any check but only directly sets the flag. So it can be +# a bit unsafe. Similar to _indices and _values, this is useful for implementing +# custom sparse operations in Python/C++ extension. +- func: _coalesced_(Tensor(a!) self, bool coalesced) -> Tensor(a!) 
+ variants: method + dispatch: + SparseCPU, SparseCUDA, SparseMeta: _coalesced_sparse_ + device_check: NoCheck + device_guard: False + autogen: _coalesced, _coalesced.out + +- func: indices(Tensor(a) self) -> Tensor(a) + variants: method + dispatch: + SparseCPU, SparseCUDA, SparseMeta: indices_sparse + CompositeExplicitAutograd: indices_default + device_check: NoCheck + device_guard: False + +- func: values(Tensor(a) self) -> Tensor(a) + variants: method + dispatch: + SparseCPU, SparseCUDA, SparseMeta: values_sparse + SparseCsrCPU, SparseCsrCUDA, SparseCsrMeta: values_sparse_csr + NestedTensorCPU, NestedTensorCUDA: values_nested + CompositeExplicitAutograd: values_default + device_check: NoCheck + device_guard: False + +- func: crow_indices(Tensor(a) self) -> Tensor(a) + variants: method + dispatch: + SparseCsrCPU, SparseCsrCUDA, SparseCsrMeta: crow_indices_sparse_csr + CompositeExplicitAutograd: crow_indices_default + device_check: NoCheck + device_guard: False + +- func: col_indices(Tensor(a) self) -> Tensor(a) + variants: method + dispatch: + SparseCsrCPU, SparseCsrCUDA, SparseCsrMeta: col_indices_sparse_csr + CompositeExplicitAutograd: col_indices_default + device_check: NoCheck + device_guard: False + +- func: ccol_indices(Tensor(a) self) -> Tensor(a) + variants: method + dispatch: + SparseCsrCPU, SparseCsrCUDA, SparseCsrMeta: ccol_indices_sparse_csr + CompositeExplicitAutograd: ccol_indices_default + device_check: NoCheck + device_guard: False + +- func: row_indices(Tensor(a) self) -> Tensor(a) + variants: method + dispatch: + SparseCsrCPU, SparseCsrCUDA, SparseCsrMeta: row_indices_sparse_csr + CompositeExplicitAutograd: row_indices_default + device_check: NoCheck + device_guard: False + +- func: hspmm.out(Tensor mat1, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + SparseCPU: hspmm_out_sparse_cpu + SparseCUDA: hspmm_out_sparse_cuda + +- func: hspmm(Tensor mat1, Tensor mat2) -> Tensor + dispatch: + SparseCPU: hspmm_sparse_cpu + SparseCUDA: hspmm_sparse_cuda + +- func: copy_sparse_to_sparse_(Tensor(a!) self, Tensor src, bool non_blocking=False) -> Tensor(a!) + device_check: NoCheck # Allows copy into different device + variants: function + dispatch: + SparseCPU, SparseCUDA: copy_sparse_ + autogen: copy_sparse_to_sparse, copy_sparse_to_sparse.out + +# By adding the AutogradNestedTensor this makes this function CompositeImplicit-like for nested tensors +- func: unbind.int(Tensor(a -> *) self, int dim=0) -> Tensor(a)[] + variants: function, method + dispatch: + CompositeExplicitAutograd: unbind + NestedTensorCPU, NestedTensorCUDA: NestedTensor_unbind + +- func: unbind.Dimname(Tensor(a -> *) self, Dimname dim) -> Tensor(a)[] + variants: function, method + +- func: to_sparse.sparse_dim(Tensor self, int sparse_dim) -> Tensor + variants: method + +# Special case of to_sparse.sparse_dim with custom derivative +- func: _to_sparse.sparse_dim(Tensor self, int sparse_dim) -> Tensor + variants: method + dispatch: + CPU, CUDA: dense_to_sparse + SparseCPU, SparseCUDA: sparse_coo_to_sparse + SparseCsrCPU, SparseCsrCUDA: sparse_compressed_to_sparse + autogen: _to_sparse.sparse_dim_out + +- func: to_sparse(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? dense_dim=None) -> Tensor + variants: method + +# Special case of to_sparse with custom derivative +- func: _to_sparse(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? 
dense_dim=None) -> Tensor + variants: method + dispatch: + CPU, CUDA: dense_to_sparse + SparseCPU, SparseCUDA: sparse_coo_to_sparse + SparseCsrCPU, SparseCsrCUDA: sparse_compressed_to_sparse + autogen: _to_sparse.out + +- func: to_sparse_csr(Tensor self, int? dense_dim=None) -> Tensor + variants: method + +# Special case of to_sparse_csr with custom derivative +- func: _to_sparse_csr(Tensor self, int? dense_dim=None) -> Tensor + variants: method + dispatch: + CPU, CUDA: dense_to_sparse_csr + SparseCPU, SparseCUDA: coo_to_sparse_csr + SparseCsrCPU, SparseCsrCUDA: sparse_compressed_to_sparse_csr + autogen: _to_sparse_csr.out + +- func: to_sparse_csc(Tensor self, int? dense_dim=None) -> Tensor + variants: method + +# Special case of to_sparse_csc with custom derivative +- func: _to_sparse_csc(Tensor self, int? dense_dim=None) -> Tensor + variants: method + dispatch: + CPU, CUDA: dense_to_sparse_csc + SparseCPU, SparseCUDA: coo_to_sparse_csc + SparseCsrCPU, SparseCsrCUDA: sparse_compressed_to_sparse_csc + autogen: _to_sparse_csc.out + +- func: to_sparse_bsr(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor + variants: method + +# Special case of to_sparse_bsr with custom derivative +- func: _to_sparse_bsr(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor + variants: method + dispatch: + CPU, CUDA: dense_to_sparse_bsr + SparseCPU, SparseCUDA: coo_to_sparse_bsr + SparseCsrCPU, SparseCsrCUDA: sparse_compressed_to_sparse_bsr + autogen: _to_sparse_bsr.out + +- func: to_sparse_bsc(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor + variants: method + +# Special case of to_sparse_bsc with custom derivative +- func: _to_sparse_bsc(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor + variants: method + dispatch: + CPU, CUDA: dense_to_sparse_bsc + SparseCPU, SparseCUDA: coo_to_sparse_bsc + SparseCsrCPU, SparseCsrCUDA: sparse_compressed_to_sparse_bsc + autogen: _to_sparse_bsc.out + +- func: _to_sparse_semi_structured(Tensor dense) -> (Tensor, Tensor) + variants: function + dispatch: + CUDA: _to_sparse_semi_structured + +- func: to_mkldnn(Tensor self, ScalarType? dtype=None) -> Tensor + variants: method + dispatch: + CPU: dense_to_mkldnn + autogen: to_mkldnn.out + +- func: mkldnn_reorder_conv2d_weight(Tensor self, SymInt[2] padding=0, SymInt[2] stride=1, SymInt[2] dilation=1, SymInt groups=1, SymInt[]? 
input_size=None) -> Tensor + variants: function + python_module: nn + dispatch: + MkldnnCPU: mkldnn_reorder_conv2d_weight + autogen: mkldnn_reorder_conv2d_weight.out + +- func: mkldnn_reorder_conv3d_weight(Tensor self, SymInt[3] padding=0, SymInt[3] stride=1, SymInt[3] dilation=1, SymInt groups=1) -> Tensor + variants: function + python_module: nn + dispatch: + MkldnnCPU: mkldnn_reorder_conv3d_weight + autogen: mkldnn_reorder_conv3d_weight.out + +- func: to_mkldnn_backward(Tensor grad, Tensor input) -> Tensor + +- func: quantize_per_tensor_dynamic(Tensor self, ScalarType dtype, bool reduce_range) -> Tensor + variants: function + dispatch: + CPU, CUDA: quantize_per_tensor_dynamic + autogen: quantize_per_tensor_dynamic.out + +- func: quantize_per_tensor(Tensor self, float scale, int zero_point, ScalarType dtype) -> Tensor + variants: function + dispatch: + CPU, CUDA: quantize_per_tensor + autogen: quantize_per_tensor.out + +- func: quantize_per_tensor.tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, ScalarType dtype) -> Tensor + variants: function + dispatch: + CPU, CUDA: quantize_per_tensor_tensor_qparams + autogen: quantize_per_tensor.tensor_qparams_out + +- func: quantize_per_tensor.tensors(Tensor[] tensors, Tensor scales, Tensor zero_points, ScalarType dtype) -> Tensor[] + variants: function + dispatch: + CPU: quantize_per_tensor_list_cpu + autogen: quantize_per_tensor.tensors_out + +- func: quantize_per_channel(Tensor self, Tensor scales, Tensor zero_points, int axis, ScalarType dtype) -> Tensor + variants: function + dispatch: + CPU, CUDA: quantize_per_channel + autogen: quantize_per_channel.out + +- func: dequantize.self(Tensor self) -> Tensor + variants: function, method + dispatch: + CPU, CUDA: dequantize_cpu_or_cuda + QuantizedCPU, QuantizedCUDA: dequantize_quantized + autogen: dequantize.self_out + +- func: dequantize.tensors(Tensor[] tensors) -> Tensor[] + variants: function + dispatch: + QuantizedCPU: dequantize_tensors_quantized_cpu + autogen: dequantize.tensors_out + +- func: q_scale(Tensor self) -> float + variants: function, method + dispatch: + QuantizedCPU, QuantizedCUDA: q_scale_quant + +- func: q_zero_point(Tensor self) -> int + variants: function, method + dispatch: + QuantizedCPU, QuantizedCUDA: q_zero_point_quant + +- func: q_per_channel_scales(Tensor self) -> Tensor + variants: function, method + dispatch: + QuantizedCPU, QuantizedCUDA: q_per_channel_scales + autogen: q_per_channel_scales.out + +- func: q_per_channel_zero_points(Tensor self) -> Tensor + variants: function, method + dispatch: + QuantizedCPU, QuantizedCUDA: q_per_channel_zero_points + autogen: q_per_channel_zero_points.out + +- func: q_per_channel_axis(Tensor self) -> int + variants: function, method + dispatch: + QuantizedCPU, QuantizedCUDA: q_per_channel_axis + +- func: int_repr(Tensor self) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + dispatch: + QuantizedCPU: int_repr_quantized_cpu + QuantizedCUDA: int_repr_quantized_cuda + autogen: int_repr.out + +- func: _make_per_tensor_quantized_tensor(Tensor self, float scale, int zero_point) -> Tensor + dispatch: + CPU: make_per_tensor_quantized_tensor_cpu + CUDA: make_per_tensor_quantized_tensor_cuda + autogen: _make_per_tensor_quantized_tensor.out + +- func: _make_per_channel_quantized_tensor(Tensor self, Tensor scale, Tensor zero_point, int axis) -> Tensor + dispatch: + CPU: make_per_channel_quantized_tensor_cpu + CUDA: make_per_channel_quantized_tensor_cuda + autogen: _make_per_channel_quantized_tensor.out 
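+
+# The schemas above only declare dispatcher entries; as a rough orientation, a
+# minimal Python-level sketch of the per-tensor quantization ops they back could
+# look like the following (public torch API; the scale/zero_point values are
+# illustrative assumptions, not defaults):
+#
+#   import torch
+#   x = torch.tensor([-1.0, 0.0, 1.0, 2.0])
+#   q = torch.quantize_per_tensor(x, scale=0.1, zero_point=10, dtype=torch.qint8)
+#   q.q_scale()       # 0.1
+#   q.q_zero_point()  # 10
+#   q.int_repr()      # underlying int8 representation
+#   q.dequantize()    # back to float32, up to quantization error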
+ +- func: qscheme(Tensor self) -> QScheme + variants: method + dispatch: + QuantizedCPU, QuantizedCUDA: qscheme_quant + +- func: fake_quantize_per_tensor_affine(Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> Tensor + device_check: NoCheck # TensorIterator + variants: function + +- func: fake_quantize_per_tensor_affine.tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max) -> Tensor + device_check: NoCheck # TensorIterator + variants: function + +- func: fake_quantize_per_tensor_affine_cachemask(Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> (Tensor output, Tensor mask) + variants: function + dispatch: + CPU, CUDA: fake_quantize_per_tensor_affine_cachemask + autogen: fake_quantize_per_tensor_affine_cachemask.out + +- func: _fake_quantize_per_tensor_affine_cachemask_tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, Tensor fake_quant_enabled, int quant_min, int quant_max) -> (Tensor output, Tensor mask) + variants: function + dispatch: + CPU, CUDA: _fake_quantize_per_tensor_affine_cachemask_tensor_qparams + autogen: _fake_quantize_per_tensor_affine_cachemask_tensor_qparams.out + +- func: fake_quantize_per_tensor_affine_cachemask_backward(Tensor grad, Tensor mask) -> Tensor + variants: function + +- func: _fake_quantize_learnable_per_tensor_affine(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.0) -> Tensor + variants: function + dispatch: + CPU, CUDA: _fake_quantize_learnable_per_tensor_affine + autogen: _fake_quantize_learnable_per_tensor_affine.out + +- func: _fake_quantize_learnable_per_tensor_affine_backward(Tensor grad, Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.0) -> (Tensor, Tensor, Tensor) + variants: function + dispatch: + CPU, CUDA: _fake_quantize_learnable_per_tensor_affine_backward + +- func: fake_quantize_per_channel_affine(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> Tensor + device_check: NoCheck # TensorIterator + variants: function + +- func: fake_quantize_per_channel_affine_cachemask(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> (Tensor output, Tensor mask) + variants: function + dispatch: + CPU, CUDA: fake_quantize_per_channel_affine_cachemask + autogen: fake_quantize_per_channel_affine_cachemask.out + +- func: fake_quantize_per_channel_affine_cachemask_backward(Tensor grad, Tensor mask) -> Tensor + variants: function + +- func: _fake_quantize_learnable_per_channel_affine(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, float grad_factor=1.0) -> Tensor + variants: function + dispatch: + CPU, CUDA: _fake_quantize_learnable_per_channel_affine + autogen: _fake_quantize_learnable_per_channel_affine.out + +- func: _fake_quantize_learnable_per_channel_affine_backward(Tensor grad, Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, float grad_factor=1.0) -> (Tensor, Tensor, Tensor) + variants: function + dispatch: + CPU, CUDA: _fake_quantize_learnable_per_channel_affine_backward + +- func: fused_moving_avg_obs_fake_quant(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) 
zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> Tensor + variants: function + +- func: _fused_moving_avg_obs_fq_helper(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> (Tensor output, Tensor mask) + dispatch: + CPU: fused_moving_avg_obs_fake_quant_cpu + CUDA: fused_moving_avg_obs_fake_quant_cuda + autogen: _fused_moving_avg_obs_fq_helper_functional, _fused_moving_avg_obs_fq_helper.out + +- func: _choose_qparams_per_tensor(Tensor self, bool reduce_range=False) -> (float, int) + variants: function + +- func: _saturate_weight_to_fp16(Tensor weight) -> Tensor + variants: function + +- func: choose_qparams_optimized(Tensor input, int numel, int n_bins, float ratio, int bit_width) -> (Tensor, Tensor) + variants: function + +- func: _autocast_to_reduced_precision(Tensor(a) self, bool cuda_enabled, bool cpu_enabled, ScalarType cuda_dtype, ScalarType cpu_dtype) -> Tensor(a) + variants: method + device_guard: False + +- func: _autocast_to_full_precision(Tensor(a) self, bool cuda_enabled, bool cpu_enabled) -> Tensor(a) + variants: method + device_guard: False + +- func: _to_copy(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, MemoryFormat? memory_format=None) -> Tensor + device_check: NoCheck + device_guard: False + dispatch: + CompositeExplicitAutograd: _to_copy + NestedTensorCPU, NestedTensorCUDA: _to_copy_nested + autogen: _to_copy.out + tags: core + +# to(Device) must not exist because all constructors of Device also works for +# TensorOptions. Otherwise, an ambiguity error is thrown. +# See NOTE [ TensorOptions Constructors ]. +- func: to.dtype_layout(Tensor(a) self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a) + variants: method + device_check: NoCheck + device_guard: False + +- func: to.device(Tensor(a) self, Device device, ScalarType dtype, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a) + variants: method + device_check: NoCheck + device_guard: False + +- func: to.dtype(Tensor(a) self, ScalarType dtype, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a) + variants: method + device_check: NoCheck + device_guard: False + +- func: to.other(Tensor(a) self, Tensor other, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a) + variants: method + device_check: NoCheck + device_guard: False + +- func: meshgrid(Tensor[] tensors) -> Tensor[] + +# TODO: Two weeks after this lands, combine these two overloads, +# making "indexing" optional. These are temporarily distinct for +# forward-compatibility reasons. 
+- func: meshgrid.indexing(Tensor[] tensors, *, str indexing) -> Tensor[] + +- func: cartesian_prod(Tensor[] tensors) -> Tensor + variants: function + +- func: combinations(Tensor self, int r=2, bool with_replacement=False) -> Tensor + variants: function + +- func: item(Tensor self) -> Scalar + tags: data_dependent_output + variants: method + +- func: result_type.Tensor(Tensor tensor, Tensor other) -> ScalarType + variants: function + +- func: result_type.Scalar(Tensor tensor, Scalar other) -> ScalarType + variants: function + +- func: result_type.Scalar_Tensor(Scalar scalar, Tensor tensor) -> ScalarType + variants: function + +- func: result_type.Scalar_Scalar(Scalar scalar1, Scalar scalar2) -> ScalarType + +- func: can_cast(ScalarType from, ScalarType to) -> bool + variants: function + +- func: promote_types(ScalarType type1, ScalarType type2) -> ScalarType + variants: function + +# NB: Does NOT check precondition that numel == 1 +- func: _local_scalar_dense(Tensor self) -> Scalar + tags: [core, data_dependent_output] + dispatch: + CPU: _local_scalar_dense_cpu + CUDA: _local_scalar_dense_cuda + MPS: _local_scalar_dense_mps + variants: function + +# MPS LSTM implementation + +- func: _lstm_mps(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor) + dispatch: + MPS: _lstm_mps + autogen: _lstm_mps.out + tags: nondeterministic_seeded + +- func: lstm_mps_backward(Tensor? grad_y, Tensor? grad_hy, Tensor? grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor layersOutputs, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor[], Tensor[]) + dispatch: + MPS: lstm_mps_backward + autogen: lstm_mps_backward.out + + +# Fused RNN kernels +- func: _thnn_fused_lstm_cell(Tensor input_gates, Tensor hidden_gates, Tensor cx, Tensor? input_bias=None, Tensor? hidden_bias=None) -> (Tensor, Tensor, Tensor) + dispatch: + CUDA: _thnn_fused_lstm_cell_cuda + autogen: _thnn_fused_lstm_cell.out + +# NB: The composite version of this function below is a simple wrapper that duplicates some of the outputs +# It is necessary to avoid triggering TensorImpl use count checks in debug mode +# NB: this is function is NOT differentiable +- func: _thnn_fused_lstm_cell_backward_impl(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor) + dispatch: + CUDA: _thnn_fused_lstm_cell_backward_impl_cuda + autogen: _thnn_fused_lstm_cell_backward_impl.out + +- func: _thnn_fused_lstm_cell_backward(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor) + +- func: _thnn_differentiable_lstm_cell_backward(Tensor? grad_hy, Tensor? grad_cy, Tensor input_gates, Tensor hidden_gates, Tensor? input_bias, Tensor? hidden_bias, Tensor cx, Tensor cy) -> (Tensor, Tensor, Tensor, Tensor, Tensor) + +- func: _thnn_fused_gru_cell(Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias=None, Tensor? 
hidden_bias=None) -> (Tensor, Tensor) + dispatch: + CUDA: _thnn_fused_gru_cell_cuda + autogen: _thnn_fused_gru_cell.out + +- func: _thnn_fused_gru_cell_backward(Tensor grad_hy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor) + dispatch: + CUDA: _thnn_fused_gru_cell_backward_cuda + autogen: _thnn_fused_gru_cell_backward.out + +- func: _thnn_differentiable_gru_cell_backward(Tensor grad_hy, Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias, Tensor? hidden_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor) + +# RNN cells and layers +- func: lstm.input(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor) + tags: nondeterministic_seeded + +- func: lstm.data(Tensor data, Tensor batch_sizes, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor, Tensor) + tags: nondeterministic_seeded + +- func: gru.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor) + tags: nondeterministic_seeded + +- func: gru.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor) + tags: nondeterministic_seeded + +- func: rnn_tanh.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor) + tags: nondeterministic_seeded + +- func: rnn_tanh.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor) + tags: nondeterministic_seeded + +- func: rnn_relu.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor) + tags: nondeterministic_seeded + +- func: rnn_relu.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor) + tags: nondeterministic_seeded + +- func: lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> (Tensor, Tensor) + +- func: gru_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor + +- func: rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor + +- func: rnn_relu_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor + +# Quantized RNN layer registration has been moved to C10 dispatch in `RNN.cpp` + +# Quantized RNN layers +# - func: quantized_lstm(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, ScalarType? dtype=None, bool use_dynamic=False) -> (Tensor, Tensor, Tensor) + + +# - func: quantized_lstm.data(Tensor data, Tensor batch_sizes, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, *, ScalarType? 
dtype=None, bool use_dynamic=False) -> (Tensor, Tensor, Tensor) + + +# Quantized GRU layers + +# - func: quantized_gru.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor) +# + +# - func: quantized_gru.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor) +# + +# Quantized RNN cells +- func: quantized_lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> (Tensor, Tensor) + +- func: quantized_gru_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor + +- func: quantized_rnn_relu_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor + +- func: quantized_rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor + +# PackedSequence utilities +- func: _pack_padded_sequence(Tensor input, Tensor lengths, bool batch_first) -> (Tensor, Tensor) + dispatch: + CompositeExplicitAutograd: _pack_padded_sequence + autogen: _pack_padded_sequence.out + +- func: _pack_padded_sequence_backward(Tensor grad, SymInt[] input_size, Tensor batch_sizes, bool batch_first) -> Tensor + dispatch: + CompositeImplicitAutograd: _pack_padded_sequence_backward_symint + +- func: _pad_packed_sequence(Tensor data, Tensor batch_sizes, bool batch_first, Scalar padding_value, int total_length) -> (Tensor, Tensor) + +# wrappers for legacy TH methods + +- func: set_.source_Storage(Tensor(a!) self, Storage source) -> Tensor(a!) + variants: method + device_check: NoCheck + device_guard: False + dispatch: + CPU, CUDA, Meta, MPS: set_ + autogen: set.source_Storage, set.source_Storage_out + tags: inplace_view + +- func: set_.source_Storage_storage_offset(Tensor(a!) self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor(a!) + variants: method + device_check: NoCheck + device_guard: False + dispatch: + CPU: set_storage_cpu_ + Meta: set_storage_meta__symint + CUDA: set_storage_cuda_ + MPS: set_storage_mps_ + QuantizedCPU, QuantizedCUDA: set_storage_quantized_ + autogen: set.source_Storage_storage_offset, set.source_Storage_storage_offset_out + tags: inplace_view + +- func: set_.source_Tensor_storage_offset(Tensor(a!) self, Tensor source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor(a!) + variants: method + device_check: NoCheck + device_guard: False + dispatch: + CompositeImplicitAutograd: set__symint + tags: inplace_view + +- func: set_.source_Tensor(Tensor(a!) self, Tensor source) -> Tensor(a!) 
+ variants: method + device_check: NoCheck + device_guard: False + dispatch: + CPU, CUDA, Meta, MPS: set_tensor_ + autogen: set.source_Tensor, set.source_Tensor_out + tags: inplace_view + +- func: set_(Tensor(a!) self) -> Tensor(a!) + variants: method + dispatch: + CPU: set_cpu_ + CUDA: set_cuda_ + Meta: set_meta_ + MPS: set_mps_ + autogen: set, set.out + tags: inplace_view + +# Not making it CompositeImplicitAutograd because lift +# should be a primitive w.r.t. functorch + +# TODO: this should have a view annotation +# TODO: shouldn't be a method +- func: lift(Tensor self) -> Tensor + dispatch: + CompositeExplicitAutograd: lift + autogen: lift.out + +# lift_fresh is called with an argument that is guaranteed to be +# fresh (i.e., newly allocated). This is ONLY called from a +# torch.tensor call; if you FX trace a lift_fresh, you are obligated +# to convert this into a lift_fresh_copy (because FX will violate the +# freshness invariant when tracing). +- func: lift_fresh(Tensor(a) self) -> Tensor(a) + dispatch: + CompositeExplicitAutograd: lift_fresh + +# Like lift, but it clones the input. +- func: lift_fresh_copy(Tensor self) -> Tensor + tags: view_copy + dispatch: + CompositeExplicitAutogradNonFunctional: lift_fresh_copy + autogen: lift_fresh_copy.out + +- func: is_set_to(Tensor self, Tensor tensor) -> bool + variants: method + device_check: NoCheck + device_guard: False + dispatch: + CPU, CUDA, MPS: is_set_to + +- func: masked_fill_.Scalar(Tensor(a!) self, Tensor mask, Scalar value) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: method + dispatch: + CPU: masked_fill__cpu + CUDA: masked_fill__cuda + QuantizedCPU: masked_fill__quantized_cpu + QuantizedCUDA: masked_fill__quantized_cuda + MPS: masked_fill__mps + autogen: masked_fill.Scalar_out + +- func: masked_fill.Scalar(Tensor self, Tensor mask, Scalar value) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + dispatch: + CompositeExplicitAutograd: masked_fill + NestedTensorCPU, NestedTensorCUDA: NestedTensor_masked_fill + tags: pointwise + +- func: masked_fill_.Tensor(Tensor(a!) self, Tensor mask, Tensor value) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: method + dispatch: + CPU: masked_fill__cpu + CUDA: masked_fill__cuda + QuantizedCPU: masked_fill__quantized_cpu + QuantizedCUDA: masked_fill__quantized_cuda + MPS: masked_fill__mps + autogen: masked_fill.Tensor_out + +- func: masked_fill.Tensor(Tensor self, Tensor mask, Tensor value) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + dispatch: + CompositeExplicitAutograd: masked_fill + +- func: masked_scatter_(Tensor(a!) self, Tensor mask, Tensor source) -> Tensor(a!) + variants: method + dispatch: + CPU: masked_scatter__cpu + CUDA: masked_scatter__cuda + MPS: masked_scatter__mps + autogen: masked_scatter.out + +- func: masked_scatter(Tensor self, Tensor mask, Tensor source) -> Tensor + variants: function, method + dispatch: + CompositeExplicitAutograd: masked_scatter + +- func: masked_scatter_backward(Tensor grad_output, Tensor mask, SymInt[] sizes) -> Tensor + dispatch: + CompositeExplicitAutograd: masked_scatter_backward_symint + +- func: _masked_softmax(Tensor self, Tensor mask, int? dim=None, int? mask_type=None) -> Tensor + dispatch: + CUDA: masked_softmax_cuda + CPU: masked_softmax_cpu + autogen: _masked_softmax.out + +- func: _masked_softmax_backward(Tensor grad_output, Tensor output, Tensor mask, int? 
dim=None) -> Tensor + dispatch: + CUDA: masked_softmax_backward_cuda + CPU: masked_softmax_backward_cpu + autogen: _masked_softmax_backward.out + +- func: view(Tensor(a) self, SymInt[] size) -> Tensor(a) + variants: method + device_check: NoCheck + device_guard: False + dispatch: + ZeroTensor, Meta, CPU, CUDA, QuantizedCPU, QuantizedCUDA, MPS: view + MkldnnCPU: mkldnn_view + NestedTensorCPU, NestedTensorCUDA: view_nested + tags: core + +# Warning: If you want to change the name or overload name of this +# operator, you might also want to change the `isBlockListedSchema` +# function in `torch/csrc/jit/frontend/schema_catching.cpp`. +# The name and overload name of this operator is hardcoded in that +# function in order to workaround a bug: +# https://github.com/pytorch/pytorch/issues/47964 +- func: view.dtype(Tensor(a) self, ScalarType dtype) -> Tensor(a) + variants: method + device_check: NoCheck + device_guard: False + dispatch: + CompositeExplicitAutograd: view_dtype + +- func: put_(Tensor(a!) self, Tensor index, Tensor source, bool accumulate=False) -> Tensor(a!) + variants: method + dispatch: + CPU, CUDA: put_ + autogen: put.out + +- func: put(Tensor self, Tensor index, Tensor source, bool accumulate=False) -> Tensor + variants: function, method + dispatch: + CompositeExplicitAutograd: put + +- func: index_add.out(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) + structured: True + variants: function + precomputed: + - dim -> int dim + dispatch: + CPU: index_add_cpu_out + CUDA: index_add_cuda_out + MPS: index_add_mps_out + +- func: index_add_(Tensor(a!) self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor(a!) + structured_delegate: index_add.out + variants: method + +- func: index_add(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor + structured_delegate: index_add.out + variants: function, method + +- func: index_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor + variants: function, method + +- func: index_reduce.out(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!) + structured: True + variants: function + precomputed: + - dim -> int dim + dispatch: + CPU: index_reduce_cpu_out + CUDA: index_reduce_cuda_out + +- func: index_reduce_(Tensor(a!) self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor(a!) + structured_delegate: index_reduce.out + variants: method + +- func: index_reduce(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor + structured_delegate: index_reduce.out + variants: function, method + +- func: index_fill_.int_Scalar(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: method + dispatch: + CPU: index_fill_ + CUDA: index_fill_ + MPS: index_fill_mps_ + autogen: index_fill.int_Scalar_out + +- func: index_fill.int_Scalar(Tensor self, int dim, Tensor index, Scalar value) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + dispatch: + CompositeExplicitAutograd: index_fill + +- func: index_fill_.int_Tensor(Tensor(a!) self, int dim, Tensor index, Tensor value) -> Tensor(a!) 
+ device_check: NoCheck # TensorIterator + variants: method + dispatch: + CPU, CUDA: index_fill_ + MPS: index_fill_mps_ + autogen: index_fill.int_Tensor_out + +- func: index_fill.int_Tensor(Tensor self, int dim, Tensor index, Tensor value) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + dispatch: + CompositeExplicitAutograd: index_fill + +- func: index_fill_.Dimname_Scalar(Tensor(a!) self, Dimname dim, Tensor index, Scalar value) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: method + +- func: index_fill_.Dimname_Tensor(Tensor(a!) self, Dimname dim, Tensor index, Tensor value) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: method + +- func: index_fill.Dimname_Scalar(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + +- func: index_fill.Dimname_Tensor(Tensor self, Dimname dim, Tensor index, Tensor value) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + +- func: scatter.src(Tensor self, int dim, Tensor index, Tensor src) -> Tensor + structured_delegate: scatter.src_out + variants: function, method + tags: core + +- func: scatter_.src(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!) + structured_delegate: scatter.src_out + variants: method + +- func: scatter.src_out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!) + structured: True + variants: function + dispatch: + CPU, CUDA: scatter_src_out + MPS: scatter_src_out_mps + +- func: scatter.value(Tensor self, int dim, Tensor index, Scalar value) -> Tensor + structured_delegate: scatter.value_out + variants: function, method + tags: core + +- func: scatter_.value(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!) + structured_delegate: scatter.value_out + variants: method + +- func: scatter.value_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) out) -> Tensor(a!) + structured: True + variants: function + dispatch: + CPU, CUDA: scatter_value_out + MPS: scatter_value_out_mps + +- func: scatter.reduce(Tensor self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor + structured_delegate: scatter.reduce_out + variants: function, method + +- func: scatter_.reduce(Tensor(a!) self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor(a!) + structured_delegate: scatter.reduce_out + variants: method + +- func: scatter.reduce_out(Tensor self, int dim, Tensor index, Tensor src, *, str reduce, Tensor(a!) out) -> Tensor(a!) + structured: True + variants: function + dispatch: + CPU, CUDA: scatter_reduce_out + MPS: scatter_reduce_out_mps + +- func: scatter.value_reduce(Tensor self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor + structured_delegate: scatter.value_reduce_out + variants: function, method + +- func: scatter_.value_reduce(Tensor(a!) self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor(a!) + structured_delegate: scatter.value_reduce_out + variants: method + +- func: scatter.value_reduce_out(Tensor self, int dim, Tensor index, Scalar value, *, str reduce, Tensor(a!) out) -> Tensor(a!) 
+ structured: True + variants: function + dispatch: + CPU, CUDA: scatter_value_reduce_out + MPS: scatter_value_reduce_out_mps + +- func: scatter.dimname_src(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor + variants: function, method + +- func: scatter.dimname_value(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor + variants: function, method + +- func: scatter_add(Tensor self, int dim, Tensor index, Tensor src) -> Tensor + structured_delegate: scatter_add.out + variants: function, method + tags: core + +- func: scatter_add_(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!) + structured_delegate: scatter_add.out + variants: method + +- func: scatter_add.out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!) + structured: True + variants: function + dispatch: + CPU, CUDA: scatter_add + MPS: scatter_add_mps_out + +- func: scatter_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor + variants: function, method + +- func: scatter_reduce.two(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True) -> Tensor + structured_delegate: scatter_reduce.two_out + variants: function, method + tags: core + +- func: scatter_reduce_.two(Tensor(a!) self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True) -> Tensor(a!) + structured_delegate: scatter_reduce.two_out + variants: method + +- func: scatter_reduce.two_out(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!) + structured: True + variants: function + dispatch: + CPU, CUDA: scatter_reduce_two + +- func: eq_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + structured_delegate: eq.Scalar_out + device_check: NoCheck # TensorIterator + variants: method + +- func: eq_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + structured_delegate: eq.Tensor_out + device_check: NoCheck # TensorIterator + variants: method + +- func: bitwise_and.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured: True + structured_inherits: TensorIteratorBase + variants: function + dispatch: + CPU, CUDA: bitwise_and_out + MPS: bitwise_and_out_mps + tags: pointwise + +- func: bitwise_and.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: function + dispatch: + CompositeExplicitAutograd: bitwise_and_out + tags: pointwise + +- func: bitwise_and.Scalar(Tensor self, Scalar other) -> Tensor + device_check: NoCheck # TensorIterator + variants: method, function + dispatch: + CompositeExplicitAutograd: bitwise_and + tags: [core, pointwise] + +- func: bitwise_and.Scalar_Tensor(Scalar self, Tensor other) -> Tensor + device_check: NoCheck # TensorIterator + variants: function + dispatch: + CompositeExplicitAutograd: bitwise_and + autogen: bitwise_and.Scalar_Tensor_out + tags: pointwise + +- func: bitwise_and.Tensor(Tensor self, Tensor other) -> Tensor + device_check: NoCheck # TensorIterator + variants: method, function + structured_delegate: bitwise_and.Tensor_out + tags: [core, pointwise] + +- func: bitwise_and_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: method + dispatch: + CompositeExplicitAutograd: bitwise_and_ + tags: pointwise + +- func: bitwise_and_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) 
+ device_check: NoCheck # TensorIterator + variants: method + structured_delegate: bitwise_and.Tensor_out + tags: pointwise + +- func: __and__.Scalar(Tensor self, Scalar other) -> Tensor + device_check: NoCheck # TensorIterator + variants: method, function + +- func: __and__.Tensor(Tensor self, Tensor other) -> Tensor + device_check: NoCheck # TensorIterator + variants: method, function + +- func: __iand__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: method + +- func: __iand__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: method + +- func: bitwise_or.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured: True + structured_inherits: TensorIteratorBase + variants: function + dispatch: + CPU, CUDA: bitwise_or_out + MPS: bitwise_or_out_mps + tags: pointwise + +- func: bitwise_or.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: function + dispatch: + CompositeExplicitAutograd: bitwise_or_out + tags: pointwise + +- func: bitwise_or.Scalar(Tensor self, Scalar other) -> Tensor + device_check: NoCheck # TensorIterator + variants: method, function + dispatch: + CompositeExplicitAutograd: bitwise_or + tags: [core, pointwise] + +- func: bitwise_or.Scalar_Tensor(Scalar self, Tensor other) -> Tensor + device_check: NoCheck # TensorIterator + variants: function + dispatch: + CompositeExplicitAutograd: bitwise_or + autogen: bitwise_or.Scalar_Tensor_out + tags: pointwise + +- func: bitwise_or.Tensor(Tensor self, Tensor other) -> Tensor + device_check: NoCheck # TensorIterator + variants: method, function + structured_delegate: bitwise_or.Tensor_out + tags: [core, pointwise] + +- func: bitwise_or_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: method + dispatch: + CompositeExplicitAutograd: bitwise_or_ + tags: pointwise + +- func: bitwise_or_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: method + structured_delegate: bitwise_or.Tensor_out + tags: pointwise + +- func: __or__.Scalar(Tensor self, Scalar other) -> Tensor + device_check: NoCheck # TensorIterator + variants: method, function + +- func: __or__.Tensor(Tensor self, Tensor other) -> Tensor + device_check: NoCheck # TensorIterator + variants: method, function + +- func: __ior__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: method + +- func: __ior__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: method + +- func: bitwise_xor.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured: True + structured_inherits: TensorIteratorBase + variants: function + dispatch: + CPU, CUDA: bitwise_xor_out + MPS: bitwise_xor_out_mps + tags: pointwise + +- func: bitwise_xor.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) 
+ device_check: NoCheck # TensorIterator + variants: function + dispatch: + CompositeExplicitAutograd: bitwise_xor_out + tags: pointwise + +- func: bitwise_xor.Scalar(Tensor self, Scalar other) -> Tensor + device_check: NoCheck # TensorIterator + variants: method, function + dispatch: + CompositeExplicitAutograd: bitwise_xor + tags: [core, pointwise] + +- func: bitwise_xor.Scalar_Tensor(Scalar self, Tensor other) -> Tensor + device_check: NoCheck # TensorIterator + variants: function + dispatch: + CompositeExplicitAutograd: bitwise_xor + autogen: bitwise_xor.Scalar_Tensor_out + tags: pointwise + +- func: bitwise_xor.Tensor(Tensor self, Tensor other) -> Tensor + device_check: NoCheck # TensorIterator + variants: method, function + structured_delegate: bitwise_xor.Tensor_out + tags: [core, pointwise] + +- func: bitwise_xor_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: method + dispatch: + CompositeExplicitAutograd: bitwise_xor_ + tags: pointwise + +- func: bitwise_xor_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: method + structured_delegate: bitwise_xor.Tensor_out + tags: pointwise + +- func: __xor__.Scalar(Tensor self, Scalar other) -> Tensor + device_check: NoCheck # TensorIterator + variants: method, function + tags: pointwise + +- func: __xor__.Tensor(Tensor self, Tensor other) -> Tensor + device_check: NoCheck # TensorIterator + variants: method, function + tags: pointwise + +- func: __ixor__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: method + tags: pointwise + +- func: __ixor__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: method + tags: pointwise + +- func: __lshift__.Scalar(Tensor self, Scalar other) -> Tensor + device_check: NoCheck # TensorIterator + variants: method, function + dispatch: + CPU, CUDA: __lshift__ + tags: pointwise + +- func: __lshift__.Tensor(Tensor self, Tensor other) -> Tensor + device_check: NoCheck # TensorIterator + variants: method, function + dispatch: + CPU, CUDA: __lshift__ + tags: pointwise + +- func: __ilshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: method + dispatch: + CPU, CUDA: __ilshift__ + autogen: __lshift__.Scalar_out + tags: pointwise + +- func: __ilshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: method + dispatch: + CPU, CUDA: __ilshift__ + autogen: __lshift__.Tensor_out + tags: pointwise + +- func: bitwise_left_shift.Tensor(Tensor self, Tensor other) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + structured_delegate: bitwise_left_shift.Tensor_out + tags: pointwise + +- func: bitwise_left_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: method + structured_delegate: bitwise_left_shift.Tensor_out + tags: pointwise + +- func: bitwise_left_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
+ device_check: NoCheck # TensorIterator + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: bitwise_left_shift_out + tags: pointwise + +- func: bitwise_left_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor + device_check: NoCheck # TensorIterator + variants: method, function + dispatch: + CompositeExplicitAutograd: bitwise_left_shift + tags: pointwise + +- func: bitwise_left_shift_.Tensor_Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: method + dispatch: + CompositeExplicitAutograd: bitwise_left_shift_ + tags: pointwise + +- func: bitwise_left_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: function + dispatch: + CompositeExplicitAutograd: bitwise_left_shift_out + tags: pointwise + +- func: bitwise_left_shift.Scalar_Tensor(Scalar self, Tensor other) -> Tensor + device_check: NoCheck # TensorIterator + variants: function + dispatch: + CompositeExplicitAutograd: bitwise_left_shift + autogen: bitwise_left_shift.Scalar_Tensor_out + tags: pointwise + +- func: __rshift__.Scalar(Tensor self, Scalar other) -> Tensor + device_check: NoCheck # TensorIterator + variants: method, function + dispatch: + CPU, CUDA: __rshift__ + tags: pointwise + +- func: __rshift__.Tensor(Tensor self, Tensor other) -> Tensor + device_check: NoCheck # TensorIterator + variants: method, function + dispatch: + CPU, CUDA: __rshift__ + tags: pointwise + +- func: __irshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: method + dispatch: + CPU, CUDA: __irshift__ + autogen: __rshift__.Scalar_out + +- func: __irshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: method + dispatch: + CPU, CUDA: __irshift__ + autogen: __rshift__.Tensor_out + +- func: bitwise_right_shift.Tensor(Tensor self, Tensor other) -> Tensor + device_check: NoCheck # TensorIterator + variants: function, method + structured_delegate: bitwise_right_shift.Tensor_out + tags: pointwise + +- func: bitwise_right_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: method + structured_delegate: bitwise_right_shift.Tensor_out + tags: pointwise + +- func: bitwise_right_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: bitwise_right_shift_out + tags: pointwise + +- func: bitwise_right_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor + device_check: NoCheck # TensorIterator + variants: method, function + dispatch: + CompositeExplicitAutograd: bitwise_right_shift + tags: pointwise + +- func: bitwise_right_shift_.Tensor_Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: method + dispatch: + CompositeExplicitAutograd: bitwise_right_shift_ + tags: pointwise + +- func: bitwise_right_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) 
+ device_check: NoCheck # TensorIterator + variants: function + dispatch: + CompositeExplicitAutograd: bitwise_right_shift_out + tags: pointwise + +- func: bitwise_right_shift.Scalar_Tensor(Scalar self, Tensor other) -> Tensor + device_check: NoCheck # TensorIterator + variants: function + dispatch: + CompositeExplicitAutograd: bitwise_right_shift + autogen: bitwise_right_shift.Scalar_Tensor_out + tags: pointwise + +- func: tril_(Tensor(a!) self, int diagonal=0) -> Tensor(a!) + structured_delegate: tril.out + variants: method + +- func: triu_(Tensor(a!) self, int diagonal=0) -> Tensor(a!) + structured_delegate: triu.out + variants: method + +- func: digamma_(Tensor(a!) self) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured_delegate: digamma.out + variants: method + tags: pointwise + +- func: lerp_.Scalar(Tensor(a!) self, Tensor end, Scalar weight) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: method + structured_delegate: lerp.Scalar_out + tags: pointwise + +- func: lerp_.Tensor(Tensor(a!) self, Tensor end, Tensor weight) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: method + structured_delegate: lerp.Tensor_out + tags: pointwise + +- func: addbmm_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!) + variants: method + dispatch: + CPU, CUDA: addbmm_ + MPS: addbmm_mps_ + +- func: addbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) + dispatch: + CPU, CUDA: addbmm_out + MPS: addbmm_out_mps + +- func: addbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor + variants: method, function + dispatch: + CPU, CUDA: addbmm + MPS: addbmm_mps + +- func: random_.from(Tensor(a!) self, int from, int? to, *, Generator? generator=None) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: method + tags: nondeterministic_seeded + dispatch: + CPU, CUDA: random_ + Meta: random_meta_ + MPS: random_mps_ + autogen: random.from, random.from_out + +- func: random_.to(Tensor(a!) self, int to, *, Generator? generator=None) -> Tensor(a!) + device_check: NoCheck # TensorIterator + tags: nondeterministic_seeded + variants: method + dispatch: + CPU, CUDA: random_ + Meta: random_meta_ + MPS: random_mps_ + autogen: random.to, random.to_out + +- func: random_(Tensor(a!) self, *, Generator? generator=None) -> Tensor(a!) + device_check: NoCheck # TensorIterator + tags: nondeterministic_seeded + variants: method + dispatch: + CPU, CUDA: random_ + MPS: random_mps_ + Meta: random_meta_ + autogen: random, random.out + +- func: uniform_(Tensor(a!) self, float from=0, float to=1, *, Generator? generator=None) -> Tensor(a!) + device_check: NoCheck # TensorIterator + tags: nondeterministic_seeded + variants: method + dispatch: + CPU, CUDA: uniform_ + MPS: uniform_mps_ + Meta: uniform_meta_ + autogen: uniform, uniform.out + +- func: cauchy_(Tensor(a!) self, float median=0, float sigma=1, *, Generator? generator=None) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: method + tags: nondeterministic_seeded + dispatch: + CPU, CUDA: cauchy_ + autogen: cauchy, cauchy.out + +- func: log_normal_(Tensor(a!) self, float mean=1, float std=2, *, Generator? generator=None) -> Tensor(a!) + device_check: NoCheck # TensorIterator + tags: nondeterministic_seeded + variants: method + dispatch: + CPU, CUDA: log_normal_ + autogen: log_normal, log_normal.out + +- func: exponential_(Tensor(a!) 
self, float lambd=1, *, Generator? generator=None) -> Tensor(a!) + device_check: NoCheck # TensorIterator + tags: nondeterministic_seeded + variants: method + dispatch: + CPU, CUDA: exponential_ + MPS: exponential_mps_ + autogen: exponential, exponential.out + +- func: geometric_(Tensor(a!) self, float p, *, Generator? generator=None) -> Tensor(a!) + device_check: NoCheck # TensorIterator + tags: nondeterministic_seeded + variants: method + dispatch: + CPU, CUDA: geometric_ + + # wrappers for TH functions + autogen: geometric, geometric.out + +- func: diag.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!) + +- func: diag(Tensor self, int diagonal=0) -> Tensor + variants: method, function + +- func: cross.out(Tensor self, Tensor other, int? dim=None, *, Tensor(a!) out) -> Tensor(a!) + +- func: cross(Tensor self, Tensor other, int? dim=None) -> Tensor + variants: method, function + +- func: triu.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!) + structured: True + dispatch: + CPU: triu_cpu + CUDA: triu_cuda + MPS: triu_mps_out + +- func: triu(Tensor self, int diagonal=0) -> Tensor + structured_delegate: triu.out + variants: method, function + +- func: tril.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!) + structured: True + dispatch: + CPU: tril_cpu + CUDA: tril_cuda + MPS: tril_mps_out + +- func: tril(Tensor self, int diagonal=0) -> Tensor + structured_delegate: tril.out + variants: method, function + +- func: tril_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + dispatch: + CPU: tril_indices_cpu + CUDA: tril_indices_cuda + autogen: tril_indices.out + +- func: triu_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + dispatch: + CPU: triu_indices_cpu + CUDA: triu_indices_cuda + autogen: triu_indices.out + +- func: trace(Tensor self) -> Tensor + variants: method, function + dispatch: + CPU: trace_cpu + CUDA: trace_cuda + MPS: trace_mps + autogen: trace.out + +- func: trace_backward(Tensor grad, SymInt[] sizes) -> Tensor + variants: function + device_check: NoCheck + device_guard: False + dispatch: + CompositeImplicitAutograd: trace_backward_symint + +- func: ne.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + structured: True + structured_inherits: TensorIteratorBase + device_check: NoCheck # TensorIterator + dispatch: + CPU, CUDA: ne_Scalar_out + MPS: ne_scalar_out_mps + QuantizedCPU: ne_out_quantized_cpu + tags: pointwise + +- func: ne.Scalar(Tensor self, Scalar other) -> Tensor + structured_delegate: ne.Scalar_out + device_check: NoCheck # TensorIterator + variants: method, function + dispatch: + QuantizedCPU: ne_quantized_cpu + tags: [core, pointwise] + +- func: ne.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + structured: True + structured_inherits: TensorIteratorBase + device_check: NoCheck # TensorIterator + dispatch: + CPU, CUDA: ne_Tensor_out + MPS: ne_tensor_out_mps + QuantizedCPU: ne_out_quantized_cpu + tags: pointwise + +- func: ne.Tensor(Tensor self, Tensor other) -> Tensor + structured_delegate: ne.Tensor_out + device_check: NoCheck # TensorIterator + variants: method, function + dispatch: + QuantizedCPU: ne_quantized_cpu + tags: [core, pointwise] + +- func: ne_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) 
+ structured_delegate: ne.Scalar_out + device_check: NoCheck # TensorIterator + variants: method + +- func: ne_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + structured_delegate: ne.Tensor_out + device_check: NoCheck # TensorIterator + variants: method + +# not_equal, alias for torch.ne +- func: not_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + +- func: not_equal.Scalar(Tensor self, Scalar other) -> Tensor + variants: method, function + +- func: not_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + +- func: not_equal.Tensor(Tensor self, Tensor other) -> Tensor + variants: method, function + +- func: not_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + variants: method + +- func: not_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + variants: method + +- func: eq.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + structured: True + structured_inherits: TensorIteratorBase + device_check: NoCheck # TensorIterator + dispatch: + CPU, CUDA: eq_Scalar_out + MPS: eq_scalar_out_mps + QuantizedCPU: eq_out_quantized_cpu + tags: pointwise + +- func: eq.Scalar(Tensor self, Scalar other) -> Tensor + structured_delegate: eq.Scalar_out + device_check: NoCheck # TensorIterator + variants: method, function + dispatch: + QuantizedCPU: eq_quantized_cpu + NestedTensorCPU, NestedTensorCUDA: eq_scalar_nested + tags: [core, pointwise] + +- func: eq.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + structured: True + structured_inherits: TensorIteratorBase + device_check: NoCheck # TensorIterator + dispatch: + CPU, CUDA: eq_Tensor_out + MPS: eq_tensor_out_mps + QuantizedCPU: eq_out_quantized_cpu + tags: pointwise + +- func: eq.Tensor(Tensor self, Tensor other) -> Tensor + structured_delegate: eq.Tensor_out + device_check: NoCheck # TensorIterator + variants: method, function + dispatch: + QuantizedCPU: eq_quantized_cpu + tags: [core, pointwise] + +- func: ge.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + structured: True + structured_inherits: TensorIteratorBase + device_check: NoCheck # TensorIterator + dispatch: + CPU, CUDA: ge_Scalar_out + MPS: ge_scalar_out_mps + QuantizedCPU: ge_out_quantized_cpu + tags: pointwise + +- func: ge.Scalar(Tensor self, Scalar other) -> Tensor + structured_delegate: ge.Scalar_out + device_check: NoCheck # TensorIterator + variants: method, function + dispatch: + QuantizedCPU: ge_quantized_cpu + NestedTensorCPU, NestedTensorCUDA: ge_scalar_nested + tags: [core, pointwise] + +- func: ge.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + structured: True + structured_inherits: TensorIteratorBase + device_check: NoCheck # TensorIterator + dispatch: + CPU, CUDA: ge_Tensor_out + MPS: ge_tensor_out_mps + QuantizedCPU: ge_out_quantized_cpu + tags: pointwise + +- func: ge.Tensor(Tensor self, Tensor other) -> Tensor + structured_delegate: ge.Tensor_out + device_check: NoCheck # TensorIterator + variants: method, function + dispatch: + QuantizedCPU: ge_quantized_cpu + tags: [core, pointwise] + +- func: ge_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + structured_delegate: ge.Scalar_out + device_check: NoCheck # TensorIterator + variants: method + +- func: ge_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) 
+ structured_delegate: ge.Tensor_out + device_check: NoCheck # TensorIterator + variants: method + +# greater_equal, alias for torch.ge +- func: greater_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + +- func: greater_equal.Scalar(Tensor self, Scalar other) -> Tensor + variants: method, function + +- func: greater_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + +- func: greater_equal.Tensor(Tensor self, Tensor other) -> Tensor + variants: method, function + +- func: greater_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + variants: method + +- func: greater_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + variants: method + +- func: le.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + structured: True + structured_inherits: TensorIteratorBase + device_check: NoCheck # TensorIterator + dispatch: + CPU, CUDA: le_Scalar_out + MPS: le_scalar_out_mps + QuantizedCPU: le_out_quantized_cpu + tags: pointwise + +- func: le.Scalar(Tensor self, Scalar other) -> Tensor + structured_delegate: le.Scalar_out + device_check: NoCheck # TensorIterator + variants: method, function + dispatch: + QuantizedCPU: le_quantized_cpu + tags: [core, pointwise] + +- func: le.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + structured: True + structured_inherits: TensorIteratorBase + device_check: NoCheck # TensorIterator + dispatch: + CPU, CUDA: le_Tensor_out + MPS: le_tensor_out_mps + QuantizedCPU: le_out_quantized_cpu + tags: pointwise + +- func: le.Tensor(Tensor self, Tensor other) -> Tensor + structured_delegate: le.Tensor_out + device_check: NoCheck # TensorIterator + variants: method, function + dispatch: + QuantizedCPU: le_quantized_cpu + tags: [core, pointwise] + +- func: le_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + structured_delegate: le.Scalar_out + device_check: NoCheck # TensorIterator + variants: method + +- func: le_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + structured_delegate: le.Tensor_out + device_check: NoCheck # TensorIterator + variants: method + +# less_equal, alias for torch.le +- func: less_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + +- func: less_equal.Scalar(Tensor self, Scalar other) -> Tensor + variants: method, function + +- func: less_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + +- func: less_equal.Tensor(Tensor self, Tensor other) -> Tensor + variants: method, function + +- func: less_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + variants: method + +- func: less_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + variants: method + +- func: gt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + structured: True + structured_inherits: TensorIteratorBase + device_check: NoCheck # TensorIterator + dispatch: + CPU, CUDA: gt_Scalar_out + MPS: gt_scalar_out_mps + QuantizedCPU: gt_out_quantized_cpu + tags: pointwise + +- func: gt.Scalar(Tensor self, Scalar other) -> Tensor + structured_delegate: gt.Scalar_out + device_check: NoCheck # TensorIterator + variants: method, function + dispatch: + QuantizedCPU: gt_quantized_cpu + NestedTensorCPU, NestedTensorCUDA: gt_scalar_nested + tags: [core, pointwise] + +- func: gt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
+ structured: True + structured_inherits: TensorIteratorBase + device_check: NoCheck # TensorIterator + dispatch: + CPU, CUDA: gt_Tensor_out + MPS: gt_tensor_out_mps + QuantizedCPU: gt_out_quantized_cpu + tags: pointwise + +- func: gt.Tensor(Tensor self, Tensor other) -> Tensor + structured_delegate: gt.Tensor_out + device_check: NoCheck # TensorIterator + variants: method, function + dispatch: + QuantizedCPU: gt_quantized_cpu + tags: [core, pointwise] + +- func: gt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + structured_delegate: gt.Scalar_out + device_check: NoCheck # TensorIterator + variants: method + +- func: gt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + structured_delegate: gt.Tensor_out + device_check: NoCheck # TensorIterator + variants: method + +# greater, alias for torch.gt +- func: greater.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + +- func: greater.Scalar(Tensor self, Scalar other) -> Tensor + variants: method, function + +- func: greater.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + +- func: greater.Tensor(Tensor self, Tensor other) -> Tensor + variants: method, function + +- func: greater_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + variants: method + +- func: greater_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + variants: method + +- func: lt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + structured: True + structured_inherits: TensorIteratorBase + device_check: NoCheck # TensorIterator + dispatch: + CPU, CUDA: lt_Scalar_out + MPS: lt_scalar_out_mps + QuantizedCPU: lt_out_quantized_cpu + tags: pointwise + +- func: lt.Scalar(Tensor self, Scalar other) -> Tensor + structured_delegate: lt.Scalar_out + device_check: NoCheck # TensorIterator + variants: method, function + dispatch: + QuantizedCPU: lt_quantized_cpu + tags: [core, pointwise] + +- func: lt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + structured: True + structured_inherits: TensorIteratorBase + device_check: NoCheck # TensorIterator + dispatch: + CPU, CUDA: lt_Tensor_out + MPS: lt_tensor_out_mps + QuantizedCPU: lt_out_quantized_cpu + tags: pointwise + +- func: lt.Tensor(Tensor self, Tensor other) -> Tensor + structured_delegate: lt.Tensor_out + device_check: NoCheck # TensorIterator + variants: method, function + dispatch: + QuantizedCPU: lt_quantized_cpu + tags: [core, pointwise] + +- func: lt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + structured_delegate: lt.Scalar_out + device_check: NoCheck # TensorIterator + variants: method + +- func: lt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + structured_delegate: lt.Tensor_out + device_check: NoCheck # TensorIterator + variants: method + +# less, alias for torch.lt +- func: less.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + +- func: less.Scalar(Tensor self, Scalar other) -> Tensor + variants: method, function + +- func: less.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + +- func: less.Tensor(Tensor self, Tensor other) -> Tensor + variants: method, function + +- func: less_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + variants: method + +- func: less_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + variants: method + +- func: take.out(Tensor self, Tensor index, *, Tensor(a!) out) -> Tensor(a!) 
+ dispatch: + CPU, CUDA: take_out + +- func: take(Tensor self, Tensor index) -> Tensor + variants: method, function + dispatch: + CPU, CUDA: take + +- func: take_along_dim.out(Tensor self, Tensor indices, int? dim=None, *, Tensor(a!) out) -> Tensor(a!) + +- func: take_along_dim(Tensor self, Tensor indices, int? dim=None) -> Tensor + variants: method, function + +- func: index_select.out(Tensor self, int dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CPU, QuantizedCPU: index_select_out_cpu_ + CUDA, QuantizedCUDA: index_select_out_cuda + MPS: index_select_out_mps + +- func: index_select(Tensor self, int dim, Tensor index) -> Tensor + variants: method, function + dispatch: + CPU: index_select_cpu_ + QuantizedCPU: index_select_quantized_cpu_ + CUDA: index_select_cuda + QuantizedCUDA: index_select_quantized_cuda + SparseCPU: index_select_sparse_cpu + SparseCUDA: index_select_sparse_cuda + MPS: index_select_mps + tags: core + +- func: index_select.dimname_out(Tensor self, Dimname dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!) + +- func: index_select.dimname(Tensor self, Dimname dim, Tensor index) -> Tensor + variants: method, function + +- func: index_select_backward(Tensor grad, SymInt[] self_sizes, int dim, Tensor index) -> Tensor + variants: function + device_check: NoCheck + device_guard: False + dispatch: + CompositeImplicitAutograd: index_select_backward_symint + +- func: masked_select.out(Tensor self, Tensor mask, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CPU: masked_select_out_cpu + CUDA: masked_select_out_cuda + MPS: masked_select_out_mps + tags: dynamic_output_shape + +- func: masked_select(Tensor self, Tensor mask) -> Tensor + variants: method, function + dispatch: + CPU: masked_select_cpu + CUDA: masked_select_cuda + MPS: masked_select_mps + tags: dynamic_output_shape + +- func: masked_select_backward(Tensor grad, Tensor input, Tensor mask) -> Tensor + variants: function + device_check: NoCheck + device_guard: False + +- func: nonzero.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CPU: nonzero_out_cpu + CUDA: nonzero_out_cuda + MPS: nonzero_out_mps + tags: dynamic_output_shape + +- func: nonzero(Tensor self) -> Tensor + variants: method, function + dispatch: + CPU: nonzero_cpu + CUDA: nonzero_cuda + MPS: nonzero_mps + tags: [dynamic_output_shape, core] + +- func: nonzero_static.out(Tensor self, *, int size, int fill_value=-1, Tensor(a!) out) -> Tensor(a!) + dispatch: + CPU: nonzero_static_out_cpu + +- func: nonzero_static(Tensor self, *, int size, int fill_value=-1) -> Tensor + variants: method, function + dispatch: + CPU: nonzero_static_cpu + +- func: nonzero_numpy(Tensor self) -> Tensor[] + variants: method, function + +- func: argwhere(Tensor self) -> Tensor + variants: method, function + tags: dynamic_output_shape + +- func: gather.out(Tensor self, int dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!) + structured: True + dispatch: + CPU, CUDA: gather_out + MPS: gather_out_mps + +- func: gather(Tensor self, int dim, Tensor index, *, bool sparse_grad=False) -> Tensor + variants: method, function + structured_delegate: gather.out + tags: core + +- func: gather_backward(Tensor grad, Tensor self, int dim, Tensor index, bool sparse_grad) -> Tensor + variants: function + device_check: NoCheck + device_guard: False + +- func: gather.dimname_out(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!) 
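+
+# NOTE: the gather overloads above define the schema behind torch.gather, where
+# (for dim=1) out[i][j] = self[i][index[i][j]]. Illustrative usage sketch, not
+# part of the upstream schema; assumes a standard PyTorch build:
+#   >>> import torch
+#   >>> src = torch.tensor([[1, 2], [3, 4]])
+#   >>> idx = torch.tensor([[0, 0], [1, 0]])
+#   >>> torch.gather(src, 1, idx)
+#   tensor([[1, 1],
+#           [4, 3]])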
+ +- func: gather.dimname(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False) -> Tensor + variants: method, function + +- func: _gather_sparse_backward(Tensor self, int dim, Tensor index, Tensor grad) -> Tensor + +- func: addcmul.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!) + structured: True + structured_inherits: TensorIteratorBase + device_check: NoCheck # TensorIterator + dispatch: + CPU, CUDA: addcmul_out + MPS: addcmul_out_mps + tags: pointwise + +- func: addcmul(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor + structured_delegate: addcmul.out + device_check: NoCheck # TensorIterator + variants: method, function + tags: pointwise + +- func: addcmul_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!) + structured_delegate: addcmul.out + device_check: NoCheck # TensorIterator + variants: method + tags: pointwise + +- func: addcdiv.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!) + structured: True + structured_inherits: TensorIteratorBase + device_check: NoCheck # TensorIterator + dispatch: + CPU, CUDA: addcdiv_out + MPS: addcdiv_out_mps + tags: pointwise + +- func: addcdiv(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor + structured_delegate: addcdiv.out + device_check: NoCheck # TensorIterator + variants: method, function + tags: pointwise + +- func: addcdiv_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!) + structured_delegate: addcdiv.out + device_check: NoCheck # TensorIterator + variants: method + tags: pointwise + +- func: cross_entropy_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, float label_smoothing=0.0) -> Tensor + python_module: nn + dispatch: + CompositeImplicitAutograd: cross_entropy_loss_symint + +- func: triangular_solve.X(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False, *, Tensor(a!) X, Tensor(b!) M) -> (Tensor(a!) solution, Tensor(b!) cloned_coefficient) + structured: True + dispatch: + CPU, CUDA: triangular_solve_out + MPS: triangular_solve_mps_out + SparseCsrCPU: triangular_solve_out_sparse_csr_cpu + SparseCsrCUDA: triangular_solve_out_sparse_csr_cuda + +- func: triangular_solve(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False) -> (Tensor solution, Tensor cloned_coefficient) + structured_delegate: triangular_solve.X + variants: method, function + +- func: _linalg_check_errors(Tensor info, str api_name, *, bool is_matrix) -> () + dispatch: + CompositeExplicitAutograd: _linalg_check_errors + +- func: linalg_solve_triangular.out(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False, Tensor(a!) out) -> Tensor(a!) + python_module: linalg + dispatch: + CPU, CUDA: linalg_solve_triangular_out + MPS: linalg_solve_triangular_mps_out + +- func: linalg_solve_triangular(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False) -> Tensor + python_module: linalg + variants: function + dispatch: + CPU, CUDA: linalg_solve_triangular + MPS: linalg_solve_triangular_mps + +- func: linalg_vander(Tensor x, *, SymInt? N=None) -> Tensor + python_module: linalg + dispatch: + CompositeImplicitAutograd: linalg_vander_symint + +- func: svd.U(Tensor self, bool some=True, bool compute_uv=True, *, Tensor(a!) U, Tensor(b!) S, Tensor(c!) V) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) 
V) + +- func: svd(Tensor self, bool some=True, bool compute_uv=True) -> (Tensor U, Tensor S, Tensor V) + variants: method, function + +# swapaxes, alias for transpose +- func: swapaxes(Tensor(a) self, int axis0, int axis1) -> Tensor(a) + variants: function, method + device_check: NoCheck + device_guard: False + +- func: swapaxes_(Tensor(a!) self, int axis0, int axis1) -> Tensor(a!) + variants: method + device_check: NoCheck + device_guard: False + tags: inplace_view + +# swapdims, alias for transpose +- func: swapdims(Tensor(a) self, int dim0, int dim1) -> Tensor(a) + variants: function, method + device_check: NoCheck + device_guard: False + +- func: swapdims_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!) + variants: method + device_check: NoCheck + device_guard: False + tags: inplace_view + +- func: cholesky.out(Tensor self, bool upper=False, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CPU, CUDA: cholesky_out + +- func: cholesky(Tensor self, bool upper=False) -> Tensor + variants: method, function + dispatch: + CPU, CUDA: cholesky + +- func: cholesky_solve.out(Tensor self, Tensor input2, bool upper=False, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CompositeExplicitAutograd: cholesky_solve_out + +- func: cholesky_solve(Tensor self, Tensor input2, bool upper=False) -> Tensor + variants: method, function + dispatch: + CompositeExplicitAutograd: cholesky_solve + +- func: _cholesky_solve_helper(Tensor self, Tensor A, bool upper) -> Tensor + variants: function + dispatch: + CPU: _cholesky_solve_helper_cpu + CUDA: _cholesky_solve_helper_cuda + autogen: _cholesky_solve_helper.out + +- func: cholesky_inverse(Tensor self, bool upper=False) -> Tensor + variants: method, function + dispatch: + CPU, CUDA: cholesky_inverse + +- func: cholesky_inverse.out(Tensor self, bool upper=False, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CPU, CUDA: cholesky_inverse_out + +- func: qr.Q(Tensor self, bool some=True, *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R) + +- func: qr(Tensor self, bool some=True) -> (Tensor Q, Tensor R) + variants: method, function + +- func: geqrf.a(Tensor self, *, Tensor(a!) a, Tensor(b!) tau) -> (Tensor(a!) a, Tensor(b!) tau) + dispatch: + CPU, CUDA: geqrf_out + +- func: geqrf(Tensor self) -> (Tensor a, Tensor tau) + variants: method, function + dispatch: + CPU, CUDA: geqrf + +# orgqr, alias for linalg_householder_product +- func: orgqr(Tensor self, Tensor input2) -> Tensor + variants: method, function + +- func: orgqr.out(Tensor self, Tensor input2, *, Tensor(a!) out) -> Tensor(a!) + +- func: ormqr.out(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CPU, CUDA: ormqr_out + +- func: ormqr(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False) -> Tensor + variants: method, function + dispatch: + CPU, CUDA: ormqr + +- func: _lu_with_info(Tensor self, bool pivot=True, bool check_errors=True) -> (Tensor LU, Tensor pivots, Tensor info) + variants: function + +- func: lu_solve.out(Tensor self, Tensor LU_data, Tensor LU_pivots, *, Tensor(a!) out) -> Tensor(a!) 
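+
+# NOTE: the lu_solve entries here consume the factorization produced by
+# torch.linalg.lu_factor. Illustrative usage sketch, not part of the upstream
+# schema; assumes a standard PyTorch build:
+#   >>> import torch
+#   >>> A = torch.tensor([[3.0, 1.0], [1.0, 2.0]])
+#   >>> b = torch.tensor([[9.0], [8.0]])
+#   >>> LU, pivots = torch.linalg.lu_factor(A)
+#   >>> x = torch.lu_solve(b, LU, pivots)   # solves A @ x = b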
+ +- func: lu_solve(Tensor self, Tensor LU_data, Tensor LU_pivots) -> Tensor + variants: method, function + +# lu_unpack +- func: lu_unpack(Tensor LU_data, Tensor LU_pivots, bool unpack_data=True, bool unpack_pivots=True) -> (Tensor P, Tensor L, Tensor U) + structured_delegate: lu_unpack.out + variants: function + +- func: lu_unpack.out(Tensor LU_data, Tensor LU_pivots, bool unpack_data=True, bool unpack_pivots=True, *, Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -> (Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) + variants: function + structured: True + dispatch: + CPU, CUDA: lu_unpack_out + +# TODO: remove dispatch section when porting TH CUDA to ATen +- func: multinomial.out(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) + tags: nondeterministic_seeded + dispatch: + CPU, CUDA: multinomial_out + MPS: multinomial_out_mps + +- func: multinomial(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None) -> Tensor + variants: method, function + dispatch: + CPU, CUDA: multinomial + MPS: multinomial_mps + tags: nondeterministic_seeded + +- func: lgamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: lgamma_out + MPS: lgamma_out_mps + tags: pointwise + +- func: lgamma_(Tensor(a!) self) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured_delegate: lgamma.out + variants: method + tags: pointwise + +- func: lgamma(Tensor self) -> Tensor + device_check: NoCheck # TensorIterator + structured_delegate: lgamma.out + variants: method, function + tags: pointwise + +- func: digamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: digamma_out + MPS: digamma_out_mps + tags: pointwise + +- func: digamma(Tensor self) -> Tensor + device_check: NoCheck # TensorIterator + structured_delegate: digamma.out + variants: method, function + tags: pointwise + +- func: polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: polygamma_out + MPS: polygamma_out_mps + tags: pointwise + +- func: polygamma(int n, Tensor self) -> Tensor + device_check: NoCheck # TensorIterator + structured_delegate: polygamma.out + variants: method, function + tags: pointwise + +- func: polygamma_(Tensor(a!) self, int n) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: method + dispatch: + CompositeExplicitAutograd: polygamma_ + tags: pointwise + +- func: erfinv(Tensor self) -> Tensor + device_check: NoCheck # TensorIterator + structured_delegate: erfinv.out + variants: method, function + dispatch: + SparseCPU, SparseCUDA: erfinv_sparse + SparseCsrCPU, SparseCsrCUDA: erfinv_sparse_csr + tags: pointwise + +- func: erfinv_(Tensor(a!) self) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured_delegate: erfinv.out + variants: method + dispatch: + SparseCPU, SparseCUDA: erfinv_sparse_ + SparseCsrCPU, SparseCsrCUDA: erfinv_sparse_csr_ + tags: pointwise + +- func: erfinv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+ device_check: NoCheck # TensorIterator + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: erfinv_out + MPS: erfinv_out_mps + SparseCPU, SparseCUDA: erfinv_sparse_out + SparseCsrCPU, SparseCsrCUDA: erfinv_sparse_csr_out + tags: pointwise + +- func: i0(Tensor self) -> Tensor + structured_delegate: i0.out + variants: function, method + tags: pointwise + +- func: i0_(Tensor(a!) self) -> Tensor(a!) + structured_delegate: i0.out + variants: function, method + tags: pointwise + +- func: i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: i0_out + tags: pointwise + +- func: sign(Tensor self) -> Tensor + device_check: NoCheck # TensorIterator + structured_delegate: sign.out + variants: function, method + dispatch: + SparseCPU, SparseCUDA: sign_sparse + SparseCsrCPU, SparseCsrCUDA: sign_sparse_csr + tags: [core, pointwise] + +- func: sign_(Tensor(a!) self) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured_delegate: sign.out + variants: method + dispatch: + SparseCPU, SparseCUDA: sign_sparse_ + SparseCsrCPU, SparseCsrCUDA: sign_sparse_csr_ + tags: pointwise + +- func: sign.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: sign_out + MPS: sign_out_mps + SparseCPU, SparseCUDA: sign_sparse_out + SparseCsrCPU, SparseCsrCUDA: sign_sparse_csr_out + tags: pointwise + +- func: signbit(Tensor self) -> Tensor + variants: function, method + structured_delegate: signbit.out + dispatch: + SparseCPU, SparseCUDA: signbit_sparse + SparseCsrCPU, SparseCsrCUDA: signbit_sparse_csr + tags: pointwise + +- func: signbit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU: signbit_out + CUDA: signbit_out + MPS: signbit_out_mps + SparseCPU, SparseCUDA: signbit_sparse_out + SparseCsrCPU, SparseCsrCUDA: signbit_sparse_csr_out + tags: pointwise + +- func: dist(Tensor self, Tensor other, Scalar p=2) -> Tensor + device_check: NoCheck # TensorIterator + variants: method, function + dispatch: + CompositeExplicitAutograd: dist + autogen: dist.out + +- func: atan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: atan2_out + MPS: atan2_out_mps + tags: [core, pointwise] + +- func: atan2_(Tensor(a!) self, Tensor other) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured_delegate: atan2.out + variants: method + tags: pointwise + +- func: atan2(Tensor self, Tensor other) -> Tensor + device_check: NoCheck # TensorIterator + structured_delegate: atan2.out + variants: method, function + tags: [core, pointwise] +# arctan2, alias of atan2 + +- func: arctan2(Tensor self, Tensor other) -> Tensor + variants: method, function + +- func: arctan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + +- func: arctan2_(Tensor(a!) self, Tensor other) -> Tensor(a!) + variants: method + +- func: lerp.Scalar_out(Tensor self, Tensor end, Scalar weight, *, Tensor(a!) out) -> Tensor(a!) 
+ device_check: NoCheck # TensorIterator + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: lerp_Scalar + MPS: lerp_Scalar_mps + tags: pointwise + +- func: lerp.Tensor_out(Tensor self, Tensor end, Tensor weight, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: lerp_Tensor + MPS: lerp_Tensor_mps + tags: pointwise + +- func: lerp.Scalar(Tensor self, Tensor end, Scalar weight) -> Tensor + device_check: NoCheck # TensorIterator + variants: method, function + structured_delegate: lerp.Scalar_out + tags: pointwise + +- func: lerp.Tensor(Tensor self, Tensor end, Tensor weight) -> Tensor + device_check: NoCheck # TensorIterator + variants: method, function + structured_delegate: lerp.Tensor_out + tags: pointwise + +- func: histc.out(Tensor self, int bins=100, Scalar min=0, Scalar max=0, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CPU, MPS: histogram_histc_out + CUDA: _histc_out_cuda + +- func: histc(Tensor self, int bins=100, Scalar min=0, Scalar max=0) -> Tensor + variants: method, function + dispatch: + CPU, MPS: histogram_histc + CUDA: _histc_cuda + +- func: histogram.bins_tensor_out(Tensor self, Tensor bins, *, Tensor? weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) bin_edges) + dispatch: + CPU, MPS: histogram_out + +- func: histogram.bins_tensor(Tensor self, Tensor bins, *, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor bin_edges) + variants: method, function + dispatch: + CPU, MPS: histogram + +- func: histogram.bin_ct_out(Tensor self, int bins=100, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) bin_edges) + dispatch: + CPU, MPS: histogram_out + +- func: histogram.bin_ct(Tensor self, int bins=100, *, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor bin_edges) + variants: method, function + dispatch: + CPU, MPS: histogram + +- func: _histogramdd_bin_edges(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False) -> Tensor[] + dispatch: + CPU, MPS: histogramdd_bin_edges + autogen: _histogramdd_bin_edges.out + +- func: _histogramdd_from_bin_cts(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False) -> Tensor + dispatch: + CPU, MPS: _histogramdd + autogen: _histogramdd_from_bin_cts.out + +- func: _histogramdd_from_bin_tensors(Tensor self, Tensor[] bins, *, Tensor? weight=None, bool density=False) -> Tensor + dispatch: + CPU, MPS: _histogramdd + autogen: _histogramdd_from_bin_tensors.out + +- func: histogramdd(Tensor self, int[] bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges) + +- func: histogramdd.int_bins(Tensor self, int bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges) + +- func: histogramdd.TensorList_bins(Tensor self, Tensor[] bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges) + +- func: fmod.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) 
+ device_check: NoCheck # TensorIterator + dispatch: + CompositeExplicitAutograd: fmod_out + tags: pointwise + +- func: fmod.Scalar(Tensor self, Scalar other) -> Tensor + device_check: NoCheck # TensorIterator + variants: method, function + dispatch: + CompositeExplicitAutograd: fmod + tags: [core, pointwise] + +- func: fmod_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: method + dispatch: + CompositeExplicitAutograd: fmod_ + tags: pointwise + +- func: fmod.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: fmod_out + MPS: fmod_mps_out + tags: pointwise + +- func: fmod.Tensor(Tensor self, Tensor other) -> Tensor + device_check: NoCheck # TensorIterator + structured_delegate: fmod.Tensor_out + variants: method, function + tags: [core, pointwise] + +- func: fmod_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: method + structured_delegate: fmod.Tensor_out + tags: pointwise + +- func: hypot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: hypot_out + MPS: hypot_out_mps + tags: pointwise + +- func: hypot(Tensor self, Tensor other) -> Tensor + structured_delegate: hypot.out + variants: method, function + tags: pointwise + +- func: hypot_(Tensor(a!) self, Tensor other) -> Tensor(a!) + structured_delegate: hypot.out + variants: method + tags: pointwise + +- func: igamma.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: igamma_out + tags: pointwise + +- func: igamma(Tensor self, Tensor other) -> Tensor + structured_delegate: igamma.out + variants: method, function + tags: pointwise + +- func: igamma_(Tensor(a!) self, Tensor other) -> Tensor(a!) + structured_delegate: igamma.out + variants: method + tags: pointwise + +- func: igammac.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: igammac_out + tags: pointwise + +- func: igammac(Tensor self, Tensor other) -> Tensor + structured_delegate: igammac.out + variants: method, function + tags: pointwise + +- func: igammac_(Tensor(a!) self, Tensor other) -> Tensor(a!) + structured_delegate: igammac.out + variants: method + tags: pointwise + +- func: nextafter.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA, MPS: nextafter_out + tags: pointwise + +- func: nextafter(Tensor self, Tensor other) -> Tensor + structured_delegate: nextafter.out + variants: method, function + tags: pointwise + +- func: nextafter_(Tensor(a!) self, Tensor other) -> Tensor(a!) + structured_delegate: nextafter.out + variants: method + tags: pointwise + +- func: remainder.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CompositeExplicitAutograd: remainder_out + tags: pointwise + +- func: remainder.Scalar(Tensor self, Scalar other) -> Tensor + variants: method, function + dispatch: + CompositeExplicitAutograd: remainder + tags: [core, pointwise] + +- func: remainder_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) 
+ variants: method + dispatch: + CompositeExplicitAutograd: remainder_ + tags: pointwise + +- func: remainder.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: remainder_out + MPS: remainder_out_mps + tags: pointwise + +- func: remainder.Tensor(Tensor self, Tensor other) -> Tensor + device_check: NoCheck # TensorIterator + structured_delegate: remainder.Tensor_out + variants: method, function + tags: [core, pointwise] + +- func: remainder_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured_delegate: remainder.Tensor_out + variants: method + tags: pointwise + +- func: remainder.Scalar_Tensor(Scalar self, Tensor other) -> Tensor + device_check: NoCheck # TensorIterator + variants: function + dispatch: + CPU, CUDA, MPS: remainder + autogen: remainder.Scalar_Tensor_out + tags: pointwise + +- func: min(Tensor self) -> Tensor + device_check: NoCheck # TensorIterator + variants: method, function + dispatch: + CPU, CUDA: min + MPS: min_mps + QuantizedCPU: min_quantized_cpu + +- func: min.unary_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + dispatch: + CPU, CUDA: min_unary_out + QuantizedCPU: min_quantized_unary_out + +- func: fmin(Tensor self, Tensor other) -> Tensor + structured_delegate: fmin.out + device_check: NoCheck # TensorIterator + variants: method, function + tags: pointwise + +- func: fmin.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + structured: True + structured_inherits: TensorIteratorBase + device_check: NoCheck # TensorIterator + dispatch: + CPU, CUDA, MPS: fmin_out + tags: pointwise + +- func: max(Tensor self) -> Tensor + device_check: NoCheck # TensorIterator + variants: method, function + dispatch: + CPU, CUDA: max + MPS: max_mps + QuantizedCPU: max_quantized_cpu + +- func: fmax(Tensor self, Tensor other) -> Tensor + structured_delegate: fmax.out + device_check: NoCheck # TensorIterator + variants: method, function + tags: pointwise + +- func: fmax.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + structured: True + structured_inherits: TensorIteratorBase + device_check: NoCheck # TensorIterator + dispatch: + CPU, CUDA, MPS: fmax_out + tags: pointwise + +- func: maximum(Tensor self, Tensor other) -> Tensor + structured_delegate: maximum.out + device_check: NoCheck # TensorIterator + variants: method, function + tags: [core, pointwise] + +- func: maximum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + structured: True + structured_inherits: TensorIteratorBase + device_check: NoCheck # TensorIterator + dispatch: + CPU, CUDA: maximum_out + MPS: maximum_out_mps + tags: pointwise + +# binary max, alias of maximum +# NOTE: max is not an alias for maximum, since there is also unary max +- func: max.other(Tensor self, Tensor other) -> Tensor + device_check: NoCheck # TensorIterator + variants: method, function + tags: pointwise + +- func: max.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + tags: pointwise + +- func: max.unary_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+ device_check: NoCheck # TensorIterator + dispatch: + CPU, CUDA: max_unary_out + QuantizedCPU: max_quantized_unary_out + +- func: minimum(Tensor self, Tensor other) -> Tensor + structured_delegate: minimum.out + device_check: NoCheck # TensorIterator + variants: method, function + tags: [core, pointwise] + +- func: minimum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + structured: True + structured_inherits: TensorIteratorBase + device_check: NoCheck # TensorIterator + dispatch: + CPU, CUDA: minimum_out + MPS: minimum_out_mps + tags: pointwise + +# binary min, alias for minimum +# NOTE: min is not an alias for minimum, since there is also unary min +- func: min.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + tags: pointwise + +- func: min.other(Tensor self, Tensor other) -> Tensor + device_check: NoCheck # TensorIterator + variants: method, function + tags: pointwise + +- func: quantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor + variants: method, function + +- func: quantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!) + +- func: quantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor + variants: method, function + +- func: quantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!) + +- func: nanquantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor + variants: method, function + +- func: nanquantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!) + +- func: nanquantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor + variants: method, function + +- func: nanquantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!) + +- func: sort.values(Tensor self, int dim=-1, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + device_check: NoCheck # TensorIterator + dispatch: + CompositeExplicitAutograd: sort_out + +- func: sort.values_stable(Tensor self, *, bool? stable, int dim=-1, bool descending=False, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + structured: True + dispatch: + CPU, CUDA: sort_stable_out + MPS: sort_stable_out_mps + +- func: sort(Tensor self, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices) + device_check: NoCheck # TensorIterator + variants: method, function + dispatch: + CompositeExplicitAutograd: sort + tags: core + +- func: sort.stable(Tensor self, *, bool? stable, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices) + structured_delegate: sort.values_stable + variants: method, function + dispatch: + QuantizedCPU: sort_quantized_cpu_stable + +- func: sort.dimname_values(Tensor self, Dimname dim, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + +- func: sort.dimname_values_stable(Tensor self, *, bool? stable, Dimname dim, bool descending=False, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) 
indices) + +- func: sort.dimname(Tensor self, Dimname dim, bool descending=False) -> (Tensor values, Tensor indices) + variants: method, function + +- func: sort.dimname_stable(Tensor self, *, bool? stable, Dimname dim, bool descending=False) -> (Tensor values, Tensor indices) + variants: method, function + +- func: msort.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + +- func: msort(Tensor self) -> Tensor + variants: method, function + +- func: argsort(Tensor self, int dim=-1, bool descending=False) -> Tensor + device_check: NoCheck # TensorIterator + variants: method, function + +- func: argsort.stable(Tensor self, *, bool stable, int dim=-1, bool descending=False) -> Tensor + device_check: NoCheck # TensorIterator + variants: method, function + dispatch: + CPU, CUDA, MPS: argsort_stable + autogen: argsort.stable_out + +- func: argsort.dimname(Tensor self, Dimname dim, bool descending=False) -> Tensor + variants: method, function + +- func: topk.values(Tensor self, SymInt k, int dim=-1, bool largest=True, bool sorted=True, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + structured: True + dispatch: + CPU: topk_out_cpu + CUDA: topk_out_cuda + MPS: topk_out_mps + +- func: topk(Tensor self, SymInt k, int dim=-1, bool largest=True, bool sorted=True) -> (Tensor values, Tensor indices) + variants: method, function + structured_delegate: topk.values + dispatch: + QuantizedCPU: topk_quantized_cpu + tags: core + +- func: all(Tensor self) -> Tensor + device_check: NoCheck # TensorIterator + structured_delegate: all.all_out + variants: method, function + +- func: all.all_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck + structured: True + dispatch: + CPU, CUDA: all_all_out + MPS: all_all_out_mps + +- func: any(Tensor self) -> Tensor + device_check: NoCheck # TensorIterator + structured_delegate: any.all_out + variants: method, function + dispatch: + SparseCPU, SparseCUDA: any_sparse + tags: core + +- func: any.all_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck + structured: True + dispatch: + CPU, CUDA: any_all_out + MPS: any_all_out_mps + +- func: renorm.out(Tensor self, Scalar p, int dim, Scalar maxnorm, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured: True + dispatch: + CPU, CUDA: renorm_out + MPS: renorm_out_mps + +- func: renorm(Tensor self, Scalar p, int dim, Scalar maxnorm) -> Tensor + device_check: NoCheck # TensorIterator + variants: method, function + structured_delegate: renorm.out + +- func: renorm_(Tensor(a!) self, Scalar p, int dim, Scalar maxnorm) -> Tensor(a!) + device_check: NoCheck # TensorIterator + variants: method + structured_delegate: renorm.out + +- func: unfold(Tensor(a) self, int dimension, int size, int step) -> Tensor(a) + variants: method + device_check: NoCheck + device_guard: False + dispatch: + CPU, CUDA, Meta, MPS: unfold + QuantizedCPU, QuantizedCUDA: unfold + +- func: unfold_backward(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step) -> Tensor + variants: function + dispatch: + CPU, CUDA: unfold_backward + autogen: unfold_backward.out + +- func: equal(Tensor self, Tensor other) -> bool + tags: [data_dependent_output, pointwise] + variants: method, function + dispatch: + CPU: cpu_equal + CUDA: cuda_equal + MPS: mps_equal + QuantizedCPU: equal_quantized_cpu + +- func: pow.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!) 
+ device_check: NoCheck # TensorIterator + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: pow_Tensor_Tensor_out + MPS: pow_tensor_tensor_out_mps + tags: pointwise + +- func: pow.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor + device_check: NoCheck # TensorIterator + structured_delegate: pow.Tensor_Tensor_out + variants: method, function + tags: [core, pointwise] + +- func: pow.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured: True + dispatch: + CPU, CUDA: pow_Scalar_out + MPS: pow_Scalar_out_mps + tags: pointwise + +- func: pow.Scalar(Scalar self, Tensor exponent) -> Tensor + device_check: NoCheck # TensorIterator + structured_delegate: pow.Scalar_out + tags: [core, pointwise] + +- func: pow.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: pow_Tensor_Scalar_out + SparseCPU, SparseCUDA: pow_out_sparse_scalar + MPS: pow_tensor_scalar_out_mps + tags: pointwise + +- func: pow.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor + device_check: NoCheck # TensorIterator + structured_delegate: pow.Tensor_Scalar_out + variants: function, method + dispatch: + SparseCPU, SparseCUDA: pow_sparse_scalar + tags: [core, pointwise] + +- func: pow_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured_delegate: pow.Tensor_Scalar_out + variants: method + tags: pointwise + +- func: pow_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured_delegate: pow.Tensor_Tensor_out + variants: method + tags: pointwise + +- func: float_power.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!) + tags: pointwise + +- func: float_power.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor + variants: function, method + tags: pointwise + +- func: float_power.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!) + tags: pointwise + +- func: float_power.Scalar(Scalar self, Tensor exponent) -> Tensor + tags: pointwise + +- func: float_power.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!) + tags: pointwise + +- func: float_power.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor + variants: function, method + tags: pointwise + +- func: float_power_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!) + variants: method + tags: pointwise + +- func: float_power_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!) + variants: method + tags: pointwise + +- func: normal_(Tensor(a!) self, float mean=0, float std=1, *, Generator? generator=None) -> Tensor(a!) + device_check: NoCheck # TensorIterator + tags: nondeterministic_seeded + variants: method + dispatch: + CPU, CUDA: normal_ + MPS: normal_mps_ + Meta: normal_meta_ + SparseCsrCPU, SparseCsrCUDA: normal_sparse_csr_ + NestedTensorCPU, NestedTensorCUDA: normal_nested_ + autogen: normal.out + +# Only used by the functionalization pass. +# Normally, the codegen would be able to generate a normal() NativeFunction, +# but we can't due to overload ambiguity with normal.Tensor_float. +- func: normal_functional(Tensor self, float mean=0, float std=1, *, Generator? 
generator=None) -> Tensor + device_check: NoCheck # TensorIterator + tags: nondeterministic_seeded + dispatch: + CompositeExplicitAutograd: normal_functional + +- func: normal.Tensor_float_out(Tensor mean, float std=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) + tags: nondeterministic_seeded + dispatch: + CPU, CUDA: normal_out + MPS: normal_mps_out + Meta: normal_out_meta + +- func: normal.Tensor_float(Tensor mean, float std=1, *, Generator? generator=None) -> Tensor + dispatch: + CPU, CUDA: normal + MPS: normal_mps + Meta: normal_meta + tags: nondeterministic_seeded + +- func: normal.float_Tensor_out(float mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) + dispatch: + CPU, CUDA: normal_out + Meta: normal_out_meta + MPS: normal_mps_out + tags: nondeterministic_seeded + +- func: normal.float_Tensor(float mean, Tensor std, *, Generator? generator=None) -> Tensor + dispatch: + CPU, CUDA: normal + MPS: normal_mps + Meta: normal_meta + tags: nondeterministic_seeded + +- func: normal.Tensor_Tensor_out(Tensor mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) + dispatch: + CPU, CUDA: normal_out + Meta: normal_out_meta + MPS: normal_mps_out + tags: nondeterministic_seeded + +- func: normal.Tensor_Tensor(Tensor mean, Tensor std, *, Generator? generator=None) -> Tensor + dispatch: + CPU, CUDA: normal + MPS: normal_mps + Meta: normal_meta + tags: nondeterministic_seeded + +- func: normal.float_float(float mean, float std, SymInt[] size, *, Generator? generator=None, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + dispatch: + CompositeExplicitAutograd: normal + tags: nondeterministic_seeded + +- func: normal.float_float_out(float mean, float std, SymInt[] size, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) + dispatch: + CompositeExplicitAutograd: normal_out + tags: nondeterministic_seeded + +- func: alias(Tensor(a) self) -> Tensor(a) + variants: method, function + dispatch: + CompositeExplicitAutograd: alias + tags: core + +- func: _amp_foreach_non_finite_check_and_unscale_(Tensor(a!)[] self, Tensor(b!) found_inf, Tensor inv_scale) -> () + variants: function + dispatch: + CUDA: _amp_foreach_non_finite_check_and_unscale_cuda_ + CPU: _amp_foreach_non_finite_check_and_unscale_cpu_ + autogen: _amp_foreach_non_finite_check_and_unscale, _amp_foreach_non_finite_check_and_unscale.out + +- func: _amp_update_scale_(Tensor(a!) self, Tensor(b!) growth_tracker, Tensor found_inf, float scale_growth_factor, float scale_backoff_factor, int growth_interval) -> Tensor(a!) + variants: function + dispatch: + CUDA: _amp_update_scale_cuda_ + CPU: _amp_update_scale_cpu_ + autogen: _amp_update_scale, _amp_update_scale.out + + #- func: _cat(Tensor[] tensors, int dim=0) -> Tensor + #dispatch: + #CPU: _cat_cpu + #CUDA: cat_cuda + #MPS: cat_mps + #QuantizedCPU: cat_quantized_cpu + + #- func: _cat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) 
+ #dispatch: + #CPU: _cat_out_cpu + #CUDA: cat_out_cuda + #QuantizedCPU: cat_out_quantized_cpu + +- func: _foreach_add.Scalar(Tensor[] self, Scalar scalar) -> Tensor[] + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_add_scalar_kernel_slow + CUDA: foreach_tensor_add_scalar_kernel_cuda + +- func: _foreach_add_.Scalar(Tensor(a!)[] self, Scalar scalar) -> () + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_add_scalar_kernel_slow_ + CUDA: foreach_tensor_add_scalar_kernel_cuda_ + autogen: _foreach_add.Scalar_out + +- func: _foreach_add.List(Tensor[] self, Tensor[] other, *, Scalar alpha=1) -> Tensor[] + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_add_list_kernel_slow + CUDA: foreach_tensor_add_list_kernel_cuda + +- func: _foreach_add_.List(Tensor(a!)[] self, Tensor[] other, *, Scalar alpha=1) -> () + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_add_list_kernel_slow_ + CUDA: foreach_tensor_add_list_kernel_cuda_ + autogen: _foreach_add.List_out + +- func: _foreach_add.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[] + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_add_scalarlist_kernel_slow + CUDA: foreach_tensor_add_scalarlist_kernel_cuda + +- func: _foreach_add_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> () + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_add_scalarlist_kernel_slow_ + CUDA: foreach_tensor_add_scalarlist_kernel_cuda_ + autogen: _foreach_add.ScalarList_out + +- func: _foreach_add.Tensor(Tensor[] self, Tensor other, *, Scalar alpha=1) -> Tensor[] + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_add_tensor_kernel_slow + CUDA: foreach_tensor_add_tensor_kernel_cuda + +- func: _foreach_add_.Tensor(Tensor(a!)[] self, Tensor other, *, Scalar alpha=1) -> () + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_add_tensor_kernel_slow_ + CUDA: foreach_tensor_add_tensor_kernel_cuda_ + autogen: _foreach_add.Tensor_out + +- func: _foreach_sub.Scalar(Tensor[] self, Scalar scalar) -> Tensor[] + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_sub_scalar_kernel_slow + CUDA: foreach_tensor_sub_scalar_kernel_cuda + +- func: _foreach_sub_.Scalar(Tensor(a!)[] self, Scalar scalar) -> () + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_sub_scalar_kernel_slow_ + CUDA: foreach_tensor_sub_scalar_kernel_cuda_ + autogen: _foreach_sub.Scalar_out + +- func: _foreach_sub.List(Tensor[] self, Tensor[] other, *, Scalar alpha=1) -> Tensor[] + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices 
+ variants: function + dispatch: + CPU: foreach_tensor_sub_list_kernel_slow + CUDA: foreach_tensor_sub_list_kernel_cuda + +- func: _foreach_sub_.List(Tensor(a!)[] self, Tensor[] other, *, Scalar alpha=1) -> () + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_sub_list_kernel_slow_ + CUDA: foreach_tensor_sub_list_kernel_cuda_ + autogen: _foreach_sub.List_out + +- func: _foreach_sub.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[] + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_sub_scalarlist_kernel_slow + CUDA: foreach_tensor_sub_scalarlist_kernel_cuda + +- func: _foreach_sub_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> () + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_sub_scalarlist_kernel_slow_ + CUDA: foreach_tensor_sub_scalarlist_kernel_cuda_ + autogen: _foreach_sub.ScalarList_out + +- func: _foreach_mul.Scalar(Tensor[] self, Scalar scalar) -> Tensor[] + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_mul_scalar_kernel_slow + CUDA: foreach_tensor_mul_scalar_kernel_cuda + +- func: _foreach_mul_.Scalar(Tensor(a!)[] self, Scalar scalar) -> () + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_mul_scalar_kernel_slow_ + CUDA: foreach_tensor_mul_scalar_kernel_cuda_ + autogen: _foreach_mul.Scalar_out + +- func: _foreach_mul.List(Tensor[] self, Tensor[] other) -> Tensor[] + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_mul_list_kernel_slow + CUDA: foreach_tensor_mul_list_kernel_cuda + +- func: _foreach_mul_.List(Tensor(a!)[] self, Tensor[] other) -> () + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_mul_list_kernel_slow_ + CUDA: foreach_tensor_mul_list_kernel_cuda_ + autogen: _foreach_mul.List_out + +- func: _foreach_mul.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[] + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_mul_scalarlist_kernel_slow + CUDA: foreach_tensor_mul_scalarlist_kernel_cuda + +- func: _foreach_mul_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> () + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_mul_scalarlist_kernel_slow_ + CUDA: foreach_tensor_mul_scalarlist_kernel_cuda_ + autogen: _foreach_mul.ScalarList_out + +- func: _foreach_mul.Tensor(Tensor[] self, Tensor other) -> Tensor[] + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_mul_tensor_kernel_slow + CUDA: foreach_tensor_mul_tensor_kernel_cuda + +- func: _foreach_mul_.Tensor(Tensor(a!)[] self, Tensor other) -> () + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + 
dispatch: + CPU: foreach_tensor_mul_tensor_kernel_slow_ + CUDA: foreach_tensor_mul_tensor_kernel_cuda_ + autogen: _foreach_mul.Tensor_out + +- func: _foreach_div.Scalar(Tensor[] self, Scalar scalar) -> Tensor[] + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_div_scalar_kernel_slow + CUDA: foreach_tensor_div_scalar_kernel_cuda + +- func: _foreach_div_.Scalar(Tensor(a!)[] self, Scalar scalar) -> () + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_div_scalar_kernel_slow_ + CUDA: foreach_tensor_div_scalar_kernel_cuda_ + autogen: _foreach_div.Scalar_out + +- func: _foreach_div.List(Tensor[] self, Tensor[] other) -> Tensor[] + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_div_list_kernel_slow + CUDA: foreach_tensor_div_list_kernel_cuda + +- func: _foreach_div_.List(Tensor(a!)[] self, Tensor[] other) -> () + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_div_list_kernel_slow_ + CUDA: foreach_tensor_div_list_kernel_cuda_ + autogen: _foreach_div.List_out + +- func: _foreach_div.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[] + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_div_scalarlist_kernel_slow + CUDA: foreach_tensor_div_scalarlist_kernel_cuda + +- func: _foreach_div_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> () + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_div_scalarlist_kernel_slow_ + CUDA: foreach_tensor_div_scalarlist_kernel_cuda_ + autogen: _foreach_div.ScalarList_out + +- func: _foreach_div.Tensor(Tensor[] self, Tensor other) -> Tensor[] + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_div_tensor_kernel_slow + CUDA: foreach_tensor_div_tensor_kernel_cuda + +- func: _foreach_div_.Tensor(Tensor(a!)[] self, Tensor other) -> () + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_div_tensor_kernel_slow_ + CUDA: foreach_tensor_div_tensor_kernel_cuda_ + autogen: _foreach_div.Tensor_out + +- func: _foreach_clamp_max.Scalar(Tensor[] self, Scalar scalar) -> Tensor[] + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_clamp_max_scalar_kernel_slow + CUDA: foreach_tensor_clamp_max_scalar_kernel_cuda + +- func: _foreach_clamp_max_.Scalar(Tensor(a!)[] self, Scalar scalar) -> () + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_clamp_max_scalar_kernel_slow_ + CUDA: foreach_tensor_clamp_max_scalar_kernel_cuda_ + autogen: _foreach_clamp_max.Scalar_out + +- func: _foreach_clamp_max.List(Tensor[] self, Tensor[] other) -> Tensor[] + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + 
variants: function + dispatch: + CPU: foreach_tensor_clamp_max_list_kernel_slow + CUDA: foreach_tensor_clamp_max_list_kernel_cuda + +- func: _foreach_clamp_max_.List(Tensor(a!)[] self, Tensor[] other) -> () + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_clamp_max_list_kernel_slow_ + CUDA: foreach_tensor_clamp_max_list_kernel_cuda_ + autogen: _foreach_clamp_max.List_out + +- func: _foreach_clamp_max.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[] + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_clamp_max_scalarlist_kernel_slow + CUDA: foreach_tensor_clamp_max_scalarlist_kernel_cuda + +- func: _foreach_clamp_max_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> () + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_clamp_max_scalarlist_kernel_slow_ + CUDA: foreach_tensor_clamp_max_scalarlist_kernel_cuda_ + autogen: _foreach_clamp_max.ScalarList_out + +- func: _foreach_clamp_min.Scalar(Tensor[] self, Scalar scalar) -> Tensor[] + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_clamp_min_scalar_kernel_slow + CUDA: foreach_tensor_clamp_min_scalar_kernel_cuda + +- func: _foreach_clamp_min_.Scalar(Tensor(a!)[] self, Scalar scalar) -> () + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_clamp_min_scalar_kernel_slow_ + CUDA: foreach_tensor_clamp_min_scalar_kernel_cuda_ + autogen: _foreach_clamp_min.Scalar_out + +- func: _foreach_clamp_min.List(Tensor[] self, Tensor[] other) -> Tensor[] + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_clamp_min_list_kernel_slow + CUDA: foreach_tensor_clamp_min_list_kernel_cuda + +- func: _foreach_clamp_min_.List(Tensor(a!)[] self, Tensor[] other) -> () + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_clamp_min_list_kernel_slow_ + CUDA: foreach_tensor_clamp_min_list_kernel_cuda_ + autogen: _foreach_clamp_min.List_out + +- func: _foreach_clamp_min.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[] + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_clamp_min_scalarlist_kernel_slow + CUDA: foreach_tensor_clamp_min_scalarlist_kernel_cuda + +- func: _foreach_clamp_min_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> () + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_clamp_min_scalarlist_kernel_slow_ + CUDA: foreach_tensor_clamp_min_scalarlist_kernel_cuda_ + autogen: _foreach_clamp_min.ScalarList_out + +# foreach_minimum/maximum dispatches to clamp_max/min +- func: _foreach_maximum.Scalar(Tensor[] self, Scalar scalar) -> Tensor[] + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: 
foreach_tensor_clamp_min_scalar_kernel_slow + CUDA: foreach_tensor_clamp_min_scalar_kernel_cuda + +- func: _foreach_maximum_.Scalar(Tensor(a!)[] self, Scalar scalar) -> () + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_clamp_min_scalar_kernel_slow_ + CUDA: foreach_tensor_clamp_min_scalar_kernel_cuda_ + autogen: _foreach_maximum.Scalar_out + +# foreach_minimum/maximum dispatches to clamp_max/min +- func: _foreach_maximum.List(Tensor[] self, Tensor[] other) -> Tensor[] + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_clamp_min_list_kernel_slow + CUDA: foreach_tensor_clamp_min_list_kernel_cuda + +- func: _foreach_maximum_.List(Tensor(a!)[] self, Tensor[] other) -> () + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_clamp_min_list_kernel_slow_ + CUDA: foreach_tensor_clamp_min_list_kernel_cuda_ + autogen: _foreach_maximum.List_out + +# foreach_minimum/maximum dispatches to clamp_max/min +- func: _foreach_maximum.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[] + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_clamp_min_scalarlist_kernel_slow + CUDA: foreach_tensor_clamp_min_scalarlist_kernel_cuda + +- func: _foreach_maximum_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> () + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_clamp_min_scalarlist_kernel_slow_ + CUDA: foreach_tensor_clamp_min_scalarlist_kernel_cuda_ + autogen: _foreach_maximum.ScalarList_out + +- func: _foreach_minimum.Scalar(Tensor[] self, Scalar scalar) -> Tensor[] + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_clamp_max_scalar_kernel_slow + CUDA: foreach_tensor_clamp_max_scalar_kernel_cuda + +- func: _foreach_minimum_.Scalar(Tensor(a!)[] self, Scalar scalar) -> () + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_clamp_max_scalar_kernel_slow_ + CUDA: foreach_tensor_clamp_max_scalar_kernel_cuda_ + autogen: _foreach_minimum.Scalar_out + +- func: _foreach_minimum.List(Tensor[] self, Tensor[] other) -> Tensor[] + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_clamp_max_list_kernel_slow + CUDA: foreach_tensor_clamp_max_list_kernel_cuda + +- func: _foreach_minimum_.List(Tensor(a!)[] self, Tensor[] other) -> () + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_clamp_max_list_kernel_slow_ + CUDA: foreach_tensor_clamp_max_list_kernel_cuda_ + autogen: _foreach_minimum.List_out + +- func: _foreach_minimum.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[] + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_clamp_max_scalarlist_kernel_slow + CUDA: 
foreach_tensor_clamp_max_scalarlist_kernel_cuda + +- func: _foreach_minimum_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> () + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_clamp_max_scalarlist_kernel_slow_ + CUDA: foreach_tensor_clamp_max_scalarlist_kernel_cuda_ + autogen: _foreach_minimum.ScalarList_out + +- func: _foreach_addcdiv.Scalar(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> Tensor[] + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_addcdiv_scalar_slow + CUDA: foreach_tensor_addcdiv_scalar_cuda + +- func: _foreach_addcdiv.ScalarList(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> Tensor[] + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_addcdiv_scalarlist_slow + CUDA: foreach_tensor_addcdiv_scalarlist_cuda + +- func: _foreach_addcdiv.Tensor(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> Tensor[] + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_addcdiv_tensor_slow + CUDA: foreach_tensor_addcdiv_tensor_cuda + +- func: _foreach_addcdiv_.Scalar(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> () + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_addcdiv_scalar_slow_ + CUDA: foreach_tensor_addcdiv_scalar_cuda_ + autogen: _foreach_addcdiv.Scalar_out + +- func: _foreach_addcdiv_.ScalarList(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> () + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_addcdiv_scalarlist_slow_ + CUDA: foreach_tensor_addcdiv_scalarlist_cuda_ + autogen: _foreach_addcdiv.ScalarList_out + +- func: _foreach_addcdiv_.Tensor(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> () + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_addcdiv_tensor_slow_ + CUDA: foreach_tensor_addcdiv_tensor_cuda_ + autogen: _foreach_addcdiv.Tensor_out + +- func: _foreach_addcmul.Scalar(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> Tensor[] + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_addcmul_scalar_slow + CUDA: foreach_tensor_addcmul_scalar_cuda + +- func: _foreach_addcmul.ScalarList(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> Tensor[] + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_addcmul_scalarlist_slow + CUDA: foreach_tensor_addcmul_scalarlist_cuda + +- func: _foreach_addcmul.Tensor(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> Tensor[] + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_addcmul_tensor_slow + 
CUDA: foreach_tensor_addcmul_tensor_cuda + +- func: _foreach_addcmul_.Scalar(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> () + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_addcmul_scalar_slow_ + CUDA: foreach_tensor_addcmul_scalar_cuda_ + autogen: _foreach_addcmul.Scalar_out + +- func: _foreach_addcmul_.ScalarList(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> () + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_addcmul_scalarlist_slow_ + CUDA: foreach_tensor_addcmul_scalarlist_cuda_ + autogen: _foreach_addcmul.ScalarList_out + +- func: _foreach_addcmul_.Tensor(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> () + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_addcmul_tensor_slow_ + CUDA: foreach_tensor_addcmul_tensor_cuda_ + autogen: _foreach_addcmul.Tensor_out + +- func: _foreach_abs(Tensor[] self) -> Tensor[] + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_abs_slow + CUDA: foreach_tensor_abs_cuda + +- func: _foreach_abs_(Tensor(a!)[] self) -> () + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_abs_slow_ + CUDA: foreach_tensor_abs_cuda_ + autogen: _foreach_abs.out + +- func: _foreach_acos(Tensor[] self) -> Tensor[] + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_acos_slow + CUDA: foreach_tensor_acos_cuda + +- func: _foreach_acos_(Tensor(a!)[] self) -> () + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_acos_slow_ + CUDA: foreach_tensor_acos_cuda_ + autogen: _foreach_acos.out + +- func: _foreach_asin(Tensor[] self) -> Tensor[] + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_asin_slow + CUDA: foreach_tensor_asin_cuda + +- func: _foreach_asin_(Tensor(a!)[] self) -> () + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_asin_slow_ + CUDA: foreach_tensor_asin_cuda_ + autogen: _foreach_asin.out + +- func: _foreach_atan(Tensor[] self) -> Tensor[] + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_atan_slow + CUDA: foreach_tensor_atan_cuda + +- func: _foreach_atan_(Tensor(a!)[] self) -> () + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_atan_slow_ + CUDA: foreach_tensor_atan_cuda_ + autogen: _foreach_atan.out + +- func: _foreach_ceil(Tensor[] self) -> Tensor[] + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_ceil_slow + CUDA: 
foreach_tensor_ceil_cuda + +- func: _foreach_ceil_(Tensor(a!)[] self) -> () + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_ceil_slow_ + CUDA: foreach_tensor_ceil_cuda_ + autogen: _foreach_ceil.out + +- func: _foreach_cos(Tensor[] self) -> Tensor[] + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_cos_slow + CUDA: foreach_tensor_cos_cuda + +- func: _foreach_cos_(Tensor(a!)[] self) -> () + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_cos_slow_ + CUDA: foreach_tensor_cos_cuda_ + autogen: _foreach_cos.out + +- func: _foreach_cosh(Tensor[] self) -> Tensor[] + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_cosh_slow + CUDA: foreach_tensor_cosh_cuda + +- func: _foreach_cosh_(Tensor(a!)[] self) -> () + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_cosh_slow_ + CUDA: foreach_tensor_cosh_cuda_ + autogen: _foreach_cosh.out + +- func: _foreach_erf(Tensor[] self) -> Tensor[] + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_erf_slow + CUDA: foreach_tensor_erf_cuda + +- func: _foreach_erf_(Tensor(a!)[] self) -> () + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_erf_slow_ + CUDA: foreach_tensor_erf_cuda_ + autogen: _foreach_erf.out + +- func: _foreach_erfc(Tensor[] self) -> Tensor[] + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_erfc_slow + CUDA: foreach_tensor_erfc_cuda + +- func: _foreach_erfc_(Tensor(a!)[] self) -> () + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_erfc_slow_ + CUDA: foreach_tensor_erfc_cuda_ + autogen: _foreach_erfc.out + +- func: _foreach_exp(Tensor[] self) -> Tensor[] + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_exp_slow + CUDA: foreach_tensor_exp_cuda + +- func: _foreach_exp_(Tensor(a!)[] self) -> () + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_exp_slow_ + CUDA: foreach_tensor_exp_cuda_ + autogen: _foreach_exp.out + +- func: _foreach_expm1(Tensor[] self) -> Tensor[] + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_expm1_slow + CUDA: foreach_tensor_expm1_cuda + +- func: _foreach_expm1_(Tensor(a!)[] self) -> () + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_expm1_slow_ + CUDA: foreach_tensor_expm1_cuda_ + autogen: _foreach_expm1.out + +- func: _foreach_floor(Tensor[] self) -> Tensor[] + 
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_floor_slow + CUDA: foreach_tensor_floor_cuda + +- func: _foreach_floor_(Tensor(a!)[] self) -> () + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_floor_slow_ + CUDA: foreach_tensor_floor_cuda_ + autogen: _foreach_floor.out + +- func: _foreach_frac(Tensor[] self) -> Tensor[] + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_frac_slow + CUDA: foreach_tensor_frac_cuda + +- func: _foreach_frac_(Tensor(a!)[] self) -> () + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_frac_slow_ + CUDA: foreach_tensor_frac_cuda_ + autogen: _foreach_frac.out + +- func: _foreach_lerp.List(Tensor[] self, Tensor[] tensors1, Tensor[] weights) -> Tensor[] + device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices + variants: function + dispatch: + CPU: foreach_tensor_ternary_lerp_slow + CUDA: foreach_tensor_lerp_ternary_cuda + autogen: _foreach_lerp.List_out + +- func: _foreach_lerp_.List(Tensor(a!)[] self, Tensor[] tensors1, Tensor[] weights) -> () + device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices + variants: function + dispatch: + CPU: foreach_tensor_ternary_lerp_slow_ + CUDA: foreach_tensor_lerp_ternary_cuda_ + autogen: _foreach_lerp.List_out + +- func: _foreach_lerp.Scalar(Tensor[] self, Tensor[] tensors1, Scalar weight) -> Tensor[] + device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices + variants: function + dispatch: + CPU: foreach_tensor_lerp_list_kernel_slow + CUDA: foreach_tensor_lerp_list_cuda + autogen: _foreach_lerp.Scalar_out + +- func: _foreach_lerp_.Scalar(Tensor(a!)[] self, Tensor[] tensors1, Scalar weight) -> () + device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices + variants: function + dispatch: + CPU: foreach_tensor_lerp_list_kernel_slow_ + CUDA: foreach_tensor_lerp_list_cuda_ + autogen: _foreach_lerp.Scalar_out + +- func: _foreach_lgamma(Tensor[] self) -> Tensor[] + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_lgamma_slow + CUDA: foreach_tensor_lgamma_cuda + +- func: _foreach_lgamma_(Tensor(a!)[] self) -> () + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_lgamma_slow_ + CUDA: foreach_tensor_lgamma_cuda_ + autogen: _foreach_lgamma.out + +- func: _foreach_log(Tensor[] self) -> Tensor[] + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_log_slow + CUDA: foreach_tensor_log_cuda + +- func: _foreach_log_(Tensor(a!)[] self) -> () + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_log_slow_ + CUDA: foreach_tensor_log_cuda_ + autogen: _foreach_log.out + +- func: _foreach_log10(Tensor[] self) -> Tensor[] + device_check: 
NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_log10_slow + CUDA: foreach_tensor_log10_cuda + +- func: _foreach_log10_(Tensor(a!)[] self) -> () + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_log10_slow_ + CUDA: foreach_tensor_log10_cuda_ + autogen: _foreach_log10.out + +- func: _foreach_log1p(Tensor[] self) -> Tensor[] + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_log1p_slow + CUDA: foreach_tensor_log1p_cuda + +- func: _foreach_log1p_(Tensor(a!)[] self) -> () + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_log1p_slow_ + CUDA: foreach_tensor_log1p_cuda_ + autogen: _foreach_log1p.out + +- func: _foreach_log2(Tensor[] self) -> Tensor[] + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_log2_slow + CUDA: foreach_tensor_log2_cuda + +- func: _foreach_log2_(Tensor(a!)[] self) -> () + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_log2_slow_ + CUDA: foreach_tensor_log2_cuda_ + autogen: _foreach_log2.out + +- func: _foreach_neg(Tensor[] self) -> Tensor[] + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_neg_slow + CUDA: foreach_tensor_neg_cuda + +- func: _foreach_neg_(Tensor(a!)[] self) -> () + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_neg_slow_ + CUDA: foreach_tensor_neg_cuda_ + autogen: _foreach_neg.out + +- func: _foreach_norm.Scalar(Tensor[] self, Scalar ord=2) -> Tensor[] + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_norm_slow + CUDA: foreach_tensor_norm_cuda + autogen: _foreach_norm.Scalar_out + +- func: _foreach_pow.List(Tensor[] self, Tensor[] exponent) -> Tensor[] + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_pow_list_kernel_slow + CUDA: foreach_tensor_pow_list_kernel_cuda + +- func: _foreach_pow.Scalar(Tensor[] self, Scalar exponent) -> Tensor[] + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_pow_scalar_kernel_slow + CUDA: foreach_tensor_pow_scalar_kernel_cuda + +- func: _foreach_pow.ScalarList(Tensor[] self, Scalar[] exponent) -> Tensor[] + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_pow_scalarlist_kernel_slow + CUDA: foreach_tensor_pow_scalarlist_kernel_cuda + +- func: _foreach_pow.ScalarAndTensor(Scalar self, Tensor[] exponent) -> Tensor[] + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: 
foreach_scalar_pow_list_kernel_slow + CUDA: foreach_scalar_pow_list_kernel_cuda + +- func: _foreach_pow_.List(Tensor(a!)[] self, Tensor[] exponent) -> () + device_check: NoCheck + variants: function + dispatch: + CPU: foreach_tensor_pow_list_kernel_slow_ + CUDA: foreach_tensor_pow_list_kernel_cuda_ + autogen: _foreach_pow.List_out + +- func: _foreach_pow_.Scalar(Tensor(a!)[] self, Scalar exponent) -> () + device_check: NoCheck + variants: function + dispatch: + CPU: foreach_tensor_pow_scalar_kernel_slow_ + CUDA: foreach_tensor_pow_scalar_kernel_cuda_ + autogen: _foreach_pow.Scalar_out + +- func: _foreach_pow_.ScalarList(Tensor(a!)[] self, Scalar[] exponent) -> () + device_check: NoCheck + variants: function + dispatch: + CPU: foreach_tensor_pow_scalarlist_kernel_slow_ + CUDA: foreach_tensor_pow_scalarlist_kernel_cuda_ + autogen: _foreach_pow.ScalarList_out + +- func: _foreach_reciprocal(Tensor[] self) -> Tensor[] + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_reciprocal_slow + CUDA: foreach_tensor_reciprocal_cuda + +- func: _foreach_reciprocal_(Tensor(a!)[] self) -> () + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_reciprocal_slow_ + CUDA: foreach_tensor_reciprocal_cuda_ + autogen: _foreach_reciprocal.out + +- func: _foreach_round(Tensor[] self) -> Tensor[] + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_round_slow + CUDA: foreach_tensor_round_cuda + +- func: _foreach_round_(Tensor(a!)[] self) -> () + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_round_slow_ + CUDA: foreach_tensor_round_cuda_ + autogen: _foreach_round.out + +- func: _foreach_sigmoid(Tensor[] self) -> Tensor[] + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_sigmoid_slow + CUDA: foreach_tensor_sigmoid_cuda + +- func: _foreach_sigmoid_(Tensor(a!)[] self) -> () + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_sigmoid_slow_ + CUDA: foreach_tensor_sigmoid_cuda_ + autogen: _foreach_sigmoid.out + +- func: _foreach_sign(Tensor[] self) -> Tensor[] + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_sign_slow + CUDA: foreach_tensor_sign_cuda + +- func: _foreach_sign_(Tensor(a!)[] self) -> () + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_sign_slow_ + CUDA: foreach_tensor_sign_cuda_ + autogen: _foreach_sign.out + +- func: _foreach_sin(Tensor[] self) -> Tensor[] + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_sin_slow + CUDA: foreach_tensor_sin_cuda + +- func: _foreach_sin_(Tensor(a!)[] self) -> () + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_sin_slow_ + CUDA: 
foreach_tensor_sin_cuda_ + autogen: _foreach_sin.out + +- func: _foreach_sinh(Tensor[] self) -> Tensor[] + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_sinh_slow + CUDA: foreach_tensor_sinh_cuda + +- func: _foreach_sinh_(Tensor(a!)[] self) -> () + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_sinh_slow_ + CUDA: foreach_tensor_sinh_cuda_ + autogen: _foreach_sinh.out + +- func: _foreach_sqrt(Tensor[] self) -> Tensor[] + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_sqrt_slow + CUDA: foreach_tensor_sqrt_cuda + +- func: _foreach_sqrt_(Tensor(a!)[] self) -> () + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_sqrt_slow_ + CUDA: foreach_tensor_sqrt_cuda_ + autogen: _foreach_sqrt.out + +- func: _foreach_tan(Tensor[] self) -> Tensor[] + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_tan_slow + CUDA: foreach_tensor_tan_cuda + +- func: _foreach_tan_(Tensor(a!)[] self) -> () + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_tan_slow_ + CUDA: foreach_tensor_tan_cuda_ + autogen: _foreach_tan.out + +- func: _foreach_tanh(Tensor[] self) -> Tensor[] + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_tanh_slow + CUDA: foreach_tensor_tanh_cuda + +- func: _foreach_tanh_(Tensor(a!)[] self) -> () + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_tanh_slow_ + CUDA: foreach_tensor_tanh_cuda_ + autogen: _foreach_tanh.out + +- func: _foreach_trunc(Tensor[] self) -> Tensor[] + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_trunc_slow + CUDA: foreach_tensor_trunc_cuda + +- func: _foreach_trunc_(Tensor(a!)[] self) -> () + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_trunc_slow_ + CUDA: foreach_tensor_trunc_cuda_ + autogen: _foreach_trunc.out + +- func: _foreach_zero_(Tensor(a!)[] self) -> () + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_zero_slow_ + CUDA: foreach_tensor_zero_cuda_ + autogen: _foreach_zero, _foreach_zero.out + +- func: _foreach_copy_(Tensor(a!)[] self, Tensor[] src, bool non_blocking=False) -> () + device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices + variants: function + dispatch: + CPU: foreach_tensor_copy_list_kernel_slow_ + CUDA: foreach_tensor_copy_list_kernel_cuda_ + autogen: _foreach_copy, _foreach_copy.out + +- func: bucketize.Tensor(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor + dispatch: + CPU: bucketize_cpu + CUDA: bucketize_cuda + 
MPS: bucketize_mps + +- func: bucketize.Tensor_out(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!) + dispatch: + CPU: bucketize_out_cpu + CUDA: bucketize_out_cuda + MPS: bucketize_out_mps + +- func: bucketize.Scalar(Scalar self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor + dispatch: + CPU: bucketize_cpu + CUDA: bucketize_cuda + MPS: bucketize_mps + autogen: bucketize.Scalar_out + +- func: searchsorted.Tensor(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None) -> Tensor + dispatch: + CPU: searchsorted_cpu + CUDA: searchsorted_cuda + MPS: searchsorted_mps + +- func: searchsorted.Tensor_out(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!) + dispatch: + CPU: searchsorted_out_cpu + CUDA: searchsorted_out_cuda + MPS: searchsorted_out_mps + +- func: searchsorted.Scalar(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None) -> Tensor + dispatch: + CPU: searchsorted_cpu + CUDA: searchsorted_cuda + MPS: searchsorted_mps + +- func: searchsorted.Scalar_out(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!) + dispatch: + CPU: searchsorted_out_cpu + CUDA: searchsorted_out_cuda + MPS: searchsorted_out_mps + +- func: _convert_indices_from_coo_to_csr(Tensor self, int size, *, bool out_int32=False) -> Tensor + structured_delegate: _convert_indices_from_coo_to_csr.out + +- func: _convert_indices_from_coo_to_csr.out(Tensor self, int size, *, bool out_int32=False, Tensor(a!) out) -> Tensor(a!) + structured: True + dispatch: + CPU: _convert_indices_from_coo_to_csr_structured_cpu + CUDA: _convert_indices_from_coo_to_csr_structured_cuda + +- func: _convert_indices_from_csr_to_coo(Tensor crow_indices, Tensor col_indices, *, bool out_int32=False, bool transpose=False) -> Tensor + structured_delegate: _convert_indices_from_csr_to_coo.out + +- func: _convert_indices_from_csr_to_coo.out(Tensor crow_indices, Tensor col_indices, *, bool out_int32=False, bool transpose=False, Tensor(a!) out) -> Tensor(a!) + structured: True + dispatch: + CPU: _convert_indices_from_csr_to_coo_structured_cpu + CUDA: _convert_indices_from_csr_to_coo_structured_cuda + +## NN wrappers + +- func: mse_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured: True + structured_inherits: TensorIteratorBase + python_module: nn + dispatch: + CPU, CUDA: mse_loss_out + MPS: mse_loss_out_mps + +- func: mse_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor + device_check: NoCheck # TensorIterator + structured_delegate: mse_loss.out + python_module: nn + +- func: mse_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!) 
+ python_module: nn + dispatch: + CPU, CUDA: mse_loss_backward_out + MPS: mse_loss_backward_out_mps + +- func: mse_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor + python_module: nn + dispatch: + CPU, CUDA: mse_loss_backward + MPS: mse_loss_backward_mps + +- func: l1_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor + python_module: nn + +- func: multi_margin_loss.out(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) + python_module: nn + dispatch: + CPU: multi_margin_loss_cpu_out + CUDA: multi_margin_loss_cuda_out + +- func: multi_margin_loss(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor? weight=None, int reduction=Mean) -> Tensor + python_module: nn + dispatch: + CPU: multi_margin_loss_cpu + CUDA: multi_margin_loss_cuda + +- func: multi_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!) + python_module: nn + dispatch: + CPU: multi_margin_loss_cpu_backward_out + CUDA: multi_margin_loss_cuda_backward_out + +- func: multi_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor? weight=None, int reduction=Mean) -> Tensor + python_module: nn + dispatch: + CPU: multi_margin_loss_cpu_backward + CUDA: multi_margin_loss_cuda_backward + +- func: multilabel_margin_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) + python_module: nn + +- func: multilabel_margin_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor + python_module: nn + +- func: multilabel_margin_loss_forward.output(Tensor self, Tensor target, int reduction, *, Tensor(a!) output, Tensor(b!) is_target) -> (Tensor(a!), Tensor(b!)) + python_module: nn + dispatch: + CPU: multilabel_margin_loss_forward_out_cpu + CUDA: multilabel_margin_loss_forward_out_cuda + +- func: multilabel_margin_loss_forward(Tensor self, Tensor target, int reduction) -> (Tensor output, Tensor is_target) + python_module: nn + dispatch: + CPU: multilabel_margin_loss_forward_cpu + CUDA: multilabel_margin_loss_forward_cuda + +- func: multilabel_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target, *, Tensor(a!) grad_input) -> Tensor(a!) + python_module: nn + dispatch: + CPU: multilabel_margin_loss_backward_cpu_out + CUDA: multilabel_margin_loss_backward_cuda_out + +- func: multilabel_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target) -> Tensor + python_module: nn + dispatch: + CPU: multilabel_margin_loss_backward_cpu + CUDA: multilabel_margin_loss_backward_cuda + +- func: nll_loss.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!) + python_module: nn + +- func: nll_loss_nd(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor + python_module: nn + dispatch: + CompositeImplicitAutograd: nll_loss_nd_symint + +- func: nll_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor + python_module: nn + dispatch: + CompositeImplicitAutograd: nll_loss_symint + +- func: nll_loss_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) 
total_weight) -> (Tensor(a!), Tensor(b!)) + python_module: nn + structured: True + dispatch: + CPU: nll_loss_forward_out_cpu + CUDA: nll_loss_forward_out_cuda + MPS: nll_loss_forward_out_mps + +- func: nll_loss_forward(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index) -> (Tensor output, Tensor total_weight) + python_module: nn + structured_delegate: nll_loss_forward.output + +- func: nll_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!) + python_module: nn + structured: True + dispatch: + CPU: nll_loss_backward_out_cpu + CUDA: nll_loss_backward_out_cuda + MPS: nll_loss_backward_out_mps + +- func: nll_loss_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight) -> Tensor + python_module: nn + structured_delegate: nll_loss_backward.grad_input + +- func: nll_loss2d.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!) + python_module: nn + +- func: nll_loss2d(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor + python_module: nn + dispatch: + CompositeImplicitAutograd: nll_loss2d_symint + +- func: nll_loss2d_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!)) + python_module: nn + dispatch: + CPU: nll_loss2d_forward_out_cpu + CUDA: nll_loss2d_forward_out_cuda + MPS: nll_loss2d_forward_out_mps + +- func: nll_loss2d_forward(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index) -> (Tensor output, Tensor total_weight) + python_module: nn + dispatch: + CPU: nll_loss2d_forward_cpu + CUDA: nll_loss2d_forward_cuda + MPS: nll_loss2d_forward_mps + +- func: nll_loss2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!) + python_module: nn + dispatch: + CPU: nll_loss2d_backward_out_cpu + CUDA: nll_loss2d_backward_out_cuda + MPS: nll_loss2d_backward_out_mps + +- func: nll_loss2d_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight) -> Tensor + python_module: nn + dispatch: + CPU: nll_loss2d_backward_cpu + CUDA: nll_loss2d_backward_cuda + MPS: nll_loss2d_backward_mps + +- func: smooth_l1_loss.out(Tensor self, Tensor target, int reduction=Mean, float beta=1.0, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured: True + structured_inherits: TensorIteratorBase + python_module: nn + dispatch: + CPU, CUDA: smooth_l1_loss_out + MPS: smooth_l1_loss_out_mps + +- func: smooth_l1_loss(Tensor self, Tensor target, int reduction=Mean, float beta=1.0) -> Tensor + device_check: NoCheck # TensorIterator + structured_delegate: smooth_l1_loss.out + python_module: nn + +- func: smooth_l1_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta, *, Tensor(a!) grad_input) -> Tensor(a!) 
+ python_module: nn + dispatch: + CPU: smooth_l1_loss_backward_out + CUDA: smooth_l1_loss_backward_out + MPS: smooth_l1_loss_backward_out_mps + +- func: smooth_l1_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta) -> Tensor + python_module: nn + dispatch: + CompositeExplicitAutograd: smooth_l1_loss_backward + +- func: huber_loss.out(Tensor self, Tensor target, int reduction=Mean, float delta=1.0, *, Tensor(a!) out) -> Tensor(a!) + python_module: nn + dispatch: + CPU, CUDA: huber_loss_out + MPS: huber_loss_out_mps + +- func: huber_loss(Tensor self, Tensor target, int reduction=Mean, float delta=1.0) -> Tensor + python_module: nn + dispatch: + CPU, CUDA: huber_loss + MPS: huber_loss_mps + +- func: huber_loss_backward.out(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta, *, Tensor(a!) grad_input) -> Tensor(a!) + python_module: nn + dispatch: + CPU, CUDA: huber_loss_backward_out + MPS: huber_loss_backward_out_mps + +- func: huber_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta) -> Tensor + python_module: nn + dispatch: + CompositeExplicitAutograd: huber_loss_backward + +- func: soft_margin_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) + python_module: nn + dispatch: + CompositeExplicitAutograd: soft_margin_loss_out + +- func: soft_margin_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor + python_module: nn + dispatch: + CompositeExplicitAutograd: soft_margin_loss + +- func: soft_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!) + python_module: nn + dispatch: + CompositeExplicitAutograd: soft_margin_loss_backward_out + +- func: soft_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor + python_module: nn + dispatch: + CompositeExplicitAutograd: soft_margin_loss_backward + +- func: elu.out(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1, *, Tensor(a!) out) -> Tensor(a!) + structured: True + structured_inherits: TensorIteratorBase + device_check: NoCheck # TensorIterator + python_module: nn + dispatch: + CPU, CUDA: elu_out + MPS: elu_out_mps + +- func: elu(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor + structured_delegate: elu.out + device_check: NoCheck # TensorIterator + python_module: nn + +- func: elu_backward.grad_input(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, bool is_result, Tensor self_or_result, *, Tensor(a!) grad_input) -> Tensor(a!) + structured: True + structured_inherits: TensorIteratorBase + python_module: nn + dispatch: + CPU, CUDA: elu_backward_out + MPS: elu_backward_out_mps + +- func: elu_backward(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, bool is_result, Tensor self_or_result) -> Tensor + structured_delegate: elu_backward.grad_input + python_module: nn + +- func: elu_(Tensor(a!) self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor(a!) + structured_delegate: elu.out + device_check: NoCheck # TensorIterator + python_module: nn + +- func: glu.out(Tensor self, int dim=-1, *, Tensor(a!) out) -> Tensor(a!) 
+ structured: True + structured_inherits: TensorIteratorBase + python_module: nn + dispatch: + CPU, CUDA: glu_out + MPS: glu_out_mps + +- func: glu(Tensor self, int dim=-1) -> Tensor + structured_delegate: glu.out + device_check: NoCheck # TensorIterator + python_module: nn + +- func: glu_backward.grad_input(Tensor grad_output, Tensor self, int dim, *, Tensor(a!) grad_input) -> Tensor(a!) + python_module: nn + dispatch: + CPU: glu_backward_cpu_out + CUDA: glu_backward_cuda_out + MPS: glu_backward_mps_out + +- func: glu_backward(Tensor grad_output, Tensor self, int dim) -> Tensor + python_module: nn + dispatch: + CPU: glu_backward_cpu + CUDA: glu_backward_cuda + MPS: glu_backward_mps + +- func: glu_jvp(Tensor glu, Tensor x, Tensor dx, int dim) -> Tensor + python_module: nn + dispatch: + CPU, CUDA: glu_jvp + autogen: glu_jvp.out + +- func: glu_backward_jvp(Tensor grad_x, Tensor grad_glu, Tensor x, Tensor dgrad_glu, Tensor dx, int dim) -> Tensor + python_module: nn + dispatch: + CPU, CUDA: glu_backward_jvp + autogen: glu_backward_jvp.out + +- func: hardsigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + structured: True + structured_inherits: TensorIteratorBase + device_check: NoCheck # TensorIterator + python_module: nn + dispatch: + CPU, CUDA: hardsigmoid_out + MPS: hardsigmoid_out_mps + QuantizedCPU: hardsigmoid_out_quantized_cpu + +- func: hardsigmoid(Tensor self) -> Tensor + structured_delegate: hardsigmoid.out + device_check: NoCheck # TensorIterator + python_module: nn + dispatch: + QuantizedCPU: hardsigmoid_quantized_cpu + +- func: hardsigmoid_(Tensor(a!) self) -> Tensor(a!) + structured_delegate: hardsigmoid.out + device_check: NoCheck # TensorIterator + python_module: nn + +- func: hardsigmoid_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!) + structured: True + structured_inherits: TensorIteratorBase + python_module: nn + dispatch: + CPU, CUDA: hardsigmoid_backward_out + MPS: hardsigmoid_backward_out_mps + +- func: hardsigmoid_backward(Tensor grad_output, Tensor self) -> Tensor + structured_delegate: hardsigmoid_backward.grad_input + python_module: nn + +- func: hardtanh.out(Tensor self, Scalar min_val=-1, Scalar max_val=1, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + python_module: nn + dispatch: + CPU, CUDA, MPS: hardtanh_out + QuantizedCPU: hardtanh_out_quantized_cpu + +- func: hardtanh(Tensor self, Scalar min_val=-1, Scalar max_val=1) -> Tensor + device_check: NoCheck # TensorIterator + python_module: nn + dispatch: + CPU, CUDA, MPS: hardtanh + QuantizedCPU: hardtanh_quantized_cpu + tags: core + +- func: hardtanh_backward.grad_input(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val, *, Tensor(a!) grad_input) -> Tensor(a!) + python_module: nn + dispatch: + CPU, CUDA: hardtanh_backward_out + MPS: hardtanh_backward_out_mps + +- func: hardtanh_backward(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val) -> Tensor + python_module: nn + dispatch: + CPU, CUDA: hardtanh_backward + MPS: hardtanh_backward_mps + +- func: hardtanh_(Tensor(a!) self, Scalar min_val=-1, Scalar max_val=1) -> Tensor(a!) + device_check: NoCheck # TensorIterator + python_module: nn + dispatch: + CPU, CUDA, MPS: hardtanh_ + QuantizedCPU: hardtanh_quantized_cpu_ + +- func: hardswish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+ device_check: NoCheck # TensorIterator + python_module: nn + dispatch: + CPU, CUDA: hardswish_out + MPS: hardswish_out_mps + +- func: hardswish(Tensor self) -> Tensor + device_check: NoCheck # TensorIterator + python_module: nn + dispatch: + CPU, CUDA: hardswish + MPS: hardswish_mps + +- func: hardswish_(Tensor(a!) self) -> Tensor(a!) + device_check: NoCheck # TensorIterator + python_module: nn + dispatch: + CPU, CUDA: hardswish_ + MPS: hardswish_mps_ + +- func: hardswish_backward(Tensor grad_output, Tensor self) -> Tensor + python_module: nn + dispatch: + CPU, CUDA: hardswish_backward + MPS: hardswish_backward_mps + autogen: hardswish_backward.out + +- func: leaky_relu.out(Tensor self, Scalar negative_slope=0.01, *, Tensor(a!) out) -> Tensor(a!) + structured: True + structured_inherits: TensorIteratorBase + device_check: NoCheck # TensorIterator + python_module: nn + dispatch: + CPU, CUDA: leaky_relu_out + MPS: leaky_relu_out_mps + QuantizedCPU: leaky_relu_out_quantized_cpu + +- func: leaky_relu(Tensor self, Scalar negative_slope=0.01) -> Tensor + structured_delegate: leaky_relu.out + device_check: NoCheck # TensorIterator + python_module: nn + dispatch: + QuantizedCPU: leaky_relu_quantized_cpu + tags: core + +- func: leaky_relu_backward.grad_input(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result, *, Tensor(a!) grad_input) -> Tensor(a!) + structured: True + structured_inherits: TensorIteratorBase + python_module: nn + dispatch: + CPU, CUDA: leaky_relu_backward_out + MPS: leaky_relu_backward_out_mps + +- func: leaky_relu_backward(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result) -> Tensor + structured_delegate: leaky_relu_backward.grad_input + python_module: nn + +- func: leaky_relu_(Tensor(a!) self, Scalar negative_slope=0.01) -> Tensor(a!) + structured_delegate: leaky_relu.out + device_check: NoCheck # TensorIterator + python_module: nn + dispatch: + QuantizedCPU: leaky_relu_quantized_cpu_ + +- func: log_sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + python_module: nn + +- func: log_sigmoid(Tensor self) -> Tensor + device_check: NoCheck # TensorIterator + python_module: nn + +- func: log_sigmoid_forward.output(Tensor self, *, Tensor(a!) output, Tensor(b!) buffer) -> (Tensor(a!), Tensor(b!)) + device_check: NoCheck # TensorIterator + python_module: nn + dispatch: + CPU: log_sigmoid_forward_out_cpu + CUDA: log_sigmoid_forward_out_cuda + MPS: log_sigmoid_forward_out_mps + +- func: log_sigmoid_forward(Tensor self) -> (Tensor output, Tensor buffer) + device_check: NoCheck # TensorIterator + python_module: nn + dispatch: + CPU: log_sigmoid_forward_cpu + CUDA: log_sigmoid_forward_cuda + MPS: log_sigmoid_forward_mps + +- func: log_sigmoid_backward.grad_input(Tensor grad_output, Tensor self, Tensor buffer, *, Tensor(a!) grad_input) -> Tensor(a!) + python_module: nn + dispatch: + CPU: log_sigmoid_backward_cpu_out + CUDA: log_sigmoid_backward_cuda_out + MPS: log_sigmoid_backward_mps_out + +- func: log_sigmoid_backward(Tensor grad_output, Tensor self, Tensor buffer) -> Tensor + python_module: nn + dispatch: + CPU: log_sigmoid_backward_cpu + CUDA: log_sigmoid_backward_cuda + MPS: log_sigmoid_backward_mps + +- func: rrelu_with_noise.out(Tensor self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!) 
+ python_module: nn + tags: nondeterministic_seeded + dispatch: + CPU: rrelu_with_noise_out_cpu + CUDA: rrelu_with_noise_out_cuda + +- func: rrelu_with_noise(Tensor self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor + python_module: nn + dispatch: + CPU: rrelu_with_noise_cpu + CUDA: rrelu_with_noise_cuda + tags: nondeterministic_seeded + +- func: rrelu_with_noise_backward(Tensor grad_output, Tensor self, Tensor noise, Scalar lower, Scalar upper, bool training, bool self_is_result) -> Tensor + python_module: nn + dispatch: + CompositeExplicitAutograd: rrelu_with_noise_backward + autogen: rrelu_with_noise_backward.out + +- func: rrelu_with_noise_(Tensor(a!) self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor(a!) + python_module: nn + tags: nondeterministic_seeded + dispatch: + CPU: rrelu_with_noise_cpu_ + CUDA: rrelu_with_noise_cuda_ + +- func: softplus.out(Tensor self, Scalar beta=1, Scalar threshold=20, *, Tensor(a!) out) -> Tensor(a!) + structured: True + structured_inherits: TensorIteratorBase + device_check: NoCheck # TensorIterator + python_module: nn + dispatch: + CPU, CUDA: softplus_out + MPS: softplus_out_mps + +- func: softplus(Tensor self, Scalar beta=1, Scalar threshold=20) -> Tensor + structured_delegate: softplus.out + device_check: NoCheck # TensorIterator + python_module: nn + +- func: softplus_backward.grad_input(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold, *, Tensor(a!) grad_input) -> Tensor(a!) + structured: True + structured_inherits: TensorIteratorBase + python_module: nn + dispatch: + CPU, CUDA: softplus_backward_out + MPS: softplus_backward_out_mps + +- func: softplus_backward(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold) -> Tensor + structured_delegate: softplus_backward.grad_input + python_module: nn + +- func: softshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!) + structured: True + structured_inherits: TensorIteratorBase + device_check: NoCheck # TensorIterator + python_module: nn + dispatch: + CPU, CUDA: softshrink_out + MPS: softshrink_out_mps + +- func: softshrink(Tensor self, Scalar lambd=0.5) -> Tensor + structured_delegate: softshrink.out + device_check: NoCheck # TensorIterator + python_module: nn + +- func: softshrink_backward.grad_input(Tensor grad_output, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!) + structured: True + structured_inherits: TensorIteratorBase + python_module: nn + dispatch: + CPU, CUDA: softshrink_backward_out + MPS: softshrink_backward_out_mps + +- func: softshrink_backward(Tensor grad_output, Tensor self, Scalar lambd) -> Tensor + structured_delegate: softshrink_backward.grad_input + python_module: nn + +- func: adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!) + python_module: nn + dispatch: + CPU: adaptive_avg_pool2d_out_cpu + CUDA: adaptive_avg_pool2d_out_cuda + MPS: adaptive_avg_pool2d_out_mps + MkldnnCPU: mkldnn_adaptive_avg_pool2d_out_stub + +- func: adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor + python_module: nn + dispatch: + CompositeImplicitAutograd: adaptive_avg_pool2d_symint + +- func: mkldnn_adaptive_avg_pool2d(Tensor self, int[2] output_size) -> Tensor + dispatch: + MkldnnCPU: mkldnn_adaptive_avg_pool2d + +- func: mkldnn_adaptive_avg_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!) 
+ dispatch: + MkldnnCPU: mkldnn_adaptive_avg_pool2d_out + +- func: mkldnn_adaptive_avg_pool2d_backward(Tensor grad_output, Tensor self) -> Tensor + dispatch: + MkldnnCPU: mkldnn_adaptive_avg_pool2d_backward + autogen: mkldnn_adaptive_avg_pool2d_backward.out + +- func: _adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor + dispatch: + CPU: adaptive_avg_pool2d_cpu + CUDA: adaptive_avg_pool2d_cuda + MPS: adaptive_avg_pool2d_mps + QuantizedCPU: adaptive_avg_pool2d_quantized_cpu + QuantizedCUDA: adaptive_avg_pool2d_quantized_cuda + autogen: _adaptive_avg_pool2d.out + tags: core + +- func: _adaptive_avg_pool2d_backward(Tensor grad_output, Tensor self) -> Tensor + python_module: nn + dispatch: + CPU: adaptive_avg_pool2d_backward_cpu + CUDA: adaptive_avg_pool2d_backward_cuda + MPS: adaptive_avg_pool2d_backward_mps + autogen: _adaptive_avg_pool2d_backward.out + tags: core + +- func: adaptive_avg_pool3d.out(Tensor self, SymInt[3] output_size, *, Tensor(a!) out) -> Tensor(a!) + python_module: nn + dispatch: + CPU: adaptive_avg_pool3d_out_cpu + CUDA: adaptive_avg_pool3d_out_cuda + QuantizedCPU: adaptive_avg_pool3d_out_quantized_cpu + +- func: adaptive_avg_pool3d(Tensor self, SymInt[3] output_size) -> Tensor + python_module: nn + dispatch: + CompositeImplicitAutograd: adaptive_avg_pool3d_symint + +- func: _adaptive_avg_pool3d(Tensor self, SymInt[3] output_size) -> Tensor + dispatch: + CPU: adaptive_avg_pool3d_cpu + CUDA: adaptive_avg_pool3d_cuda + QuantizedCPU: adaptive_avg_pool3d_quantized_cpu + autogen: _adaptive_avg_pool3d.out + tags: core + +- func: adaptive_avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!) + python_module: nn + dispatch: + CPU: adaptive_avg_pool3d_backward_out_cpu + CUDA: adaptive_avg_pool3d_backward_out_cuda + +- func: _adaptive_avg_pool3d_backward(Tensor grad_output, Tensor self) -> Tensor + python_module: nn + dispatch: + CPU: adaptive_avg_pool3d_backward_cpu + CUDA: adaptive_avg_pool3d_backward_cuda + autogen: _adaptive_avg_pool3d_backward.out + +# Return: (Tensor output, Tensor indices) +- func: adaptive_max_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) + python_module: nn + structured: True + dispatch: + CPU: adaptive_max_pool2d_out_cpu + CUDA: adaptive_max_pool2d_out_cuda + MPS: adaptive_max_pool2d_out_mps + +# Return: (Tensor output, Tensor indices) +- func: adaptive_max_pool2d(Tensor self, int[2] output_size) -> (Tensor, Tensor) + python_module: nn + structured_delegate: adaptive_max_pool2d.out + +- func: adaptive_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) + python_module: nn + structured: True + dispatch: + CPU: adaptive_max_pool2d_backward_out_cpu + CUDA: adaptive_max_pool2d_backward_out_cuda + MPS: adaptive_max_pool2d_backward_out_mps + +- func: adaptive_max_pool2d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor + python_module: nn + structured_delegate: adaptive_max_pool2d_backward.grad_input + +# Return: (Tensor output, Tensor indices) +- func: adaptive_max_pool3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out, Tensor(b!) 
indices) -> (Tensor(a!), Tensor(b!)) + python_module: nn + structured: True + dispatch: + CPU: adaptive_max_pool3d_out_cpu + CUDA: adaptive_max_pool3d_out_cuda + +# Return: (Tensor output, Tensor indices) +- func: adaptive_max_pool3d(Tensor self, int[3] output_size) -> (Tensor, Tensor) + python_module: nn + structured_delegate: adaptive_max_pool3d.out + +- func: adaptive_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) + python_module: nn + structured: True + dispatch: + CPU: adaptive_max_pool3d_backward_out_cpu + CUDA: adaptive_max_pool3d_backward_out_cuda + +- func: adaptive_max_pool3d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor + python_module: nn + structured_delegate: adaptive_max_pool3d_backward.grad_input + +- func: avg_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!) + python_module: nn + structured: True + precomputed: + - kernel_size -> int kH, int kW + - stride -> int dH, int dW + - padding -> int padH, int padW + dispatch: + CPU: avg_pool2d_out_cpu + CUDA: avg_pool2d_out_cuda + MPS: avg_pool2d_out_mps + MkldnnCPU: mkldnn_avg_pool2d_out + +- func: avg_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor + python_module: nn + structured_delegate: avg_pool2d.out + dispatch: + MkldnnCPU: mkldnn_avg_pool2d + QuantizedCPU: avg_pool2d_quantized_cpu + tags: core + +- func: avg_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!) + python_module: nn + structured: True + dispatch: + CPU: avg_pool2d_backward_out_cpu + CUDA: avg_pool2d_backward_out_cuda + MPS: avg_pool2d_backward_out_mps + MkldnnCPU: mkldnn_avg_pool2d_backward_out + +- func: avg_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override) -> Tensor + python_module: nn + structured_delegate: avg_pool2d_backward.grad_input + dispatch: + MkldnnCPU: mkldnn_avg_pool2d_backward + tags: core + +- func: avg_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!) + python_module: nn + structured: True + dispatch: + CPU: avg_pool3d_out_cpu + CUDA: avg_pool3d_out_cuda + MkldnnCPU: mkldnn_avg_pool3d_out + +- func: avg_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor + python_module: nn + structured_delegate: avg_pool3d.out + dispatch: + MkldnnCPU: mkldnn_avg_pool3d + QuantizedCPU: avg_pool3d_quantized_cpu + tags: core + +- func: avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!) 
+ python_module: nn + structured: True + dispatch: + CPU: avg_pool3d_backward_out_cpu + CUDA: avg_pool3d_backward_out_cuda + MkldnnCPU: mkldnn_avg_pool3d_backward_out + +- func: avg_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override) -> Tensor + python_module: nn + structured_delegate: avg_pool3d_backward.grad_input + dispatch: + MkldnnCPU: mkldnn_avg_pool3d_backward + +# Return: (Tensor output, Tensor indices) +- func: fractional_max_pool2d.output(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) + python_module: nn + structured: True + dispatch: + CPU: fractional_max_pool2d_out_cpu + CUDA: fractional_max_pool2d_out_cuda + +# Return: (Tensor output, Tensor indices) +- func: fractional_max_pool2d(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples) -> (Tensor, Tensor) + python_module: nn + structured_delegate: fractional_max_pool2d.output + +- func: fractional_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) + python_module: nn + structured: True + dispatch: + CPU: fractional_max_pool2d_backward_cpu + CUDA: fractional_max_pool2d_backward_cuda + +- func: fractional_max_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] output_size, Tensor indices) -> Tensor + python_module: nn + structured_delegate: fractional_max_pool2d_backward.grad_input + +# Return: (Tensor output, Tensor indices) +- func: fractional_max_pool3d.output(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) + python_module: nn + structured: True + precomputed: + - kernel_size -> int poolSizeT, int poolSizeH, int poolSizeW + - output_size -> int outputT, int outputH, int outputW + - int numBatch, int numPlanes, int inputT, int inputH, int inputW + dispatch: + CPU: fractional_max_pool3d_out_cpu + CUDA: fractional_max_pool3d_out_cuda + +# Return: (Tensor output, Tensor indices) +- func: fractional_max_pool3d(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples) -> (Tensor, Tensor) + python_module: nn + structured_delegate: fractional_max_pool3d.output + +- func: fractional_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) + python_module: nn + dispatch: + CPU: fractional_max_pool3d_backward_out_cpu + CUDA: fractional_max_pool3d_backward_out_cuda + +- func: fractional_max_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices) -> Tensor + python_module: nn + dispatch: + CPU: fractional_max_pool3d_backward_cpu + CUDA: fractional_max_pool3d_backward_cuda + +# Return: (Tensor output, Tensor indices) +- func: max_pool2d_with_indices.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) 
indices) -> (Tensor(a!), Tensor(b!)) + python_module: nn + structured: True + dispatch: + CPU: max_pool2d_with_indices_out_cpu + CUDA: max_pool2d_with_indices_out_cuda + MPS: max_pool2d_with_indices_out_mps + +# Return: (Tensor output, Tensor indices) +- func: max_pool2d_with_indices(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor) + python_module: nn + structured_delegate: max_pool2d_with_indices.out + tags: core + +- func: max_pool2d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) + python_module: nn + structured: True + dispatch: + CPU: max_pool2d_with_indices_backward_out_cpu + CUDA: max_pool2d_with_indices_backward_out_cuda + MPS: max_pool2d_with_indices_backward_out_mps + +- func: max_pool2d_with_indices_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices) -> Tensor + python_module: nn + structured_delegate: max_pool2d_with_indices_backward.grad_input + tags: core + +# Return: (Tensor output, Tensor indices) +- func: max_pool3d_with_indices.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) + python_module: nn + dispatch: + CPU: max_pool3d_with_indices_out_cpu + CUDA: max_pool3d_with_indices_out_cuda + +# Return: (Tensor output, Tensor indices) +- func: max_pool3d_with_indices(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor) + python_module: nn + dispatch: + CPU: max_pool3d_with_indices_cpu + CUDA: max_pool3d_with_indices_cuda + tags: core + +- func: max_pool3d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) + python_module: nn + dispatch: + CPU: max_pool3d_with_indices_backward_out_cpu + CUDA: max_pool3d_with_indices_backward_out_cuda + +- func: max_pool3d_with_indices_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices) -> Tensor + python_module: nn + dispatch: + CPU: max_pool3d_with_indices_backward_cpu + CUDA: max_pool3d_with_indices_backward_cuda + +- func: max_unpool2d.out(Tensor self, Tensor indices, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!) + python_module: nn + dispatch: + CPU: max_unpooling2d_forward_out_cpu + CUDA: max_unpooling2d_forward_out_cuda + +- func: max_unpool2d(Tensor self, Tensor indices, SymInt[2] output_size) -> Tensor + python_module: nn + dispatch: + CPU: max_unpooling2d_forward_cpu + CUDA: max_unpooling2d_forward_cuda + +- func: max_unpool3d.out(Tensor self, Tensor indices, SymInt[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) out) -> Tensor(a!) + python_module: nn + dispatch: + CPU: max_unpooling3d_forward_out_cpu + CUDA: max_unpooling3d_forward_out_cuda + +- func: max_unpool3d(Tensor self, Tensor indices, SymInt[3] output_size, int[3] stride, int[3] padding) -> Tensor + python_module: nn + dispatch: + CPU: max_unpooling3d_forward_cpu + CUDA: max_unpooling3d_forward_cuda + +- func: reflection_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) 
out) -> Tensor(a!) + python_module: nn + structured: True + dispatch: + CPU: reflection_pad1d_out_cpu + QuantizedCPU: reflection_pad1d_out_quantized_cpu + CUDA: reflection_pad1d_out_cuda + MPS: reflection_pad1d_out_mps + +- func: reflection_pad1d(Tensor self, SymInt[2] padding) -> Tensor + python_module: nn + structured_delegate: reflection_pad1d.out + tags: core + +- func: reflection_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!) + python_module: nn + structured: True + dispatch: + CPU: reflection_pad1d_backward_out_cpu + CUDA: reflection_pad1d_backward_out_cuda + MPS: reflection_pad1d_backward_out_mps + +- func: reflection_pad1d_backward(Tensor grad_output, Tensor self, SymInt[2] padding) -> Tensor + python_module: nn + structured_delegate: reflection_pad1d_backward.grad_input + +- func: reflection_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!) + python_module: nn + dispatch: + CPU, QuantizedCPU: reflection_pad2d_out_cpu + CUDA: reflection_pad2d_out_cuda + MPS: reflection_pad2d_out_mps + +- func: reflection_pad2d(Tensor self, SymInt[4] padding) -> Tensor + python_module: nn + dispatch: + CPU: reflection_pad2d_cpu + QuantizedCPU: reflection_pad2d_quantized_cpu + CUDA: reflection_pad2d_cuda + MPS: reflection_pad2d_mps + tags: core + +- func: reflection_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!) + python_module: nn + dispatch: + CPU: reflection_pad2d_backward_out_cpu + CUDA: reflection_pad2d_backward_out_cuda + MPS: reflection_pad2d_backward_out_mps + +- func: reflection_pad2d_backward(Tensor grad_output, Tensor self, SymInt[4] padding) -> Tensor + python_module: nn + dispatch: + CPU: reflection_pad2d_backward_cpu + CUDA: reflection_pad2d_backward_cuda + MPS: reflection_pad2d_backward_mps + +- func: reflection_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!) + python_module: nn + structured: True + dispatch: + CPU: reflection_pad3d_out_cpu + CUDA: reflection_pad3d_out_cuda + MPS: reflection_pad3d_out_mps + +- func: reflection_pad3d(Tensor self, SymInt[6] padding) -> Tensor + python_module: nn + structured_delegate: reflection_pad3d.out + tags: core + +- func: reflection_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!) + python_module: nn + structured: True + dispatch: + CPU: reflection_pad3d_backward_out_cpu + CUDA: reflection_pad3d_backward_out_cuda + MPS: reflection_pad3d_backward_out_mps + +- func: reflection_pad3d_backward(Tensor grad_output, Tensor self, SymInt[6] padding) -> Tensor + python_module: nn + structured_delegate: reflection_pad3d_backward.grad_input + +- func: replication_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!) + python_module: nn + structured: True + dispatch: + CPU: replication_pad1d_out_cpu + CUDA: replication_pad1d_out_cuda + MPS: replication_pad1d_out_mps + +- func: replication_pad1d(Tensor self, SymInt[2] padding) -> Tensor + python_module: nn + structured_delegate: replication_pad1d.out + +- func: replication_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!) 
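+# Usage sketch (informal): the reflection_pad* / replication_pad* entries in this
+# section back torch.nn.functional.pad for mode="reflect" and mode="replicate";
+# the composite `pad` entry below routes to them. For example:
+#
+#   import torch
+#   import torch.nn.functional as F
+#   x = torch.arange(4.).reshape(1, 1, 4)            # (N, C, W)
+#   F.pad(x, (2, 1), mode="reflect")                 # tensor([[[2., 1., 0., 1., 2., 3., 2.]]])
+#   F.pad(x, (2, 1), mode="replicate")               # tensor([[[0., 0., 0., 1., 2., 3., 3.]]])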
+ python_module: nn + structured: True + dispatch: + CPU: replication_pad1d_backward_out_cpu + CUDA: replication_pad1d_backward_out_cuda + MPS: replication_pad1d_backward_out_mps + +- func: replication_pad1d_backward(Tensor grad_output, Tensor self, SymInt[2] padding) -> Tensor + python_module: nn + structured_delegate: replication_pad1d_backward.grad_input + +- func: replication_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!) + python_module: nn + structured: True + dispatch: + CPU: replication_pad2d_out_cpu + CUDA: replication_pad2d_out_cuda + MPS: replication_pad2d_out_mps + +- func: replication_pad2d(Tensor self, SymInt[4] padding) -> Tensor + python_module: nn + structured_delegate: replication_pad2d.out + tags: core + +- func: replication_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!) + python_module: nn + dispatch: + CPU: replication_pad2d_backward_out_cpu + CUDA: replication_pad2d_backward_out_cuda + MPS: replication_pad2d_backward_out_mps + +- func: replication_pad2d_backward(Tensor grad_output, Tensor self, SymInt[4] padding) -> Tensor + python_module: nn + dispatch: + CPU: replication_pad2d_backward_cpu + CUDA: replication_pad2d_backward_cuda + MPS: replication_pad2d_backward_mps + +- func: replication_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!) + python_module: nn + structured: True + dispatch: + CPU: replication_pad3d_out_cpu + CUDA: replication_pad3d_out_cuda + MPS: replication_pad3d_out_mps + +- func: replication_pad3d(Tensor self, SymInt[6] padding) -> Tensor + python_module: nn + structured_delegate: replication_pad3d.out + tags: core + + +- func: replication_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!) + python_module: nn + dispatch: + CPU: replication_pad3d_backward_out_cpu + CUDA: replication_pad3d_backward_out_cuda + MPS: replication_pad3d_backward_out_mps + +- func: replication_pad3d_backward(Tensor grad_output, Tensor self, SymInt[6] padding) -> Tensor + python_module: nn + dispatch: + CPU: replication_pad3d_backward_cpu + CUDA: replication_pad3d_backward_cuda + MPS: replication_pad3d_backward_mps + +- func: _pad_circular(Tensor self, SymInt[] pad) -> Tensor + python_module: nn + dispatch: + CompositeImplicitAutograd: _pad_circular_symint + +- func: _pad_enum(Tensor self, SymInt[] pad, int mode, float? value=None) -> Tensor + python_module: nn + dispatch: + CompositeImplicitAutograd: _pad_enum_symint + +- func: pad(Tensor self, SymInt[] pad, str mode="constant", float? value=None) -> Tensor + python_module: nn + dispatch: + CompositeImplicitAutograd: pad_symint + +- func: upsample_linear1d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor + python_module: nn + autogen: upsample_linear1d.vec_out + +- func: upsample_bilinear2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor + python_module: nn + autogen: upsample_bilinear2d.vec_out + tags: core + +- func: _upsample_bilinear2d_aa.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor + python_module: nn + autogen: _upsample_bilinear2d_aa.vec_out + +- func: upsample_trilinear3d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor + python_module: nn + autogen: upsample_trilinear3d.vec_out + +- func: upsample_bicubic2d.vec(Tensor input, SymInt[]? 
output_size, bool align_corners, float[]? scale_factors) -> Tensor + python_module: nn + autogen: upsample_bicubic2d.vec_out + +- func: _upsample_bicubic2d_aa.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor + python_module: nn + autogen: _upsample_bicubic2d_aa.vec_out + +- func: upsample_nearest1d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor + python_module: nn + autogen: upsample_nearest1d.vec_out + +- func: _upsample_nearest_exact1d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor + python_module: nn + autogen: _upsample_nearest_exact1d.vec_out + +- func: upsample_nearest2d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor + python_module: nn + autogen: upsample_nearest2d.vec_out + tags: core + +- func: _upsample_nearest_exact2d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor + python_module: nn + autogen: _upsample_nearest_exact2d.vec_out + +- func: upsample_nearest3d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor + python_module: nn + autogen: upsample_nearest3d.vec_out + +- func: _upsample_nearest_exact3d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor + python_module: nn + autogen: _upsample_nearest_exact3d.vec_out + +# NOTE: all of the non-"vec" upsample overloads are only kept for backward compatibility. +- func: upsample_linear1d.out(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None, *, Tensor(a!) out) -> Tensor(a!) + python_module: nn + structured: True + dispatch: + CPU: upsample_linear1d_out_cpu + CUDA: upsample_linear1d_out_cuda + MPS: upsample_linear1d_out_mps + +- func: upsample_linear1d(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None) -> Tensor + python_module: nn + structured_delegate: upsample_linear1d.out + +- func: upsample_linear1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!) + python_module: nn + structured: True + dispatch: + CPU: upsample_linear1d_backward_out_cpu + CUDA: upsample_linear1d_backward_out_cuda + MPS: upsample_linear1d_backward_out_mps + +- func: upsample_linear1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None) -> Tensor + python_module: nn + structured_delegate: upsample_linear1d_backward.grad_input + +- func: upsample_bilinear2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) + python_module: nn + structured: True + dispatch: + CPU: upsample_bilinear2d_out_cpu + CUDA: upsample_bilinear2d_out_cuda + MPS: upsample_bilinear2d_out_mps + +- func: upsample_bilinear2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor + python_module: nn + structured_delegate: upsample_bilinear2d.out + dispatch: + QuantizedCPU: upsample_bilinear2d_quantized_cpu + +- func: upsample_bilinear2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) 
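+# Usage sketch (informal): torch.nn.functional.interpolate is the usual entry
+# point for the upsample_* ops in this section (the .vec overloads take optional
+# scale_factors), and the _aa variants correspond to antialias=True. Roughly:
+#
+#   import torch
+#   import torch.nn.functional as F
+#   x = torch.randn(1, 3, 16, 16)
+#   F.interpolate(x, scale_factor=2, mode="nearest")                    # upsample_nearest2d
+#   F.interpolate(x, size=(8, 8), mode="bilinear",
+#                 align_corners=False)                                  # upsample_bilinear2d
+#   F.interpolate(x, size=(8, 8), mode="bilinear",
+#                 align_corners=False, antialias=True)                  # _upsample_bilinear2d_aa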
+ python_module: nn + structured: True + dispatch: + CPU: upsample_bilinear2d_backward_out_cpu + CUDA: upsample_bilinear2d_backward_out_cuda + MPS: upsample_bilinear2d_backward_out_mps + +- func: upsample_bilinear2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor + python_module: nn + structured_delegate: upsample_bilinear2d_backward.grad_input + +- func: _upsample_bilinear2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) + python_module: nn + structured: True + dispatch: + CPU: _upsample_bilinear2d_aa_out_cpu + CUDA: _upsample_bilinear2d_aa_out_cuda + +- func: _upsample_bilinear2d_aa(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor + python_module: nn + structured_delegate: _upsample_bilinear2d_aa.out + +- func: _upsample_bilinear2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) + python_module: nn + structured: True + dispatch: + CPU: _upsample_bilinear2d_aa_backward_out_cpu + CUDA: _upsample_bilinear2d_aa_backward_out_cuda + +- func: _upsample_bilinear2d_aa_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor + python_module: nn + structured_delegate: _upsample_bilinear2d_aa_backward.grad_input + +- func: upsample_bicubic2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) + python_module: nn + structured: True + dispatch: + CPU: upsample_bicubic2d_out_cpu + CUDA: upsample_bicubic2d_out_cuda + +- func: upsample_bicubic2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor + python_module: nn + structured_delegate: upsample_bicubic2d.out + +- func: upsample_bicubic2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) + python_module: nn + structured: True + dispatch: + CPU: upsample_bicubic2d_backward_out_cpu + CUDA: upsample_bicubic2d_backward_out_cuda + +- func: upsample_bicubic2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor + python_module: nn + structured_delegate: upsample_bicubic2d_backward.grad_input + +- func: _upsample_bicubic2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) + python_module: nn + structured: True + dispatch: + CPU: _upsample_bicubic2d_aa_out_cpu + CUDA: _upsample_bicubic2d_aa_out_cuda + +- func: _upsample_bicubic2d_aa(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor + python_module: nn + structured_delegate: _upsample_bicubic2d_aa.out + +- func: _upsample_bicubic2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) 
+ python_module: nn + structured: True + dispatch: + CPU: _upsample_bicubic2d_aa_backward_out_cpu + CUDA: _upsample_bicubic2d_aa_backward_out_cuda + +- func: _upsample_bicubic2d_aa_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor + python_module: nn + structured_delegate: _upsample_bicubic2d_aa_backward.grad_input + +- func: upsample_trilinear3d.out(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) + python_module: nn + structured: True + dispatch: + CPU: upsample_trilinear3d_out_cpu + CUDA: upsample_trilinear3d_out_cuda + +- func: upsample_trilinear3d(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor + python_module: nn + structured_delegate: upsample_trilinear3d.out + +- func: upsample_trilinear3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) + python_module: nn + structured: True + dispatch: + CPU: upsample_trilinear3d_backward_out_cpu + CUDA: upsample_trilinear3d_backward_out_cuda + +- func: upsample_trilinear3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor + python_module: nn + structured_delegate: upsample_trilinear3d_backward.grad_input + +- func: upsample_nearest1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!) + python_module: nn + structured: True + dispatch: + CPU: upsample_nearest1d_out_cpu + CUDA: upsample_nearest1d_out_cuda + MPS: upsample_nearest1d_out_mps + +- func: _upsample_nearest_exact1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!) + python_module: nn + structured: True + dispatch: + CPU: _upsample_nearest_exact1d_out_cpu + CUDA: _upsample_nearest_exact1d_out_cuda + MPS: _upsample_nearest_exact1d_out_mps + +- func: upsample_nearest1d(Tensor self, SymInt[1] output_size, float? scales=None) -> Tensor + python_module: nn + structured_delegate: upsample_nearest1d.out + +- func: _upsample_nearest_exact1d(Tensor self, SymInt[1] output_size, float? scales=None) -> Tensor + python_module: nn + structured_delegate: _upsample_nearest_exact1d.out + +- func: upsample_nearest1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!) + python_module: nn + structured: True + dispatch: + CPU: upsample_nearest1d_backward_out_cpu + CUDA: upsample_nearest1d_backward_out_cuda + MPS: upsample_nearest1d_backward_out_mps + +- func: _upsample_nearest_exact1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!) + python_module: nn + structured: True + dispatch: + CPU: _upsample_nearest_exact1d_backward_out_cpu + CUDA: _upsample_nearest_exact1d_backward_out_cuda + MPS: _upsample_nearest_exact1d_backward_out_mps + +- func: upsample_nearest1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? 
scales=None) -> Tensor + python_module: nn + structured_delegate: upsample_nearest1d_backward.grad_input + +- func: _upsample_nearest_exact1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None) -> Tensor + python_module: nn + structured_delegate: _upsample_nearest_exact1d_backward.grad_input + +- func: upsample_nearest2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) + python_module: nn + structured: True + dispatch: + CPU: upsample_nearest2d_out_cpu + CUDA: upsample_nearest2d_out_cuda + MPS: upsample_nearest2d_out_mps + +- func: _upsample_nearest_exact2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) + python_module: nn + structured: True + dispatch: + CPU: _upsample_nearest_exact2d_out_cpu + CUDA: _upsample_nearest_exact2d_out_cuda + MPS: _upsample_nearest_exact2d_out_mps + +- func: upsample_nearest2d(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor + python_module: nn + structured_delegate: upsample_nearest2d.out + dispatch: + QuantizedCPU: upsample_nearest2d_quantized_cpu + +- func: _upsample_nearest_exact2d(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor + python_module: nn + structured_delegate: _upsample_nearest_exact2d.out + dispatch: + QuantizedCPU: _upsample_nearest_exact2d_quantized_cpu + +- func: upsample_nearest2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) + python_module: nn + structured: True + dispatch: + CPU: upsample_nearest2d_backward_out_cpu + CUDA: upsample_nearest2d_backward_out_cuda + MPS: upsample_nearest2d_backward_out_mps + +- func: _upsample_nearest_exact2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) + python_module: nn + structured: True + dispatch: + CPU: _upsample_nearest_exact2d_backward_out_cpu + CUDA: _upsample_nearest_exact2d_backward_out_cuda + MPS: _upsample_nearest_exact2d_backward_out_mps + +- func: upsample_nearest2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None) -> Tensor + python_module: nn + structured_delegate: upsample_nearest2d_backward.grad_input + +- func: _upsample_nearest_exact2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None) -> Tensor + python_module: nn + structured_delegate: _upsample_nearest_exact2d_backward.grad_input + +- func: upsample_nearest3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) + python_module: nn + structured: True + dispatch: + CPU: upsample_nearest3d_out_cpu + CUDA: upsample_nearest3d_out_cuda + +- func: _upsample_nearest_exact3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) + python_module: nn + structured: True + dispatch: + CPU: _upsample_nearest_exact3d_out_cpu + CUDA: _upsample_nearest_exact3d_out_cuda + +- func: upsample_nearest3d(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? 
scales_w=None) -> Tensor + python_module: nn + structured_delegate: upsample_nearest3d.out + dispatch: + QuantizedCPU: upsample_nearest3d_quantized_cpu + +- func: _upsample_nearest_exact3d(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor + python_module: nn + structured_delegate: _upsample_nearest_exact3d.out + dispatch: + QuantizedCPU: _upsample_nearest_exact3d_quantized_cpu + +- func: upsample_nearest3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) + python_module: nn + structured: True + dispatch: + CPU: upsample_nearest3d_backward_out_cpu + CUDA: upsample_nearest3d_backward_out_cuda + +- func: _upsample_nearest_exact3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) + python_module: nn + structured: True + dispatch: + CPU: _upsample_nearest_exact3d_backward_out_cpu + CUDA: _upsample_nearest_exact3d_backward_out_cuda + +- func: upsample_nearest3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor + python_module: nn + structured_delegate: upsample_nearest3d_backward.grad_input + +- func: _upsample_nearest_exact3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor + python_module: nn + structured_delegate: _upsample_nearest_exact3d_backward.grad_input + +- func: sigmoid_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!) + python_module: nn + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: sigmoid_backward_out + MPS: sigmoid_backward_out_mps + tags: pointwise + +- func: sigmoid_backward(Tensor grad_output, Tensor output) -> Tensor + python_module: nn + structured_delegate: sigmoid_backward.grad_input + tags: pointwise + +- func: logit_backward.grad_input(Tensor grad_output, Tensor self, float? eps=None, *, Tensor(a!) grad_input) -> Tensor(a!) + python_module: nn + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: logit_backward_out + MPS: logit_backward_out_mps + tags: pointwise + +- func: logit_backward(Tensor grad_output, Tensor self, float? eps=None) -> Tensor + python_module: nn + structured_delegate: logit_backward.grad_input + tags: pointwise + +- func: tanh_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!) + python_module: nn + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: tanh_backward_out + MPS: tanh_backward_out_mps + tags: pointwise + +- func: tanh_backward(Tensor grad_output, Tensor output) -> Tensor + python_module: nn + structured_delegate: tanh_backward.grad_input + +# What's a thnn_conv_ versus a slow_conv_? +# +# Historically, we have inefficient implementations of convolutions +# coming from the THNN/THCUNN library. These convolutions typically +# operated by computing the Toeplitz matrix and then doing a matrix +# multiply with the input; this is very memory inefficient! 
However, +# occasionally, we really don't have anything better, so it's helpful +# to have these fallbacks when there is no more optimized implementation +# in cudnn or mkldnn, etc. Both thnn_ and slow_ convolutions fall +# into this bucket. +# +# The difference between these two designations, is that thnn_ refers +# to a convolution that is still written in the "legacy" style; that is, +# C code in the THNN/ or THCUNN/ directory. A slow_ convolution is +# one that is written in the native style: modern C++. Algorithmically, +# these are the same thing, but we give them different prefixes to +# make the operational distinction clear. + tags: pointwise + +- func: slow_conv_transpose2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, SymInt[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) + python_module: nn + structured: True + dispatch: + CPU: slow_conv_transpose2d_structured_cpu + CUDA: slow_conv_transpose2d_structured_cuda + +- func: slow_conv_transpose2d(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, SymInt[2] dilation=1) -> Tensor + python_module: nn + structured_delegate: slow_conv_transpose2d.out + +- func: slow_conv_transpose3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, SymInt[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!) + python_module: nn + dispatch: + CPU: slow_conv_transpose3d_out_cpu + CUDA: slow_conv_transpose3d_out_cuda + +- func: slow_conv_transpose3d(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, SymInt[3] dilation=1) -> Tensor + python_module: nn + dispatch: + CPU: slow_conv_transpose3d_cpu + CUDA: slow_conv_transpose3d_cuda + +- func: thnn_conv2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, *, Tensor(a!) out) -> Tensor(a!) + python_module: nn + +- func: thnn_conv2d(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0) -> Tensor + python_module: nn + +- func: _slow_conv2d_forward.output(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias, SymInt[2] stride, SymInt[2] padding, *, Tensor(a!) output) -> Tensor(a!) + python_module: nn + dispatch: + CPU: slow_conv2d_forward_out_cpu + CUDA: slow_conv2d_forward_out_cuda + +- func: _slow_conv2d_forward(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias, SymInt[2] stride, SymInt[2] padding) -> Tensor + python_module: nn + dispatch: + CPU: slow_conv2d_forward_cpu + CUDA: slow_conv2d_forward_cuda + +- func: _slow_conv2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor weight, SymInt[2] kernel_size, SymInt[2] stride, SymInt[2] padding, *, Tensor(a!) grad_input, Tensor(b!) grad_weight, Tensor(c!) 
grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + python_module: nn + dispatch: + CPU: slow_conv2d_backward_out_cpu + CUDA: slow_conv2d_backward_out_cuda + +- func: _slow_conv2d_backward.output_mask(Tensor grad_output, Tensor self, Tensor weight, SymInt[2] kernel_size, SymInt[2] stride, SymInt[2] padding, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias) + python_module: nn + dispatch: + CPU: slow_conv2d_backward_cpu + CUDA: slow_conv2d_backward_cuda + autogen: _slow_conv2d_backward.output_mask_out + +- func: _conv_depthwise2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias, SymInt[2] stride, SymInt[2] padding, SymInt[2] dilation, *, Tensor(a!) out) -> Tensor(a!) + use_const_ref_for_mutable_tensors: True + python_module: nn + dispatch: + CUDA: conv_depthwise2d_cuda_out + +- func: _conv_depthwise2d(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias, SymInt[2] stride, SymInt[2] padding, SymInt[2] dilation) -> Tensor + python_module: nn + dispatch: + CUDA: conv_depthwise2d_cuda + +- func: conv_depthwise3d(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias, SymInt[3] stride, SymInt[3] padding, SymInt[3] dilation) -> Tensor + python_module: nn + dispatch: + CUDA: conv_depthwise3d_cuda + autogen: conv_depthwise3d.out + +- func: slow_conv3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, *, Tensor(a!) out) -> Tensor(a!) + python_module: nn + +- func: slow_conv3d(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0) -> Tensor + python_module: nn + +- func: slow_conv3d_forward.output(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias, SymInt[3] stride, SymInt[3] padding, *, Tensor(a!) output) -> Tensor(a!) + python_module: nn + dispatch: + CPU: slow_conv3d_forward_out_cpu + +- func: slow_conv3d_forward(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias, SymInt[3] stride, SymInt[3] padding) -> Tensor + python_module: nn + dispatch: + CPU: slow_conv3d_forward_cpu + +- func: slow_conv_dilated2d(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] dilation=1) -> Tensor + python_module: nn + dispatch: + CPU: slow_conv_dilated2d_cpu + CUDA: slow_conv_dilated2d_cuda + autogen: slow_conv_dilated2d.out + +- func: slow_conv_dilated3d(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] dilation=1) -> Tensor + python_module: nn + dispatch: + CPU: slow_conv_dilated3d_cpu + CUDA: slow_conv_dilated3d_cuda + autogen: slow_conv_dilated3d.out + +- func: col2im.out(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!) + python_module: nn + dispatch: + CPU: col2im_out_cpu + CUDA: col2im_out_cuda + +- func: col2im(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor + python_module: nn + dispatch: + CPU: col2im_cpu + CUDA: col2im_cuda + tags: core + +- func: column_stack(Tensor[] tensors) -> Tensor + +- func: column_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) + +- func: im2col.out(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!) 
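+# Illustration (informal sketch): the "compute the Toeplitz matrix, then do a
+# matrix multiply" strategy described in the slow_conv comment above is
+# essentially im2col followed by a matmul, which can be reproduced from Python
+# with F.unfold:
+#
+#   import torch
+#   import torch.nn.functional as F
+#   x = torch.randn(1, 3, 8, 8)
+#   w = torch.randn(4, 3, 3, 3)
+#   cols = F.unfold(x, kernel_size=3)                 # (1, 3*3*3, 36), i.e. im2col
+#   out = (w.view(4, -1) @ cols).view(1, 4, 6, 6)     # matmul, then reshape back to NCHW
+#   assert torch.allclose(out, F.conv2d(x, w), atol=1e-4)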
+ python_module: nn + dispatch: + CPU: im2col_out_cpu + CUDA: im2col_out_cuda + +- func: im2col(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor + python_module: nn + dispatch: + CPU: im2col_cpu + CUDA: im2col_cuda + +- func: isfinite(Tensor self) -> Tensor + variants: function, method + device_check: NoCheck + device_guard: False + +- func: isinf(Tensor self) -> Tensor + variants: function, method + device_check: NoCheck + device_guard: False + dispatch: + CompositeExplicitAutograd: isinf + SparseCPU, SparseCUDA: isinf_sparse + SparseMeta: isinf_sparse_meta + SparseCsrCPU, SparseCsrCUDA: isinf_sparse_csr + autogen: isinf.out + tags: [core, pointwise] + +- func: record_stream(Tensor(a!) self, Stream s) -> () + variants: method + dispatch: + CUDA: record_stream_cuda + +- func: isposinf(Tensor self) -> Tensor + variants: function, method + structured_delegate: isposinf.out + dispatch: + SparseCPU, SparseCUDA: isposinf_sparse + SparseCsrCPU, SparseCsrCUDA: isposinf_sparse_csr + tags: pointwise + +- func: isposinf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: isposinf_out + SparseCPU, SparseCUDA: isposinf_sparse_out + SparseCsrCPU, SparseCsrCUDA: isposinf_sparse_csr_out + tags: pointwise + +- func: isneginf(Tensor self) -> Tensor + variants: function, method + structured_delegate: isneginf.out + dispatch: + SparseCPU, SparseCUDA: isneginf_sparse + SparseCsrCPU, SparseCsrCUDA: isneginf_sparse_csr + tags: pointwise + +- func: isneginf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: isneginf_out + SparseCPU, SparseCUDA: isneginf_sparse_out + SparseCsrCPU, SparseCsrCUDA: isneginf_sparse_csr_out + tags: pointwise + +# NOTE [_add_batch_dim and _remove_batch_dim] +# _add_batch_dim and _remove_batch_dim are meant to be used in the implementation +# of the vmap frontend API (see torch/_vmap_internals.py). They are not +# user-facing, hence the leading underscore. Please don't use them them anywhere else. +- func: _add_batch_dim(Tensor self, int batch_dim, int level) -> Tensor + variants: function + +# See NOTE [_add_batch_dim and _remove_batch_dim] +- func: _remove_batch_dim(Tensor self, int level, int batch_size, int out_dim) -> Tensor + variants: function + +## Functions related to the `torch.special` namespace +# Note [special namespace binding] +# Functions in the special python module should have their names start with +# "special_" underscore and be bound to the desired Python name in +# torch/special/__init__.py, and the desired C++ name in torch/csrc/api/include/torch/special.h. +# The "special_" names should be hidden from the user and not documented. + +- func: special_entr(Tensor self) -> Tensor + structured_delegate: special_entr.out + python_module: special + variants: function + tags: pointwise + +- func: special_entr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + structured: True + structured_inherits: TensorIteratorBase + python_module: special + variants: function + dispatch: + CPU, CUDA: special_entr_out + tags: pointwise + +- func: special_ndtri(Tensor self) -> Tensor + structured_delegate: special_ndtri.out + python_module: special + variants: function + tags: pointwise + +- func: special_ndtri.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
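+# Example of the binding convention from the note above (informal): the schema
+# name special_entr is exposed to users as torch.special.entr, with a matching
+# C++ declaration in torch/csrc/api/include/torch/special.h. For example:
+#
+#   import torch
+#   x = torch.tensor([0.0, 0.5, 1.0])
+#   torch.special.entr(x)       # elementwise -x * log(x), with entr(0) == 0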
+ structured: True + structured_inherits: TensorIteratorBase + python_module: special + variants: function + dispatch: + CPU, CUDA: special_ndtri_out + tags: pointwise + +- func: special_log_ndtr(Tensor self) -> Tensor + structured_delegate: special_log_ndtr.out + python_module: special + variants: function + tags: pointwise + +- func: special_log_ndtr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + structured: True + structured_inherits: TensorIteratorBase + python_module: special + variants: function + dispatch: + CPU, CUDA: special_log_ndtr_out + tags: pointwise + +- func: special_expm1(Tensor self) -> Tensor + python_module: special + variants: function + +- func: special_expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + python_module: special + variants: function + +- func: special_exp2(Tensor self) -> Tensor + python_module: special + variants: function + +- func: special_exp2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + python_module: special + variants: function + +- func: special_psi(Tensor self) -> Tensor + python_module: special + variants: function + +- func: special_psi.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + python_module: special + variants: function + +- func: special_digamma(Tensor self) -> Tensor + python_module: special + variants: function + +- func: special_digamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + python_module: special + variants: function + +- func: special_gammaln(Tensor self) -> Tensor + python_module: special + variants: function + +- func: special_gammaln.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + python_module: special + variants: function + +- func: special_erf(Tensor self) -> Tensor + python_module: special + variants: function + +- func: special_erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + python_module: special + variants: function + +- func: special_erfc(Tensor self) -> Tensor + python_module: special + variants: function + +- func: special_erfc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + python_module: special + +- func: special_erfcx(Tensor self) -> Tensor + python_module: special + variants: function + structured_delegate: special_erfcx.out + tags: pointwise + +- func: special_erfcx.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + python_module: special + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: special_erfcx_out + tags: pointwise + +- func: special_erfinv(Tensor self) -> Tensor + python_module: special + variants: function + +- func: special_erfinv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + python_module: special + +- func: special_ndtr(Tensor self) -> Tensor + python_module: special + variants: function + +- func: special_ndtr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+ python_module: special + variants: function + +- func: special_xlog1py(Tensor self, Tensor other) -> Tensor + device_check: NoCheck # TensorIterator + python_module: special + variants: function + structured_delegate: special_xlog1py.out + tags: pointwise + +- func: special_xlog1py.self_scalar(Scalar self, Tensor other) -> Tensor + device_check: NoCheck # TensorIterator + python_module: special + variants: function + dispatch: + CompositeExplicitAutograd: special_xlog1py + tags: pointwise + +- func: special_xlog1py.other_scalar(Tensor self, Scalar other) -> Tensor + device_check: NoCheck # TensorIterator + python_module: special + variants: function + dispatch: + CompositeExplicitAutograd: special_xlog1py + tags: pointwise + +- func: special_xlog1py.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured: True + structured_inherits: TensorIteratorBase + python_module: special + variants: function + dispatch: + CPU, CUDA: special_xlog1py_out + tags: pointwise + +- func: special_xlog1py.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + python_module: special + variants: function + dispatch: + CompositeExplicitAutograd: special_xlog1py_out + tags: pointwise + +- func: special_xlog1py.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + python_module: special + variants: function + dispatch: + CompositeExplicitAutograd: special_xlog1py_out + tags: pointwise + +- func: special_xlogy(Tensor self, Tensor other) -> Tensor + device_check: NoCheck # TensorIterator + python_module: special + variants: function + +- func: special_xlogy.self_scalar(Scalar self, Tensor other) -> Tensor + device_check: NoCheck # TensorIterator + python_module: special + variants: function + +- func: special_xlogy.other_scalar(Tensor self, Scalar other) -> Tensor + device_check: NoCheck # TensorIterator + python_module: special + variants: function + +- func: special_xlogy.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + python_module: special + variants: function + +- func: special_xlogy.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + python_module: special + variants: function + +- func: special_xlogy.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + python_module: special + variants: function + +- func: special_zeta(Tensor self, Tensor other) -> Tensor + device_check: NoCheck # TensorIterator + python_module: special + variants: function + structured_delegate: special_zeta.out + tags: pointwise + +- func: special_zeta.self_scalar(Scalar self, Tensor other) -> Tensor + device_check: NoCheck # TensorIterator + python_module: special + variants: function + dispatch: + CompositeExplicitAutograd: special_zeta + tags: pointwise + +- func: special_zeta.other_scalar(Tensor self, Scalar other) -> Tensor + device_check: NoCheck # TensorIterator + python_module: special + variants: function + dispatch: + CompositeExplicitAutograd: special_zeta + tags: pointwise + +- func: special_zeta.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
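+# Usage sketch (informal): the .self_scalar / .other_scalar overloads above let
+# either argument be a plain Python number at the Python level, e.g.:
+#
+#   import torch
+#   t = torch.tensor([0.5, 1.0, 2.0])
+#   torch.special.xlog1py(t, 0.5)     # Tensor input, Scalar other
+#   torch.special.xlog1py(2.0, t)     # Scalar input, Tensor other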
+ device_check: NoCheck # TensorIterator + structured: True + structured_inherits: TensorIteratorBase + python_module: special + variants: function + dispatch: + CPU, CUDA: special_zeta_out + tags: pointwise + +- func: special_zeta.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + python_module: special + variants: function + dispatch: + CompositeExplicitAutograd: special_zeta_out + tags: pointwise + +- func: special_zeta.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck # TensorIterator + python_module: special + variants: function + dispatch: + CompositeExplicitAutograd: special_zeta_out + tags: pointwise + +- func: special_i0(Tensor self) -> Tensor + python_module: special + variants: function + +- func: special_i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + python_module: special + variants: function + +- func: special_i0e(Tensor self) -> Tensor + python_module: special + variants: function + structured_delegate: special_i0e.out + tags: pointwise + +- func: special_i0e.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + python_module: special + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: special_i0e_out + tags: pointwise + +- func: special_i1(Tensor self) -> Tensor + python_module: special + variants: function + structured_delegate: special_i1.out + tags: pointwise + +- func: special_i1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + python_module: special + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: special_i1_out + tags: pointwise + +- func: special_i1e(Tensor self) -> Tensor + python_module: special + variants: function + structured_delegate: special_i1e.out + tags: pointwise + +- func: special_i1e.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + python_module: special + structured: True + structured_inherits: TensorIteratorBase + dispatch: + CPU, CUDA: special_i1e_out + tags: pointwise + +- func: special_logit(Tensor self, float? eps=None) -> Tensor + python_module: special + variants: function + +- func: special_logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!) + python_module: special + +- func: special_polygamma(int n, Tensor self) -> Tensor + python_module: special + variants: function + +- func: special_polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> Tensor(a!) + python_module: special + +- func: special_logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor + python_module: special + variants: function + +- func: special_logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + python_module: special + +- func: special_expit(Tensor self) -> Tensor + python_module: special + variants: function + +- func: special_expit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + python_module: special + variants: function + +- func: special_sinc(Tensor self) -> Tensor + python_module: special + variants: function + +- func: special_sinc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + python_module: special + variants: function + +- func: special_round(Tensor self, *, int decimals=0) -> Tensor + python_module: special + variants: function + +- func: special_round.out(Tensor self, *, int decimals=0, Tensor(a!) out) -> Tensor(a!) 
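+# Usage sketch (informal): the ".out" overloads throughout this file surface as
+# the out= keyword argument in Python, e.g.:
+#
+#   import torch
+#   x = torch.tensor([0.0, 1.0, 2.0])
+#   buf = torch.empty_like(x)
+#   torch.special.i0e(x, out=buf)     # writes into buf and also returns it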
+ python_module: special + variants: function + +- func: special_log1p(Tensor self) -> Tensor + python_module: special + variants: function + +- func: special_log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + python_module: special + variants: function + +- func: special_log_softmax(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor + python_module: special + variants: function + +- func: special_gammainc.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + python_module: special + variants: function + +- func: special_gammainc(Tensor self, Tensor other) -> Tensor + python_module: special + variants: function + +- func: special_gammaincc.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + python_module: special + variants: function + +- func: special_gammaincc(Tensor self, Tensor other) -> Tensor + python_module: special + variants: function + +- func: special_multigammaln(Tensor self, int p) -> Tensor + python_module: special + variants: function + +- func: special_multigammaln.out(Tensor self, int p, *, Tensor(a!) out) -> Tensor(a!) + python_module: special + variants: function + +- func: special_softmax(Tensor self, int dim, ScalarType? dtype=None) -> Tensor + python_module: special + variants: function + +## Functions related to the fast Fourier transform and the torch.fft namespace +# Note [FFT namespace binding] +# Functions in the fft python module should have their names start with +# "fft_" underscore and be bound to the desired Python name in +# torch/fft/__init__.py, and the desired C++ name in torch/csrc/api/include/torch/fft.h. +# The "fft_" names should be hidden from the user and not documented. +# +# See fft_fft as an example. + +# torch.fft.fft +# NOTE: NOT an alias for torch.fft, which has different semantics +- func: fft_fft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor + python_module: fft + variants: function + dispatch: + CompositeImplicitAutograd: fft_fft_symint + +- func: fft_fft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + python_module: fft + variants: function + dispatch: + CompositeImplicitAutograd: fft_fft_symint_out + +- func: fft_ifft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor + python_module: fft + variants: function + dispatch: + CompositeImplicitAutograd: fft_ifft_symint + +- func: fft_ifft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + python_module: fft + variants: function + dispatch: + CompositeImplicitAutograd: fft_ifft_symint_out + +- func: fft_rfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor + python_module: fft + variants: function + dispatch: + CompositeImplicitAutograd: fft_rfft_symint + +- func: fft_rfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + python_module: fft + variants: function + dispatch: + CompositeImplicitAutograd: fft_rfft_symint_out + +- func: fft_irfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor + python_module: fft + variants: function + dispatch: + CompositeImplicitAutograd: fft_irfft_symint + +- func: fft_irfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + python_module: fft + variants: function + dispatch: + CompositeImplicitAutograd: fft_irfft_symint_out + +- func: fft_hfft(Tensor self, SymInt? n=None, int dim=-1, str? 
norm=None) -> Tensor + python_module: fft + variants: function + dispatch: + CompositeImplicitAutograd: fft_hfft_symint + +- func: fft_hfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + python_module: fft + variants: function + dispatch: + CompositeImplicitAutograd: fft_hfft_symint_out + +- func: fft_ihfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor + python_module: fft + variants: function + dispatch: + CompositeImplicitAutograd: fft_ihfft_symint + +- func: fft_ihfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + python_module: fft + variants: function + dispatch: + CompositeImplicitAutograd: fft_ihfft_symint_out + +- func: fft_fft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor + python_module: fft + variants: function + dispatch: + CompositeImplicitAutograd: fft_fft2_symint + +- func: fft_fft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + python_module: fft + variants: function + dispatch: + CompositeImplicitAutograd: fft_fft2_symint_out + +- func: fft_ifft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor + python_module: fft + variants: function + dispatch: + CompositeImplicitAutograd: fft_ifft2_symint + +- func: fft_ifft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + python_module: fft + variants: function + dispatch: + CompositeImplicitAutograd: fft_ifft2_symint_out + +- func: fft_rfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor + python_module: fft + variants: function + dispatch: + CompositeImplicitAutograd: fft_rfft2_symint + +- func: fft_rfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + python_module: fft + variants: function + dispatch: + CompositeImplicitAutograd: fft_rfft2_symint_out + +- func: fft_irfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor + python_module: fft + variants: function + dispatch: + CompositeImplicitAutograd: fft_irfft2_symint + +- func: fft_irfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + python_module: fft + variants: function + dispatch: + CompositeImplicitAutograd: fft_irfft2_symint_out + +- func: fft_hfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor + use_const_ref_for_mutable_tensors: True + python_module: fft + variants: function + dispatch: + CompositeImplicitAutograd: fft_hfft2_symint + +- func: fft_hfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + use_const_ref_for_mutable_tensors: True + python_module: fft + variants: function + dispatch: + CompositeImplicitAutograd: fft_hfft2_symint_out + +- func: fft_ihfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor + use_const_ref_for_mutable_tensors: True + python_module: fft + variants: function + dispatch: + CompositeImplicitAutograd: fft_ihfft2_symint + +- func: fft_ihfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + use_const_ref_for_mutable_tensors: True + python_module: fft + variants: function + dispatch: + CompositeImplicitAutograd: fft_ihfft2_symint_out + +- func: fft_fftn(Tensor self, SymInt[1]? s=None, int[1]? 
dim=None, str? norm=None) -> Tensor + python_module: fft + variants: function + dispatch: + CompositeImplicitAutograd: fft_fftn_symint + +- func: fft_fftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + python_module: fft + variants: function + dispatch: + CompositeImplicitAutograd: fft_fftn_symint_out + +- func: fft_ifftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor + python_module: fft + variants: function + dispatch: + CompositeImplicitAutograd: fft_ifftn_symint + +- func: fft_ifftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + python_module: fft + variants: function + dispatch: + CompositeImplicitAutograd: fft_ifftn_symint_out + +- func: fft_rfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor + python_module: fft + variants: function + dispatch: + CompositeImplicitAutograd: fft_rfftn_symint + +- func: fft_rfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + python_module: fft + variants: function + dispatch: + CompositeImplicitAutograd: fft_rfftn_symint_out + +- func: fft_irfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor + python_module: fft + variants: function + dispatch: + CompositeImplicitAutograd: fft_irfftn_symint + +- func: fft_irfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + python_module: fft + variants: function + dispatch: + CompositeImplicitAutograd: fft_irfftn_symint_out + +- func: fft_hfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor + use_const_ref_for_mutable_tensors: True + python_module: fft + variants: function + dispatch: + CompositeImplicitAutograd: fft_hfftn_symint + +- func: fft_hfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + use_const_ref_for_mutable_tensors: True + python_module: fft + variants: function + dispatch: + CompositeImplicitAutograd: fft_hfftn_symint_out + +- func: fft_ihfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor + use_const_ref_for_mutable_tensors: True + python_module: fft + variants: function + dispatch: + CompositeImplicitAutograd: fft_ihfftn_symint + +- func: fft_ihfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + use_const_ref_for_mutable_tensors: True + python_module: fft + variants: function + dispatch: + CompositeImplicitAutograd: fft_ihfftn_symint_out + +- func: fft_fftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + python_module: fft + variants: function + dispatch: + CompositeExplicitAutograd: fft_fftfreq + +- func: fft_fftfreq.out(int n, float d=1.0, *, Tensor(a!) out) -> Tensor(a!) + python_module: fft + variants: function + dispatch: + CompositeExplicitAutograd: fft_fftfreq_out + +- func: fft_rfftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + python_module: fft + variants: function + dispatch: + CompositeExplicitAutograd: fft_rfftfreq + +- func: fft_rfftfreq.out(int n, float d=1.0, *, Tensor(a!) out) -> Tensor(a!) + python_module: fft + variants: function + dispatch: + CompositeExplicitAutograd: fft_rfftfreq_out + +- func: fft_fftshift(Tensor self, int[1]? 
dim=None) -> Tensor + python_module: fft + variants: function + +- func: fft_ifftshift(Tensor self, int[1]? dim=None) -> Tensor + python_module: fft + variants: function + +## Functions for linear algebra and the torch.linalg namespace +# Note [linalg namespace binding] +# Functions in the linalg python module should have their names start with +# "linalg_" and be bound to the desired Python name in +# torch/linalg/__init__.py, and the desired C++ name in torch/csrc/api/include/torch/linalg.h. +# The "linalg_" names should be hidden from the user and not documented. +# +# See linalg_det as an example. + +# "_ex" stands for experimental +- func: linalg_cholesky_ex(Tensor self, *, bool upper=False, bool check_errors=False) -> (Tensor L, Tensor info) + python_module: linalg + structured_delegate: linalg_cholesky_ex.L + +- func: linalg_cholesky_ex.L(Tensor self, *, bool upper=False, bool check_errors=False, Tensor(a!) L, Tensor(b!) info) -> (Tensor(a!) L, Tensor(b!) info) + python_module: linalg + structured: True + dispatch: + CPU, CUDA: linalg_cholesky_ex_out + +- func: linalg_cholesky(Tensor self, *, bool upper=False) -> Tensor + python_module: linalg + +- func: linalg_cholesky.out(Tensor self, *, bool upper=False, Tensor(a!) out) -> Tensor(a!) + python_module: linalg + +- func: linalg_cross(Tensor self, Tensor other, *, int dim=-1) -> Tensor + python_module: linalg + variants: function + structured_delegate: linalg_cross.out + dispatch: + ZeroTensor: linalg_cross_zerotensor + +- func: linalg_cross.out(Tensor self, Tensor other, *, int dim=-1, Tensor(a!) out) -> Tensor(a!) + python_module: linalg + structured: True + dispatch: + CPU, CUDA, MPS: linalg_cross_out + +# linalg.lu_factor +- func: linalg_lu_factor(Tensor A, *, bool pivot=True) -> (Tensor LU, Tensor pivots) + python_module: linalg + variants: function + +- func: linalg_lu_factor.out(Tensor A, *, bool pivot=True, Tensor(a!) LU, Tensor(b!) pivots) -> (Tensor(a!) LU, Tensor(b!) pivots) + python_module: linalg + variants: function + +- func: linalg_lu_factor_ex(Tensor A, *, bool pivot=True, bool check_errors=False) -> (Tensor LU, Tensor pivots, Tensor info) + python_module: linalg + structured_delegate: linalg_lu_factor_ex.out + variants: function + +- func: linalg_lu_factor_ex.out(Tensor A, *, bool pivot=True, bool check_errors=False, Tensor(a!) LU, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!) LU, Tensor(b!) pivots, Tensor(c!) info) + python_module: linalg + variants: function + structured: True + dispatch: + CPU, CUDA: linalg_lu_factor_ex_out + +# linalg.lu +- func: linalg_lu(Tensor A, *, bool pivot=True) -> (Tensor P, Tensor L, Tensor U) + python_module: linalg + structured_delegate: linalg_lu.out + variants: function + +- func: linalg_lu.out(Tensor A, *, bool pivot=True, Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -> (Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) + python_module: linalg + variants: function + structured: True + dispatch: + CPU, CUDA: linalg_lu_out + +# linalg.lu_solve +- func: linalg_lu_solve(Tensor LU, Tensor pivots, Tensor B, *, bool left=True, bool adjoint=False) -> Tensor + python_module: linalg + structured_delegate: linalg_lu_solve.out + variants: function + +- func: linalg_lu_solve.out(Tensor LU, Tensor pivots, Tensor B, *, bool left=True, bool adjoint=False, Tensor(a!) out) -> Tensor(a!) 
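# Editorial note: the fft_* schemas above all carry `python_module: fft`, i.e. they bind into the torch.fft namespace. A minimal, hedged usage sketch (tensor names and shapes are invented for illustration):
#
#   import torch
#
#   x = torch.randn(4, 128, 128)                           # a batch of 2-D real signals
#   X = torch.fft.rfft2(x, s=(128, 128), norm="ortho")     # real-to-complex 2-D transform
#   y = torch.fft.irfft2(X, s=(128, 128), norm="ortho")    # round-trips back to x, up to fp error
#   grid = torch.fft.fftshift(torch.fft.fftfreq(128, d=0.01))  # zero-centered frequency grid
#
# The optional SymInt? n / SymInt[1]? s arguments in these schemas are the n/s signal-size parameters of the corresponding Python calls.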
+ python_module: linalg + variants: function + structured: True + dispatch: + CPU, CUDA: linalg_lu_solve_out + +# linalg.det +- func: _linalg_det(Tensor A) -> (Tensor result, Tensor LU, Tensor pivots) + structured_delegate: _linalg_det.result + +- func: _linalg_det.result(Tensor A, *, Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots) -> (Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots) + structured: True + dispatch: + CPU, CUDA: _linalg_det_out + +- func: linalg_det(Tensor A) -> Tensor + python_module: linalg + variants: function + +- func: linalg_det.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!) + python_module: linalg + +# torch.det, alias for torch.linalg.det +- func: det(Tensor self) -> Tensor + variants: function, method + +- func: linalg_ldl_factor_ex(Tensor self, *, bool hermitian=False, bool check_errors=False) -> (Tensor LD, Tensor pivots, Tensor info) + structured_delegate: linalg_ldl_factor_ex.out + python_module: linalg + variants: function + +- func: linalg_ldl_factor_ex.out(Tensor self, *, bool hermitian=False, bool check_errors=False, Tensor(a!) LD, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!) LD, Tensor(b!) pivots, Tensor(c!) info) + structured: True + python_module: linalg + variants: function + dispatch: + CPU, CUDA: linalg_ldl_factor_ex_out + +- func: linalg_ldl_factor(Tensor self, *, bool hermitian=False) -> (Tensor LD, Tensor pivots) + python_module: linalg + variants: function + +- func: linalg_ldl_factor.out(Tensor self, *, bool hermitian=False, Tensor(a!) LD, Tensor(b!) pivots) -> (Tensor(a!) LD, Tensor(b!) pivots) + python_module: linalg + variants: function + +- func: linalg_ldl_solve(Tensor LD, Tensor pivots, Tensor B, *, bool hermitian=False) -> Tensor + structured_delegate: linalg_ldl_solve.out + python_module: linalg + variants: function + +- func: linalg_ldl_solve.out(Tensor LD, Tensor pivots, Tensor B, *, bool hermitian=False, Tensor(a!) out) -> Tensor(a!) + structured: True + python_module: linalg + variants: function + dispatch: + CPU, CUDA: linalg_ldl_solve_out + +- func: linalg_lstsq(Tensor self, Tensor b, float? rcond=None, *, str? driver=None) -> (Tensor solution, Tensor residuals, Tensor rank, Tensor singular_values) + python_module: linalg + variants: function + dispatch: + CompositeExplicitAutograd: linalg_lstsq + tags: dynamic_output_shape + +- func: linalg_lstsq.out(Tensor self, Tensor b, float? rcond=None, *, str? driver=None, Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values) -> (Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values) + python_module: linalg + variants: function + dispatch: + CPU, CUDA: linalg_lstsq_out + tags: dynamic_output_shape + +# torch.linalg.matmul, alias for torch.matmul +- func: linalg_matmul(Tensor self, Tensor other) -> Tensor + python_module: linalg + variants: function + +- func: linalg_matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + python_module: linalg + +- func: linalg_vecdot(Tensor x, Tensor y, *, int dim=-1) -> Tensor + python_module: linalg + variants: function + +- func: linalg_vecdot.out(Tensor x, Tensor y, *, int dim=-1, Tensor(a!) out) -> Tensor(a!) 
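# Editorial note: the linalg_* schemas in this region (Cholesky and LU above, LDL and least-squares just above, eigendecomposition and SVD a bit further down) back the torch.linalg namespace. A hedged sketch with invented inputs; the "_ex" variants return an extra `info` tensor instead of raising on failure:
#
#   import torch
#
#   A = torch.randn(3, 3) + 3.0 * torch.eye(3)      # comfortably non-singular
#   B = torch.randn(3, 2)
#
#   L, info = torch.linalg.cholesky_ex(A @ A.T)     # info != 0 flags a failed factorization
#   LU, pivots = torch.linalg.lu_factor(A)
#   X = torch.linalg.lu_solve(LU, pivots, B)        # solves A X = B from the LU factors
#   sol = torch.linalg.lstsq(A, B).solution         # least-squares driver (linalg_lstsq above)
#   w, V = torch.linalg.eigh(A @ A.T)               # symmetric eigendecomposition (entries below)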
+ python_module: linalg + +- func: linalg_matrix_exp(Tensor self) -> Tensor + python_module: linalg + variants: function + dispatch: + CPU, CUDA: linalg_matrix_exp + autogen: linalg_matrix_exp.out + +- func: _linalg_slogdet(Tensor A) -> (Tensor sign, Tensor logabsdet, Tensor LU, Tensor pivots) + structured_delegate: _linalg_slogdet.sign + +- func: _linalg_slogdet.sign(Tensor A, *, Tensor(a!) sign, Tensor(b!) logabsdet, Tensor(c!) LU, Tensor(d!) pivots) -> (Tensor(a!) sign, Tensor(b!) logabsdet, Tensor(c!) LU, Tensor(d!) pivots) + structured: True + dispatch: + CPU, CUDA: _linalg_slogdet_out + +- func: linalg_slogdet(Tensor A) -> (Tensor sign, Tensor logabsdet) + python_module: linalg + +- func: linalg_slogdet.out(Tensor A, *, Tensor(a!) sign, Tensor(b!) logabsdet) -> (Tensor(a!) sign, Tensor(b!) logabsdet) + python_module: linalg + +- func: slogdet(Tensor self) -> (Tensor sign, Tensor logabsdet) + variants: function, method + +- func: slogdet.out(Tensor self, *, Tensor(a!) sign, Tensor(b!) logabsdet) -> (Tensor(a!) sign, Tensor(b!) logabsdet) + variants: function + +- func: logdet(Tensor self) -> Tensor + variants: function, method + +- func: linalg_eig(Tensor self) -> (Tensor eigenvalues, Tensor eigenvectors) + python_module: linalg + variants: function + dispatch: + CPU, CUDA: linalg_eig + +- func: linalg_eig.out(Tensor self, *, Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) + python_module: linalg + dispatch: + CPU, CUDA: linalg_eig_out + +- func: _linalg_eigvals(Tensor self) -> Tensor + python_module: linalg + dispatch: + CPU, CUDA: _linalg_eigvals + +- func: linalg_eigvals(Tensor self) -> Tensor + python_module: linalg + +- func: linalg_eigvals.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + python_module: linalg + dispatch: + CPU, CUDA: linalg_eigvals_out + +# This function exposes the `compute_v` flag, which is then used to implement `linalg.eigh` and +# `linalg.eigvalsh` as composite functions that call this one +- func: _linalg_eigh(Tensor A, str UPLO="L", bool compute_v=True) -> (Tensor eigenvalues, Tensor eigenvectors) + structured_delegate: _linalg_eigh.eigenvalues + +- func: _linalg_eigh.eigenvalues(Tensor A, str UPLO="L", bool compute_v=True, *, Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) + structured: True + dispatch: + CPU, CUDA: _linalg_eigh_out + +- func: linalg_eigh(Tensor self, str UPLO="L") -> (Tensor eigenvalues, Tensor eigenvectors) + python_module: linalg + +- func: linalg_eigh.eigvals(Tensor self, str UPLO="L", *, Tensor(a!) eigvals, Tensor(b!) eigvecs) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) + python_module: linalg + +- func: linalg_eigvalsh(Tensor self, str UPLO="L") -> Tensor + python_module: linalg + +- func: linalg_eigvalsh.out(Tensor self, str UPLO="L", *, Tensor(a!) out) -> Tensor(a!) + python_module: linalg + +- func: linalg_householder_product(Tensor input, Tensor tau) -> Tensor + python_module: linalg + variants: function + dispatch: + CPU, CUDA: linalg_householder_product + +- func: linalg_householder_product.out(Tensor input, Tensor tau, *, Tensor(a!) out) -> Tensor(a!) + python_module: linalg + dispatch: + CPU, CUDA: linalg_householder_product_out + +- func: linalg_inv_ex(Tensor A, *, bool check_errors=False) -> (Tensor inverse, Tensor info) + python_module: linalg + structured_delegate: linalg_inv_ex.inverse + +- func: linalg_inv_ex.inverse(Tensor A, *, bool check_errors=False, Tensor(a!) inverse, Tensor(b!)
info) -> (Tensor(a!) inverse, Tensor(b!) info) + python_module: linalg + structured: True + dispatch: + CPU, CUDA: linalg_inv_ex_out + MPS: linalg_inv_ex_out_mps + +- func: linalg_inv(Tensor A) -> Tensor + python_module: linalg + +- func: linalg_inv.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!) + python_module: linalg + +- func: inverse(Tensor self) -> Tensor + variants: function, method + +- func: inverse.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + +- func: inner(Tensor self, Tensor other) -> Tensor + variants: function, method + +- func: inner.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + +- func: outer(Tensor self, Tensor vec2) -> Tensor + variants: function, method + +- func: outer.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!) + +# torch.ger, alias for torch.outer +- func: ger(Tensor self, Tensor vec2) -> Tensor + variants: function, method + +- func: ger.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!) + +- func: linalg_norm(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor + python_module: linalg + variants: function + +- func: linalg_norm.ord_str(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor + python_module: linalg + variants: function + +- func: linalg_norm.out(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + python_module: linalg + variants: function + +- func: linalg_norm.ord_str_out(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + python_module: linalg + variants: function + +- func: linalg_vector_norm(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor + python_module: linalg + variants: function + structured_delegate: linalg_vector_norm.out + +- func: linalg_vector_norm.out(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + python_module: linalg + structured: True + dispatch: + CPU, CUDA: linalg_vector_norm_out + MPS: linalg_vector_norm_out_mps + +- func: linalg_matrix_norm(Tensor self, Scalar ord, int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None) -> Tensor + python_module: linalg + +- func: linalg_matrix_norm.out(Tensor self, Scalar ord, int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + python_module: linalg + +- func: linalg_matrix_norm.str_ord(Tensor self, str ord='fro', int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None) -> Tensor + python_module: linalg + +- func: linalg_matrix_norm.str_ord_out(Tensor self, str ord='fro', int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + python_module: linalg + +# This function exposes the `compute_uv` flag, which is then used to implement `linalg.svd` and +# `linalg.svdvals` as composite functions that call this one +- func: _linalg_svd(Tensor A, bool full_matrices=False, bool compute_uv=True, *, str? driver=None) -> (Tensor U, Tensor S, Tensor Vh) + variants: function + structured_delegate: _linalg_svd.U + +- func: _linalg_svd.U(Tensor A, bool full_matrices=False, bool compute_uv=True, *, str? driver=None, Tensor(a!) U, Tensor(b!) S, Tensor(c!)
Vh) + structured: True + dispatch: + CPU, CUDA: _linalg_svd_out + +- func: linalg_svd(Tensor A, bool full_matrices=True, *, str? driver=None) -> (Tensor U, Tensor S, Tensor Vh) + python_module: linalg + variants: function + +- func: linalg_svd.U(Tensor A, bool full_matrices=True, *, str? driver=None, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) + python_module: linalg + variants: function + +- func: linalg_svdvals(Tensor A, *, str? driver=None) -> Tensor + python_module: linalg + variants: function + +- func: linalg_svdvals.out(Tensor A, *, str? driver=None, Tensor(a!) out) -> Tensor(a!) + python_module: linalg + variants: function + +- func: linalg_cond(Tensor self, Scalar? p=None) -> Tensor + python_module: linalg + variants: function + +- func: linalg_cond.out(Tensor self, Scalar? p=None, *, Tensor(a!) out) -> Tensor(a!) + python_module: linalg + variants: function + +- func: linalg_cond.p_str(Tensor self, str p) -> Tensor + python_module: linalg + variants: function + +- func: linalg_cond.p_str_out(Tensor self, str p, *, Tensor(a!) out) -> Tensor(a!) + python_module: linalg + variants: function + +- func: linalg_pinv.atol_rtol_tensor(Tensor self, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False) -> Tensor + python_module: linalg + variants: function + dispatch: + # calls svd, which calls mH() (view op) + # also calls narrow() + CompositeExplicitAutogradNonFunctional: linalg_pinv + +- func: linalg_pinv.atol_rtol_tensor_out(Tensor self, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!) + python_module: linalg + variants: function + dispatch: + CompositeExplicitAutograd: linalg_pinv_out + +- func: linalg_pinv.atol_rtol_float(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False) -> Tensor + cpp_no_default_args: ['atol', 'rtol'] + python_module: linalg + variants: function + +- func: linalg_pinv.atol_rtol_float_out(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!) + cpp_no_default_args: ['atol', 'rtol'] + python_module: linalg + variants: function + +- func: linalg_pinv(Tensor self, float rcond, bool hermitian=False) -> Tensor + python_module: linalg + variants: function + +- func: linalg_pinv.rcond_tensor(Tensor self, Tensor rcond, bool hermitian=False) -> Tensor + python_module: linalg + variants: function + +- func: linalg_pinv.out(Tensor self, float rcond, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!) + python_module: linalg + variants: function + +- func: linalg_pinv.out_rcond_tensor(Tensor self, Tensor rcond, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!) + python_module: linalg + variants: function + +- func: _linalg_solve_ex(Tensor A, Tensor B, *, bool left=True, bool check_errors=False) -> (Tensor result, Tensor LU, Tensor pivots, Tensor info) + structured_delegate: _linalg_solve_ex.result + +- func: _linalg_solve_ex.result(Tensor A, Tensor B, *, bool left=True, bool check_errors=False, Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots, Tensor(d!) info) -> (Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots, Tensor(d!) info) + structured: True + dispatch: + CPU, CUDA: _linalg_solve_ex_out + +- func: linalg_solve_ex(Tensor A, Tensor B, *, bool left=True, bool check_errors=False) -> (Tensor result, Tensor info) + python_module: linalg + +- func: linalg_solve_ex.out(Tensor A, Tensor B, *, bool left=True, bool check_errors=False, Tensor(a!) result, Tensor(b!) info) -> (Tensor(a!) 
result, Tensor(b!) info) + python_module: linalg + +- func: linalg_solve(Tensor A, Tensor B, *, bool left=True) -> Tensor + python_module: linalg + +- func: linalg_solve.out(Tensor A, Tensor B, *, bool left=True, Tensor(a!) out) -> Tensor(a!) + python_module: linalg + +- func: linalg_tensorinv(Tensor self, int ind=2) -> Tensor + python_module: linalg + variants: function + +- func: linalg_tensorinv.out(Tensor self, int ind=2, *, Tensor(a!) out) -> Tensor(a!) + python_module: linalg + variants: function + +- func: linalg_tensorsolve(Tensor self, Tensor other, int[]? dims=None) -> Tensor + python_module: linalg + variants: function + +- func: linalg_tensorsolve.out(Tensor self, Tensor other, int[]? dims=None, *, Tensor(a!) out) -> Tensor(a!) + python_module: linalg + variants: function + +- func: linalg_qr(Tensor A, str mode='reduced') -> (Tensor Q, Tensor R) + python_module: linalg + variants: function + structured_delegate: linalg_qr.out + +- func: linalg_qr.out(Tensor A, str mode='reduced', *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R) + python_module: linalg + structured: True + dispatch: + CPU, CUDA: linalg_qr_out + +- func: linalg_matrix_power(Tensor self, int n) -> Tensor + python_module: linalg + +- func: linalg_matrix_power.out(Tensor self, int n, *, Tensor(a!) out) -> Tensor(a!) + python_module: linalg + +- func: linalg_matrix_rank.atol_rtol_tensor(Tensor input, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False) -> Tensor + python_module: linalg + variants: function + +- func: linalg_matrix_rank.atol_rtol_tensor_out(Tensor input, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!) + python_module: linalg + variants: function + +- func: linalg_matrix_rank.atol_rtol_float(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False) -> Tensor + cpp_no_default_args: ['atol', 'rtol'] + python_module: linalg + variants: function + +- func: linalg_matrix_rank.atol_rtol_float_out(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!) + cpp_no_default_args: ['atol', 'rtol'] + python_module: linalg + variants: function + +- func: linalg_matrix_rank(Tensor self, float tol, bool hermitian=False) -> Tensor + python_module: linalg + variants: function + +- func: linalg_matrix_rank.out(Tensor self, float tol, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!) + python_module: linalg + variants: function + +- func: linalg_matrix_rank.tol_tensor(Tensor input, Tensor tol, bool hermitian=False) -> Tensor + python_module: linalg + variants: function + +- func: linalg_matrix_rank.out_tol_tensor(Tensor input, Tensor tol, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!) + python_module: linalg + variants: function + +- func: linalg_multi_dot(Tensor[] tensors) -> Tensor + python_module: linalg + +- func: linalg_multi_dot.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) + python_module: linalg + +## Functions related to the `torch.nested` namespace +# Note [nested namespace binding] +# Functions in the nested python module should have their names start with +# "nested_" underscore and be bound to the desired Python name in +# torch/nested/__init__.py, and the desired C++ name in torch/csrc/api/include/torch/nested.h. +# The "nested_" names should be hidden from the user and not documented. + +- func: nested_to_padded_tensor(Tensor self, float padding, int[]? 
output_size=None) -> Tensor + python_module: nested + variants: function + +## Functions that are only for testing +# It is undocumented and should not be used outside of tests. +- func: _test_serialization_subcmul(Tensor self, Tensor other, Scalar alpha=1) -> Tensor + +# Note: for testing COW materialization within `at::parallel_for` loop function +- func: _test_parallel_materialize(Tensor self, int num_parallel, bool skip_first=False) -> Tensor + variants: function + dispatch: + CompositeExplicitAutograd: _test_parallel_materialize + +# Note: this function is only for testing. +- func: _test_optional_intlist(Tensor values, int[]? addends) -> Tensor + python_module: nn + dispatch: + CPU: _test_optional_intlist + autogen: _test_optional_intlist.out + +# Note: this function is only for testing. +- func: _test_optional_filled_intlist(Tensor values, int[2]? addends) -> Tensor + python_module: nn + dispatch: + CPU: _test_optional_intlist + autogen: _test_optional_filled_intlist.out + +# Note: this function is only for testing. +- func: _test_optional_floatlist(Tensor values, float[]? addends) -> Tensor + python_module: nn + dispatch: + CPU: _test_optional_floatlist + autogen: _test_optional_floatlist.out + +# Note: this function is only for testing. +- func: _test_string_default(Tensor dummy, str a="\"'\\", str b='"\'\\') -> Tensor + python_module: nn + +# Note: this function is only for testing. +- func: _test_ambiguous_defaults.a(Tensor dummy, int a=1, int b=1) -> Tensor + python_module: nn + +# Note: this function is only for testing. +- func: _test_ambiguous_defaults.b(Tensor dummy, int a=2, str b="2") -> Tensor + cpp_no_default_args: ['a', 'b'] + python_module: nn + +# Note: this function is only for testing. +- func: _test_warn_in_autograd(Tensor self) -> Tensor + python_module: nn + dispatch: + CompositeExplicitAutograd: _test_warn_in_autograd + autogen: _test_warn_in_autograd.out + +# Note: this function is only for testing. +- func: _test_autograd_multiple_dispatch.fullcoverage(Tensor self) -> Tensor + dispatch: + # the NestedTensor keys are necessary because NestedTensor has been removed + # from the CompositeExplicitAutograd keyset see Note [NestedTensor Not Included in Backend Keys] + CompositeExplicitAutograd, NestedTensorCPU, NestedTensorCUDA: _test_autograd_multiple_dispatch_fullcoverage + autogen: _test_autograd_multiple_dispatch.fullcoverage_out + +# Note: this function is only for testing. +- func: _test_autograd_multiple_dispatch.ntonly(Tensor self, bool b) -> Tensor + dispatch: + CompositeImplicitAutograd, NestedTensorCPU, NestedTensorCUDA: _test_autograd_multiple_dispatch_ntonly + +# Note: this function is only for testing. +- func: _test_autograd_multiple_dispatch_view(Tensor(a) self) -> Tensor(a) + dispatch: + CompositeExplicitAutograd: _test_autograd_multiple_dispatch_view + +# Note: this function is only for testing. +- func: _test_autograd_multiple_dispatch_view_copy(Tensor self) -> Tensor + variants: function + dispatch: + CompositeExplicitAutogradNonFunctional: _test_autograd_multiple_dispatch_view_copy + tags: view_copy + autogen: _test_autograd_multiple_dispatch_view_copy.out + +- func: segment_reduce(Tensor data, str reduce, *, Tensor? lengths=None, Tensor? indices=None, Tensor? offsets=None, int axis=0, bool unsafe=False, Scalar? initial=None) -> Tensor + variants: function + dispatch: + CPU, CUDA: segment_reduce_kernel + autogen: segment_reduce.out + +- func: _segment_reduce_backward(Tensor grad, Tensor output, Tensor data, str reduce, *, Tensor? 
lengths=None, Tensor? offsets=None, int axis=0, Scalar? initial=None) -> Tensor + variants: function + dispatch: + CPU, CUDA: _segment_reduce_backward_kernel + autogen: _segment_reduce_backward.out + +- func: pad_sequence(Tensor[] sequences, bool batch_first=False, float padding_value=0.0) -> Tensor + python_module: nn + variants: function + +- func: flatten_dense_tensors(Tensor[] tensors) -> Tensor + variants: function + python_module: nn + +- func: unflatten_dense_tensors(Tensor flat, Tensor[] tensors) -> Tensor[] + variants: function + python_module: nn + +- func: _nested_tensor_from_tensor_list(Tensor[] list, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + variants: function + dispatch: + CompositeExplicitAutograd: _nested_tensor_from_tensor_list + autogen: _nested_tensor_from_tensor_list.out + +- func: _fw_primal_copy(Tensor self, int level) -> Tensor + variants: function + dispatch: + CompositeExplicitAutogradNonFunctional: _fw_primal_copy + tags: view_copy + autogen: _fw_primal_copy.out + +- func: _make_dual_copy(Tensor primal, Tensor tangent, int level) -> Tensor + variants: function + dispatch: + CompositeExplicitAutogradNonFunctional: _make_dual_copy + tags: view_copy + autogen: _make_dual_copy.out + +- func: view_as_real_copy(Tensor self) -> Tensor + variants: function + dispatch: + CompositeExplicitAutogradNonFunctional: view_as_real_copy + tags: view_copy + autogen: view_as_real_copy.out + +- func: view_as_complex_copy(Tensor self) -> Tensor + variants: function + dispatch: + CompositeExplicitAutogradNonFunctional: view_as_complex_copy + tags: view_copy + autogen: view_as_complex_copy.out + +- func: _conj_copy(Tensor self) -> Tensor + variants: function + dispatch: + CompositeExplicitAutogradNonFunctional: _conj_copy + tags: view_copy + autogen: _conj_copy.out + +- func: _neg_view_copy(Tensor self) -> Tensor + variants: function + dispatch: + CompositeExplicitAutogradNonFunctional: _neg_view_copy + tags: view_copy + autogen: _neg_view_copy.out + +- func: as_strided_copy(Tensor self, SymInt[] size, SymInt[] stride, SymInt? 
storage_offset=None) -> Tensor + variants: function + dispatch: + CompositeExplicitAutogradNonFunctional: as_strided_copy_symint + tags: view_copy + autogen: as_strided_copy.out + +- func: _sparse_broadcast_to_copy(Tensor self, int[] size) -> Tensor + variants: function + dispatch: + CompositeExplicitAutogradNonFunctional: _sparse_broadcast_to_copy + tags: view_copy + autogen: _sparse_broadcast_to_copy.out + +- func: diagonal_copy(Tensor self, int offset=0, int dim1=0, int dim2=1) -> Tensor + variants: function + dispatch: + CompositeExplicitAutogradNonFunctional: diagonal_copy + tags: view_copy + autogen: diagonal_copy.out + +- func: expand_copy(Tensor self, SymInt[] size, *, bool implicit=False) -> Tensor + variants: function + dispatch: + CompositeExplicitAutogradNonFunctional: expand_copy_symint + tags: view_copy + autogen: expand_copy.out + +- func: permute_copy(Tensor self, int[] dims) -> Tensor + variants: function + dispatch: + CompositeExplicitAutogradNonFunctional: permute_copy + tags: view_copy + autogen: permute_copy.out + +- func: _reshape_alias_copy(Tensor self, SymInt[] size, SymInt[] stride) -> Tensor + variants: function + dispatch: + CompositeExplicitAutogradNonFunctional: _reshape_alias_copy_symint + tags: view_copy + autogen: _reshape_alias_copy.out + +- func: select_copy.int(Tensor self, int dim, SymInt index) -> Tensor + variants: function + dispatch: + CompositeExplicitAutogradNonFunctional: select_copy_symint + SparseCsrCPU, SparseCsrCUDA: select_copy_sparse_csr + tags: view_copy + autogen: select_copy.int_out + +- func: detach_copy(Tensor self) -> Tensor + variants: function + dispatch: + CompositeExplicitAutogradNonFunctional: detach_copy + tags: view_copy + autogen: detach_copy.out + +- func: slice_copy.Tensor(Tensor self, int dim=0, SymInt? start=None, SymInt? 
end=None, SymInt step=1) -> Tensor + variants: function + dispatch: + CompositeExplicitAutogradNonFunctional: slice_copy_Tensor_symint + tags: view_copy + autogen: slice_copy.Tensor_out + +- func: split_copy.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[] + variants: function + dispatch: + CompositeExplicitAutogradNonFunctional: split_copy_Tensor_symint + tags: view_copy + +- func: split_with_sizes_copy(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[] + variants: function + dispatch: + CompositeExplicitAutogradNonFunctional: split_with_sizes_copy_symint + tags: view_copy + +- func: squeeze_copy(Tensor self) -> Tensor + variants: function + dispatch: + CompositeExplicitAutogradNonFunctional: squeeze_copy + tags: view_copy + autogen: squeeze_copy.out + +- func: squeeze_copy.dim(Tensor self, int dim) -> Tensor + variants: function + dispatch: + CompositeExplicitAutogradNonFunctional: squeeze_copy_dim + tags: view_copy + autogen: squeeze_copy.dim_out + +- func: squeeze_copy.dims(Tensor self, int[] dim) -> Tensor + variants: function + dispatch: + CompositeExplicitAutogradNonFunctional: squeeze_copy_dims + tags: view_copy + autogen: squeeze_copy.dims_out + +- func: t_copy(Tensor self) -> Tensor + variants: function + dispatch: + CompositeExplicitAutogradNonFunctional: t_copy + tags: view_copy + autogen: t_copy.out + +- func: transpose_copy.int(Tensor self, int dim0, int dim1) -> Tensor + variants: function + dispatch: + CompositeExplicitAutogradNonFunctional: transpose_copy_int + tags: view_copy + autogen: transpose_copy.int_out + +- func: unsqueeze_copy(Tensor self, int dim) -> Tensor + variants: function + dispatch: + CompositeExplicitAutogradNonFunctional: unsqueeze_copy + tags: view_copy + autogen: unsqueeze_copy.out + +- func: _indices_copy(Tensor self) -> Tensor + variants: function + dispatch: + CompositeExplicitAutogradNonFunctional: _indices_copy + tags: view_copy + autogen: _indices_copy.out + +- func: _values_copy(Tensor self) -> Tensor + variants: function + dispatch: + CompositeExplicitAutogradNonFunctional: _values_copy + tags: view_copy + autogen: _values_copy.out + +- func: indices_copy(Tensor self) -> Tensor + variants: function + dispatch: + CompositeExplicitAutogradNonFunctional: indices_copy + tags: view_copy + autogen: indices_copy.out + +- func: values_copy(Tensor self) -> Tensor + variants: function + dispatch: + CompositeExplicitAutogradNonFunctional: values_copy + tags: view_copy + autogen: values_copy.out + +- func: crow_indices_copy(Tensor self) -> Tensor + variants: function + dispatch: + CompositeExplicitAutogradNonFunctional: crow_indices_copy + tags: view_copy + autogen: crow_indices_copy.out + +- func: col_indices_copy(Tensor self) -> Tensor + variants: function + dispatch: + CompositeExplicitAutogradNonFunctional: col_indices_copy + tags: view_copy + autogen: col_indices_copy.out + +- func: ccol_indices_copy(Tensor self) -> Tensor + variants: function + dispatch: + CompositeExplicitAutogradNonFunctional: ccol_indices_copy + tags: view_copy + autogen: ccol_indices_copy.out + +- func: row_indices_copy(Tensor self) -> Tensor + variants: function + dispatch: + CompositeExplicitAutogradNonFunctional: row_indices_copy + tags: view_copy + autogen: row_indices_copy.out + +- func: unbind_copy.int(Tensor self, int dim=0) -> Tensor[] + variants: function + dispatch: + CompositeExplicitAutogradNonFunctional: unbind_copy_int + tags: view_copy + +- func: unbind_copy.int_out(Tensor self, int dim=0, *, Tensor(a!)[] out) -> () + variants: function + 
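# Editorial note: the nested_to_padded_tensor and pad_sequence schemas registered earlier in this hunk surface roughly as sketched below (hedged; the ragged batch is invented and the torch.nested API is still an evolving feature):
#
#   import torch
#
#   seqs = [torch.randn(3, 8), torch.randn(5, 8)]   # ragged batch of sequences
#   padded = torch.nn.utils.rnn.pad_sequence(seqs, batch_first=True, padding_value=0.0)
#
#   nt = torch.nested.nested_tensor(seqs)                    # keeps the ragged structure
#   dense = torch.nested.to_padded_tensor(nt, padding=0.0)   # padded dense copy, like `padded`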
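# Editorial note: the *_copy schemas in this block (tagged view_copy) are the non-aliasing counterparts of the matching view ops; functionalization and export replace views with them. A short sketch of the observable difference, assuming the copy variants are exposed on the top-level torch module as usual:
#
#   import torch
#
#   x = torch.zeros(2, 3)
#   v = torch.diagonal(x)        # a view: writes through v are visible in x
#   c = torch.diagonal_copy(x)   # an independent tensor with the same values
#   v += 1.0
#   assert x[0, 0] == 1.0        # the view aliased x ...
#   assert c[0] == 0.0           # ... the copy did not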
dispatch: + CompositeExplicitAutograd: unbind_copy_int_out + +- func: split_copy.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> () + variants: function + dispatch: + CompositeExplicitAutograd: split_copy_Tensor_out + + +- func: split_with_sizes_copy.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> () + variants: function + dispatch: + CompositeExplicitAutograd: split_with_sizes_copy_out + CUDA: split_with_sizes_copy_out_cuda + +- func: view_copy(Tensor self, SymInt[] size) -> Tensor + variants: function + dispatch: + CompositeExplicitAutogradNonFunctional: view_copy_symint + tags: view_copy + autogen: view_copy.out + +- func: view_copy.dtype(Tensor self, ScalarType dtype) -> Tensor + variants: function + dispatch: + CompositeExplicitAutogradNonFunctional: view_copy_dtype + tags: view_copy + autogen: view_copy.dtype_out + +- func: unfold_copy(Tensor self, int dimension, int size, int step) -> Tensor + variants: function + dispatch: + CompositeExplicitAutogradNonFunctional: unfold_copy + tags: view_copy + autogen: unfold_copy.out + +- func: alias_copy(Tensor self) -> Tensor + variants: function + dispatch: + CompositeExplicitAutogradNonFunctional: alias_copy + tags: view_copy + autogen: alias_copy.out + +- func: to_padded_tensor(Tensor self, float padding, SymInt[]? output_size=None) -> Tensor + variants: method + dispatch: + NestedTensorCPU: NestedTensor_to_padded_tensor_generic + NestedTensorCUDA: NestedTensor_to_padded_tensor_cuda + autogen: to_padded_tensor.out + +- func: _nested_tensor_softmax_with_shape(Tensor self, Tensor query) -> Tensor + dispatch: + NestedTensorCPU: NestedTensor_softmax_dropout + NestedTensorCUDA: NestedTensor_softmax_dropout_cuda + tags: nondeterministic_seeded + +# Apparently, putting "forward" in the name will cause Python bindings to be skipped, so "fwd" it is. +- func: _transformer_encoder_layer_fwd(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, int? mask_type=None) -> Tensor + variants: function + dispatch: + CPU, CUDA, NestedTensorCPU, NestedTensorCUDA: transformer_encoder_layer_forward + autogen: _transformer_encoder_layer_fwd.out + +- func: _native_multi_head_attention(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, bool need_weights=True, bool average_attn_weights=True, int? mask_type=None) -> (Tensor, Tensor) + variants: function + dispatch: + CPU, NestedTensorCPU: native_multi_head_attention_cpu + CUDA, NestedTensorCUDA: native_multi_head_attention_cuda + autogen: _native_multi_head_attention.out + +- func: scaled_dot_product_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False, *, float? scale=None) -> Tensor + python_module: nn + variants: function + autogen: scaled_dot_product_attention.out + tags: nondeterministic_seeded + +# This aten function is kept so that we can test the choice function from Python +- func: _fused_sdp_choice(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False, *, float? 
scale=None) -> int + dispatch: + Meta: _fused_sdp_choice_meta + CPU, NestedTensorCPU: _fused_sdp_choice_cpp + CUDA, NestedTensorCUDA: _fused_sdp_choice_cuda + tags: nondeterministic_seeded + +- func: _scaled_dot_product_attention_math(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False, Tensor? dropout_mask=None, *, float? scale=None) -> (Tensor, Tensor) + variants: function + tags: nondeterministic_seeded + +- func: _scaled_dot_product_flash_attention(Tensor query, Tensor key, Tensor value, float dropout_p=0.0, bool is_causal=False, bool return_debug_mask=False, *, float? scale=None) -> (Tensor output, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, Tensor philox_seed, Tensor philox_offset, Tensor debug_attn_mask) + dispatch: + CUDA: _scaled_dot_product_flash_attention_cuda + NestedTensorCUDA: _scaled_dot_product_flash_attention_nestedtensor_cuda + tags: nondeterministic_seeded + +- func: _scaled_dot_product_flash_attention_for_cpu(Tensor query, Tensor key, Tensor value, float dropout_p=0.0, bool is_causal=False, *, Tensor? attn_mask=None, float? scale=None) -> (Tensor output, Tensor logsumexp) + dispatch: + CPU: _scaled_dot_product_flash_attention_cpu + tags: nondeterministic_seeded + +- func: _scaled_dot_product_flash_attention_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, float dropout_p, bool is_causal, Tensor philox_seed, Tensor philox_offset, *, float? scale=None) -> (Tensor grad_query, Tensor grad_key, Tensor grad_value) + device_check: NoCheck + variants: function + dispatch: + CUDA: _scaled_dot_product_flash_attention_backward_cuda + NestedTensorCUDA: _scaled_dot_product_flash_attention_backward_nested + +- func: _scaled_dot_product_flash_attention_for_cpu_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, float dropout_p, bool is_causal, *, Tensor? attn_mask=None, float? scale=None) -> (Tensor grad_query, Tensor grad_key, Tensor grad_value) + device_check: NoCheck + variants: function + dispatch: + CPU: _scaled_dot_product_flash_attention_cpu_backward + +- func: _scaled_dot_product_efficient_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_bias, bool compute_log_sumexp, float dropout_p=0.0, bool is_causal=False, *, float? scale=None) -> (Tensor output, Tensor log_sumexp, Tensor philox_seed, Tensor philox_offset) + dispatch: + CUDA: _scaled_dot_product_efficient_attention_cuda + NestedTensorCUDA: _scaled_dot_product_efficient_attention_nestedtensor_cuda + tags: nondeterministic_seeded + +- func: _scaled_dot_product_efficient_attention_backward(Tensor grad_out_, Tensor query, Tensor key, Tensor value, Tensor attn_bias, Tensor out, Tensor logsumexp, Tensor philox_seed, Tensor philox_offset, float dropout_p, bool[4] grad_input_mask, bool is_causal=False, *, float? scale=None) -> (Tensor, Tensor, Tensor, Tensor) + device_check: NoCheck + dispatch: + CUDA: _scaled_dot_product_efficient_attention_backward_cuda + tags: nondeterministic_seeded + +- func: _scaled_dot_product_cudnn_attention(Tensor query, Tensor key, Tensor value, float dropout_p=0.0, bool is_causal=False, bool return_debug_mask=False, *, float? 
scale=None) -> (Tensor output, Tensor logsumexp, Tensor philox_seed, Tensor philox_offset) + dispatch: + CUDA: _scaled_dot_product_cudnn_attention_cuda + tags: nondeterministic_seeded + +- func: _flash_attention_forward(Tensor query, Tensor key, Tensor value, Tensor? cum_seq_q, Tensor? cum_seq_k, SymInt max_q, SymInt max_k, float dropout_p, bool is_causal, bool return_debug_mask, *, float? scale=None) -> (Tensor output, Tensor softmax_logsumexp, Tensor philox_seed, Tensor philox_offset, Tensor debug_attn_mask) + variants: function + dispatch: + CUDA: _flash_attention_forward + tags: nondeterministic_seeded + +- func: _flash_attention_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, float dropout_p, bool is_causal, Tensor philox_seed, Tensor philox_offset, *, float? scale=None) -> (Tensor, Tensor, Tensor) + device_check: NoCheck + variants: function + dispatch: + CUDA: _flash_attention_backward + +# Returns output, logsumexp if compute_logsumexp +- func: _efficient_attention_forward(Tensor query, Tensor key, Tensor value, Tensor? bias, Tensor? cu_seqlens_q, Tensor? cu_seqlens_k, int? max_seqlen_q, int? max_seqlen_k, float dropout_p, int custom_mask_type, bool compute_log_sumexp=False, *, float? scale=None, Tensor? causal_diagonal=None, Tensor? seqlen_k=None) -> (Tensor output, Tensor logsumexp, Tensor philox_seed, Tensor philox_offset, SymInt max_seqlen_batch_q, SymInt max_seqlen_batch_k) + variants: function + dispatch: + CUDA: _efficient_attention_forward + tags: nondeterministic_seeded + +- func: _efficient_attention_backward(Tensor grad_out_, Tensor query, Tensor key, Tensor value, Tensor? bias, Tensor out, Tensor? cu_seqlens_q, Tensor? cu_seqlens_k, SymInt max_seqlen_q, SymInt max_seqlen_k, Tensor logsumexp, float dropout_p, Tensor philox_seed, Tensor philox_offset, int custom_mask_type, bool bias_requires_grad, *, float? scale=None, int? num_splits_key=None) -> (Tensor, Tensor, Tensor, Tensor) + device_check: NoCheck + variants: function + dispatch: + CUDA: _efficient_attention_backward + +- func: _triton_scaled_dot_attention(Tensor q, Tensor k, Tensor v, float dropout_p=0.0) -> Tensor + variants: function + dispatch: + CUDA: triton_scaled_dot_attention + tags: nondeterministic_seeded + autogen: _triton_scaled_dot_attention.out + +- func: _fill_mem_eff_dropout_mask_(Tensor(a!) self, float dropout_p, int seed, int offset) -> Tensor(a!) + variants: function + dispatch: + CUDA: _fill_mem_eff_dropout_mask_ + tags: nondeterministic_seeded + +- func: _triton_multi_head_attention(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None) -> Tensor + variants: function + dispatch: + CUDA: triton_multi_head_attention + autogen: _triton_multi_head_attention.out + +- func: special_airy_ai(Tensor x) -> Tensor + python_module: special + structured_delegate: special_airy_ai.out + variants: function + tags: pointwise + +- func: special_airy_ai.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CPU, CUDA: special_airy_ai_out + python_module: special + structured_inherits: TensorIteratorBase + structured: True + variants: function + tags: pointwise + +- func: special_bessel_j0(Tensor self) -> Tensor + python_module: special + structured_delegate: special_bessel_j0.out + variants: function + tags: pointwise + +- func: special_bessel_j0.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!) + dispatch: + CPU, CUDA: special_bessel_j0_out + python_module: special + structured_inherits: TensorIteratorBase + structured: True + variants: function + tags: pointwise + +- func: special_bessel_j1(Tensor self) -> Tensor + python_module: special + structured_delegate: special_bessel_j1.out + variants: function + tags: pointwise + +- func: special_bessel_j1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CPU, CUDA: special_bessel_j1_out + python_module: special + structured_inherits: TensorIteratorBase + structured: True + variants: function + tags: pointwise + +- func: special_bessel_y0(Tensor self) -> Tensor + python_module: special + structured_delegate: special_bessel_y0.out + variants: function + tags: pointwise + +- func: special_bessel_y0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CPU, CUDA: special_bessel_y0_out + python_module: special + structured_inherits: TensorIteratorBase + structured: True + variants: function + tags: pointwise + +- func: special_bessel_y1(Tensor self) -> Tensor + python_module: special + structured_delegate: special_bessel_y1.out + variants: function + tags: pointwise + +- func: special_bessel_y1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CPU, CUDA: special_bessel_y1_out + python_module: special + structured_inherits: TensorIteratorBase + structured: True + variants: function + tags: pointwise + +- func: special_chebyshev_polynomial_t(Tensor x, Tensor n) -> Tensor + device_check: NoCheck + python_module: special + structured_delegate: special_chebyshev_polynomial_t.out + variants: function + tags: pointwise + +- func: special_chebyshev_polynomial_t.x_scalar(Scalar x, Tensor n) -> Tensor + dispatch: + CompositeExplicitAutograd: special_chebyshev_polynomial_t + device_check: NoCheck + python_module: special + variants: function + tags: pointwise + +- func: special_chebyshev_polynomial_t.n_scalar(Tensor x, Scalar n) -> Tensor + dispatch: + CompositeExplicitAutograd: special_chebyshev_polynomial_t + device_check: NoCheck + python_module: special + variants: function + tags: pointwise + +- func: special_chebyshev_polynomial_t.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck + dispatch: + CPU, CUDA: special_chebyshev_polynomial_t_out + python_module: special + structured_inherits: TensorIteratorBase + structured: True + variants: function + tags: pointwise + +- func: special_chebyshev_polynomial_t.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CompositeExplicitAutograd: special_chebyshev_polynomial_t_out + device_check: NoCheck + python_module: special + variants: function + tags: pointwise + +- func: special_chebyshev_polynomial_t.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) 
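# Editorial note: the scaled_dot_product_attention entry registered a little above (together with its flash, memory-efficient, and math backend kernels) is what torch.nn.functional.scaled_dot_product_attention dispatches to. Hedged sketch with invented shapes:
#
#   import torch
#   import torch.nn.functional as F
#
#   q = torch.randn(2, 8, 128, 64)   # (batch, heads, seq_len, head_dim)
#   k = torch.randn(2, 8, 128, 64)
#   v = torch.randn(2, 8, 128, 64)
#   out = F.scaled_dot_product_attention(q, k, v, is_causal=True)
#
# Which fused kernel actually runs is picked internally (_fused_sdp_choice above), based on device, dtype, and the mask / causal arguments.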
+ dispatch: + CompositeExplicitAutograd: special_chebyshev_polynomial_t_out + device_check: NoCheck + python_module: special + variants: function + tags: pointwise + +- func: special_chebyshev_polynomial_u(Tensor x, Tensor n) -> Tensor + device_check: NoCheck + python_module: special + structured_delegate: special_chebyshev_polynomial_u.out + variants: function + tags: pointwise + +- func: special_chebyshev_polynomial_u.x_scalar(Scalar x, Tensor n) -> Tensor + dispatch: + CompositeExplicitAutograd: special_chebyshev_polynomial_u + device_check: NoCheck + python_module: special + variants: function + tags: pointwise + +- func: special_chebyshev_polynomial_u.n_scalar(Tensor x, Scalar n) -> Tensor + dispatch: + CompositeExplicitAutograd: special_chebyshev_polynomial_u + device_check: NoCheck + python_module: special + variants: function + tags: pointwise + +- func: special_chebyshev_polynomial_u.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck + dispatch: + CPU, CUDA: special_chebyshev_polynomial_u_out + python_module: special + structured_inherits: TensorIteratorBase + structured: True + variants: function + tags: pointwise + +- func: special_chebyshev_polynomial_u.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CompositeExplicitAutograd: special_chebyshev_polynomial_u_out + device_check: NoCheck + python_module: special + variants: function + tags: pointwise + +- func: special_chebyshev_polynomial_u.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CompositeExplicitAutograd: special_chebyshev_polynomial_u_out + device_check: NoCheck + python_module: special + variants: function + tags: pointwise + +- func: special_chebyshev_polynomial_v(Tensor x, Tensor n) -> Tensor + device_check: NoCheck + python_module: special + structured_delegate: special_chebyshev_polynomial_v.out + variants: function + tags: pointwise + +- func: special_chebyshev_polynomial_v.x_scalar(Scalar x, Tensor n) -> Tensor + dispatch: + CompositeExplicitAutograd: special_chebyshev_polynomial_v + device_check: NoCheck + python_module: special + variants: function + tags: pointwise + +- func: special_chebyshev_polynomial_v.n_scalar(Tensor x, Scalar n) -> Tensor + dispatch: + CompositeExplicitAutograd: special_chebyshev_polynomial_v + device_check: NoCheck + python_module: special + variants: function + tags: pointwise + +- func: special_chebyshev_polynomial_v.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck + dispatch: + CPU, CUDA: special_chebyshev_polynomial_v_out + python_module: special + structured_inherits: TensorIteratorBase + structured: True + variants: function + tags: pointwise + +- func: special_chebyshev_polynomial_v.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CompositeExplicitAutograd: special_chebyshev_polynomial_v_out + device_check: NoCheck + python_module: special + variants: function + tags: pointwise + +- func: special_chebyshev_polynomial_v.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) 
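# Editorial note: the special_*_polynomial_* families here all follow one pattern: a Tensor-Tensor form plus x_scalar / n_scalar overloads, all pointwise and broadcasting, bound into torch.special. Hedged sketch for Chebyshev T (the Hermite, Laguerre, and Legendre entries below behave the same way):
#
#   import torch
#
#   x = torch.linspace(-1.0, 1.0, 5)
#   t3 = torch.special.chebyshev_polynomial_t(x, 3)       # degree given as a scalar
#   n = torch.arange(4.0).reshape(4, 1)
#   table = torch.special.chebyshev_polynomial_t(x, n)    # broadcasts to a 4 x 5 table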
+ dispatch: + CompositeExplicitAutograd: special_chebyshev_polynomial_v_out + device_check: NoCheck + python_module: special + variants: function + tags: pointwise + +- func: special_chebyshev_polynomial_w(Tensor x, Tensor n) -> Tensor + device_check: NoCheck + python_module: special + structured_delegate: special_chebyshev_polynomial_w.out + variants: function + tags: pointwise + +- func: special_chebyshev_polynomial_w.x_scalar(Scalar x, Tensor n) -> Tensor + dispatch: + CompositeExplicitAutograd: special_chebyshev_polynomial_w + device_check: NoCheck + python_module: special + variants: function + tags: pointwise + +- func: special_chebyshev_polynomial_w.n_scalar(Tensor x, Scalar n) -> Tensor + dispatch: + CompositeExplicitAutograd: special_chebyshev_polynomial_w + device_check: NoCheck + python_module: special + variants: function + tags: pointwise + +- func: special_chebyshev_polynomial_w.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck + dispatch: + CPU, CUDA: special_chebyshev_polynomial_w_out + python_module: special + structured_inherits: TensorIteratorBase + structured: True + variants: function + tags: pointwise + +- func: special_chebyshev_polynomial_w.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CompositeExplicitAutograd: special_chebyshev_polynomial_w_out + device_check: NoCheck + python_module: special + variants: function + tags: pointwise + +- func: special_chebyshev_polynomial_w.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CompositeExplicitAutograd: special_chebyshev_polynomial_w_out + device_check: NoCheck + python_module: special + variants: function + tags: pointwise + +- func: special_hermite_polynomial_h(Tensor x, Tensor n) -> Tensor + device_check: NoCheck + python_module: special + structured_delegate: special_hermite_polynomial_h.out + variants: function + tags: pointwise + +- func: special_hermite_polynomial_h.x_scalar(Scalar x, Tensor n) -> Tensor + dispatch: + CompositeExplicitAutograd: special_hermite_polynomial_h + device_check: NoCheck + python_module: special + variants: function + tags: pointwise + +- func: special_hermite_polynomial_h.n_scalar(Tensor x, Scalar n) -> Tensor + dispatch: + CompositeExplicitAutograd: special_hermite_polynomial_h + device_check: NoCheck + python_module: special + variants: function + tags: pointwise + +- func: special_hermite_polynomial_h.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck + dispatch: + CPU, CUDA: special_hermite_polynomial_h_out + python_module: special + structured_inherits: TensorIteratorBase + structured: True + variants: function + tags: pointwise + +- func: special_hermite_polynomial_h.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CompositeExplicitAutograd: special_hermite_polynomial_h_out + device_check: NoCheck + python_module: special + variants: function + tags: pointwise + +- func: special_hermite_polynomial_h.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) 
+ dispatch: + CompositeExplicitAutograd: special_hermite_polynomial_h_out + device_check: NoCheck + python_module: special + variants: function + tags: pointwise + +- func: special_hermite_polynomial_he(Tensor x, Tensor n) -> Tensor + device_check: NoCheck + python_module: special + structured_delegate: special_hermite_polynomial_he.out + variants: function + tags: pointwise + +- func: special_hermite_polynomial_he.x_scalar(Scalar x, Tensor n) -> Tensor + dispatch: + CompositeExplicitAutograd: special_hermite_polynomial_he + device_check: NoCheck + python_module: special + variants: function + tags: pointwise + +- func: special_hermite_polynomial_he.n_scalar(Tensor x, Scalar n) -> Tensor + dispatch: + CompositeExplicitAutograd: special_hermite_polynomial_he + device_check: NoCheck + python_module: special + variants: function + tags: pointwise + +- func: special_hermite_polynomial_he.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck + dispatch: + CPU, CUDA: special_hermite_polynomial_he_out + python_module: special + structured_inherits: TensorIteratorBase + structured: True + variants: function + tags: pointwise + +- func: special_hermite_polynomial_he.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CompositeExplicitAutograd: special_hermite_polynomial_he_out + device_check: NoCheck + python_module: special + variants: function + tags: pointwise + +- func: special_hermite_polynomial_he.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CompositeExplicitAutograd: special_hermite_polynomial_he_out + device_check: NoCheck + python_module: special + variants: function + tags: pointwise + +- func: special_laguerre_polynomial_l(Tensor x, Tensor n) -> Tensor + device_check: NoCheck + python_module: special + structured_delegate: special_laguerre_polynomial_l.out + variants: function + tags: pointwise + +- func: special_laguerre_polynomial_l.x_scalar(Scalar x, Tensor n) -> Tensor + dispatch: + CompositeExplicitAutograd: special_laguerre_polynomial_l + device_check: NoCheck + python_module: special + variants: function + tags: pointwise + +- func: special_laguerre_polynomial_l.n_scalar(Tensor x, Scalar n) -> Tensor + dispatch: + CompositeExplicitAutograd: special_laguerre_polynomial_l + device_check: NoCheck + python_module: special + variants: function + tags: pointwise + +- func: special_laguerre_polynomial_l.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck + dispatch: + CPU, CUDA: special_laguerre_polynomial_l_out + python_module: special + structured_inherits: TensorIteratorBase + structured: True + variants: function + tags: pointwise + +- func: special_laguerre_polynomial_l.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CompositeExplicitAutograd: special_laguerre_polynomial_l_out + device_check: NoCheck + python_module: special + variants: function + tags: pointwise + +- func: special_laguerre_polynomial_l.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) 
+ dispatch: + CompositeExplicitAutograd: special_laguerre_polynomial_l_out + device_check: NoCheck + python_module: special + variants: function + tags: pointwise + +- func: special_legendre_polynomial_p(Tensor x, Tensor n) -> Tensor + device_check: NoCheck + python_module: special + structured_delegate: special_legendre_polynomial_p.out + variants: function + tags: pointwise + +- func: special_legendre_polynomial_p.x_scalar(Scalar x, Tensor n) -> Tensor + dispatch: + CompositeExplicitAutograd: special_legendre_polynomial_p + device_check: NoCheck + python_module: special + variants: function + tags: pointwise + +- func: special_legendre_polynomial_p.n_scalar(Tensor x, Scalar n) -> Tensor + dispatch: + CompositeExplicitAutograd: special_legendre_polynomial_p + device_check: NoCheck + python_module: special + variants: function + tags: pointwise + +- func: special_legendre_polynomial_p.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck + dispatch: + CPU, CUDA: special_legendre_polynomial_p_out + python_module: special + structured_inherits: TensorIteratorBase + structured: True + variants: function + tags: pointwise + +- func: special_legendre_polynomial_p.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CompositeExplicitAutograd: special_legendre_polynomial_p_out + device_check: NoCheck + python_module: special + variants: function + tags: pointwise + +- func: special_legendre_polynomial_p.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CompositeExplicitAutograd: special_legendre_polynomial_p_out + device_check: NoCheck + python_module: special + variants: function + tags: pointwise + +- func: special_modified_bessel_i0(Tensor self) -> Tensor + python_module: special + structured_delegate: special_modified_bessel_i0.out + variants: function + tags: pointwise + +- func: special_modified_bessel_i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CPU, CUDA: special_modified_bessel_i0_out + python_module: special + structured_inherits: TensorIteratorBase + structured: True + variants: function + tags: pointwise + +- func: special_modified_bessel_i1(Tensor self) -> Tensor + python_module: special + structured_delegate: special_modified_bessel_i1.out + variants: function + tags: pointwise + +- func: special_modified_bessel_i1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CPU, CUDA: special_modified_bessel_i1_out + python_module: special + structured_inherits: TensorIteratorBase + structured: True + variants: function + tags: pointwise + +- func: special_modified_bessel_k0(Tensor self) -> Tensor + python_module: special + structured_delegate: special_modified_bessel_k0.out + variants: function + tags: pointwise + +- func: special_modified_bessel_k0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CPU, CUDA: special_modified_bessel_k0_out + python_module: special + structured_inherits: TensorIteratorBase + structured: True + variants: function + tags: pointwise + +- func: special_modified_bessel_k1(Tensor self) -> Tensor + python_module: special + structured_delegate: special_modified_bessel_k1.out + variants: function + tags: pointwise + +- func: special_modified_bessel_k1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+ dispatch: + CPU, CUDA: special_modified_bessel_k1_out + python_module: special + structured_inherits: TensorIteratorBase + structured: True + variants: function + tags: pointwise + +- func: special_scaled_modified_bessel_k0(Tensor x) -> Tensor + python_module: special + structured_delegate: special_scaled_modified_bessel_k0.out + variants: function + tags: pointwise + +- func: special_scaled_modified_bessel_k0.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CPU, CUDA: special_scaled_modified_bessel_k0_out + python_module: special + structured_inherits: TensorIteratorBase + structured: True + variants: function + tags: pointwise + +- func: special_scaled_modified_bessel_k1(Tensor x) -> Tensor + python_module: special + structured_delegate: special_scaled_modified_bessel_k1.out + variants: function + tags: pointwise + +- func: special_scaled_modified_bessel_k1.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CPU, CUDA: special_scaled_modified_bessel_k1_out + python_module: special + structured_inherits: TensorIteratorBase + structured: True + variants: function + tags: pointwise + +- func: special_shifted_chebyshev_polynomial_t(Tensor x, Tensor n) -> Tensor + device_check: NoCheck + python_module: special + structured_delegate: special_shifted_chebyshev_polynomial_t.out + variants: function + tags: pointwise + +- func: special_shifted_chebyshev_polynomial_t.x_scalar(Scalar x, Tensor n) -> Tensor + dispatch: + CompositeExplicitAutograd: special_shifted_chebyshev_polynomial_t + device_check: NoCheck + python_module: special + variants: function + tags: pointwise + +- func: special_shifted_chebyshev_polynomial_t.n_scalar(Tensor x, Scalar n) -> Tensor + dispatch: + CompositeExplicitAutograd: special_shifted_chebyshev_polynomial_t + device_check: NoCheck + python_module: special + variants: function + tags: pointwise + +- func: special_shifted_chebyshev_polynomial_t.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck + dispatch: + CPU, CUDA: special_shifted_chebyshev_polynomial_t_out + python_module: special + structured_inherits: TensorIteratorBase + structured: True + variants: function + tags: pointwise + +- func: special_shifted_chebyshev_polynomial_t.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CompositeExplicitAutograd: special_shifted_chebyshev_polynomial_t_out + device_check: NoCheck + python_module: special + variants: function + tags: pointwise + +- func: special_shifted_chebyshev_polynomial_t.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) 
+ dispatch: + CompositeExplicitAutograd: special_shifted_chebyshev_polynomial_t_out + device_check: NoCheck + python_module: special + variants: function + tags: pointwise + +- func: special_shifted_chebyshev_polynomial_u(Tensor x, Tensor n) -> Tensor + device_check: NoCheck + python_module: special + structured_delegate: special_shifted_chebyshev_polynomial_u.out + variants: function + tags: pointwise + +- func: special_shifted_chebyshev_polynomial_u.x_scalar(Scalar x, Tensor n) -> Tensor + dispatch: + CompositeExplicitAutograd: special_shifted_chebyshev_polynomial_u + device_check: NoCheck + python_module: special + variants: function + tags: pointwise + +- func: special_shifted_chebyshev_polynomial_u.n_scalar(Tensor x, Scalar n) -> Tensor + dispatch: + CompositeExplicitAutograd: special_shifted_chebyshev_polynomial_u + device_check: NoCheck + python_module: special + variants: function + tags: pointwise + +- func: special_shifted_chebyshev_polynomial_u.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck + dispatch: + CPU, CUDA: special_shifted_chebyshev_polynomial_u_out + python_module: special + structured_inherits: TensorIteratorBase + structured: True + variants: function + tags: pointwise + +- func: special_shifted_chebyshev_polynomial_u.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CompositeExplicitAutograd: special_shifted_chebyshev_polynomial_u_out + device_check: NoCheck + python_module: special + variants: function + tags: pointwise + +- func: special_shifted_chebyshev_polynomial_u.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CompositeExplicitAutograd: special_shifted_chebyshev_polynomial_u_out + device_check: NoCheck + python_module: special + variants: function + tags: pointwise + +- func: special_shifted_chebyshev_polynomial_v(Tensor x, Tensor n) -> Tensor + device_check: NoCheck + python_module: special + structured_delegate: special_shifted_chebyshev_polynomial_v.out + variants: function + tags: pointwise + +- func: special_shifted_chebyshev_polynomial_v.x_scalar(Scalar x, Tensor n) -> Tensor + dispatch: + CompositeExplicitAutograd: special_shifted_chebyshev_polynomial_v + device_check: NoCheck + python_module: special + variants: function + tags: pointwise + +- func: special_shifted_chebyshev_polynomial_v.n_scalar(Tensor x, Scalar n) -> Tensor + dispatch: + CompositeExplicitAutograd: special_shifted_chebyshev_polynomial_v + device_check: NoCheck + python_module: special + variants: function + tags: pointwise + +- func: special_shifted_chebyshev_polynomial_v.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck + dispatch: + CPU, CUDA: special_shifted_chebyshev_polynomial_v_out + python_module: special + structured_inherits: TensorIteratorBase + structured: True + variants: function + tags: pointwise + +- func: special_shifted_chebyshev_polynomial_v.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CompositeExplicitAutograd: special_shifted_chebyshev_polynomial_v_out + device_check: NoCheck + python_module: special + variants: function + tags: pointwise + +- func: special_shifted_chebyshev_polynomial_v.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) 
+ dispatch: + CompositeExplicitAutograd: special_shifted_chebyshev_polynomial_v_out + device_check: NoCheck + python_module: special + variants: function + tags: pointwise + +- func: special_shifted_chebyshev_polynomial_w(Tensor x, Tensor n) -> Tensor + device_check: NoCheck + python_module: special + structured_delegate: special_shifted_chebyshev_polynomial_w.out + variants: function + tags: pointwise + +- func: special_shifted_chebyshev_polynomial_w.x_scalar(Scalar x, Tensor n) -> Tensor + dispatch: + CompositeExplicitAutograd: special_shifted_chebyshev_polynomial_w + device_check: NoCheck + python_module: special + variants: function + tags: pointwise + +- func: special_shifted_chebyshev_polynomial_w.n_scalar(Tensor x, Scalar n) -> Tensor + dispatch: + CompositeExplicitAutograd: special_shifted_chebyshev_polynomial_w + device_check: NoCheck + python_module: special + variants: function + tags: pointwise + +- func: special_shifted_chebyshev_polynomial_w.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + device_check: NoCheck + dispatch: + CPU, CUDA: special_shifted_chebyshev_polynomial_w_out + python_module: special + structured_inherits: TensorIteratorBase + structured: True + variants: function + tags: pointwise + +- func: special_shifted_chebyshev_polynomial_w.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CompositeExplicitAutograd: special_shifted_chebyshev_polynomial_w_out + device_check: NoCheck + python_module: special + variants: function + tags: pointwise + +- func: special_shifted_chebyshev_polynomial_w.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CompositeExplicitAutograd: special_shifted_chebyshev_polynomial_w_out + device_check: NoCheck + python_module: special + variants: function + tags: pointwise + +- func: special_spherical_bessel_j0(Tensor x) -> Tensor + python_module: special + structured_delegate: special_spherical_bessel_j0.out + variants: function + tags: pointwise + +- func: special_spherical_bessel_j0.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CPU, CUDA: special_spherical_bessel_j0_out + python_module: special + structured_inherits: TensorIteratorBase + structured: True + variants: function + tags: pointwise + +# Aux function used in the test TestPythonDispatch.test_kwarg_only_and_positional_default +# within test/test_python_dispatch.py +- func: _foobar(Tensor self, bool arg1=True, bool arg2=True, *, bool arg3=True) -> Tensor + dispatch: + CPU: foobar + autogen: _foobar.out + +# Fused Optimizer CUDA kernels. +- func: _fused_adam_(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> () + # Unlike "foreach" functions, lists of tensors should be guaranteed to be on the same device (for now). + variants: function + dispatch: + CUDA: _fused_adam_kernel_cuda_ + autogen: _fused_adam, _fused_adam.out + +- func: _fused_adam_.tensor_lr(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? 
found_inf=None) -> () + # Unlike "foreach" functions, lists of tensors should be guaranteed to be on the same device (for now), + # but still skip the device check as the Tensor LR can be on CPU + device_check: NoCheck + variants: function + dispatch: + CUDA: _fused_adam_kernel_cuda_ + autogen: _fused_adam.tensor_lr, _fused_adam.tensor_lr_out + +- func: _fused_adamw_(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> () + # Unlike "foreach" functions, lists of tensors should be guaranteed to be on the same device (for now). + variants: function + dispatch: + CUDA: _fused_adamw_kernel_cuda_ + autogen: _fused_adamw, _fused_adamw.out + +- func: _fused_adamw_.tensor_lr(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> () + # Unlike "foreach" functions, lists of tensors should be guaranteed to be on the same device (for now), + # but still skip the device check as the Tensor LR can be on CPU + device_check: NoCheck + variants: function + dispatch: + CUDA: _fused_adamw_kernel_cuda_ + autogen: _fused_adamw.tensor_lr, _fused_adamw.tensor_lr_out + +- func: _fused_sgd_(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] momentum_buffer_list, *, float weight_decay, float momentum, float lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None) -> () + # Unlike "foreach" functions, lists of tensors should be guaranteed to be on the same device (for now). + variants: function + dispatch: + CUDA: _fused_sgd_kernel_cuda_ + autogen: _fused_sgd, _fused_sgd.out + +- func: _fused_sgd_.tensor_lr(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] momentum_buffer_list, *, float weight_decay, float momentum, Tensor lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None) -> () + # Unlike "foreach" functions, lists of tensors should be guaranteed to be on the same device (for now). + # but still skip the device check as the Tensor LR can be on CPU + device_check: NoCheck + variants: function + dispatch: + CUDA: _fused_sgd_kernel_cuda_ + autogen: _fused_sgd.tensor_lr, _fused_sgd.tensor_lr_out + +# This op is ONLY used by pytorch/XLA in functionalization, and should never show up in vanilla eager mode or in any pytorch tracing contexts. 
+- func: _propagate_xla_data(Tensor input, Tensor output) -> () + variants: function diff --git a/venv/lib/python3.10/site-packages/torchgen/packaged/ATen/native/tags.yaml b/venv/lib/python3.10/site-packages/torchgen/packaged/ATen/native/tags.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c31721729036ff14989f3053d22400d9d100558a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchgen/packaged/ATen/native/tags.yaml @@ -0,0 +1,65 @@ +# This yaml file contains all the possible tags that can be defined in `tags` in `native_functions.yaml` + +- tag: inplace_view + desc: | + This tag indicates if an operator *only* modifies the tensor metadata +- tag: pt2_compliant_tag + desc: | + This tag indicates if the operator is guaranteed to + work with the PT2 compilation APIs (torch.compile, + torch.export, etc). If you add this tag to an + operator, please use + `torch.testing._internal.optest.opcheck` to test that + the operator has been registered correctly and + works with torch.compile +- tag: view_copy + desc: | + This tag indicates operators that are *_copy* variants + of view/aliasing operators. If an operator has a view_copy tag, + then it should have the name {op}_copy, where {op} is a view operator. +- tag: dynamic_output_shape + desc: | + This tag indicates if an operator's output's shape depends on input Tensor + data. +- tag: data_dependent_output + desc: | + Operator has a non-Tensor output whose value is dependent on the data + of Tensor inputs. Among other things, this implies that this operator + cannot be run with meta tensor (since data is not available), nor + can it be symbolically traced. +- tag: generated + desc: | + This tag indicates that the operator doesn't have an explicit entry in + native_functions.yaml, and instead was generated automatically by the codegen. +- tag: nondeterministic_seeded + desc: | + This tag indicates if an operator is nondeterministically seeded + (i.e., is random) such that the operator intentionally produces + different results when run twice on the same inputs, but this randomness + is controlled by a Generator which, if reseeded would give you the + same result. +- tag: nondeterministic_bitwise + desc: | + This tag indicates if an operator doesn't guarantee bitwise equivalence + across different runs of an operator with identical inputs. +- tag: needs_fixed_stride_order + desc: | + This tag indicates that the operator should be passed Tensors following + the same stride permutation as observed in eager when compiled in inductor. + +# NOTE [Core ATen Ops] +- tag: core + desc: | + Core aten ops is a subset of aten ops that remains after aten-to-aten decomposition and + functionalization pass. Core aten ops are fully functional and adhere to single static + assignment (SSA): this implies there will be no `inplace` or `_out` variants in this opset. + This opset is designed to serve as the functional IR to interface with compiler backends. + In contrast to primTorch, core aten opset doesn't decompose ops into explicit + type promotion and broadcasting ops. + Core aten ops is also effectively the opset produced by torchdynamo.export(aten_graph=True), + and thus can be used as an opset for export purpose. +- tag: pointwise + desc: | + Pointwise operators are operators where each element of the output is computed only by accessing + the corresponding element of all the broadcasted inputs. The output shape will be the broadcasted + shape of the inputs. 
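Taken together, the `special_*` entries earlier in this file and the `pointwise` tag above describe the generated call surface: the base entry becomes an `at::` free function, the `.x_scalar`/`.n_scalar` entries become `Scalar` overloads, and the structured `.out` entry becomes the `*_out` variant, with the inputs broadcast against each other. A minimal sketch of that surface, assuming a recent libtorch build in which these generated overloads are present (nothing below is hand-authored API, it only exercises what the yaml declares):

```cpp
// Hand-written illustration (not generated code): exercising the overloads
// described by the native_functions.yaml entries above.
#include <ATen/ATen.h>
#include <iostream>

int main() {
  // Tensor-tensor form: probabilists' Hermite polynomial He_n(x), evaluated
  // elementwise; per the `pointwise` tag the output takes the broadcasted
  // shape of x and n.
  at::Tensor x = at::linspace(-1.0, 1.0, 5, at::kDouble);
  at::Tensor n = at::full({1}, 3.0, at::kDouble);
  at::Tensor he = at::special_hermite_polynomial_he(x, n);

  // Scalar overload generated from the .n_scalar entry.
  at::Tensor he_fixed_n = at::special_hermite_polynomial_he(x, at::Scalar(2));

  // The structured .out entry becomes the *_out free function (the out
  // tensor is the first argument in the generated C++ signature).
  at::Tensor out = at::empty_like(x);
  at::special_hermite_polynomial_he_out(out, x, n);

  std::cout << he.sum().item<double>() << " "
            << he_fixed_n.sum().item<double>() << " "
            << out.sum().item<double>() << std::endl;
  return 0;
}
```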
diff --git a/venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/DispatchKeyFunctions.h b/venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/DispatchKeyFunctions.h new file mode 100644 index 0000000000000000000000000000000000000000..ffae71319137257b2481c10f3b3d2a00b4a136fa --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/DispatchKeyFunctions.h @@ -0,0 +1,29 @@ +#include + +// TODO Undo all logic introduced for Note [Avoiding Include Cycles In Static Dispatch] +// Code introduced to avoid cyclic dependency in static dispatch is no longer +// needed as static dispatch logic is moved from TensorBody.h, which caused cycles in the first place, +// to Operators.cpp for supporting multiple backends with multiple kernels. +// +// Note [Avoiding Include Cycles In Static Dispatch] +// In order to avoid #include cycles in the static dispatch build, we've carefully split out +// the static function definition files into {DispatchKey}Functions.h and {DispatchKey}Functions_inl.h. +// +// Without this split, the include cycle looks like TensorBody.h -> CPUFunctions.h -> TensorBody.h. +// - TensorBody.h #includes CPUFunctions.h in the static dispatch build, because the tensor methods +// all need to call into the fastpath C++ API defined in CPUFunctions.h. The methods are also all +// directly inlined into TensorBody.h. +// - CPUFunctions.h #includes TensorBody.h because it contains function declarations for the entire C++ API, +// which include functions that have defaultable optional arguments. +// That requires knowing the full Tensor class definition. +// +// We break the cycle by doing the following: +// - Split out CPUFunction.h into two files: CPUFunctions.h and CPUFunctions_inl.h +// - CPUFunction.h is a dummy file that just includes the Tensor class and includes CPUFunctions_inl., +// - CPUFunctions_inl.h includes everything else +// - (only in the static dispatch build) TensorBody.h makes sure to finish defining the Tensor class, +// and then it includes CPUFunctions_inl.h. +// - All other files that want the cpu fastpath functions can include CPUFunctions.h directly. +// - This also means that static dispatch build, CPUFunctions.h only needs to +// #include TensorBody.h, and it will automatically bring in CPUFunctions_inl.h. +${inline_headers} diff --git a/venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/DispatchKeyFunctions_inl.h b/venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/DispatchKeyFunctions_inl.h new file mode 100644 index 0000000000000000000000000000000000000000..fbb71c2cb123cb21fb57ec32341d86bff06f6a17 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/DispatchKeyFunctions_inl.h @@ -0,0 +1,22 @@ +#pragma once +// ${generated_comment} + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +#if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS) +#error This change adds a dependency on all pytorch operators, meaning the \ + file will need to be re-compiled every time an operator is changed or added. \ + Consider including a specific operator from \ + . \ + See NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS]. 
+#endif + +${DispatchKeyFunctions_inl_includes} + + +${dispatch_namespaced_declarations} diff --git a/venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/Function.h b/venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/Function.h new file mode 100644 index 0000000000000000000000000000000000000000..5bbd742aae0ad0933d22790715599b4309efca8a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/Function.h @@ -0,0 +1,26 @@ +#pragma once + +// ${generated_comment} + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +${static_dispatch_ops_headers} + +${operator_includes} + +namespace at { + +${function_definitions} + +} diff --git a/venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/Functions.h b/venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/Functions.h new file mode 100644 index 0000000000000000000000000000000000000000..fb531363f53ea5fbaf1da4ca80e781145d628dca --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/Functions.h @@ -0,0 +1,143 @@ +#pragma once + +// ${generated_comment} + +#ifdef TORCH_ASSERT_NO_OPERATORS +#error This change adds a dependency on native_functions.yaml, \ + meaning the file will need to be re-compiled every time an operator \ + is changed or added. Consider if your change would be better placed in \ + another file, or if a more specific header might achieve the same goal. \ + See NOTE: [Tensor vs. TensorBase] +#endif + +#if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS) +#error This change adds a dependency on all pytorch operators, meaning the \ + file will need to be re-compiled every time an operator is changed or added. \ + Consider including a specific operator from and \ + see NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS]. +#endif + +// NOTE: [TORCH_ASSERT_ONLY_METHOD_OPERATORS] +// +// In ATen, certain generated headers files include the definitions of +// every single operator in PyTorch. Unfortunately this means every +// time an operator signature is updated or changed in +// native_functions.yaml, you (and every other PyTorch developer) need +// to recompile every source file that includes any of these headers. +// +// To break up these header dependencies, and improve incremental +// build times for all PyTorch developers. These headers are split +// into per-operator headers in the `ATen/ops` folder. This limits +// incremental builds to only changes to methods of `Tensor`, or files +// that use the specific operator being changed. With `at::sum` as an +// example, you should include +// +// // instead of ATen/Functions.h +// // instead of ATen/NativeFunctions.h +// // instead of ATen/Operators.h +// // instead of ATen/CPUFunctions.h +// +// However, even if you're careful to use this in your own code. +// `Functions.h` might be included indirectly through another header +// without you realising. To avoid this, you can add +// +// #define TORCH_ASSERT_ONLY_METHOD_OPERATORS +// +// to the top of your source file. This way any time the non-specific +// headers are included, the compiler will error out. +// +// Also, be aware that `ops` are not available in all build +// configurations (namely fb-internal) so you must guard these +// includes with `#ifdef AT_PER_OPERATOR_HEADERS`. e.g. 
+// +// #ifndef AT_PER_OPERATOR_HEADERS +// #include +// #else +// #include +// #endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +${Functions_includes} + +namespace at { + +${Functions_declarations} + +// Special C++ only overloads for std()-like functions (See gh-40287) +// These are needed because int -> bool conversion takes precedence over int -> IntArrayRef +// So, for example std(0) would select the std(unbiased=False) overload +TORCH_API inline Tensor var(const Tensor& self, int dim) { + return at::var(self, IntArrayRef{dim}); +} +TORCH_API inline std::tuple var_mean(const Tensor& self, int dim) { + return at::var_mean(self, IntArrayRef{dim}); +} +TORCH_API inline Tensor std(const Tensor& self, int dim) { + return at::std(self, IntArrayRef{dim}); +} +TORCH_API inline std::tuple std_mean(const Tensor& self, int dim) { + return at::std_mean(self, IntArrayRef{dim}); +} + +inline int64_t numel(const Tensor& tensor) { + return tensor.numel(); +} + +inline int64_t size(const Tensor& tensor, int64_t dim) { + return tensor.size(dim); +} + +inline int64_t stride(const Tensor& tensor, int64_t dim) { + return tensor.stride(dim); +} + +inline bool is_complex(const Tensor& tensor) { + return tensor.is_complex(); +} + +inline bool is_floating_point(const Tensor& tensor) { + return tensor.is_floating_point(); +} + +inline bool is_signed(const Tensor& tensor) { + return tensor.is_signed(); +} + +inline bool is_inference(const Tensor& tensor) { + return tensor.is_inference(); +} + +inline bool _is_zerotensor(const Tensor& tensor) { + return tensor._is_zerotensor(); +} + +inline bool is_conj(const Tensor& tensor) { + return tensor.is_conj(); +} + +inline Tensor conj(const Tensor& tensor) { + return tensor.conj(); +} + +inline bool is_neg(const Tensor& tensor) { + return tensor.is_neg(); +} + +} diff --git a/venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/LazyNonNativeIr.h b/venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/LazyNonNativeIr.h new file mode 100644 index 0000000000000000000000000000000000000000..18eaf6da52e4b3654becac6cc89849bc0806ae09 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/LazyNonNativeIr.h @@ -0,0 +1,11 @@ +#pragma once + +${lazy_non_native_ir_inc} + +// This file contains autogenerated LazyTensor Non Native IR nodes + +${namespace_prologue} + +${non_native_ir_nodes} + +${namespace_epilogue} diff --git a/venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/NativeFunction.h b/venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/NativeFunction.h new file mode 100644 index 0000000000000000000000000000000000000000..4f70db62a4c6429ee8e782fb13fb0ae6ffc5d957 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/NativeFunction.h @@ -0,0 +1,17 @@ +#pragma once + +// ${generated_comment} + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +${extra_includes} + +${native_function_declarations} diff --git a/venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/NativeMetaFunction.h b/venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/NativeMetaFunction.h new file mode 100644 index 0000000000000000000000000000000000000000..d660becdd9ec8bd7fe06737ad6b562054bfc161f --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/NativeMetaFunction.h @@ -0,0 +1,23 @@ +#pragma once + +// ${generated_comment} + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +${meta_function_declarations} + +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/Operators.cpp b/venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/Operators.cpp new file mode 100644 index 0000000000000000000000000000000000000000..082bb67c3e2043f2c36b29345f57048ec2e9eea7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/Operators.cpp @@ -0,0 +1,19 @@ +#include +#include + +// ${generated_comment} +// NOTE See [Sharded File] comment in VariableType + +#ifndef AT_PER_OPERATOR_HEADERS +#include +#else +${operator_headers} +#endif + +${static_dispatch_extra_headers} + +namespace at { namespace _ops { + +${definitions} + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/RedispatchFunctions.h b/venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/RedispatchFunctions.h new file mode 100644 index 0000000000000000000000000000000000000000..d89975a4a62257cdc163f32c7b9f13261b04f33f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/RedispatchFunctions.h @@ -0,0 +1,32 @@ +#pragma once + +// ${generated_comment} + +#ifdef TORCH_ASSERT_ONLY_METHOD_OPERATORS +#error This change adds a dependency on all pytorch operators, meaning the \ + file will need to be re-compiled every time an operator is changed or added. \ + Consider using the at::_ops::{name}::redispatch() interface by including \ + the specific operator from +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { + +namespace redispatch { + ${function_redispatch_definitions} +} // namespace redispatch + +} diff --git a/venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/RegisterCodegenUnboxedKernels.cpp b/venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/RegisterCodegenUnboxedKernels.cpp new file mode 100644 index 0000000000000000000000000000000000000000..279f987c66a26c2eb5d11c664c85b3604b67684b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/RegisterCodegenUnboxedKernels.cpp @@ -0,0 +1,41 @@ +#include +#include +#include + +#include + +// ${generated_comment} + +// NOTE [Sharded File]: This file is generated in a sharded fashion to speed up +// incremental rebuilds. See the comment at the top of +// templates/VariableType.cpp for an analogous, in-depth discussion. +// +// Generated by tools/jit/gen_unboxing.py. This file registers all ATen ops into JIT op registry instead of c10 +// dispatcher. JIT op registry only takes boxed kernels, so we are calling unboxing functions in UnboxingFunctions.h +// to cast arguments into C++ types (instead of IValue) and delegate to unboxed kernels. 
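+// For orientation (hand-written summary, not the generated text): each entry
+// in ${unboxed_ops} below pairs an operator schema string with a boxed lambda.
+// The lambda calls the matching wrapper declared in UnboxingFunctions.h, which
+// peeks the schema's arguments off the JIT stack as IValues, converts them to
+// the C++ types the unboxed kernel expects, invokes the corresponding at::
+// function, then drops the consumed inputs and pushes the result back onto
+// the stack.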
+ +namespace torch { namespace jit { + +using autograd::Variable; +using autograd::variable_list; +using at::Scalar; +using at::ScalarType; +using at::Tensor; +using at::TensorOptions; +using at::DeviceGuard; + +using ::c10::fmap; +using ::c10::filter; + +namespace { + +RegisterOperators reg({ + + // Generated operators + ${unboxed_ops} +}); + +} // anon namespace + + +}} // namespace torch::jit diff --git a/venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/RegisterDispatchKey.cpp b/venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/RegisterDispatchKey.cpp new file mode 100644 index 0000000000000000000000000000000000000000..7a1584d505f5a3c42861fde0ea5ee4da67485a32 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/RegisterDispatchKey.cpp @@ -0,0 +1,54 @@ +// required for old g++ to compile PRId64 macros, see +// https://github.com/pytorch/pytorch/issues/3571 +// for context +#ifndef __STDC_FORMAT_MACROS +#define __STDC_FORMAT_MACROS +#endif + +// an external backend might generate file within its code tree +// and check all the source files within the tree with clang-format. +// so, disable it since the backend might have a different config. +// clang-format off + +// NOTE: This condition is true for all PyTorch internal libraries, it +// just excludes external projects such as torch_xla which +// re-use some of the PyTorch codegen machinery. +#if defined(CAFFE2_BUILD_MAIN_LIB) || \ + defined(TORCH_CUDA_BUILD_MAIN_LIB) || \ + defined(TORCH_HIP_BUILD_MAIN_LIB) || \ + defined(TORCH_CUDA_CU_BUILD_MAIN_LIB) || \ + defined(TORCH_CUDA_CPP_BUILD_MAIN_LIB) +#define TORCH_ASSERT_ONLY_METHOD_OPERATORS +#endif + +// ${generated_comment} + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +$extra_cuda_headers +$external_backend_headers +$dispatch_headers +$ops_headers + +// See template file RegisterDispatchDefinitions.ini +$dispatch_definitions diff --git a/venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/TensorBody.h b/venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/TensorBody.h new file mode 100644 index 0000000000000000000000000000000000000000..010f12d4cfbce98804d42ed7028e686ce7ba1174 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/TensorBody.h @@ -0,0 +1,753 @@ +#pragma once + +#ifdef TORCH_ASSERT_NO_OPERATORS +#error This change adds a dependency on native_functions.yaml, \ + meaning the file will need to be re-compiled every time an operator \ + is changed or added. Consider if your change would be better placed in \ + another file, or if a more specific header might achieve the same goal. \ + See NOTE: [Tensor vs. 
TensorBase] +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#include + +namespace c10{ +template class List; +template class IListRef; +} +namespace at { +struct Generator; +struct Type; +class DeprecatedTypeProperties; +class Tensor; +} // namespace at +namespace at { +namespace indexing { +struct TensorIndex; +} // namespace indexing +} // namespace at + +namespace torch { namespace autograd { + +struct Node; + +}} // namespace torch::autograd + +namespace at { + +class OptionalTensorRef; +class TensorRef; +class Tensor; +using TensorList = ArrayRef; +using ITensorList = c10::IListRef; + +using Stream = c10::Stream; + +// Tensor is a "generic" object holding a pointer to the underlying TensorImpl object, which +// has an embedded reference count. In this way, Tensor is similar to boost::intrusive_ptr. +// +// For example: +// +// void func(Tensor a) { +// Tensor b = a; +// ... +// } +// +// In this example, when we say Tensor b = a, we are creating a new object that points to the +// same underlying TensorImpl, and bumps its reference count. When b goes out of scope, the +// destructor decrements the reference count by calling release() on the TensorImpl it points to. +// The existing constructors, operator overloads, etc. take care to implement the correct semantics. +// +// Note that Tensor can also be NULL, i.e. it is not associated with any underlying TensorImpl, and +// special care must be taken to handle this. +class TORCH_API Tensor: public TensorBase { + protected: + // Create a Tensor with a +0 reference count. Special care must be + // taken to avoid decrementing this reference count at destruction + // time. Intended to support MaybeOwnedTraits. + explicit Tensor(unsafe_borrow_t, const TensorBase& rhs): TensorBase(unsafe_borrow_t{}, rhs) {} + friend MaybeOwnedTraits; + friend OptionalTensorRef; + friend TensorRef; + + public: + Tensor() = default; + // This constructor should not be used by end users and is an implementation + // detail invoked by autogenerated code. + explicit Tensor( + c10::intrusive_ptr tensor_impl) + : TensorBase(std::move(tensor_impl)) {} + Tensor(const Tensor &tensor) = default; + Tensor(Tensor &&tensor) = default; + + // Implicitly move-constructible from TensorBase, but must be explicit to increase refcount + explicit Tensor(const TensorBase &base): TensorBase(base) {} + /*implicit*/ Tensor(TensorBase &&base): TensorBase(std::move(base)) {} + + // Creates a new wrapper from TensorImpl. Intentionally a free method because + // it should be used with care. 
Checks necessary invariants + static Tensor wrap_tensor_impl( + c10::intrusive_ptr tensor_impl) { + return TensorBase::wrap_tensor_impl(std::move(tensor_impl)); + } + + Tensor contiguous(MemoryFormat memory_format=MemoryFormat::Contiguous) const { + return TensorBase::contiguous(memory_format); + } + + Tensor conj() const { + if (!this->is_complex()) { + return *this; + } + + switch (this->layout()) { + case at::kSparse: + case at::kSparseCsr: + case at::kSparseCsc: + case at::kSparseBsr: + case at::kSparseBsc: + return this->conj_physical(); + default: + return this->_conj(); + } + } + + // Aliased by Dimname overloads, so need explicit using + using TensorBase::size; + using TensorBase::sym_size; + using TensorBase::stride; + + /// Should be used if *this can reasonably be expected to be contiguous and + /// performance is important. + /// Compared to contiguous, it saves a reference count + /// increment/decrement if *this is already contiguous, at the cost + /// in all cases of an extra pointer of stack usage, an extra branch + /// to access, and an extra branch at destruction time. + c10::MaybeOwned expect_contiguous(MemoryFormat memory_format=MemoryFormat::Contiguous) const &; + + // Use .contiguous() instead. Trying to borrow from a prvalue Tensor + // will only lead to trouble and dangling references. + c10::MaybeOwned expect_contiguous(MemoryFormat memory_format=MemoryFormat::Contiguous) && = delete; + + // The following overloads are very intruiging. Consider the following + // program: + // + // x[1] = 3; + // + // We would expect that the first entry of x is written to 3. But how can we + // actually achieve this? x[1] evaluates to a tensor... + // + // The answer is, using a ref-qualifier. x[1] is an rvalue, which cannot be + // (profitably) assigned to in the traditional sense, so we overload + // assignment to mean, "Actually, copy 3 into the tensor data." This is done + // with an rvalue-reference ref-qualified overload (the methods with && at the + // end of their type.) + // + // There's one more fly in the ointment: We also want + // + // Tensor x = y; + // + // to work, and we want it NOT to copy. So we need a traditional operator= + // overload. But we MUST specify a mutable lvalue ref-qualifier, to + // disambiguate the traditional overload from the rvalue-reference + // ref-qualified overload. Otherwise, it will be ambiguous, because + // a non ref-qualified method is eligible for all situations. + + // Unfortunately, we have to write these constructors out manually + // to work around an MSVC bug: + // error C2580: 'at::Tensor &at::Tensor::operator =(const at::Tensor &) &': + // multiple versions of a defaulted special member functions are not allowed + // Tensor& operator=(const Tensor&) & = default; + // Tensor& operator=(Tensor&&) & = default; + + // Also MSVC will wrongly issue the following warning with the aforementioned fix + // warning C4522: 'at::Tensor': multiple assignment operators specified + // Let's just skip the warning. 
+ // + // TODO: temporarily disabled + + Tensor& operator=(const TensorBase& x) & { + impl_ = x.getIntrusivePtr(); + return *this; + } + Tensor& operator=(TensorBase&& x) & noexcept { + impl_ = x.unsafeReleaseIntrusivePtr(); + return *this; + } + + Tensor& operator=(const Tensor &x) & { + return operator=(static_cast(x)); + } + Tensor& operator=(Tensor &&x) & noexcept { + return operator=(static_cast(x)); + } + + Tensor& operator=(const Scalar &v) && { + return fill_(v); + } + Tensor& operator=(const Tensor &rhs) && { + return copy_(rhs); + } + Tensor& operator=(Tensor&& rhs) && { + return copy_(rhs); + } + + C10_DEPRECATED_MESSAGE("Tensor.type() is deprecated. Instead use Tensor.options(), which in many cases (e.g. in a constructor) is a drop-in replacement. If you were using data from type(), that is now available from Tensor itself, so instead of tensor.type().scalar_type(), use tensor.scalar_type() instead and instead of tensor.type().backend() use tensor.device().") + DeprecatedTypeProperties & type() const { + return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties( + dispatchKeyToBackend(legacyExtractDispatchKey(key_set())), + scalar_type()); + } + + Tensor toType(ScalarType t) const { + return to(options().dtype(t), /*non_blocking*/ false, /*copy*/ false); + } + + // TODO: Deprecate me + Tensor toBackend(Backend b) const { + return to(options().device(backendToDeviceType(b)).layout(layout_from_backend(b)), /*non_blocking*/ false, /*copy*/ false); + } + + C10_DEPRECATED_MESSAGE("Tensor.is_variable() is deprecated; everything is a variable now. (If you want to assert that variable has been appropriately handled already, use at::impl::variable_excluded_from_dispatch())") + bool is_variable() const noexcept { + return !at::impl::variable_excluded_from_dispatch(); + } + + template + C10_DEPRECATED_MESSAGE("Tensor.data() is deprecated. 
Please use Tensor.data_ptr() instead.") + T * data() const { + return data_ptr(); + } + + template + T item() const; + + template class PtrTraits = DefaultPtrTraits, typename index_t = int64_t> + C10_DEPRECATED_MESSAGE("packed_accessor is deprecated, use packed_accessor32 or packed_accessor64 instead") + GenericPackedTensorAccessor packed_accessor() const & { + return generic_packed_accessor(); + } + template class PtrTraits = DefaultPtrTraits, typename index_t = int64_t> + C10_DEPRECATED_MESSAGE("packed_accessor is deprecated, use packed_accessor32 or packed_accessor64 instead") + GenericPackedTensorAccessor packed_accessor() && = delete; + + Tensor operator~() const { + return bitwise_not(); + } + Tensor operator-() const { + return neg(); + } + Tensor& operator+=(const Tensor & other) { + return add_(other); + } + Tensor& operator+=(const Scalar & other) { + return add_(other); + } + Tensor& operator-=(const Tensor & other) { + return sub_(other); + } + Tensor& operator-=(const Scalar & other) { + return sub_(other); + } + Tensor& operator*=(const Tensor & other) { + return mul_(other); + } + Tensor& operator*=(const Scalar & other) { + return mul_(other); + } + Tensor& operator/=(const Tensor & other) { + return div_(other); + } + Tensor& operator/=(const Scalar & other) { + return div_(other); + } + Tensor& operator&=(const Tensor & other) { + return bitwise_and_(other); + } + Tensor& operator|=(const Tensor & other) { + return bitwise_or_(other); + } + Tensor& operator^=(const Tensor & other) { + return bitwise_xor_(other); + } + Tensor operator[](const Scalar & index) const { + if (!index.isIntegral(false)) { + TORCH_CHECK_INDEX(false, "Can only index tensors with integral scalars"); + } + return this->operator[](index.toLong()); + } + Tensor operator[](const Tensor & index) const { + // These properties are checked in the Scalar constructor, but we already + // check them here to provide more useful diagnostics for the user. + if (!index.defined()) { + TORCH_CHECK_INDEX(false, "Can only index with tensors that are defined"); + } + if (index.dim() != 0) { + TORCH_CHECK_INDEX(false, + "Can only index with tensors that are scalars (zero-dim)"); + } + // The Scalar(Tensor) constructor is explicit, so we need to call it. 
+ return this->operator[](index.item()); + } + Tensor operator[](int64_t index) const { + return select(0, index); + } + + Tensor index(ArrayRef indices) const; + Tensor index(std::initializer_list indices) const; + + Tensor & index_put_(ArrayRef indices, Tensor const & rhs); + Tensor & index_put_(ArrayRef indices, const Scalar& v); + Tensor & index_put_(std::initializer_list indices, Tensor const & rhs); + Tensor & index_put_(std::initializer_list indices, const Scalar& v); + + Tensor cpu() const { + return to(options().device(c10::DeviceType::CPU), /*non_blocking*/ false, /*copy*/ false); + } + + // TODO: The Python version also accepts arguments + Tensor cuda() const { + return to(options().device(c10::DeviceType::CUDA), /*non_blocking*/ false, /*copy*/ false); + } + + Tensor hip() const { + return to(options().device(c10::DeviceType::HIP), /*non_blocking*/ false, /*copy*/ false); + } + + Tensor ve() const { + return to(options().device(c10::DeviceType::VE), /*non_blocking*/ false, /*copy*/ false); + } + + Tensor vulkan() const { + return to(options().device(c10::DeviceType::Vulkan), /*non_blocking*/ false, /*copy*/ false); + } + + Tensor metal() const { + return to(options().device(c10::DeviceType::Metal), /*non_blocking*/ false, /*copy*/ false); + } + + Tensor meta() const { + return to(options().device(c10::DeviceType::Meta), /*non_blocking*/ false, /*copy*/ false); + } + + // ~~~~~ Autograd API ~~~~~ + + /// \fn bool is_leaf() const; + /// + /// All Tensors that have `requires_grad()` which is ``false`` will be leaf Tensors by convention. + /// + /// For Tensors that have `requires_grad()` which is ``true``, they will be leaf Tensors if they were + /// created by the user. This means that they are not the result of an operation and so + /// `grad_fn()` is `nullptr`. + /// + /// Only leaf Tensors will have their `grad()` populated during a call to `backward()`. + /// To get `grad()` populated for non-leaf Tensors, you can use `retain_grad()`. + /// + /// Example: + /// @code + /// auto a = torch::rand(10, torch::requires_grad()); + /// std::cout << a.is_leaf() << std::endl; // prints `true` + /// + /// auto b = torch::rand(10, torch::requires_grad()).to(torch::kCUDA); + /// std::cout << b.is_leaf() << std::endl; // prints `false` + /// // b was created by the operation that cast a cpu Tensor into a cuda Tensor + /// + /// auto c = torch::rand(10, torch::requires_grad()) + 2; + /// std::cout << c.is_leaf() << std::endl; // prints `false` + /// // c was created by the addition operation + /// + /// auto d = torch::rand(10).cuda(); + /// std::cout << d.is_leaf() << std::endl; // prints `true` + /// // d does not require gradients and so has no operation creating it (that is tracked by the autograd engine) + /// + /// auto e = torch::rand(10).cuda().requires_grad_(); + /// std::cout << e.is_leaf() << std::endl; // prints `true` + /// // e requires gradients and has no operations creating it + /// + /// auto f = torch::rand(10, torch::device(torch::kCUDA).requires_grad(true)); + /// std::cout << f.is_leaf() << std::endl; // prints `true` + /// // f requires grad, has no operation creating it + /// @endcode + + /// \fn void backward(const Tensor & gradient={}, c10::optional retain_graph=c10::nullopt, bool create_graph=false, c10::optional inputs=c10::nullopt) const; + /// + /// Computes the gradient of current tensor with respect to graph leaves. + /// + /// The graph is differentiated using the chain rule. If the tensor is + /// non-scalar (i.e. 
its data has more than one element) and requires + /// gradient, the function additionally requires specifying ``gradient``. + /// It should be a tensor of matching type and location, that contains + /// the gradient of the differentiated function w.r.t. this Tensor. + /// + /// This function accumulates gradients in the leaves - you might need to + /// zero them before calling it. + /// + /// \param gradient Gradient w.r.t. the + /// tensor. If it is a tensor, it will be automatically converted + /// to a Tensor that does not require grad unless ``create_graph`` is True. + /// None values can be specified for scalar Tensors or ones that + /// don't require grad. If a None value would be acceptable then + /// this argument is optional. + /// \param retain_graph If ``false``, the graph used to compute + /// the grads will be freed. Note that in nearly all cases setting + /// this option to True is not needed and often can be worked around + /// in a much more efficient way. Defaults to the value of + /// ``create_graph``. + /// \param create_graph If ``true``, graph of the derivative will + /// be constructed, allowing to compute higher order derivative + /// products. Defaults to ``false``. + /// \param inputs Inputs w.r.t. which the gradient will be accumulated into + /// ``at::Tensor::grad``. All other Tensors will be ignored. If not + /// provided, the gradient is accumulated into all the leaf Tensors + /// that were used to compute the current tensor. + /// When inputs are provided and a given input is not a leaf, + /// the current implementation will call its grad_fn (even though it is not strictly needed to get this gradients). + /// It is an implementation detail on which the user should not rely. + /// See https://github.com/pytorch/pytorch/pull/60521#issuecomment-867061780 for more details. + void backward(const Tensor & gradient={}, c10::optional retain_graph=c10::nullopt, bool create_graph=false, c10::optional inputs=c10::nullopt) const { + // NB: Adding this wrapper to _backward here because we'd like our + // 'backwards' api to accept the 'inputs' argument optionally. Since code gen + // currently does not support optional of TensorList our approach is to replace + // backward in native_functions.yaml with _backward and call it here instead. + if (inputs.has_value()) { + TORCH_CHECK(inputs.value().size() > 0, "'inputs' argument to backward cannot be empty") + this->_backward(inputs.value(), gradient, retain_graph, create_graph); + } else { + this->_backward({}, gradient, retain_graph, create_graph); + } + } + + /// \fn Tensor detach() const; + /// + /// Returns a new Tensor, detached from the current graph. + /// The result will never require gradient. + + /// \fn Tensor & detach_() const; + /// + /// Detaches the Tensor from the graph that created it, making it a leaf. + /// Views cannot be detached in-place. + + /// \fn void retain_grad() const; + /// + /// Enables this Tensor to have their :attr:`grad` populated during + /// :func:`backward`. This is a no-op for leaf tensors. + + /// \fn bool retains_grad() const; + /// + /// Is ``true`` if this Tensor is non-leaf and its :attr:`grad` is enabled to be + /// populated during :func:`backward`, ``false`` otherwise. + + const Tensor& set_requires_grad(bool requires_grad) const { + TensorBase::set_requires_grad(requires_grad); + return *this; + } + + /// Return a mutable reference to the gradient. This is conventionally + /// used as `t.grad() = x` to set a gradient to a completely new tensor. 
+ /// Note that this function work with a non-const Tensor and is not + /// thread safe. + Tensor& mutable_grad() const { + return impl_->mutable_grad(); + } + + /// This function returns an undefined tensor by default and returns a defined tensor + /// the first time a call to `backward()` computes gradients for this Tensor. + /// The attribute will then contain the gradients computed and future calls + /// to `backward()` will accumulate (add) gradients into it. + const Tensor& grad() const { + const Tensor& maybe_grad = impl_->grad(); + if (!is_leaf() && !retains_grad() && !maybe_grad.defined()) { + TORCH_WARN( + "The .grad attribute of a Tensor that is not a leaf Tensor is being accessed. Its .grad " + "attribute won't be populated during autograd.backward(). If you indeed want the .grad " + "field to be populated for a non-leaf Tensor, use .retain_grad() on the non-leaf Tensor. " + "If you access the non-leaf Tensor by mistake, make sure you access the leaf Tensor " + "instead. See github.com/pytorch/pytorch/pull/30531 for more informations."); + } + return maybe_grad; + } + + // The Forward AD API functions below are low level and are not to be used by end + // users who should use the API provided in torch/csrc/autograd.h + + /// This function returns the forward gradient for this Tensor at the given level. + const Tensor& _fw_grad(uint64_t level) const { + return impl_->_fw_grad(level, *this); + } + + /// This function can be used to set the value of the forward grad. + /// Note that the given new_grad might not be used directly if it has different + /// metadata (size/stride/storage offset) compared to this Tensor. In that case, + /// new_grad content will be copied into a new Tensor + void _set_fw_grad(const TensorBase& new_grad, uint64_t level, bool is_inplace_op) const { + impl_->_set_fw_grad(new_grad, *this, level, is_inplace_op); + } + + + // STOP. Thinking of adding a method here, which only makes use + // of other ATen methods? Define it in native_functions.yaml. + + //example + //Tensor * add(Tensor & b); + ${tensor_method_declarations} + + // Special C++ only overloads for std()-like functions (See gh-40287) + // These are needed because int -> bool conversion takes precedence over int -> IntArrayRef + // So, for example std(0) would select the std(unbiased=False) overload + + Tensor var(int dim) const { + return var(IntArrayRef{dim}); + } + + Tensor std(int dim) const { + return std(IntArrayRef{dim}); + } + + // We changed .dtype() to return a TypeMeta in #12766. Ideally, we want the + // at::kDouble and its friends to be TypeMeta's, but that hasn't happened yet. + // Before that change, we make this method to maintain BC for C++ usage like + // `x.to(y.dtype)`. + // TODO: remove following two after at::kDouble and its friends are TypeMeta's. + inline Tensor to(caffe2::TypeMeta type_meta, bool non_blocking=false, bool copy=false) const { + return this->to(/*scalar_type=*/typeMetaToScalarType(type_meta), non_blocking, copy); + } + inline Tensor to(Device device, caffe2::TypeMeta type_meta, bool non_blocking=false, bool copy=false) const { + return this->to(device, /*scalar_type=*/typeMetaToScalarType(type_meta), non_blocking, copy); + } + + template + decltype(auto) m(F func, Args&&... params) const { + return func(*this, std::forward(params)...); + } + + /// NOTE: This is similar to the legacy `.data()` function on `Variable`, and is intended + /// to be used from functions that need to access the `Variable`'s equivalent `Tensor` + /// (i.e. 
`Tensor` that shares the same storage and tensor metadata with the `Variable`). + /// + /// One notable difference with the legacy `.data()` function is that changes to the + /// returned `Tensor`'s tensor metadata (e.g. sizes / strides / storage / storage_offset) + /// will not update the original `Variable`, due to the fact that this function + /// shallow-copies the `Variable`'s underlying TensorImpl. + at::Tensor tensor_data() const { + return TensorBase::tensor_data(); + } + + /// NOTE: `var.variable_data()` in C++ has the same semantics as `tensor.data` + /// in Python, which create a new `Variable` that shares the same storage and + /// tensor metadata with the original `Variable`, but with a completely new + /// autograd history. + /// + /// NOTE: If we change the tensor metadata (e.g. sizes / strides / + /// storage / storage_offset) of a variable created from `var.variable_data()`, those + /// changes will not update the original variable `var`. In `.variable_data()`, we set + /// `allow_tensor_metadata_change_` to false to make such changes explicitly illegal, + /// in order to prevent users from changing metadata of `var.variable_data()` + /// and expecting the original variable `var` to also be updated. + at::Tensor variable_data() const { + return TensorBase::variable_data(); + } + + // Hooks + //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + template + using hook_return_void_t = std::enable_if_t>::value, unsigned>; + template + using hook_return_var_t = std::enable_if_t, Tensor>::value, unsigned>; + + /// Registers a backward hook. + /// + /// The hook will be called every time a gradient with respect to the Tensor is computed. + /// The hook should have one of the following signature: + /// ``` + /// hook(Tensor grad) -> Tensor + /// ``` + /// ``` + /// hook(Tensor grad) -> void + /// ``` + /// The hook should not modify its argument, but it can optionally return a new gradient + /// which will be used in place of `grad`. + /// + /// This function returns the index of the hook in the list which can be used to remove hook. + /// + /// Example: + /// @code + /// auto v = torch::tensor({0., 0., 0.}, torch::requires_grad()); + /// auto h = v.register_hook([](torch::Tensor grad){ return grad * 2; }); // double the gradient + /// v.backward(torch::tensor({1., 2., 3.})); + /// // This prints: + /// // ``` + /// // 2 + /// // 4 + /// // 6 + /// // [ CPUFloatType{3} ] + /// // ``` + /// std::cout << v.grad() << std::endl; + /// v.remove_hook(h); // removes the hook + /// @endcode + template + hook_return_void_t register_hook(T&& hook) const; + template + hook_return_var_t register_hook(T&& hook) const; + + // Variable methods + //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + Tensor data() const { + return TensorBase::data(); + } + + void _backward(TensorList inputs, const c10::optional& gradient, c10::optional keep_graph, bool create_graph) const; + + const Tensor& requires_grad_(bool _requires_grad=true) const { + TensorBase::requires_grad_(_requires_grad); + return *this; + } +}; + +namespace detail { +// Helper creator for Tensor class which doesn't requires the users to pass +// in an intrusive_ptr instead it just converts the argument passed to +// requested intrusive_ptr type. +template +Tensor make_tensor(Args&&... 
args) { + return Tensor(c10::make_intrusive(std::forward(args)...)); +} + +} // namespace detail + +} // namespace at + + +namespace at { +${tensor_method_definitions} +} // namespace at + + +namespace c10 { +template <> +struct MaybeOwnedTraits { + using owned_type = at::Tensor; + using borrow_type = at::Tensor; + + static borrow_type createBorrow(const owned_type& from) { + // NOTE: this can be implemented without the special + // unsafe_borrow_t Tensor constructor as + // + // return borrow_type(c10::intrusive_ptr::reclaim(from.unsafeGetTensorImpl())); + // + // but that hurts inlining due to the nullptr check in the + // Tensor(c10::intrusive_ptr<...>) constructor. We already know + // that from.impl_ isn't null because from is a valid Tensor, so + // we needn't do the check again. (using __builtin_assume can + // avoid this, but wouldn't be portable to MSVC.) + return borrow_type(borrow_type::unsafe_borrow_t{}, from); + } + + static void assignBorrow(borrow_type& lhs, const borrow_type& rhs) { + lhs.unsafeReleaseTensorImpl(); + // See above note: this can be implemented with public API + // similarly to createBorrow(), but that would hurt inlining. + lhs = borrow_type(borrow_type::unsafe_borrow_t{}, rhs); + } + + static void destroyBorrow(borrow_type& toDestroy) { + toDestroy.unsafeReleaseTensorImpl(); // "leak" it, but it was already +0. + } + + static const owned_type& referenceFromBorrow(const borrow_type& borrow) { + return borrow; + } + + static const owned_type* pointerFromBorrow(const borrow_type& borrow) { + return &borrow; + } + + static bool debugBorrowIsValid(const borrow_type& /*borrow*/) { + return true; + } +}; + +template <> +struct ExclusivelyOwnedTraits { + using repr_type = at::Tensor; + using pointer_type = at::Tensor*; + using const_pointer_type = const at::Tensor*; + + static repr_type nullRepr() { + return at::Tensor(); + } + + template + static repr_type createInPlace(Args&&... args) { + return at::Tensor(std::forward(args)...); + } + + static repr_type moveToRepr(at::Tensor&& x) { + return std::move(x); + } + + static void destroyOwned(at::Tensor& x) { + return ExclusivelyOwnedTraits::destroyOwned(x); + } + + static at::Tensor take(at::Tensor& x) { + return std::move(x); + } + + static pointer_type getImpl(repr_type& x) { + return &x; + } + + static const_pointer_type getImpl(const repr_type& x) { + return &x; + } +}; +} // namespace c10 + +namespace at { + +inline c10::MaybeOwned borrow_from_optional_tensor( + const c10::optional& opt) { + return opt.has_value() + ? c10::MaybeOwned::borrowed(*opt) + : c10::MaybeOwned::owned(std::in_place); +} + +inline c10::MaybeOwned Tensor::expect_contiguous(MemoryFormat memory_format) const & { + if (is_contiguous(memory_format)) { + return c10::MaybeOwned::borrowed(*this); + } else { + return c10::MaybeOwned::owned(__dispatch_contiguous(memory_format)); + } +} +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/TensorMethods.cpp b/venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/TensorMethods.cpp new file mode 100644 index 0000000000000000000000000000000000000000..76439040eda45ec34f627298260e7bf081fd728c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/TensorMethods.cpp @@ -0,0 +1,61 @@ +#include +#include + +#include + +namespace at { + +namespace { + +// Verifies the requested type is the same as the Tensor's type. 
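+// (The quantized-type clause below additionally lets a caller read a quantized
+// tensor through its underlying integer representation: for example,
+// data_ptr<uint8_t>() on a kQUInt8 tensor is accepted because
+// toUnderlying(kQUInt8) == ScalarType::Byte.)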
+void check_type(const TensorBase& tensor, ScalarType type, c10::string_view type_name) { + TORCH_CHECK( + tensor.scalar_type() == type + || (isQIntType(tensor.scalar_type()) + && toUnderlying(tensor.scalar_type()) == type), + "expected scalar type ", type_name, " but found ", tensor.scalar_type()); +} + +} // namespace + +#define DEFINE_CAST(T, name) \ + template <> \ + TORCH_API const T* TensorBase::const_data_ptr() const { \ + check_type(*this, ScalarType::name, #name); \ + return this->unsafeGetTensorImpl()->data_ptr_impl(); \ + } \ + \ + template <> \ + TORCH_API const T* TensorBase::const_data_ptr() const { \ + check_type(*this, ScalarType::name, #name); \ + return this->unsafeGetTensorImpl()->data_ptr_impl>(); \ + } \ + \ + template <> \ + TORCH_API T* TensorBase::mutable_data_ptr() const { \ + check_type(*this, ScalarType::name, #name); \ + return this->unsafeGetTensorImpl()->mutable_data_ptr_impl(); \ + } \ + \ + template <> \ + TORCH_API T* TensorBase::data_ptr() const { \ + return mutable_data_ptr(); \ + } \ + + AT_FORALL_SCALAR_TYPES_WITH_COMPLEX(DEFINE_CAST) + AT_FORALL_QINT_TYPES(DEFINE_CAST) + DEFINE_CAST(uint16_t, UInt16) + DEFINE_CAST(uint32_t, UInt32) + DEFINE_CAST(uint64_t, UInt64) + #undef DEFINE_CAST + + #define DEFINE_ITEM(T, name) \ + template <> \ + TORCH_API T Tensor::item() const { \ + return item().to##name(); \ + } + + AT_FORALL_SCALAR_TYPES_WITH_COMPLEX(DEFINE_ITEM) + #undef DEFINE_ITEM + + } //namespace at diff --git a/venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/UfuncCUDA.cu b/venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/UfuncCUDA.cu new file mode 100644 index 0000000000000000000000000000000000000000..e75d82d9cc84bd8fddfd303f610412e5d0a98729 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/UfuncCUDA.cu @@ -0,0 +1,21 @@ +#define TORCH_ASSERT_NO_OPERATORS + +#include +#include +#include +#include +${cuda_headers} + +namespace at { + +// NB: this is explicitly copied here (via codegen) rather than +// included via NativeFunctions.h to avoid recompiling this file when +// NativeFunctions.h changes +namespace meta { +${meta_declaration} +} + +namespace native { +${native_declaration} +${native_definitions} +}} // namespace at::native diff --git a/venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/UnboxingFunctions.cpp b/venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/UnboxingFunctions.cpp new file mode 100644 index 0000000000000000000000000000000000000000..86c13235d8623964d734e743f5f15cf68a8df63c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/UnboxingFunctions.cpp @@ -0,0 +1,35 @@ +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +namespace at { +namespace unboxing { + +using ::c10::fmap; +using ::c10::filter; +using torch::jit::peek; +using torch::jit::drop; +using torch::jit::pack; +using torch::jit::pop; + +// Generated function declaration +${definitions} + +} // namespace unboxing +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/UnboxingFunctions.h b/venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/UnboxingFunctions.h new file mode 100644 index 0000000000000000000000000000000000000000..a65469a9b0123cbfd4075ff3c263276aa47f137f --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/UnboxingFunctions.h @@ -0,0 +1,32 @@ +// ${generated_comment} + +// Generated by tools/jit/gen_unboxing.py. This file declares code generated boxed C++ functions for operators, +// base off of native_functions.yaml (or similar yaml file with the same syntax). The definition of such a boxed +// function will pop out IValues from the stack then convert them into the correct C++ types based on given schema. This +// unboxing logic is an alternative to template-based metaprogramming unboxing. + +#pragma once + +#include +namespace at { +namespace unboxing { +namespace { + +template +std::array as_array(const c10::List& list) { + std::array res; + AT_ASSERT(list.size() == N); + std::vector vec; + for (c10::IValue elem : list) { + vec.push_back(elem.to()); + } + std::copy(vec.begin(), vec.end(), res.begin()); + return res; +} +} // namespace +using Stack = std::vector; +// Generated function declaration +${declarations} + +} // namespace unboxing +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/aten_interned_strings.h b/venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/aten_interned_strings.h new file mode 100644 index 0000000000000000000000000000000000000000..326d4622334a776f4f1f94fb49a70f2c53c7e6eb --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/aten_interned_strings.h @@ -0,0 +1,22 @@ +#pragma once + +// ${generated_comment} + +#if defined(TORCH_ASSERT_NO_OPERATORS) || defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS) +#error This change adds a dependency on native_functions.yaml, \ + meaning the file will need to be re-compiled every time an operator \ + is changed or added. Consider if including for \ + the c10::Symbol class would be sufficient, or if your change would be \ + better placed in another file. +#endif + +// ATen symbols correspond exactly to operators defined in ATen. Every +// symbol here corresponds exactly to an ATen operation defined in +// native_functions.yaml; attributes are in one-to-one correspondence +// with their ATen name. 
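The `FORALL_ATEN_BASE_SYMBOLS(_)` and `FORALL_ATTR_BASE_SYMBOLS(_)` lists that this template emits next are X-macros: every entry is an invocation of the caller-supplied `_` macro, so downstream headers can expand one generated list into enum members, `c10::Symbol` constants, string tables, and so on. A minimal sketch of that expansion pattern with made-up symbol names (the real lists are generated from native_functions.yaml, not hand-written like this):

// Hypothetical stand-in for a generated FORALL_* list.
#define FORALL_EXAMPLE_SYMBOLS(_) \
  _(add)                          \
  _(mul)                          \
  _(relu)

// One consumer expands the list into an enum...
enum class ExampleSymbol {
#define DEFINE_ENUM(name) name,
  FORALL_EXAMPLE_SYMBOLS(DEFINE_ENUM)
#undef DEFINE_ENUM
};

// ...another expands the same list into printable names.
constexpr const char* kExampleSymbolNames[] = {
#define DEFINE_NAME(name) #name,
  FORALL_EXAMPLE_SYMBOLS(DEFINE_NAME)
#undef DEFINE_NAME
};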
+ +#define FORALL_ATEN_BASE_SYMBOLS(_) \ +${aten_symbols} + +#define FORALL_ATTR_BASE_SYMBOLS(_) \ +${attr_symbols} diff --git a/venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/enum_tag.h b/venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/enum_tag.h new file mode 100644 index 0000000000000000000000000000000000000000..1320fbc28ab8f7d72655816292f49a4c9a9b727d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/enum_tag.h @@ -0,0 +1,10 @@ +#pragma once + +// ${generated_comment} + +namespace at { + // Enum of valid tags obtained from the entries in tags.yaml + enum class Tag { + ${enum_of_valid_tags} + }; +} diff --git a/venv/lib/python3.10/site-packages/typing_extensions-4.11.0.dist-info/INSTALLER b/venv/lib/python3.10/site-packages/typing_extensions-4.11.0.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/venv/lib/python3.10/site-packages/typing_extensions-4.11.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/lib/python3.10/site-packages/typing_extensions-4.11.0.dist-info/LICENSE b/venv/lib/python3.10/site-packages/typing_extensions-4.11.0.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..f26bcf4d2de6eb136e31006ca3ab447d5e488adf --- /dev/null +++ b/venv/lib/python3.10/site-packages/typing_extensions-4.11.0.dist-info/LICENSE @@ -0,0 +1,279 @@ +A. HISTORY OF THE SOFTWARE +========================== + +Python was created in the early 1990s by Guido van Rossum at Stichting +Mathematisch Centrum (CWI, see https://www.cwi.nl) in the Netherlands +as a successor of a language called ABC. Guido remains Python's +principal author, although it includes many contributions from others. + +In 1995, Guido continued his work on Python at the Corporation for +National Research Initiatives (CNRI, see https://www.cnri.reston.va.us) +in Reston, Virginia where he released several versions of the +software. + +In May 2000, Guido and the Python core development team moved to +BeOpen.com to form the BeOpen PythonLabs team. In October of the same +year, the PythonLabs team moved to Digital Creations, which became +Zope Corporation. In 2001, the Python Software Foundation (PSF, see +https://www.python.org/psf/) was formed, a non-profit organization +created specifically to own Python-related Intellectual Property. +Zope Corporation was a sponsoring member of the PSF. + +All Python releases are Open Source (see https://opensource.org for +the Open Source Definition). Historically, most, but not all, Python +releases have also been GPL-compatible; the table below summarizes +the various releases. + + Release Derived Year Owner GPL- + from compatible? (1) + + 0.9.0 thru 1.2 1991-1995 CWI yes + 1.3 thru 1.5.2 1.2 1995-1999 CNRI yes + 1.6 1.5.2 2000 CNRI no + 2.0 1.6 2000 BeOpen.com no + 1.6.1 1.6 2001 CNRI yes (2) + 2.1 2.0+1.6.1 2001 PSF no + 2.0.1 2.0+1.6.1 2001 PSF yes + 2.1.1 2.1+2.0.1 2001 PSF yes + 2.1.2 2.1.1 2002 PSF yes + 2.1.3 2.1.2 2002 PSF yes + 2.2 and above 2.1.1 2001-now PSF yes + +Footnotes: + +(1) GPL-compatible doesn't mean that we're distributing Python under + the GPL. All Python licenses, unlike the GPL, let you distribute + a modified version without making your changes open source. The + GPL-compatible licenses make it possible to combine Python with + other software that is released under the GPL; the others don't. 
+ +(2) According to Richard Stallman, 1.6.1 is not GPL-compatible, + because its license has a choice of law clause. According to + CNRI, however, Stallman's lawyer has told CNRI's lawyer that 1.6.1 + is "not incompatible" with the GPL. + +Thanks to the many outside volunteers who have worked under Guido's +direction to make these releases possible. + + +B. TERMS AND CONDITIONS FOR ACCESSING OR OTHERWISE USING PYTHON +=============================================================== + +Python software and documentation are licensed under the +Python Software Foundation License Version 2. + +Starting with Python 3.8.6, examples, recipes, and other code in +the documentation are dual licensed under the PSF License Version 2 +and the Zero-Clause BSD license. + +Some software incorporated into Python is under different licenses. +The licenses are listed with code falling under that license. + + +PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 +-------------------------------------------- + +1. This LICENSE AGREEMENT is between the Python Software Foundation +("PSF"), and the Individual or Organization ("Licensee") accessing and +otherwise using this software ("Python") in source or binary form and +its associated documentation. + +2. Subject to the terms and conditions of this License Agreement, PSF hereby +grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, +analyze, test, perform and/or display publicly, prepare derivative works, +distribute, and otherwise use Python alone or in any derivative version, +provided, however, that PSF's License Agreement and PSF's notice of copyright, +i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, +2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021, 2022, 2023 Python Software Foundation; +All Rights Reserved" are retained in Python alone or in any derivative version +prepared by Licensee. + +3. In the event Licensee prepares a derivative work that is based on +or incorporates Python or any part thereof, and wants to make +the derivative work available to others as provided herein, then +Licensee hereby agrees to include in any such work a brief summary of +the changes made to Python. + +4. PSF is making Python available to Licensee on an "AS IS" +basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR +IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND +DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS +FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT +INFRINGE ANY THIRD PARTY RIGHTS. + +5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON +FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS +A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, +OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + +6. This License Agreement will automatically terminate upon a material +breach of its terms and conditions. + +7. Nothing in this License Agreement shall be deemed to create any +relationship of agency, partnership, or joint venture between PSF and +Licensee. This License Agreement does not grant permission to use PSF +trademarks or trade name in a trademark sense to endorse or promote +products or services of Licensee, or any third party. + +8. By copying, installing or otherwise using Python, Licensee +agrees to be bound by the terms and conditions of this License +Agreement. 
+ + +BEOPEN.COM LICENSE AGREEMENT FOR PYTHON 2.0 +------------------------------------------- + +BEOPEN PYTHON OPEN SOURCE LICENSE AGREEMENT VERSION 1 + +1. This LICENSE AGREEMENT is between BeOpen.com ("BeOpen"), having an +office at 160 Saratoga Avenue, Santa Clara, CA 95051, and the +Individual or Organization ("Licensee") accessing and otherwise using +this software in source or binary form and its associated +documentation ("the Software"). + +2. Subject to the terms and conditions of this BeOpen Python License +Agreement, BeOpen hereby grants Licensee a non-exclusive, +royalty-free, world-wide license to reproduce, analyze, test, perform +and/or display publicly, prepare derivative works, distribute, and +otherwise use the Software alone or in any derivative version, +provided, however, that the BeOpen Python License is retained in the +Software, alone or in any derivative version prepared by Licensee. + +3. BeOpen is making the Software available to Licensee on an "AS IS" +basis. BEOPEN MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR +IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, BEOPEN MAKES NO AND +DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS +FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF THE SOFTWARE WILL NOT +INFRINGE ANY THIRD PARTY RIGHTS. + +4. BEOPEN SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF THE +SOFTWARE FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS +AS A RESULT OF USING, MODIFYING OR DISTRIBUTING THE SOFTWARE, OR ANY +DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + +5. This License Agreement will automatically terminate upon a material +breach of its terms and conditions. + +6. This License Agreement shall be governed by and interpreted in all +respects by the law of the State of California, excluding conflict of +law provisions. Nothing in this License Agreement shall be deemed to +create any relationship of agency, partnership, or joint venture +between BeOpen and Licensee. This License Agreement does not grant +permission to use BeOpen trademarks or trade names in a trademark +sense to endorse or promote products or services of Licensee, or any +third party. As an exception, the "BeOpen Python" logos available at +http://www.pythonlabs.com/logos.html may be used according to the +permissions granted on that web page. + +7. By copying, installing or otherwise using the software, Licensee +agrees to be bound by the terms and conditions of this License +Agreement. + + +CNRI LICENSE AGREEMENT FOR PYTHON 1.6.1 +--------------------------------------- + +1. This LICENSE AGREEMENT is between the Corporation for National +Research Initiatives, having an office at 1895 Preston White Drive, +Reston, VA 20191 ("CNRI"), and the Individual or Organization +("Licensee") accessing and otherwise using Python 1.6.1 software in +source or binary form and its associated documentation. + +2. Subject to the terms and conditions of this License Agreement, CNRI +hereby grants Licensee a nonexclusive, royalty-free, world-wide +license to reproduce, analyze, test, perform and/or display publicly, +prepare derivative works, distribute, and otherwise use Python 1.6.1 +alone or in any derivative version, provided, however, that CNRI's +License Agreement and CNRI's notice of copyright, i.e., "Copyright (c) +1995-2001 Corporation for National Research Initiatives; All Rights +Reserved" are retained in Python 1.6.1 alone or in any derivative +version prepared by Licensee. 
Alternately, in lieu of CNRI's License +Agreement, Licensee may substitute the following text (omitting the +quotes): "Python 1.6.1 is made available subject to the terms and +conditions in CNRI's License Agreement. This Agreement together with +Python 1.6.1 may be located on the internet using the following +unique, persistent identifier (known as a handle): 1895.22/1013. This +Agreement may also be obtained from a proxy server on the internet +using the following URL: http://hdl.handle.net/1895.22/1013". + +3. In the event Licensee prepares a derivative work that is based on +or incorporates Python 1.6.1 or any part thereof, and wants to make +the derivative work available to others as provided herein, then +Licensee hereby agrees to include in any such work a brief summary of +the changes made to Python 1.6.1. + +4. CNRI is making Python 1.6.1 available to Licensee on an "AS IS" +basis. CNRI MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR +IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, CNRI MAKES NO AND +DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS +FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON 1.6.1 WILL NOT +INFRINGE ANY THIRD PARTY RIGHTS. + +5. CNRI SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON +1.6.1 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS +A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 1.6.1, +OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + +6. This License Agreement will automatically terminate upon a material +breach of its terms and conditions. + +7. This License Agreement shall be governed by the federal +intellectual property law of the United States, including without +limitation the federal copyright law, and, to the extent such +U.S. federal law does not apply, by the law of the Commonwealth of +Virginia, excluding Virginia's conflict of law provisions. +Notwithstanding the foregoing, with regard to derivative works based +on Python 1.6.1 that incorporate non-separable material that was +previously distributed under the GNU General Public License (GPL), the +law of the Commonwealth of Virginia shall govern this License +Agreement only as to issues arising under or with respect to +Paragraphs 4, 5, and 7 of this License Agreement. Nothing in this +License Agreement shall be deemed to create any relationship of +agency, partnership, or joint venture between CNRI and Licensee. This +License Agreement does not grant permission to use CNRI trademarks or +trade name in a trademark sense to endorse or promote products or +services of Licensee, or any third party. + +8. By clicking on the "ACCEPT" button where indicated, or by copying, +installing or otherwise using Python 1.6.1, Licensee agrees to be +bound by the terms and conditions of this License Agreement. + + ACCEPT + + +CWI LICENSE AGREEMENT FOR PYTHON 0.9.0 THROUGH 1.2 +-------------------------------------------------- + +Copyright (c) 1991 - 1995, Stichting Mathematisch Centrum Amsterdam, +The Netherlands. All rights reserved. 
+ +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appear in all copies and that +both that copyright notice and this permission notice appear in +supporting documentation, and that the name of Stichting Mathematisch +Centrum or CWI not be used in advertising or publicity pertaining to +distribution of the software without specific, written prior +permission. + +STICHTING MATHEMATISCH CENTRUM DISCLAIMS ALL WARRANTIES WITH REGARD TO +THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND +FITNESS, IN NO EVENT SHALL STICHTING MATHEMATISCH CENTRUM BE LIABLE +FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT +OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +ZERO-CLAUSE BSD LICENSE FOR CODE IN THE PYTHON DOCUMENTATION +---------------------------------------------------------------------- + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH +REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, +INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM +LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR +OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +PERFORMANCE OF THIS SOFTWARE. diff --git a/venv/lib/python3.10/site-packages/typing_extensions-4.11.0.dist-info/METADATA b/venv/lib/python3.10/site-packages/typing_extensions-4.11.0.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..5089b4ddde4835e1dd7dd8ef6ac52f27398c0ae5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/typing_extensions-4.11.0.dist-info/METADATA @@ -0,0 +1,66 @@ +Metadata-Version: 2.1 +Name: typing_extensions +Version: 4.11.0 +Summary: Backported and Experimental Type Hints for Python 3.8+ +Keywords: annotations,backport,checker,checking,function,hinting,hints,type,typechecking,typehinting,typehints,typing +Author-email: "Guido van Rossum, Jukka Lehtosalo, Łukasz Langa, Michael Lee" +Requires-Python: >=3.8 +Description-Content-Type: text/markdown +Classifier: Development Status :: 5 - Production/Stable +Classifier: Environment :: Console +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: Python Software Foundation License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Topic :: Software Development +Project-URL: Bug Tracker, https://github.com/python/typing_extensions/issues +Project-URL: Changes, https://github.com/python/typing_extensions/blob/main/CHANGELOG.md +Project-URL: Documentation, https://typing-extensions.readthedocs.io/ +Project-URL: Home, https://github.com/python/typing_extensions +Project-URL: Q & A, 
https://github.com/python/typing/discussions +Project-URL: Repository, https://github.com/python/typing_extensions + +# Typing Extensions + +[![Chat at https://gitter.im/python/typing](https://badges.gitter.im/python/typing.svg)](https://gitter.im/python/typing) + +[Documentation](https://typing-extensions.readthedocs.io/en/latest/#) – +[PyPI](https://pypi.org/project/typing-extensions/) + +## Overview + +The `typing_extensions` module serves two related purposes: + +- Enable use of new type system features on older Python versions. For example, + `typing.TypeGuard` is new in Python 3.10, but `typing_extensions` allows + users on previous Python versions to use it too. +- Enable experimentation with new type system PEPs before they are accepted and + added to the `typing` module. + +`typing_extensions` is treated specially by static type checkers such as +mypy and pyright. Objects defined in `typing_extensions` are treated the same +way as equivalent forms in `typing`. + +`typing_extensions` uses +[Semantic Versioning](https://semver.org/). The +major version will be incremented only for backwards-incompatible changes. +Therefore, it's safe to depend +on `typing_extensions` like this: `typing_extensions >=x.y, <(x+1)`, +where `x.y` is the first version that includes all features you need. + +## Included items + +See [the documentation](https://typing-extensions.readthedocs.io/en/latest/#) for a +complete listing of module contents. + +## Contributing + +See [CONTRIBUTING.md](https://github.com/python/typing_extensions/blob/main/CONTRIBUTING.md) +for how to contribute to `typing_extensions`. + diff --git a/venv/lib/python3.10/site-packages/typing_extensions-4.11.0.dist-info/RECORD b/venv/lib/python3.10/site-packages/typing_extensions-4.11.0.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..b06fbb5eeff1415d272c12b9800824a544d3618d --- /dev/null +++ b/venv/lib/python3.10/site-packages/typing_extensions-4.11.0.dist-info/RECORD @@ -0,0 +1,7 @@ +__pycache__/typing_extensions.cpython-310.pyc,, +typing_extensions-4.11.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +typing_extensions-4.11.0.dist-info/LICENSE,sha256=Oy-B_iHRgcSZxZolbI4ZaEVdZonSaaqFNzv7avQdo78,13936 +typing_extensions-4.11.0.dist-info/METADATA,sha256=rhGXH-iuwULXNZDAtjXcjwU2jcxotMTkFiUg0OqYXzI,2967 +typing_extensions-4.11.0.dist-info/RECORD,, +typing_extensions-4.11.0.dist-info/WHEEL,sha256=EZbGkh7Ie4PoZfRQ8I0ZuP9VklN_TvcZ6DSE5Uar4z4,81 +typing_extensions.py,sha256=mvJ5zBIuLMsaVGHZjRLsGh8ouwg5Kr3uNbWCHX8-Ao0,122293 diff --git a/venv/lib/python3.10/site-packages/typing_extensions-4.11.0.dist-info/WHEEL b/venv/lib/python3.10/site-packages/typing_extensions-4.11.0.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..3b5e64b5e6c4a210201d1676a891fd57b15cda99 --- /dev/null +++ b/venv/lib/python3.10/site-packages/typing_extensions-4.11.0.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: flit 3.9.0 +Root-Is-Purelib: true +Tag: py3-none-any diff --git a/venv/lib/python3.10/site-packages/urllib3-2.2.1.dist-info/INSTALLER b/venv/lib/python3.10/site-packages/urllib3-2.2.1.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/venv/lib/python3.10/site-packages/urllib3-2.2.1.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/lib/python3.10/site-packages/urllib3-2.2.1.dist-info/RECORD 
b/venv/lib/python3.10/site-packages/urllib3-2.2.1.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..2f90c8a4d9b3dfa426f453fdf3f47ade1614e09e --- /dev/null +++ b/venv/lib/python3.10/site-packages/urllib3-2.2.1.dist-info/RECORD @@ -0,0 +1,75 @@ +urllib3-2.2.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +urllib3-2.2.1.dist-info/METADATA,sha256=uROmjQwfAbwRYjV9PMdc5JF5NA3kRkpoKafPkNzybfc,6434 +urllib3-2.2.1.dist-info/RECORD,, +urllib3-2.2.1.dist-info/WHEEL,sha256=TJPnKdtrSue7xZ_AVGkp9YXcvDrobsjBds1du3Nx6dc,87 +urllib3-2.2.1.dist-info/licenses/LICENSE.txt,sha256=Ew46ZNX91dCWp1JpRjSn2d8oRGnehuVzIQAmgEHj1oY,1093 +urllib3/__init__.py,sha256=JMo1tg1nIV1AeJ2vENC_Txfl0e5h6Gzl9DGVk1rWRbo,6979 +urllib3/__pycache__/__init__.cpython-310.pyc,, +urllib3/__pycache__/_base_connection.cpython-310.pyc,, +urllib3/__pycache__/_collections.cpython-310.pyc,, +urllib3/__pycache__/_request_methods.cpython-310.pyc,, +urllib3/__pycache__/_version.cpython-310.pyc,, +urllib3/__pycache__/connection.cpython-310.pyc,, +urllib3/__pycache__/connectionpool.cpython-310.pyc,, +urllib3/__pycache__/exceptions.cpython-310.pyc,, +urllib3/__pycache__/fields.cpython-310.pyc,, +urllib3/__pycache__/filepost.cpython-310.pyc,, +urllib3/__pycache__/http2.cpython-310.pyc,, +urllib3/__pycache__/poolmanager.cpython-310.pyc,, +urllib3/__pycache__/response.cpython-310.pyc,, +urllib3/_base_connection.py,sha256=p-DOG_Me7-sJXO1R9VgDpNmdVU_kIS8VtaC7ptEllA0,5640 +urllib3/_collections.py,sha256=vzKA-7X-9resOamEWq52uV1nHshChjbYDvz47H0mMjw,17400 +urllib3/_request_methods.py,sha256=ucEpHQyQf06b9o1RxKLkCpzGH0ct-v7X2xGpU6rmmlo,9984 +urllib3/_version.py,sha256=12idLAcGmrAURPX52rGioBo33oQ__-ENJEdeqHvUUZg,98 +urllib3/connection.py,sha256=zFgaaoqrICsl7-kBp-_4va9m82sYhioAuy4-4iDpK0I,34704 +urllib3/connectionpool.py,sha256=XjTfYowLwN5ZzRMO41_OTbGNX4ANifgYVpWsVMRuC00,43556 +urllib3/contrib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +urllib3/contrib/__pycache__/__init__.cpython-310.pyc,, +urllib3/contrib/__pycache__/pyopenssl.cpython-310.pyc,, +urllib3/contrib/__pycache__/socks.cpython-310.pyc,, +urllib3/contrib/emscripten/__init__.py,sha256=u6KNgzjlFZbuAAXa_ybCR7gQ71VJESnF-IIdDA73brw,733 +urllib3/contrib/emscripten/__pycache__/__init__.cpython-310.pyc,, +urllib3/contrib/emscripten/__pycache__/connection.cpython-310.pyc,, +urllib3/contrib/emscripten/__pycache__/fetch.cpython-310.pyc,, +urllib3/contrib/emscripten/__pycache__/request.cpython-310.pyc,, +urllib3/contrib/emscripten/__pycache__/response.cpython-310.pyc,, +urllib3/contrib/emscripten/connection.py,sha256=kaBe2tWt7Yy9vNUFRBV7CSyDnfhCYILGxju9KTZj8Sw,8755 +urllib3/contrib/emscripten/emscripten_fetch_worker.js,sha256=CDfYF_9CDobtx2lGidyJ1zjDEvwNT5F-dchmVWXDh0E,3655 +urllib3/contrib/emscripten/fetch.py,sha256=ymwJlHBBuw6WTpKgPHpdmmrNBxlsr75HqoD4Rn27YXk,14131 +urllib3/contrib/emscripten/request.py,sha256=mL28szy1KvE3NJhWor5jNmarp8gwplDU-7gwGZY5g0Q,566 +urllib3/contrib/emscripten/response.py,sha256=wIDmdJ4doFWqLl5s86l9n0V70gFjQ2HWaPgz69jM52E,9546 +urllib3/contrib/pyopenssl.py,sha256=X31eCYGwB09EkAHX8RhDKC0X0Ki7d0cCVWoMJZUM5bQ,19161 +urllib3/contrib/socks.py,sha256=gFS2-zOw4_vLGpUvExOf3fNVT8liz6vhM2t6lBPn3CY,7572 +urllib3/exceptions.py,sha256=RDaiudtR7rqbVKTKpLSgZBBtwaIqV7eZtervZV_mZag,9393 +urllib3/fields.py,sha256=8vi0PeRo_pE5chPmJA07LZtMkVls4UrBS1k2xM506jM,10843 +urllib3/filepost.py,sha256=-9qJT11cNGjO9dqnI20-oErZuTvNaM18xZZPCjZSbOE,2395 +urllib3/http2.py,sha256=4QQcjTM9UYOQZe0r8KnA8anU9ST4p_s3SB3gRTueyPc,7480 
+urllib3/poolmanager.py,sha256=fcC3OwjFKxha06NsOORwbZOzrVt1pyY-bNCbKiqC0l8,22935 +urllib3/py.typed,sha256=UaCuPFa3H8UAakbt-5G8SPacldTOGvJv18pPjUJ5gDY,93 +urllib3/response.py,sha256=lmvseToQbkLXuFyA3jcSyCPjTgSfa6YPA4xUhVqq8QI,43874 +urllib3/util/__init__.py,sha256=-qeS0QceivazvBEKDNFCAI-6ACcdDOE4TMvo7SLNlAQ,1001 +urllib3/util/__pycache__/__init__.cpython-310.pyc,, +urllib3/util/__pycache__/connection.cpython-310.pyc,, +urllib3/util/__pycache__/proxy.cpython-310.pyc,, +urllib3/util/__pycache__/request.cpython-310.pyc,, +urllib3/util/__pycache__/response.cpython-310.pyc,, +urllib3/util/__pycache__/retry.cpython-310.pyc,, +urllib3/util/__pycache__/ssl_.cpython-310.pyc,, +urllib3/util/__pycache__/ssl_match_hostname.cpython-310.pyc,, +urllib3/util/__pycache__/ssltransport.cpython-310.pyc,, +urllib3/util/__pycache__/timeout.cpython-310.pyc,, +urllib3/util/__pycache__/url.cpython-310.pyc,, +urllib3/util/__pycache__/util.cpython-310.pyc,, +urllib3/util/__pycache__/wait.cpython-310.pyc,, +urllib3/util/connection.py,sha256=QeUUEuNmhznpuKNPL-B0IVOkMdMCu8oJX62OC0Vpzug,4462 +urllib3/util/proxy.py,sha256=seP8-Q5B6bB0dMtwPj-YcZZQ30vHuLqRu-tI0JZ2fzs,1148 +urllib3/util/request.py,sha256=PQnBmKUHMQ0hQQ41uhbLNAeA24ke60m6zeiwfwocpGo,8102 +urllib3/util/response.py,sha256=vQE639uoEhj1vpjEdxu5lNIhJCSUZkd7pqllUI0BZOA,3374 +urllib3/util/retry.py,sha256=WB-7x1m7fQH_-Qqtrk2OGvz93GvBTxc-pRn8Vf3p4mg,18384 +urllib3/util/ssl_.py,sha256=FeymdS68RggEROwMB9VLGSqLHq2hRUKnIbQC_bCpGJI,19109 +urllib3/util/ssl_match_hostname.py,sha256=gaWqixoYtQ_GKO8fcRGFj3VXeMoqyxQQuUTPgWeiL_M,5812 +urllib3/util/ssltransport.py,sha256=SF__JQXVcHBQniFJZp3P9q-UeHM310WVwcBwqT9dCLE,9034 +urllib3/util/timeout.py,sha256=4eT1FVeZZU7h7mYD1Jq2OXNe4fxekdNvhoWUkZusRpA,10346 +urllib3/util/url.py,sha256=wHORhp80RAXyTlAIkTqLFzSrkU7J34ZDxX-tN65MBZk,15213 +urllib3/util/util.py,sha256=j3lbZK1jPyiwD34T8IgJzdWEZVT-4E-0vYIJi9UjeNA,1146 +urllib3/util/wait.py,sha256=_ph8IrUR3sqPqi0OopQgJUlH4wzkGeM5CiyA7XGGtmI,4423 diff --git a/venv/lib/python3.10/site-packages/urllib3-2.2.1.dist-info/WHEEL b/venv/lib/python3.10/site-packages/urllib3-2.2.1.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..5998f3aab327ceb8cb346647a3461e220359aebf --- /dev/null +++ b/venv/lib/python3.10/site-packages/urllib3-2.2.1.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: hatchling 1.21.1 +Root-Is-Purelib: true +Tag: py3-none-any